僅使用了數字 0-9。
#coding:utf-8
from captcha.image import ImageCaptcha # pip install captcha
import numpy as np
from PIL import Image
import random,sys
# Characters that may appear in a captcha; digits only, no letters or CJK.
number = ['0','1','2','3','4','5','6','7','8','9']
#alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
#ALPHABET = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
# Captchas usually ignore case; default length is 4 characters.
def random_captcha_text(char_set=number, captcha_size=4):
    """Return a list of `captcha_size` characters drawn at random from `char_set`."""
    return [random.choice(char_set) for _ in range(captcha_size)]
# Generate one captcha image for a random 4-digit text and write it to disk.
def gen_captcha_text_and_image():
    """Create a captcha image named after its text under captcha/images/.

    Fixes vs. original: the result of image.generate() was bound but never
    used (ImageCaptcha.write regenerates the image internally), so the dead
    call is removed; the output directory is created if missing so the first
    run does not fail with IOError.

    NOTE(review): ImageCaptcha.write() emits PNG data even though the file
    is named .jpg -- confirm downstream readers do not depend on JPEG format.
    """
    import os
    image = ImageCaptcha()
    # Join the random character list into a single string, e.g. '3079'.
    captcha_text = ''.join(random_captcha_text())
    out_dir = 'captcha/images'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Write the rendered captcha; file name doubles as the label.
    image.write(captcha_text, 'captcha/images/' + captcha_text + '.jpg')
# Total number of captcha images to generate.
num = 5000

if __name__ == '__main__':
    done = 0
    while done < num:
        gen_captcha_text_and_image()
        done += 1
        # Overwrite the same console line with a running progress counter.
        sys.stdout.write('\r>> Creating image %d/%d' % (done, num))
        sys.stdout.flush()
    sys.stdout.write('\n')
    sys.stdout.flush()
    print('生成完畢')
複製代碼
import tensorflow as tf
import os
import random
import math
import sys
from PIL import Image
import numpy as np
# Number of images held out for the test split.
_NUM_TEST = 500
# Seed so the train/test shuffle is reproducible across runs.
_RANDOM_SEED = 0
# Directory containing the generated captcha images.
DATASET_DIR = "E:/tf3/captcha/images/"
# Directory where the .tfrecords files are written.
TFRECORD_DIR = "E:/tf3/captcha/"
# Check whether the tfrecord files already exist.
def _dataset_exists(dataset_dir):
    """Return True iff both train.tfrecords and test.tfrecords exist in `dataset_dir`."""
    return all(
        tf.gfile.Exists(os.path.join(dataset_dir, split_name + '.tfrecords'))
        for split_name in ('train', 'test'))
#獲取全部的驗證碼圖片
def _get_filenames_and_classes(dataset_dir):
photo_filenames = []
for filename in os.listdir(dataset_dir):
path = os.path.join(dataset_dir, filename)
photo_filenames.append(path)
return photo_filenames
def int64_feature(values):
    """Wrap an int (or a tuple/list of ints) in a tf.train.Feature."""
    wrapped = values if isinstance(values, (tuple, list)) else [values]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=wrapped))
def bytes_feature(values):
    """Wrap a single bytes value in a tf.train.Feature."""
    byte_list = tf.train.BytesList(value=[values])
    return tf.train.Feature(bytes_list=byte_list)
def image_to_tfexample(image_data, label0, label1, label2, label3):
    """Build a tf.train.Example holding one image and its four digit labels."""
    feature = {'image': bytes_feature(image_data)}
    # Keys 'label0'..'label3' match what read_and_decode parses back out.
    for idx, label in enumerate((label0, label1, label2, label3)):
        feature['label%d' % idx] = int64_feature(label)
    return tf.train.Example(features=tf.train.Features(feature=feature))
# Convert one split of the dataset to tfrecord format.
def _convert_dataset(split_name, filenames, dataset_dir):
    """Serialize `filenames` into TFRECORD_DIR/<split_name>.tfrecords.

    Each image is resized to 224x224, converted to 8-bit grayscale, and
    stored together with its four digit labels taken from the file name.

    Fixes vs. original: an unused `tf.Session()` wrapper has been removed
    (no TF ops are executed here); the label is extracted with
    os.path.basename instead of splitting on '/' (robust on Windows paths);
    ValueError is caught alongside IOError so a file whose first four
    characters are not digits is skipped instead of crashing.
    """
    assert split_name in ['train', 'test']
    # Path and name of the output tfrecord file.
    output_filename = os.path.join(TFRECORD_DIR, split_name + '.tfrecords')
    with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
        for i, filename in enumerate(filenames):
            try:
                #sys.stdout.write('\r>> Converting image %d/%d' % (i+1, len(filenames)))
                #sys.stdout.flush()
                # Read the image from disk.
                image_data = Image.open(filename)
                # Resize to the input size expected by the model.
                image_data = image_data.resize((224, 224))
                # Grayscale, then serialize the raw pixel bytes.
                image_data = np.array(image_data.convert('L'))
                image_data = image_data.tobytes()
                # First four characters of the file name are the labels.
                labels = os.path.basename(filename)[0:4]
                num_labels = [int(c) for c in labels]
                # Build the protocol-buffer example and write it out.
                example = image_to_tfexample(image_data, num_labels[0],
                                             num_labels[1], num_labels[2],
                                             num_labels[3])
                tfrecord_writer.write(example.SerializeToString())
            except (IOError, ValueError) as e:
                # IOError: unreadable image; ValueError: non-digit file name.
                print('could not read: ', filename)
                print('error: ', e)
                print('skip it \n')
    #sys.stdout.write('\n')
    #sys.stdout.flush()
if not _dataset_exists(TFRECORD_DIR):
    # Gather every image path.
    photo_filenames = _get_filenames_and_classes(DATASET_DIR)
    # Shuffle reproducibly, then split into training and test sets.
    random.seed(_RANDOM_SEED)
    random.shuffle(photo_filenames)
    testing_filenames = photo_filenames[:_NUM_TEST]
    training_filenames = photo_filenames[_NUM_TEST:]
    # Convert both splits to tfrecord format.
    _convert_dataset('train', training_filenames, DATASET_DIR)
    _convert_dataset('test', testing_filenames, DATASET_DIR)
    print('生成tfrecord文件')
else:
    print("wenjiancunzai")
複製代碼
在使用以前須要配置一下環境,由於在下面訓練的代碼中,要在 nets 中 import nets_factory,因此:
而且在用到的 alexnet 網絡中修改對應的輸出。
import os
import tensorflow as tf
import numpy as np
from PIL import Image
from nets import nets_factory
# Number of distinct characters (digits 0-9).
CHAR_SET_LEN = 10
# Image height / width.
# NOTE(review): these two constants are never used below -- the pipeline
# works with 224x224 images throughout.
IMAGE_HEIGHT = 60
IMAGE_WIDTH = 160
# Batch size.
BATCH_SIZE = 1
# Path of the tfrecord file to read.
# NOTE(review): training reads test.tfrecords here -- this likely should be
# train.tfrecords; confirm against the conversion script.
TFRECORD_FILE = "E:/tf3/captcha/test.tfrecords"
# Placeholders: image batch plus one label vector per captcha position.
x = tf.placeholder(tf.float32, [None, 224, 224])
y0 = tf.placeholder(tf.float32, [None])
y1 = tf.placeholder(tf.float32, [None])
y2 = tf.placeholder(tf.float32, [None])
y3 = tf.placeholder(tf.float32, [None])
# Learning rate as a variable so it can be decayed during training.
lr = tf.Variable(0.03, dtype=tf.float32)
# Read and decode data from a tfrecord file.
def read_and_decode(filename):
    """Return (image, label0, label1, label2, label3) tensors from `filename`.

    The image is reshaped to 224x224 and normalized to [-1, 1]; the four
    labels are the captcha's digits as int32 scalar tensors.

    BUG FIX vs. original: label1/label2/label3 were all read from
    features['label0'] (copy-paste error), so three of the four heads were
    trained against the wrong digit.
    """
    # Queue of input file names.
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    # reader.read returns (key, serialized example).
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'image': tf.FixedLenFeature([], tf.string),
                                           'label0': tf.FixedLenFeature([], tf.int64),
                                           'label1': tf.FixedLenFeature([], tf.int64),
                                           'label2': tf.FixedLenFeature([], tf.int64),
                                           'label3': tf.FixedLenFeature([], tf.int64),
                                       })
    # Decode the raw grayscale pixel bytes.
    image = tf.decode_raw(features['image'], tf.uint8)
    # tf.train.shuffle_batch requires a fully-defined shape.
    image = tf.reshape(image, [224, 224])
    # Normalize pixels from [0, 255] to [-1, 1].
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    # Read each position's own label (the fix).
    label0 = tf.cast(features['label0'], tf.int32)
    label1 = tf.cast(features['label1'], tf.int32)
    label2 = tf.cast(features['label2'], tf.int32)
    label3 = tf.cast(features['label3'], tf.int32)
    return image, label0, label1, label2, label3
# Image and label tensors for a single decoded example.
image, label0, label1, label2, label3 = read_and_decode(TFRECORD_FILE)
# shuffle_batch randomizes example order while batching.
image_batch, label_batch0, label_batch1, label_batch2, label_batch3 = tf.train.shuffle_batch(
    [image, label0, label1, label2, label3], batch_size = BATCH_SIZE,
    capacity = 50000, min_after_dequeue=10000, num_threads=1)
# Network definition from the slim nets factory.
# NOTE(review): assumes the local alexnet_v2 was modified to return four
# logits (one per captcha digit) -- the call below unpacks five values.
train_network_fn = nets_factory.get_network_fn(
    'alexnet_v2',
    num_classes=CHAR_SET_LEN,
    weight_decay=0.0005,
    is_training=True)
with tf.Session() as sess:
    # inputs: a tensor of size [batch_size, height, width, channels]
    X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])
    # Forward pass: four logits tensors, one per captcha digit position.
    logits0, logits1, logits2, logits3, end_points = train_network_fn(X)
    # One-hot encode the integer labels.
    one_hot_labels0 = tf.one_hot(indices=tf.cast(y0, tf.int32), depth=CHAR_SET_LEN)
    one_hot_labels1 = tf.one_hot(indices=tf.cast(y1, tf.int32), depth=CHAR_SET_LEN)
    one_hot_labels2 = tf.one_hot(indices=tf.cast(y2, tf.int32), depth=CHAR_SET_LEN)
    one_hot_labels3 = tf.one_hot(indices=tf.cast(y3, tf.int32), depth=CHAR_SET_LEN)
    # Per-position softmax cross-entropy losses.
    loss0 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits0, labels=one_hot_labels0))
    loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits1, labels=one_hot_labels1))
    loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits2, labels=one_hot_labels2))
    loss3 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits3, labels=one_hot_labels3))
    # Total loss: mean of the four position losses.
    total_loss = (loss0+loss1+loss2+loss3)/4.0
    # Minimize the total loss with Adam.
    optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(total_loss)
    # Per-position accuracy.
    correct_prediction0 = tf.equal(tf.argmax(one_hot_labels0, 1), tf.argmax(logits0, 1))
    accuracy0 = tf.reduce_mean(tf.cast(correct_prediction0, tf.float32))
    correct_prediction1 = tf.equal(tf.argmax(one_hot_labels1, 1), tf.argmax(logits1, 1))
    accuracy1 = tf.reduce_mean(tf.cast(correct_prediction1, tf.float32))
    correct_prediction2 = tf.equal(tf.argmax(one_hot_labels2, 1), tf.argmax(logits2, 1))
    accuracy2 = tf.reduce_mean(tf.cast(correct_prediction2, tf.float32))
    correct_prediction3 = tf.equal(tf.argmax(one_hot_labels3, 1), tf.argmax(logits3, 1))
    accuracy3 = tf.reduce_mean(tf.cast(correct_prediction3, tf.float32))
    # Saver for checkpointing the model.
    saver = tf.train.Saver()
    # Initialize all variables.
    sess.run(tf.global_variables_initializer())
    # Coordinator to manage the input-queue threads.
    coord = tf.train.Coordinator()
    # Start the queue runners so the filename queue is populated.
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for i in range(6001):
        # Fetch one batch of images and labels.
        b_image, b_label0, b_label1, b_label2, b_label3 = sess.run(
            [image_batch, label_batch0, label_batch1, label_batch2, label_batch3])
        # One optimization step.
        sess.run(optimizer, feed_dict={x: b_image, y0: b_label0, y1: b_label1, y2: b_label2, y3: b_label3})
        # Report loss and accuracy every 20 iterations.
        if i % 20 == 0:
            # Decay the learning rate every 2000 iterations.
            # NOTE(review): this also fires at i == 0, cutting the initial
            # rate from 0.03 to 0.01 before the first report.
            if i % 2000 == 0:
                sess.run(tf.assign(lr, lr/3))
            acc0, acc1, acc2, acc3, loss_ = sess.run(
                [accuracy0, accuracy1, accuracy2, accuracy3, total_loss],
                feed_dict={x: b_image, y0: b_label0, y1: b_label1, y2: b_label2, y3: b_label3})
            learning_rate = sess.run(lr)
            print("Iter:%d Loss:%.3f Accuracy:%.2f,%.2f,%.2f,%.2f Learning_rate:%.4f" % (i,loss_,acc0,acc1,acc2,acc3,learning_rate))
            # Save the final model.
            # BUG FIX vs. original: the keyword was misspelled 'gloabl_step',
            # which would raise TypeError here after all 6000 iterations.
            if i == 6000:
                saver.save(sess, "./captcha/models/crack_captcha.model", global_step=i)
                break
    # Ask the queue threads to stop...
    coord.request_stop()
    # ...and wait for all of them to finish before exiting.
    coord.join(threads)
複製代碼
最後會獲得一個 models 參數。
import os
import tensorflow as tf
import numpy as np
from PIL import Image
from nets import nets_factory
import matplotlib.pyplot as plt
# Number of distinct characters (digits 0-9).
CHAR_SET_LEN = 10
# Image height / width.
# NOTE(review): these two constants are never used below -- the pipeline
# works with 224x224 images throughout.
IMAGE_HEIGHT = 60
IMAGE_WIDTH = 160
# Batch size.
BATCH_SIZE = 1
# Path of the tfrecord file to read.
TFRECORD_FILE = "E:/tf3/captcha/test.tfrecords"
# Placeholder for the 224x224 grayscale input images.
x = tf.placeholder(tf.float32, [None, 224, 224])
# Read and decode data from a tfrecord file.
def read_and_decode(filename):
    """Return (image, image_raw, label0..label3) tensors from `filename`.

    `image` is normalized to [-1, 1] for the network; `image_raw` keeps the
    unprocessed 8-bit grayscale pixels so the picture can be displayed.

    BUG FIX vs. original: label1/label2/label3 were all read from
    features['label0'] (copy-paste error), so the printed ground-truth
    labels repeated the first digit four times.
    """
    # Queue of input file names.
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    # reader.read returns (key, serialized example).
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'image': tf.FixedLenFeature([], tf.string),
                                           'label0': tf.FixedLenFeature([], tf.int64),
                                           'label1': tf.FixedLenFeature([], tf.int64),
                                           'label2': tf.FixedLenFeature([], tf.int64),
                                           'label3': tf.FixedLenFeature([], tf.int64),
                                       })
    # Decode the raw grayscale pixel bytes.
    image = tf.decode_raw(features['image'], tf.uint8)
    # Unprocessed grayscale copy, for display only.
    image_raw = tf.reshape(image, [224, 224])
    # tf.train.shuffle_batch requires a fully-defined shape.
    image = tf.reshape(image, [224, 224])
    # Normalize pixels from [0, 255] to [-1, 1].
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    # Read each position's own label (the fix).
    label0 = tf.cast(features['label0'], tf.int32)
    label1 = tf.cast(features['label1'], tf.int32)
    label2 = tf.cast(features['label2'], tf.int32)
    label3 = tf.cast(features['label3'], tf.int32)
    return image, image_raw, label0, label1, label2, label3
# Image (processed and raw) and label tensors for a single decoded example.
image, image_raw, label0, label1, label2, label3 = read_and_decode(TFRECORD_FILE)
# shuffle_batch randomizes example order while batching.
image_batch, image_raw_batch, label_batch0, label_batch1, label_batch2, label_batch3 = tf.train.shuffle_batch(
    [image, image_raw, label0, label1, label2, label3], batch_size = BATCH_SIZE,
    capacity = 50000, min_after_dequeue=10000, num_threads=1)
# Network definition from the slim nets factory; is_training=False for inference.
# NOTE(review): assumes the local alexnet_v2 was modified to return four
# logits (one per captcha digit) -- the call below unpacks five values.
train_network_fn = nets_factory.get_network_fn(
    'alexnet_v2',
    num_classes=CHAR_SET_LEN,
    weight_decay=0.0005,
    is_training=False)
with tf.Session() as sess:
    # inputs: a tensor of size [batch_size, height, width, channels]
    X = tf.reshape(x, [BATCH_SIZE, 224,224,1])
    # Forward pass: four logits tensors, one per captcha digit position.
    logits0, logits1, logits2, logits3, end_points = train_network_fn(X)
    # Predicted digit = argmax over the CHAR_SET_LEN classes per position.
    prediction0 = tf.reshape(logits0, [-1, CHAR_SET_LEN])
    prediction0 = tf.argmax(prediction0, 1)
    prediction1 = tf.reshape(logits1, [-1, CHAR_SET_LEN])
    prediction1 = tf.argmax(prediction1, 1)
    prediction2 = tf.reshape(logits2, [-1, CHAR_SET_LEN])
    prediction2 = tf.argmax(prediction2, 1)
    prediction3 = tf.reshape(logits3, [-1, CHAR_SET_LEN])
    prediction3 = tf.argmax(prediction3, 1)
    # Initialize variables (immediately overwritten by the restore below).
    sess.run(tf.global_variables_initializer())
    # Load the trained model.
    # NOTE(review): the training script saves with global_step=6000, which
    # yields 'crack_captcha.model-6000'; confirm a '-20' checkpoint exists.
    saver = tf.train.Saver()
    saver.restore(sess, './captcha/models/crack_captcha.model-20')
    # Coordinator to manage the input-queue threads.
    coord = tf.train.Coordinator()
    # Start the queue runners so the filename queue is populated.
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for i in range(5):
        # Fetch one batch of data and labels.
        b_image, b_image_raw, b_label0, b_label1, b_label2, b_label3 = sess.run([image_batch,
                                                                                image_raw_batch,
                                                                                label_batch0,
                                                                                label_batch1,
                                                                                label_batch2,
                                                                                label_batch3])
        # Show the unprocessed grayscale image (blocks until the window closes).
        img = Image.fromarray(b_image_raw[0], 'L')
        plt.imshow(img)
        plt.axis('off')
        plt.show()
        # Print the ground-truth labels.
        print('label: ', b_label0, b_label1, b_label2, b_label3)
        # Run the network to get the predicted digits.
        label0, label1, label2, label3 = sess.run([prediction0, prediction1, prediction2, prediction3], feed_dict={x: b_image})
        # Print the predictions.
        print("predict: ", label0, label1, label2, label3)
    # Ask the queue threads to stop...
    coord.request_stop()
    # ...and wait for all of them to finish before exiting.
    coord.join(threads)
複製代碼