After some time studying Google's deep learning framework I'm finally making real progress, so I'd like to share my TensorFlow learning journey with you.
The official Chinese TensorFlow documentation is rather dry: its examples stick to the binary MNIST dataset and say little about how to build a TFRecords dataset from your own images.
The workflow is: create the dataset, read the dataset, then feed it into a queue.
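To make the first step concrete, here is a minimal sketch of what a single record holds: an int64 class label plus the raw bytes of one resized image. The image path and label value below are placeholders for illustration, not files from this post:

import tensorflow as tf
from PIL import Image

# Hypothetical image file and class index, just to show the record layout
img = Image.open("test/cat_0.jpg").convert('RGB').resize((64, 64))  # placeholder path
example = tf.train.Example(features=tf.train.Features(feature={
    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[0])),
    'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()]))
}))
record_bytes = example.SerializeToString()  # these bytes are what a TFRecordWriter appends to the file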
Here is the complete code:
# encoding=utf-8
import os

import tensorflow as tf
from PIL import Image

cwd = os.getcwd()
# One sub-directory per class; the position in the list is used as the integer label
classes = ['test', 'test1', 'test2']


# Step 1: write the images into a binary TFRecords file
def create_record(filename="train.tfrecords"):
    writer = tf.python_io.TFRecordWriter(filename)
    for index, name in enumerate(classes):
        class_path = os.path.join(cwd, name)
        for img_name in os.listdir(class_path):
            img_path = os.path.join(class_path, img_name)
            img = Image.open(img_path).convert('RGB')  # force 3 channels so the reshape below always matches
            img = img.resize((64, 64))
            img_raw = img.tobytes()  # convert the image to raw bytes
            print(index, img_name)
            example = tf.train.Example(
                features=tf.train.Features(feature={
                    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),
                    'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
                }))
            writer.write(example.SerializeToString())
    writer.close()


# Step 2: read and decode the TFRecords file
def read_and_decode(filename):
    # Create a file-name queue (unlimited epochs by default)
    filename_queue = tf.train.string_input_producer([filename])
    # The reader pulls one serialized example at a time off the file queue
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    # Parse the serialized example back into its features
    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string)
        })
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [64, 64, 3])
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5  # scale pixel values to [-0.5, 0.5]
    label = tf.cast(features['label'], tf.int32)
    return img, label


if __name__ == '__main__':
    create_record("train.tfrecords")
    img, label = read_and_decode("train.tfrecords")
    print(img, label)

    # Step 3: batch via a queue.
    # shuffle_batch randomly shuffles the input (a plain batch would read records in order).
    # It keeps [img, label] in sync, so each image stays paired with its own label;
    # otherwise a feature could end up matched with the wrong label.
    # Internally shuffle_batch builds a RandomShuffleQueue, keeps pushing single
    # [img, label] pairs into it, and returns the result of RandomShuffleQueue.dequeue_many().
    img_batch, label_batch = tf.train.shuffle_batch([img, label],
                                                    batch_size=4,
                                                    capacity=2000,
                                                    min_after_dequeue=1000)

    # Initialize all variables
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        # Start the queue-runner threads that fill the queues
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(5):
            val, l = sess.run([img_batch, label_batch])
            # l = to_categorical(l, 12)
            print(val.shape, l)
        coord.request_stop()
        coord.join(threads)
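As a quick sanity check after create_record has run, you can also walk the file record by record without building a graph. This is a sketch based on the TF 1.x tf.python_io.tf_record_iterator API, assuming the same train.tfrecords file and 64x64 RGB layout as above:

import tensorflow as tf
from PIL import Image

# Iterate over the raw records and inspect the first one
for record in tf.python_io.tf_record_iterator("train.tfrecords"):
    example = tf.train.Example()
    example.ParseFromString(record)
    label = example.features.feature['label'].int64_list.value[0]
    img_raw = example.features.feature['img_raw'].bytes_list.value[0]
    img = Image.frombytes('RGB', (64, 64), img_raw)  # rebuild the PIL image from the stored bytes
    print(label, img.size)
    break

In newer TensorFlow releases the queue-runner pipeline above is superseded by tf.data.TFRecordDataset, but the Example/Feature layout written here stays the same.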