TensorFlow Learning Notes 10 -- Using TensorBoard


1 Introduction

TensorBoard can track how the loss and accuracy change during training, track how parameters such as the weights w and biases b evolve, and display images produced in the convolution layers, among other things.

2 Usage

1 First, mark the values you want to record

Images, scalar values, and variables:

# Images
tf.image_summary(tag, tensor, max_images=3, collections=None, name=None)
# Scalar values
tf.scalar_summary(tags, values, collections=None, name=None)
# Variables (histograms)
tf.histogram_summary(tag, values, collections=None, name=None)

For example:

# Image
tf.image_summary("x", x_new, max_images=1)
# Scalar value
cost_summary = tf.scalar_summary(cost.op.name, cost)
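The introduction also mentions tracking the weights w and biases b; with the same pre-1.0 summary API, a minimal sketch might look like the following (W and b here are placeholder example variables, not taken from the article's code):

import tensorflow as tf

# Example variables standing in for real model parameters.
W = tf.Variable(tf.random_normal([784, 10]), name="weights")
b = tf.Variable(tf.zeros([10]), name="biases")

# Histogram summaries let TensorBoard show how the distributions evolve over training.
w_summary = tf.histogram_summary("weights", W)
b_summary = tf.histogram_summary("biases", b)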

2 Define the merged summary op and the writer

# Merge all summaries into a single op
merged_summary_op = tf.merge_all_summaries()
# op to write logs to Tensorboard
summary_writer = tf.train.SummaryWriter("./log/", graph=tf.get_default_graph())

3 In the session sess, feed in the data to compute the value of the merged summary op, then use the writer summary_writer to write that value to the specified log directory.

summary = sess.run(merged_summary_op, feed_dict={x: batch_x})
summary_writer.add_summary(summary)
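Putting the three steps together, here is a minimal self-contained sketch using the same pre-1.0 API; the scalar named loss and the fake values fed to it are purely illustrative:

import tensorflow as tf

# Step 1: a made-up scalar to track; in a real model this would be your loss tensor.
loss = tf.placeholder(tf.float32, name="loss")
loss_summary = tf.scalar_summary("loss", loss)

# Step 2: merge all summaries and create a writer for the log directory.
merged_summary_op = tf.merge_all_summaries()

with tf.Session() as sess:
    summary_writer = tf.train.SummaryWriter("./log/", graph=tf.get_default_graph())
    # Step 3: evaluate the merged op and write the result for each step.
    for step in range(100):
        summary = sess.run(merged_summary_op, feed_dict={loss: 1.0 / (step + 1)})
        summary_writer.add_summary(summary, step)
    summary_writer.close()

After it runs, launch TensorBoard with tensorboard --logdir=./log/ and open the Scalars tab (the default address is http://localhost:6006).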

3 Examples

1 Images

Displaying an MNIST image

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
x = tf.placeholder(tf.float32, [None, 784])

if __name__ == '__main__':
    with tf.Session() as sess:
        x_new = tf.reshape(x, shape=[-1, 28, 28, 1])
        tf.image_summary("x", x_new, max_images=1)
        # Merge all summaries into a single op
        merged_summary_op = tf.merge_all_summaries()
        # op to write logs to Tensorboard
        summary_writer = tf.train.SummaryWriter("./log/", graph=tf.get_default_graph())
        choose = np.random.randint(len(mnist.test.images))
        batch_x = mnist.test.images[choose].reshape([-1, 784])
        summary = sess.run(merged_summary_op, feed_dict={x: batch_x})
        summary_writer.add_summary(summary)

Displaying a custom image

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
batch_x = np.random.randint(256, size=[1, 784]).astype(np.uint8)

if __name__ == '__main__':
    with tf.Session() as sess:
        x_new = tf.reshape(x, shape=[-1, 28, 28, 1])
        tf.image_summary("x", x_new, max_images=1)
        # Merge all summaries into a single op
        merged_summary_op = tf.merge_all_summaries()
        # op to write logs to Tensorboard
        summary_writer = tf.train.SummaryWriter("./log/", graph=tf.get_default_graph())
        #choose = np.random.randint(len(mnist.test.images))
        #batch_x = mnist.test.images[choose].reshape([-1, 784])
        summary = sess.run(merged_summary_op, feed_dict={x: batch_x})
        summary_writer.add_summary(summary)

2 Loss, accuracy, etc.

Code

# -*- coding: utf-8 -*-
# Input data
import input_data
import pdb
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

import tensorflow as tf

# Training hyper-parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 64
display_step = 20

# Network parameters
n_input = 784    # input dimension
n_classes = 10   # label dimension
dropout = 0.8    # dropout keep probability

# Placeholder inputs
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)

# Convolution
def conv2d(name, l_input, w, b):
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME'), b), name=name)

# Max pooling (down-sampling)
def max_pool(name, l_input, k):
    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)

# Local response normalization
def norm(name, l_input, lsize=4):
    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)

# Define the whole network
def alex_net(_X, _weights, _biases, _dropout):
    # Reshape the flat input vector into a batch of 28x28x1 images
    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])
    # Convolution layer
    conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])
    # Down-sampling layer
    pool1 = max_pool('pool1', conv1, k=2)
    # Normalization layer
    norm1 = norm('norm1', pool1, lsize=4)
    # Dropout
    norm1 = tf.nn.dropout(norm1, _dropout)
    # Convolution
    conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
    # Down-sampling
    pool2 = max_pool('pool2', conv2, k=2)
    # Normalization
    norm2 = norm('norm2', pool2, lsize=4)
    # Dropout
    norm2 = tf.nn.dropout(norm2, _dropout)
    # Convolution
    conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
    # Down-sampling
    pool3 = max_pool('pool3', conv3, k=2)
    # Normalization
    norm3 = norm('norm3', pool3, lsize=4)
    # Dropout
    norm3 = tf.nn.dropout(norm3, _dropout)
    # Fully connected layer: flatten the feature maps into a vector first
    dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]])
    dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1')
    # Fully connected layer
    dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2')  # Relu activation
    # Output layer
    out = tf.matmul(dense2, _weights['out']) + _biases['out']
    return out

# All network parameters
weights = {
    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),
    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),
    'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),
    'wd1': tf.Variable(tf.random_normal([4096, 1024])),  # 4*4*256 = 4096
    'wd2': tf.Variable(tf.random_normal([1024, 1024])),
    'out': tf.Variable(tf.random_normal([1024, 10]))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([64])),
    'bc2': tf.Variable(tf.random_normal([128])),
    'bc3': tf.Variable(tf.random_normal([256])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'bd2': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Build the model
pred = alex_net(x, weights, biases, keep_prob)

# Define the loss function and the training step
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate the model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initialize all the shared variables
init = tf.initialize_all_variables()

# Add summaries
cost_summary = tf.scalar_summary(cost.op.name, cost)
accuracy_summary = tf.scalar_summary(accuracy.op.name, accuracy)

# Start training
with tf.Session() as sess:
    sess.run(init)
    step = 1
    summary_op = tf.merge_summary([cost_summary, accuracy_summary])
    summary_writer = tf.train.SummaryWriter("./log/", sess.graph)
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        # Fetch a batch of data
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
        if step % display_step == 0:
            # Compute the accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            # Compute the loss
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            print "Iter " + str(step * batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
            summary_op_out = sess.run(summary_op, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            summary_writer.add_summary(summary_op_out, step)
        step += 1
    print "Optimization Finished!"
    # Compute the test accuracy
    print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.})

3 Combined example (visualizing intermediate feature maps of the convolution layers)

"""A Convolutional Network implementation example using TensorFlow library.This example is using the MNIST database of handwritten digits(http://yann.lecun.com/exdb/mnist/)Author: Aymeric DamienProject: https://github.com/aymericdamien/TensorFlow-Examples/"""import shutil as shimport numpy as npimport tensorflow as tffrom tensorflow.examples.tutorials.mnist import input_dataprint("Downloading MNIST data ...")mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)# Log directory path.log_path = "./log/test"model_path = "./models/cnn_28x28_model.npy"# Remove logging path.sh.rmtree(log_path, ignore_errors=True)# Parametersbatch_size = 1# Network Parametersn_input = 784 # MNIST data input (img shape: 28*28)n_classes = 10 # MNIST total classes (0-9 digits)# tf Graph inputx = tf.placeholder(tf.float32, [None, n_input])y = tf.placeholder(tf.float32, [None, n_classes])keep_prob = 1.print ("Loading pre-trained model ...")restore_data = np.load(model_path).item()# Store layers weight & biasweights = { # 5x5 conv, 1 input, 32 outputs 'wc1': tf.constant(restore_data["weights"]["wc1"]), # 5x5 conv, 32 inputs, 64 outputs 'wc2': tf.constant(restore_data["weights"]["wc2"]), # fully connected, 7*7*64 inputs, 1024 outputs 'wd1': tf.constant(restore_data["weights"]["wd1"]), # 1024 inputs, 10 outputs (class prediction) 'out': tf.constant(restore_data["weights"]["out"])}biases = { 'bc1': tf.constant(restore_data["biases"]["bc1"]), 'bc2': tf.constant(restore_data["biases"]["bc2"]), 'bd1': tf.constant(restore_data["biases"]["bd1"]), 'out': tf.constant(restore_data["biases"]["out"])}def visualize_conv_layer(x, ix, iy, channels, cx=8): """ Aggregate the feature maps to an image from the given tensor of a convolution layer. Reference: http://stackoverflow.com/questions/33802336/visualizing-output-of -convolutional-layer-in-tensorflow :param x: The tensor of a convolution layer. :param ix: The width. :param iy: The height. :param channels: The depth (channel number). :param cx: The number of how many feature maps in a row. :return: The aggregated feature map. """ cy = channels / cx print("ix=%d, iy=%d, channels=%d, cx=%d, cy=%d" % (ix, iy, channels, cx, cy)) # First slice off 1 image and remove the image dimension. 
img = tf.slice(x, [0, 0, 0, 0], [1, -1, -1, -1]) img = tf.reshape(img, [iy, ix, channels]) # Add a couple of pixels of zero padding around the image ix += 4 iy += 4 img = tf.image.resize_image_with_crop_or_pad(img, iy, ix) img = tf.reshape(img, [iy, ix, cy, cx]) img = tf.transpose(img, perm=[2, 0, 3, 1]) img = tf.reshape(img, [1, cy * iy, cx * ix, 1]) return img# Create some wrappers for simplicitydef conv2d(x, W, b, strides=1): # Conv2D wrapper, with bias and relu activation x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME') x = tf.nn.bias_add(x, b) return tf.nn.relu(x)def maxpool2d(x, k=2): # MaxPool2D wrapper return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')# Create modeldef conv_net(x, weights, biases): # Reshape input picture x = tf.reshape(x, shape=[-1, 28, 28, 1]) tf.image_summary("x", x, max_images=batch_size) # Convolution Layer conv1 = conv2d(x, weights['wc1'], biases['bc1']) print "" print ("conv1=%s" % conv1) tf.image_summary("conv1", visualize_conv_layer(conv1, conv1.get_shape().as_list()[1], conv1.get_shape().as_list()[2], conv1.get_shape().as_list()[3], 8), max_images=1) # Max Pooling (down-sampling) conv1 = maxpool2d(conv1, k=2) print ("maxpool2d(conv1)=%s" % conv1) tf.image_summary("maxpool2d(conv1)", visualize_conv_layer(conv1, conv1.get_shape().as_list()[1], conv1.get_shape().as_list()[2], conv1.get_shape().as_list()[3], 8), max_images=1) # Convolution Layer conv2 = conv2d(conv1, weights['wc2'], biases['bc2']) print "" print ("conv2=%s" % conv2) tf.image_summary("conv2", visualize_conv_layer(conv2, conv2.get_shape().as_list()[1], conv2.get_shape().as_list()[2], conv2.get_shape().as_list()[3], 8), max_images=1) # Max Pooling (down-sampling) conv2 = maxpool2d(conv2, k=2) print ("maxpool2d(conv2)=%s" % conv2) tf.image_summary("maxpool2d(conv2)", visualize_conv_layer(conv2, conv2.get_shape().as_list()[1], conv2.get_shape().as_list()[2], conv2.get_shape().as_list()[3], 8), max_images=1) # Fully connected layer # Reshape conv2 output to fit fully connected layer input fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]]) fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1']) fc1 = tf.nn.relu(fc1) # Output, class prediction out = tf.add(tf.matmul(fc1, weights['out']), biases['out']) out = tf.nn.softmax(out) print "" return out# Construct modelpred = conv_net(x, weights, biases)# Initializing the variablesinit = tf.initialize_all_variables()# Launch the graphif __name__ == '__main__': with tf.Session() as sess: sess.run(init) # Merge all summaries into a single op merged_summary_op = tf.merge_all_summaries() # op to write logs to Tensorboard summary_writer = tf.train.SummaryWriter(log_path, graph=tf.get_default_graph()) # Prepare the test data randomly. choose = np.random.randint(len(mnist.test.images)) batch_x = mnist.test.images[choose].reshape([-1, 784]) # Run the prediction. final_pred, summary = sess.run([pred, merged_summary_op], feed_dict={x: batch_x}) print ("The outcome is %s" % final_pred) # Write logs at every iteration summary_writer.add_summary(summary) print ("Use /"tensorboard --logdir=./log/" to launch the TensorBoard.")
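The script above loads a pre-trained model from ./models/cnn_28x28_model.npy as a nested dict of numpy arrays via np.load(...).item(). The article does not show how that file is created; one possible sketch, assuming the training script keeps its parameters in tf.Variable dicts named weights and biases and has a live session sess, would be:

import numpy as np

def save_model_npy(sess, weights, biases, path="./models/cnn_28x28_model.npy"):
    """Evaluate tf.Variable dicts and dump them as a nested dict of numpy arrays."""
    model = {
        "weights": {name: sess.run(var) for name, var in weights.items()},
        "biases": {name: sess.run(var) for name, var in biases.items()},
    }
    # np.load(path).item() on the reading side recovers this nested dict.
    np.save(path, model)

Note that the shapes stored this way must match the comments in the visualization script (5x5 conv kernels with 32 and 64 outputs, a 7*7*64 x 1024 fully connected layer, and a 1024 x 10 output layer), which differ from the network in the previous example.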

References: https://github.com/boyw165/tensorflow-vgg.git (includes a VGG visualization) and https://github.com/woodrush/vgg-visualizer-tf

