Source code for dropout optimization

Dropout optimization completes our journey through feed-forward networks. Here is the full source code for further analysis and experimentation:

import mnist_data 
import tensorflow as tf
import math


logs_path = 'log_simple_stats_5_lyers_dropout'
batch_size = 100
training_epochs = 10

mnist = mnist_data.read_data_sets("data")
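# Load the MNIST dataset (downloaded into the "data" directory)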

X = tf.placeholder(tf.float32, [None, 28, 28, 1])
Y_ = tf.placeholder(tf.float32, [None, 10])
lr = tf.placeholder(tf.float32)
pkeep = tf.placeholder(tf.float32)
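# lr will be fed with the decayed learning rate at each training step;
# pkeep is the dropout keep probability: 0.75 during training and
# 1.0 at test time, when no units must be dropped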

L = 200
M = 100
N = 60
O = 30

W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1))
B1 = tf.Variable(tf.ones([L])/10)
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B2 = tf.Variable(tf.ones([M])/10)
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B3 = tf.Variable(tf.ones([N])/10)
W4 = tf.Variable(tf.truncated_normal([N, O], stddev=0.1))
B4 = tf.Variable(tf.ones([O])/10)
W5 = tf.Variable(tf.truncated_normal([O, 10], stddev=0.1))
B5 = tf.Variable(tf.zeros([10]))
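# Weights start as small truncated-normal noise; the hidden-layer
# biases start at 0.1 so that the ReLU units are initially active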

# The model, with dropout at each layer
XX = tf.reshape(X, [-1, 28*28])

Y1 = tf.nn.relu(tf.matmul(XX, W1) + B1)
Y1d = tf.nn.dropout(Y1, pkeep)

Y2 = tf.nn.relu(tf.matmul(Y1d, W2) + B2)
Y2d = tf.nn.dropout(Y2, pkeep)

Y3 = tf.nn.relu(tf.matmul(Y2d, W3) + B3)
Y3d = tf.nn.dropout(Y3, pkeep)

Y4 = tf.nn.relu(tf.matmul(Y3d, W4) + B4)
Y4d = tf.nn.dropout(Y4, pkeep)

Ylogits = tf.matmul(Y4d, W5) + B5
Y = tf.nn.softmax(Ylogits)
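# Y holds the softmax predictions; the loss below is computed directly
# on Ylogits, which is numerically more stable than log(softmax)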

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits,
                                                        labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100

correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
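# Adam optimizer; its step size comes from the lr placeholder, fed with
# the exponentially decayed learning rate computed in the training loop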
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path,
                                   graph=tf.get_default_graph())

    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples/batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Exponential learning-rate decay, from max_learning_rate
            # down towards min_learning_rate
            max_learning_rate = 0.003
            min_learning_rate = 0.0001
            decay_speed = 2000
            learning_rate = (min_learning_rate +
                             (max_learning_rate - min_learning_rate) *
                             math.exp(-i/decay_speed))
            # Train with dropout: each neuron is kept with probability 0.75
            _, summary = sess.run([train_step, summary_op],
                                  feed_dict={X: batch_x, Y_: batch_y,
                                             pkeep: 0.75, lr: learning_rate})
            writer.add_summary(summary,
                               epoch * batch_count + i)
        print("Epoch:", epoch)

    # Dropout is switched off for evaluation (pkeep = 1.0)
    print("Accuracy:", accuracy.eval(feed_dict={X: mnist.test.images,
                                                Y_: mnist.test.labels,
                                                pkeep: 1.0}))
    print("done")