# coding: utf-8

# In[1]:

from tensorflow.examples.tutorials.mnist import input_data

# In[2]:

mnist = input_data.read_data_sets('./mnist_data', one_hot=True)

# In[3]:

xtrain, xtest = mnist.train, mnist.test

import tensorflow as tf
import math

# In[28]:

# Hyperparameters: step size for gradient descent and the L2 penalty weight.
learning_rate = tf.constant(0.1, name='learning_rate')
beta = tf.constant(0.01, name='regularization_beta')

# In[5]:

# Placeholders for flattened 28x28 images (784 features) and one-hot labels.
x = tf.placeholder(tf.float32, [None, xtrain.images.shape[1]])
y = tf.placeholder(tf.float32, [None, 10])

# In[6]:

# Two hidden layers (784 -> 512 -> 128) and an output layer (128 -> 10).
# Weights use He-style initialization: stddev = 2 / sqrt(fan_in).
W1 = tf.Variable(tf.random_normal([784, 512], stddev=2.0 / math.sqrt(784)), name='layer_1_weights')
b1 = tf.Variable(tf.random_normal([512]), name='bias_1_weights')
W2 = tf.Variable(tf.random_normal([512, 128], stddev=2.0 / math.sqrt(512)), name='layer_2_weights')
b2 = tf.Variable(tf.random_normal([128]), name='bias_2_weights')
W_o = tf.Variable(tf.random_normal([128, 10], stddev=2.0 / math.sqrt(128)), name='layer_output_weights')
b_o = tf.Variable(tf.random_normal([10]), name='bias_output_weights')

# In[20]:

layer_1 = tf.nn.relu(tf.add(tf.matmul(x, W1), b1))
layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, W2), b2))
# Keep the output as raw logits: softmax_cross_entropy_with_logits applies the
# softmax internally and is numerically stabler than taking log(softmax(.) + eps).
y_ = tf.add(tf.matmul(layer_2, W_o), b_o)

# In[38]:

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_))
# L2-regularize all weights and biases; tf.nn.l2_loss(t) computes sum(t**2) / 2.
total_loss = cross_entropy + beta * (tf.nn.l2_loss(W1) + tf.nn.l2_loss(W2) + tf.nn.l2_loss(W_o)
                                     + tf.nn.l2_loss(b1) + tf.nn.l2_loss(b2) + tf.nn.l2_loss(b_o))

# Softmax is monotonic, so argmax over the raw logits gives the same predictions.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# In[39]:

train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)

# In[40]:

with tf.Session() as s:
    tf.global_variables_initializer().run()
    for i in range(20000):
        # Train on mini-batches of 100 images; report test accuracy every 1000 steps.
        batch_xs, batch_ys = xtrain.next_batch(100)
        s.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        if i % 1000 == 0:
            print(s.run(accuracy, feed_dict={x: xtest.images, y: xtest.labels}))
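
# In[41]:

# A minimal sketch (not part of the original notebook) of mini-batched
# evaluation. Feeding all 10,000 test images in one feed_dict works for a
# model this small, but chunking keeps memory bounded on larger models.
# `batched_accuracy` is a hypothetical helper: it reuses the `accuracy`, `x`,
# and `y` tensors defined above and must be called while a session with
# initialized variables is still open, e.g. inside the training loop:
#   print(batched_accuracy(s, xtest.images, xtest.labels))
def batched_accuracy(session, images, labels, batch_size=1000):
    total = images.shape[0]
    correct = 0.0
    for start in range(0, total, batch_size):
        end = min(start + batch_size, total)
        # Weight each chunk's accuracy by its size so the overall mean is exact.
        acc = session.run(accuracy, feed_dict={x: images[start:end], y: labels[start:end]})
        correct += acc * (end - start)
    return correct / total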