# coding: utf-8
"""Two-hidden-layer fully connected MNIST classifier (TensorFlow 1.x).

Converted from a Jupyter-notebook export. Fixes in this revision:
- cross-entropy was -sum(predictions * log(labels)); log of a one-hot label
  is -inf/undefined, so the loss was meaningless. Replaced with the
  numerically stable fused op on raw logits.
- weights were zero-initialized, which with ReLU keeps every hidden unit
  identical (symmetry is never broken); now small truncated-normal init.
- tf.global_variables_initializer() was built but never run, so the
  training step would fail on uninitialized variables.
- the optimizer now uses the declared learning_rate (0.01) instead of a
  hard-coded 0.5.
"""

from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf

# one_hot=True -> labels are 10-dim indicator vectors.
mnist = input_data.read_data_sets('./mnist_data', one_hot=True)
xtrain, xtest = mnist.train, mnist.test

learning_rate = tf.constant(0.01, name='learning_rate')

# Inputs: 28x28 images flattened to 784 floats; 10-class one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Hidden-layer weights must NOT start at zero: with zero weights every ReLU
# unit emits 0 and receives the same gradient, so the layers never
# differentiate. Biases may safely start at zero.
W1 = tf.Variable(tf.truncated_normal([784, 512], stddev=0.1), name='layer_1_weights')
b1 = tf.Variable(tf.zeros([512]), name='bias_1_weights')
W2 = tf.Variable(tf.truncated_normal([512, 128], stddev=0.1), name='layer_2_weights')
b2 = tf.Variable(tf.zeros([128]), name='bias_2_weights')
W_o = tf.Variable(tf.truncated_normal([128, 10], stddev=0.1), name='layer_output_weights')
b_o = tf.Variable(tf.zeros([10]), name='bias_output_weights')

layer_1 = tf.nn.relu(tf.add(tf.matmul(x, W1), b1))
layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, W2), b2))
# Keep raw logits for the loss; apply softmax only for the probability output.
logits = tf.add(tf.matmul(layer_2, W_o), b_o)
output_layer = tf.nn.softmax(logits)

# BUG FIX: cross-entropy is -sum(labels * log(predictions)). The original
# had the operands swapped. The fused op below applies softmax internally
# to `logits` in a numerically stable way (avoids log(0)).
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))

# Use the declared learning_rate constant rather than a hard-coded 0.5.
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)

with tf.Session() as s:
    # BUG FIX: the initializer op must actually be *run*; merely
    # constructing it leaves every variable uninitialized.
    s.run(tf.global_variables_initializer())
    # One full-batch gradient step; `val` is the pre-update loss.
    _, val = s.run([train_step, cross_entropy],
                   feed_dict={x: xtrain.images, y: xtrain.labels})