Implemented conv2d for MNIST from scratch; added fourth-week notes.
This commit is contained in:
@@ -65,14 +65,17 @@ def create_model(input_dim,output_dim):
|
||||
error_lower_bound = tf.constant(0.9,name='lower_bound')
|
||||
x = tf.placeholder(tf.float32, [None,input_dim])
|
||||
y = tf.placeholder(tf.float32, [None,output_dim])
|
||||
|
||||
W1 = tf.Variable(tf.random_normal([input_dim, 512],stddev=2.0/math.sqrt(input_dim)),name='layer_1_weights')
|
||||
b1 = tf.Variable(tf.random_normal([512]),name='bias_1_weights')
|
||||
layer_1 = tf.nn.relu(tf.add(tf.matmul(x,W1),b1))
|
||||
|
||||
W2 = tf.Variable(tf.random_normal([512, 128],stddev=2.0/math.sqrt(512)),name='layer_2_weights')
|
||||
b2 = tf.Variable(tf.random_normal([128]),name='bias_2_weights')
|
||||
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1,W2),b2))
|
||||
|
||||
W_o = tf.Variable(tf.random_normal([128, output_dim],stddev=2.0/math.sqrt(128)),name='layer_output_weights')
|
||||
b_o = tf.Variable(tf.random_normal([output_dim]),name='bias_output_weights')
|
||||
layer_1 = tf.nn.relu(tf.add(tf.matmul(x,W1),b1))
|
||||
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1,W2),b2))
|
||||
#y_ = tf.nn.softmax(tf.add(tf.matmul(layer_2,W_o),b_o))+1e-6
|
||||
y_ = tf.add(tf.matmul(layer_2,W_o),b_o)#tf.nn.relu()
|
||||
|
||||
|
||||
Reference in New Issue
Block a user