included mnist examples
parent 338312e5bf
commit b27939044b

@@ -0,0 +1,176 @@
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""A deep MNIST classifier using convolutional layers.

See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import sys
import tempfile

from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf

FLAGS = None

def deepnn(x):
  """deepnn builds the graph for a deep net for classifying digits.

  Args:
    x: an input tensor with the dimensions (N_examples, 784), where 784 is
      the number of pixels in a standard MNIST image.

  Returns:
    A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with
    values equal to the logits of classifying the digit into one of 10
    classes (the digits 0-9). keep_prob is a scalar placeholder for the
    probability of dropout.
  """
  # Reshape to use within a convolutional neural net.
  # Last dimension is for "features" - there is only one here, since images
  # are grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
  with tf.name_scope('reshape'):
    x_image = tf.reshape(x, [-1, 28, 28, 1])

  # First convolutional layer - maps one grayscale image to 32 feature maps.
  with tf.name_scope('conv1'):
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

  # Pooling layer - downsamples by 2X.
  with tf.name_scope('pool1'):
    h_pool1 = max_pool_2x2(h_conv1)

  # Second convolutional layer -- maps 32 feature maps to 64.
  with tf.name_scope('conv2'):
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

  # Second pooling layer.
  with tf.name_scope('pool2'):
    h_pool2 = max_pool_2x2(h_conv2)

  # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28
  # image is down to 7x7x64 feature maps -- map this to 1024 features.
  with tf.name_scope('fc1'):
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])

    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

  # Dropout - controls the complexity of the model, prevents co-adaptation
  # of features.
  with tf.name_scope('dropout'):
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

  # Map the 1024 features to 10 classes, one for each digit.
  with tf.name_scope('fc2'):
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])

    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
  return y_conv, keep_prob

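# Added note (not in the original commit): a shape walk-through of deepnn,
# derived from the ops above. The (N, 784) input is reshaped to
# (N, 28, 28, 1); conv1 keeps 28x28 and adds 32 channels; pool1 halves it to
# 14x14x32; conv2 yields 14x14x64; pool2 yields 7x7x64, which flattens to
# 7 * 7 * 64 = 3136 features feeding the 1024-unit fc1 layer and, after
# dropout, the 10 output logits.
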
def conv2d(x, W):
  """conv2d returns a 2d convolution layer with full stride."""
  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
  """max_pool_2x2 downsamples a feature map by 2X."""
  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')


def weight_variable(shape):
  """weight_variable generates a weight variable of a given shape."""
  initial = tf.truncated_normal(shape, stddev=0.1)
  return tf.Variable(initial)


def bias_variable(shape):
  """bias_variable generates a bias variable of a given shape."""
  initial = tf.constant(0.1, shape=shape)
  return tf.Variable(initial)

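# Added note (not in the original commit): conv2d uses stride 1 with SAME
# padding, so it preserves spatial size; max_pool_2x2 uses stride 2 with
# SAME padding, so each call halves the spatial dims (ceil(28/2) = 14,
# ceil(14/2) = 7). Weights start from a truncated normal (stddev 0.1) to
# break symmetry, and biases start slightly positive (0.1) so the ReLU
# units are active from the start.
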
def main(_):
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

  # Create the model
  x = tf.placeholder(tf.float32, [None, 784])

  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10])

  # Build the graph for the deep net
  y_conv, keep_prob = deepnn(x)

  with tf.name_scope('loss'):
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                            logits=y_conv)
  cross_entropy = tf.reduce_mean(cross_entropy)

  with tf.name_scope('adam_optimizer'):
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

  with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    correct_prediction = tf.cast(correct_prediction, tf.float32)
  accuracy = tf.reduce_mean(correct_prediction)

  graph_location = tempfile.mkdtemp()
  print('Saving graph to: %s' % graph_location)
  train_writer = tf.summary.FileWriter(graph_location)
  train_writer.add_graph(tf.get_default_graph())

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(20000):
      batch = mnist.train.next_batch(50)
      if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x: batch[0], y_: batch[1], keep_prob: 1.0})
        print('step %d, training accuracy %g' % (i, train_accuracy))
      train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

    print('test accuracy %g' % accuracy.eval(feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))

if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('--data_dir', type=str,
                      default='/tmp/tensorflow/mnist/input_data',
                      help='Directory for storing input data')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
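A usage sketch, added here and not part of the commit: the page does not show
file paths, so the name mnist_deep.py below is an assumption. With TensorFlow
1.x installed, running python mnist_deep.py --data_dir=/tmp/tensorflow/mnist/input_data
downloads MNIST on first use and trains for 20,000 steps; the tutorial this
file follows reports a test accuracy of roughly 99.2%. The graph can also be
wired up without training:

import tensorflow as tf
from mnist_deep import deepnn  # assumed filename; the commit page omits it

x = tf.placeholder(tf.float32, [None, 784])
y_conv, keep_prob = deepnn(x)
print(y_conv.shape)  # (?, 10) -- one logit per digit class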

@@ -0,0 +1,79 @@
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""A very simple MNIST classifier.

See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import sys

from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf

FLAGS = None

def main(_):
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

  # Create the model
  x = tf.placeholder(tf.float32, [None, 784])
  W = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.matmul(x, W) + b

  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10])

  # The raw formulation of cross-entropy,
  #
  #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
  #                                 reduction_indices=[1]))
  #
  # can be numerically unstable.
  #
  # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
  # outputs of 'y', and then average across the batch.
  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

  sess = tf.InteractiveSession()
  tf.global_variables_initializer().run()
  # Train
  for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

  # Test trained model
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                      y_: mnist.test.labels}))

if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('--data_dir', type=str,
                      default='/tmp/tensorflow/mnist/input_data',
                      help='Directory for storing input data')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
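An added illustration, not part of the commit: the comment block above prefers
tf.nn.softmax_cross_entropy_with_logits because the raw formulation is
numerically unstable. A small NumPy sketch shows the failure mode -- naively
exponentiating large logits overflows, while the log-sum-exp shift stays
finite:

import numpy as np

logits = np.array([1000., 0., -1000.])  # extreme but legal logits
naive = np.log(np.exp(logits) / np.exp(logits).sum())  # exp(1000) overflows
shifted = logits - logits.max()  # the log-sum-exp trick
stable = shifted - np.log(np.exp(shifted).sum())
print(naive)   # [nan -inf -inf], with overflow warnings
print(stable)  # [0. -1000. -2000.] -- finite log-probabilities

For reference, the beginners tutorial this script follows reports a test
accuracy of about 92% for this softmax model.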

@@ -0,0 +1,180 @@
import tensorflow as tf

node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0)  # also tf.float32 implicitly
print(node1, node2)

sess = tf.Session()
print(sess.run([node1, node2]))

node3 = tf.add(node1, node2)
print("node3:", node3)
print("sess.run(node3):", sess.run(node3))


a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b  # + provides a shortcut for tf.add(a, b)


print(sess.run(adder_node, {a: 3, b: 4.5}))
print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))

add_and_triple = adder_node * 3.
print(sess.run(add_and_triple, {a: 3, b: 4.5}))

W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W * x + b

init = tf.global_variables_initializer()
sess.run(init)
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))

y = tf.placeholder(tf.float32)
squared_deltas = tf.square(linear_model - y)
loss = tf.reduce_sum(squared_deltas)

print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))

fixW = tf.assign(W, [-1.])
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))

optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)

sess.run(init)  # reset values to incorrect defaults.
for i in range(1000):
  sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})

print(sess.run([W, b]))

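# Added sanity check (not in the original commit): the training data lies
# exactly on the line y = -x + 1, so gradient descent should drive W toward
# [-1.] and b toward [1.] with loss approaching zero, matching the manual
# fixW/fixb assignment tried above.
xs, ys = [1, 2, 3, 4], [0, -1, -2, -3]
assert all(-xi + 1 == yi for xi, yi in zip(xs, ys))
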
import tensorflow as tf

# Model parameters
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)

# loss
loss = tf.reduce_sum(tf.square(linear_model - y))  # sum of the squares
# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)

# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # reset values to incorrect defaults.
for i in range(1000):
  sess.run(train, {x: x_train, y: y_train})

# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))

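# Added cross-check (not in the original commit): least squares has a closed
# form, and NumPy recovers the same optimum the loop above converges to.
import numpy as np
A = np.column_stack([np.array(x_train, dtype=np.float64),
                     np.ones(len(x_train))])
W_opt, b_opt = np.linalg.lstsq(A, np.array(y_train, dtype=np.float64),
                               rcond=None)[0]
print(W_opt, b_opt)  # -1.0 1.0, up to floating point
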
import tensorflow as tf
# NumPy is often used to load, manipulate and preprocess data.
import numpy as np

# Declare list of features. We only have one numeric feature. There are many
# other types of columns that are more complicated and useful.
feature_columns = [tf.feature_column.numeric_column("x", shape=[1])]

# An estimator is the front end to invoke training (fitting) and evaluation
# (inference). There are many predefined types like linear regression,
# linear classification, and many neural network classifiers and regressors.
# The following code provides an estimator that does linear regression.
estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns)

# TensorFlow provides many helper methods to read and set up data sets.
# Here we use two data sets: one for training and one for evaluation.
# We have to tell the function how many batches
# of data (num_epochs) we want and how big each batch should be.
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])
input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=None, shuffle=True)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False)

# We can invoke 1000 training steps by invoking the method and passing the
# training data set.
estimator.train(input_fn=input_fn, steps=1000)

# Here we evaluate how well our model did.
train_metrics = estimator.evaluate(input_fn=train_input_fn)
eval_metrics = estimator.evaluate(input_fn=eval_input_fn)
print("train metrics: %r" % train_metrics)
print("eval metrics: %r" % eval_metrics)

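# Added sketch (not in the original commit; assumes TensorFlow 1.x): the
# trained estimator can also serve predictions for new inputs.
predict_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": np.array([5., 6.])}, num_epochs=1, shuffle=False)
for pred in estimator.predict(input_fn=predict_input_fn):
  print(pred)  # dicts whose 'predictions' values should be near -4. and -5.
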
import numpy as np
import tensorflow as tf

# Declare the list of features; we only have one real-valued feature.
def model_fn(features, labels, mode):
  # Build a linear model and predict values
  W = tf.get_variable("W", [1], dtype=tf.float64)
  b = tf.get_variable("b", [1], dtype=tf.float64)
  y = W * features['x'] + b
  # Loss sub-graph
  loss = tf.reduce_sum(tf.square(y - labels))
  # Training sub-graph
  global_step = tf.train.get_global_step()
  optimizer = tf.train.GradientDescentOptimizer(0.01)
  train = tf.group(optimizer.minimize(loss),
                   tf.assign_add(global_step, 1))
  # EstimatorSpec connects the subgraphs we built to the
  # appropriate functionality.
  return tf.estimator.EstimatorSpec(
      mode=mode,
      predictions=y,
      loss=loss,
      train_op=train)

estimator = tf.estimator.Estimator(model_fn=model_fn)
# define our data sets
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])
input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=None, shuffle=True)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False)

# train
estimator.train(input_fn=input_fn, steps=1000)
# Here we evaluate how well our model did.
train_metrics = estimator.evaluate(input_fn=train_input_fn)
eval_metrics = estimator.evaluate(input_fn=eval_input_fn)
print("train metrics: %r" % train_metrics)
print("eval metrics: %r" % eval_metrics)
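A closing note, added here rather than taken from the commit: this model_fn
returns the same EstimatorSpec regardless of mode, which is enough for the
demo. A fuller model_fn usually branches on the mode argument; a sketch of
the standard pattern, placed inside model_fn before the loss is built:

if mode == tf.estimator.ModeKeys.PREDICT:
  return tf.estimator.EstimatorSpec(mode=mode, predictions=y)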