Compare commits
2 Commits
2c91fa6eb5
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e792d29ba1 | ||
|
|
64b89c326b |
59
FourthSaturday/Notes.md
Normal file
59
FourthSaturday/Notes.md
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
Convolutional Neural Network
|
||||||
|
============================
|
||||||
|
Hubel and Wiesel (1962) experiment -> inspiration for CNN
|
||||||
|
single neuron detects edges oriented at 45 degrees
|
||||||
|
|
||||||
|
filter kernel -> (a patch in image - matrix) (typical to 3x3|5x5|7x7
|
||||||
|
smaller the better)
|
||||||
|
returns a feature map
|
||||||
|
CNN -> multiple layers of kernels(1st layer computes on the input image,
|
||||||
|
subsequent layers computes on the feature maps generated by the previous
|
||||||
|
layer)
|
||||||
|
stride -> number of pixels the kernel moves between successive computations on the same
|
||||||
|
layer
|
||||||
|
(max) pooling kernel -> looks at a patch of image and
|
||||||
|
returns the (maximum) value in that patch
|
||||||
|
(doesn't have any learnable parameters)
|
||||||
|
usually the number of feature maps is doubled after a
|
||||||
|
pooling layer is computed
|
||||||
|
maps (n,n)eg.[(28x28)x128] -> (mxm)eg.[(14,14)x128] -> (x256)
|
||||||
|
|
||||||
|
No of weight required per layer = (k1xk1)xc1xc2 (c1 is channels in input layer)
|
||||||
|
(k1,k1) is the dimension of filter kernel
|
||||||
|
(c2 is number of feature maps in first layer)
|
||||||
|
-> in 1st layer
|
||||||
|
(k2,k2)xc2xc3 (c3) number of feature maps
|
||||||
|
|
||||||
|
conv2d -> padding 'same' adds 0's at the borders to make the output
|
||||||
|
dimension same as image size
|
||||||
|
'valid' does the convolution on actual pixels only -> will return
|
||||||
|
a smaller dimension relative to the image
|
||||||
|
|
||||||
|
|
||||||
|
technique: use a smaller train/test data and try to overfit the model
|
||||||
|
(100% on train to verify that the model is expressive enough
|
||||||
|
to learn the data)
|
||||||
|
|
||||||
|
Deconvolutional Layers(misnomer):
|
||||||
|
upsampling an image using this layer
|
||||||
|
(tf.layers.conv2d_transpose,tf.nn.conv2d_transpose)
|
||||||
|
|
||||||
|
|
||||||
|
Transfer Learning:
|
||||||
|
==================
|
||||||
|
using pretrained networks as starting point for a task (using a subset of layers)
|
||||||
|
eg. VGG(Visual Geometry Group) networks (224x224 -> 1000 classes)
|
||||||
|
-> classification(what) & localization(where)
|
||||||
|
CNN works great for classification(since it is invariant to location)
|
||||||
|
to predict the location (use the earlier layers (contains locality info)
|
||||||
|
for final output)
|
||||||
|
using it to identify a class not in the 1000 pretrained classes
|
||||||
|
using it to identify a class with input size 64x64(depends on the first layer filter size)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
Regularization:
|
||||||
|
===============
|
||||||
|
Dropout based regularization is great for image classification application.
|
||||||
|
(Warning: not to be used on data without redundancy(image data has lot of redundancy
|
||||||
|
eg. identifying a partial face is quite easy))
|
||||||
96
FourthSaturday/mnist_conv.py
Normal file
96
FourthSaturday/mnist_conv.py
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
import tensorflow as tf
|
||||||
|
from tensorflow.examples.tutorials.mnist import input_data
|
||||||
|
mnist = input_data.read_data_sets('../mnist_data', one_hot=True)
|
||||||
|
|
||||||
|
x = tf.placeholder(tf.float32,shape=[None,28*28])
|
||||||
|
y_ = tf.placeholder(tf.float32,shape=[None,10])
|
||||||
|
|
||||||
|
# W = tf.Variable(tf.zeros([28*28,10]))
|
||||||
|
# b = tf.Variable(tf.zeros([10]))
|
||||||
|
#
|
||||||
|
# y = tf.matmul(x,W) + b
|
||||||
|
#
|
||||||
|
# cross_entropy = tf.reduce_mean(
|
||||||
|
# tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
|
||||||
|
#
|
||||||
|
# train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
|
||||||
|
#
|
||||||
|
# with tf.Session() as sess:
|
||||||
|
# sess.run(tf.global_variables_initializer())
|
||||||
|
# for _ in range(1000):
|
||||||
|
# batch = mnist.train.next_batch(100)
|
||||||
|
# sess.run([train_step],feed_dict={x: batch[0], y_: batch[1]})
|
||||||
|
# correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
|
||||||
|
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
|
||||||
|
# print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
|
||||||
|
|
||||||
|
|
||||||
|
def weight_variable(shape):
|
||||||
|
initial = tf.truncated_normal(shape, stddev=0.1)
|
||||||
|
return tf.Variable(initial)
|
||||||
|
|
||||||
|
def bias_variable(shape):
|
||||||
|
initial = tf.constant(0.1, shape=shape)
|
||||||
|
return tf.Variable(initial)
|
||||||
|
|
||||||
|
def conv2d(x, W):
|
||||||
|
return tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding='VALID')
|
||||||
|
|
||||||
|
# def max_pool_3x3(x):
|
||||||
|
# return tf.nn.max_pool(x, ksize=[1, 5, 5, 1],
|
||||||
|
# strides=[1, 2, 2, 1], padding='SAME')
|
||||||
|
|
||||||
|
|
||||||
|
x_image = tf.reshape(x, [-1, 28, 28, 1])
|
||||||
|
|
||||||
|
W_conv1 = weight_variable([4, 4, 1, 128])
|
||||||
|
b_conv1 = bias_variable([128])
|
||||||
|
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
|
||||||
|
W_conv1
|
||||||
|
|
||||||
|
h_conv1
|
||||||
|
# h_pool1 = max_pool_3x3(h_conv1)
|
||||||
|
# h_pool1
|
||||||
|
h_conv1
|
||||||
|
W_conv2 = weight_variable([5, 5, 128, 64])
|
||||||
|
b_conv2 = bias_variable([64])
|
||||||
|
h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
|
||||||
|
h_conv2
|
||||||
|
# h_pool2 = max_pool_3x3(h_conv2)
|
||||||
|
# h_pool2
|
||||||
|
|
||||||
|
W_fc1 = weight_variable([5 * 5 * 64, 512])
|
||||||
|
W_fc1
|
||||||
|
b_fc1 = bias_variable([512])
|
||||||
|
h_pool2_flat = tf.reshape(h_conv2, [-1, 5*5*64])
|
||||||
|
h_pool2_flat
|
||||||
|
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
|
||||||
|
h_fc1
|
||||||
|
|
||||||
|
keep_prob = tf.placeholder(tf.float32)
|
||||||
|
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
|
||||||
|
h_fc1_drop
|
||||||
|
W_fc2 = weight_variable([512, 10])
|
||||||
|
b_fc2 = bias_variable([10])
|
||||||
|
|
||||||
|
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
|
||||||
|
y_conv
|
||||||
|
|
||||||
|
cross_entropy = tf.reduce_mean(
|
||||||
|
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
|
||||||
|
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
|
||||||
|
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
|
||||||
|
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
|
||||||
|
|
||||||
|
with tf.Session() as sess:
|
||||||
|
sess.run(tf.global_variables_initializer())
|
||||||
|
for i in range(20000):
|
||||||
|
batch = mnist.train.next_batch(50)
|
||||||
|
if i % 100 == 0:
|
||||||
|
train_accuracy = accuracy.eval(feed_dict={
|
||||||
|
x: batch[0], y_: batch[1], keep_prob: 1.0})
|
||||||
|
print('step %d, training accuracy %g' % (i, train_accuracy))
|
||||||
|
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
|
||||||
|
|
||||||
|
print('test accuracy %g' % accuracy.eval(feed_dict={
|
||||||
|
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
|
||||||
176
FourthSaturday/mnist_deep.py
Normal file
176
FourthSaturday/mnist_deep.py
Normal file
@@ -0,0 +1,176 @@
|
|||||||
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
"""A deep MNIST classifier using convolutional layers.
|
||||||
|
|
||||||
|
See extensive documentation at
|
||||||
|
https://www.tensorflow.org/get_started/mnist/pros
|
||||||
|
"""
|
||||||
|
# Disable linter warnings to maintain consistency with tutorial.
|
||||||
|
# pylint: disable=invalid-name
|
||||||
|
# pylint: disable=g-bad-import-order
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
import tempfile
|
||||||
|
|
||||||
|
from tensorflow.examples.tutorials.mnist import input_data
|
||||||
|
|
||||||
|
import tensorflow as tf
|
||||||
|
|
||||||
|
FLAGS = None
|
||||||
|
|
||||||
|
|
||||||
|
def deepnn(x):
|
||||||
|
"""deepnn builds the graph for a deep net for classifying digits.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
x: an input tensor with the dimensions (N_examples, 784), where 784 is the
|
||||||
|
number of pixels in a standard MNIST image.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
|
||||||
|
equal to the logits of classifying the digit into one of 10 classes (the
|
||||||
|
digits 0-9). keep_prob is a scalar placeholder for the probability of
|
||||||
|
dropout.
|
||||||
|
"""
|
||||||
|
# Reshape to use within a convolutional neural net.
|
||||||
|
# Last dimension is for "features" - there is only one here, since images are
|
||||||
|
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
|
||||||
|
with tf.name_scope('reshape'):
|
||||||
|
x_image = tf.reshape(x, [-1, 28, 28, 1])
|
||||||
|
|
||||||
|
# First convolutional layer - maps one grayscale image to 32 feature maps.
|
||||||
|
with tf.name_scope('conv1'):
|
||||||
|
W_conv1 = weight_variable([5, 5, 1, 32])
|
||||||
|
b_conv1 = bias_variable([32])
|
||||||
|
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
|
||||||
|
|
||||||
|
# Pooling layer - downsamples by 2X.
|
||||||
|
with tf.name_scope('pool1'):
|
||||||
|
h_pool1 = max_pool_2x2(h_conv1)
|
||||||
|
|
||||||
|
# Second convolutional layer -- maps 32 feature maps to 64.
|
||||||
|
with tf.name_scope('conv2'):
|
||||||
|
W_conv2 = weight_variable([5, 5, 32, 64])
|
||||||
|
b_conv2 = bias_variable([64])
|
||||||
|
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
|
||||||
|
|
||||||
|
# Second pooling layer.
|
||||||
|
with tf.name_scope('pool2'):
|
||||||
|
h_pool2 = max_pool_2x2(h_conv2)
|
||||||
|
|
||||||
|
# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
|
||||||
|
# is down to 7x7x64 feature maps -- maps this to 1024 features.
|
||||||
|
with tf.name_scope('fc1'):
|
||||||
|
W_fc1 = weight_variable([7 * 7 * 64, 1024])
|
||||||
|
b_fc1 = bias_variable([1024])
|
||||||
|
|
||||||
|
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
|
||||||
|
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
|
||||||
|
|
||||||
|
# Dropout - controls the complexity of the model, prevents co-adaptation of
|
||||||
|
# features.
|
||||||
|
with tf.name_scope('dropout'):
|
||||||
|
keep_prob = tf.placeholder(tf.float32)
|
||||||
|
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
|
||||||
|
|
||||||
|
# Map the 1024 features to 10 classes, one for each digit
|
||||||
|
with tf.name_scope('fc2'):
|
||||||
|
W_fc2 = weight_variable([1024, 10])
|
||||||
|
b_fc2 = bias_variable([10])
|
||||||
|
|
||||||
|
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
|
||||||
|
return y_conv, keep_prob
|
||||||
|
|
||||||
|
|
||||||
|
def conv2d(x, W):
|
||||||
|
"""conv2d returns a 2d convolution layer with full stride."""
|
||||||
|
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
|
||||||
|
|
||||||
|
|
||||||
|
def max_pool_2x2(x):
|
||||||
|
"""max_pool_2x2 downsamples a feature map by 2X."""
|
||||||
|
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
|
||||||
|
strides=[1, 2, 2, 1], padding='SAME')
|
||||||
|
|
||||||
|
|
||||||
|
def weight_variable(shape):
|
||||||
|
"""weight_variable generates a weight variable of a given shape."""
|
||||||
|
initial = tf.truncated_normal(shape, stddev=0.1)
|
||||||
|
return tf.Variable(initial)
|
||||||
|
|
||||||
|
|
||||||
|
def bias_variable(shape):
|
||||||
|
"""bias_variable generates a bias variable of a given shape."""
|
||||||
|
initial = tf.constant(0.1, shape=shape)
|
||||||
|
return tf.Variable(initial)
|
||||||
|
|
||||||
|
|
||||||
|
def main(_):
|
||||||
|
# Import data
|
||||||
|
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
|
||||||
|
|
||||||
|
# Create the model
|
||||||
|
x = tf.placeholder(tf.float32, [None, 784])
|
||||||
|
|
||||||
|
# Define loss and optimizer
|
||||||
|
y_ = tf.placeholder(tf.float32, [None, 10])
|
||||||
|
|
||||||
|
# Build the graph for the deep net
|
||||||
|
y_conv, keep_prob = deepnn(x)
|
||||||
|
|
||||||
|
with tf.name_scope('loss'):
|
||||||
|
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
|
||||||
|
logits=y_conv)
|
||||||
|
cross_entropy = tf.reduce_mean(cross_entropy)
|
||||||
|
|
||||||
|
with tf.name_scope('adam_optimizer'):
|
||||||
|
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
|
||||||
|
|
||||||
|
with tf.name_scope('accuracy'):
|
||||||
|
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
|
||||||
|
correct_prediction = tf.cast(correct_prediction, tf.float32)
|
||||||
|
accuracy = tf.reduce_mean(correct_prediction)
|
||||||
|
|
||||||
|
graph_location = tempfile.mkdtemp()
|
||||||
|
print('Saving graph to: %s' % graph_location)
|
||||||
|
train_writer = tf.summary.FileWriter(graph_location)
|
||||||
|
train_writer.add_graph(tf.get_default_graph())
|
||||||
|
|
||||||
|
with tf.Session() as sess:
|
||||||
|
sess.run(tf.global_variables_initializer())
|
||||||
|
for i in range(20000):
|
||||||
|
batch = mnist.train.next_batch(50)
|
||||||
|
if i % 100 == 0:
|
||||||
|
train_accuracy = accuracy.eval(feed_dict={
|
||||||
|
x: batch[0], y_: batch[1], keep_prob: 1.0})
|
||||||
|
print('step %d, training accuracy %g' % (i, train_accuracy))
|
||||||
|
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
|
||||||
|
|
||||||
|
print('test accuracy %g' % accuracy.eval(feed_dict={
|
||||||
|
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument('--data_dir', type=str,
|
||||||
|
default='/tmp/tensorflow/mnist/input_data',
|
||||||
|
help='Directory for storing input data')
|
||||||
|
FLAGS, unparsed = parser.parse_known_args()
|
||||||
|
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
||||||
1
FourthSunday/sentiment.py
Normal file
1
FourthSunday/sentiment.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
|
||||||
18
Notes.md
Normal file
18
Notes.md
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
Deep Learning:
|
||||||
|
==============
|
||||||
|
Creating a model such that we don't have to hand engineer features, instead
|
||||||
|
architecting the model such that it is capable of inferring the features
|
||||||
|
on its own with large number of datasets and layers.
|
||||||
|
|
||||||
|
Input of softmax layer is called logits( classifier )
|
||||||
|
|
||||||
|
Optimization Momentum:
|
||||||
|
======================
|
||||||
|
using averaged gradients computed in previous iterations to identify how much
|
||||||
|
weight is given to the gradient descent.
|
||||||
|
|
||||||
|
Weight initialization:
|
||||||
|
======================
|
||||||
|
create a smaller network -> compute weights
|
||||||
|
use the weights and add new layer and -> compute weights
|
||||||
|
iterate and grow the network by using precomputed weights for deeper networks.
|
||||||
@@ -2,11 +2,9 @@
|
|||||||
"cells": [
|
"cells": [
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 65,
|
"execution_count": 2,
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"collapsed": false,
|
"collapsed": false
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
@@ -26,11 +24,9 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 67,
|
"execution_count": 3,
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"collapsed": false,
|
"collapsed": true
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
@@ -79,11 +75,9 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 68,
|
"execution_count": 4,
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"collapsed": false,
|
"collapsed": true,
|
||||||
"deletable": true,
|
|
||||||
"editable": true,
|
|
||||||
"scrolled": true
|
"scrolled": true
|
||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
@@ -126,21 +120,17 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 56,
|
"execution_count": 5,
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"collapsed": false,
|
"collapsed": false
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
},
|
||||||
"outputs": [
|
"outputs": [
|
||||||
{
|
{
|
||||||
"name": "stdout",
|
"name": "stdout",
|
||||||
"output_type": "stream",
|
"output_type": "stream",
|
||||||
"text": [
|
"text": [
|
||||||
"distance on validation set 0.0457288585603\n",
|
"distance on validation set 0.123010858893\n",
|
||||||
"distance on validation set 0.0384670570493\n",
|
"distance on validation set 0.0423361249268\n"
|
||||||
"distance on validation set 0.0463402196765\n",
|
|
||||||
"distance on validation set 0.0418722033501\n"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -148,16 +138,16 @@
|
|||||||
"evalue": "",
|
"evalue": "",
|
||||||
"output_type": "error",
|
"output_type": "error",
|
||||||
"traceback": [
|
"traceback": [
|
||||||
"\u001b[0;31m\u001b[0m",
|
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||||
"\u001b[0;31mKeyboardInterrupt\u001b[0mTraceback (most recent call last)",
|
"\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
|
||||||
"\u001b[0;32m<ipython-input-56-ee62be87709e>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;32mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"distance on validation set {}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0macc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;31m#,'saved to ',save_path)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 23\u001b[0;31m \u001b[0mtrain_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0my\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0my_\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mtrain_step\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0maccuracy\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mmerged_summary\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
|
"\u001b[1;32m<ipython-input-5-ee62be87709e>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m 21\u001b[0m \u001b[1;32mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"distance on validation set {}\"\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0macc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;31m#,'saved to ',save_path)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 22\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 23\u001b[1;33m \u001b[0mtrain_model\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mg\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0my\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0my_\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtrain_step\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0maccuracy\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mmerged_summary\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
|
||||||
"\u001b[0;32m<ipython-input-56-ee62be87709e>\u001b[0m in \u001b[0;36mtrain_model\u001b[0;34m(g, x, y, y_, train_step, accuracy, merged_summary)\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m20000\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0mbatch_xs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mbatch_ys\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbatch_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtr_head_imgs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mtr_head_crds\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m128\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 14\u001b[0;31m \u001b[0ms\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mtrain_step\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mbatch_xs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0my\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mbatch_ys\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 15\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m%\u001b[0m\u001b[0;36m100\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0mt_batch_xs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mt_batch_ys\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbatch_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtr_head_imgs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mtr_head_crds\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m32\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
"\u001b[1;32m<ipython-input-5-ee62be87709e>\u001b[0m in \u001b[0;36mtrain_model\u001b[1;34m(g, x, y, y_, train_step, accuracy, merged_summary)\u001b[0m\n\u001b[0;32m 12\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m20000\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 13\u001b[0m \u001b[0mbatch_xs\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mbatch_ys\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mbatch_data\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtr_head_imgs\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtr_head_crds\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m128\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 14\u001b[1;33m \u001b[0ms\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mtrain_step\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m{\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mbatch_xs\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0my\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mbatch_ys\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 15\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mi\u001b[0m\u001b[1;33m%\u001b[0m\u001b[1;36m100\u001b[0m \u001b[1;33m==\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 16\u001b[0m \u001b[0mt_batch_xs\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mt_batch_ys\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mbatch_data\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtr_head_imgs\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtr_head_crds\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m32\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||||
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 765\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 766\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 767\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 768\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 769\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
"\u001b[1;32m/media/Data/Test/py/lib/python2.7/site-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36mrun\u001b[1;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m 887\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 888\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[1;32m--> 889\u001b[1;33m run_metadata_ptr)\n\u001b[0m\u001b[0;32m 890\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 891\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||||
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 963\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 964\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m--> 965\u001b[0;31m feed_dict_string, options, run_metadata)\n\u001b[0m\u001b[1;32m 966\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 967\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
"\u001b[1;32m/media/Data/Test/py/lib/python2.7/site-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_run\u001b[1;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m 1118\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[1;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[1;32mor\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1119\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[1;32m-> 1120\u001b[1;33m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[0;32m 1121\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1122\u001b[0m \u001b[0mresults\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||||
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1013\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1014\u001b[0m return self._do_call(_run_fn, self._session, feed_dict, fetch_list,\n\u001b[0;32m-> 1015\u001b[0;31m target_list, options, run_metadata)\n\u001b[0m\u001b[1;32m 1016\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1017\u001b[0m return self._do_call(_prun_fn, self._session, handle, feed_dict,\n",
|
"\u001b[1;32m/media/Data/Test/py/lib/python2.7/site-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_do_run\u001b[1;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m 1315\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[1;32mis\u001b[0m \u001b[0mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1316\u001b[0m return self._do_call(_run_fn, self._session, feeds, fetches, targets,\n\u001b[1;32m-> 1317\u001b[1;33m options, run_metadata)\n\u001b[0m\u001b[0;32m 1318\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1319\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||||
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1020\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1021\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1022\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1023\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1024\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
"\u001b[1;32m/media/Data/Test/py/lib/python2.7/site-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_do_call\u001b[1;34m(self, fn, *args)\u001b[0m\n\u001b[0;32m 1321\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1322\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1323\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1324\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1325\u001b[0m \u001b[0mmessage\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||||
"\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(session, feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1002\u001b[0m return tf_session.TF_Run(session, options,\n\u001b[1;32m 1003\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1004\u001b[0;31m status, run_metadata)\n\u001b[0m\u001b[1;32m 1005\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1006\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msession\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
"\u001b[1;32m/media/Data/Test/py/lib/python2.7/site-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_run_fn\u001b[1;34m(session, feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[0;32m 1300\u001b[0m return tf_session.TF_Run(session, options,\n\u001b[0;32m 1301\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1302\u001b[1;33m status, run_metadata)\n\u001b[0m\u001b[0;32m 1303\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1304\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msession\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||||
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
|
"\u001b[1;31mKeyboardInterrupt\u001b[0m: "
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
@@ -191,9 +181,7 @@
|
|||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"collapsed": true,
|
"collapsed": true
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": []
|
"source": []
|
||||||
@@ -216,7 +204,7 @@
|
|||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython2",
|
"pygments_lexer": "ipython2",
|
||||||
"version": "2.7.6"
|
"version": "2.7.14"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
|
|||||||
@@ -16,9 +16,13 @@ m.patch()
|
|||||||
def load_face_files():
|
def load_face_files():
|
||||||
all_data = [msg.load(open('./face_images/face_images{}.bin'.format(i),'rb')) for i in range(1,6)]
|
all_data = [msg.load(open('./face_images/face_images{}.bin'.format(i),'rb')) for i in range(1,6)]
|
||||||
images = np.vstack([i[b'images'] for i in all_data])
|
images = np.vstack([i[b'images'] for i in all_data])
|
||||||
gray_images = np.dot(images,np.array([0.2125,0.7154,0.0721]))
|
gray_images = np.dot(images,np.array([0.2125,0.7154,0.0721]))/255.0
|
||||||
|
# print(gray_images.shape)
|
||||||
|
# scaled_gray_images = resize(,(32,32))/255.0
|
||||||
|
# import pdb;pdb.set_trace()
|
||||||
coords = np.vstack([i[b'co-ords'] for i in all_data])
|
coords = np.vstack([i[b'co-ords'] for i in all_data])
|
||||||
return gray_images,coords
|
coords_norm = coords/255.0
|
||||||
|
return gray_images,coords_norm
|
||||||
|
|
||||||
images,coords = load_face_files()
|
images,coords = load_face_files()
|
||||||
|
|
||||||
@@ -61,14 +65,17 @@ def create_model(input_dim,output_dim):
|
|||||||
error_lower_bound = tf.constant(0.9,name='lower_bound')
|
error_lower_bound = tf.constant(0.9,name='lower_bound')
|
||||||
x = tf.placeholder(tf.float32, [None,input_dim])
|
x = tf.placeholder(tf.float32, [None,input_dim])
|
||||||
y = tf.placeholder(tf.float32, [None,output_dim])
|
y = tf.placeholder(tf.float32, [None,output_dim])
|
||||||
|
|
||||||
W1 = tf.Variable(tf.random_normal([input_dim, 512],stddev=2.0/math.sqrt(input_dim)),name='layer_1_weights')
|
W1 = tf.Variable(tf.random_normal([input_dim, 512],stddev=2.0/math.sqrt(input_dim)),name='layer_1_weights')
|
||||||
b1 = tf.Variable(tf.random_normal([512]),name='bias_1_weights')
|
b1 = tf.Variable(tf.random_normal([512]),name='bias_1_weights')
|
||||||
|
layer_1 = tf.nn.relu(tf.add(tf.matmul(x,W1),b1))
|
||||||
|
|
||||||
W2 = tf.Variable(tf.random_normal([512, 128],stddev=2.0/math.sqrt(512)),name='layer_2_weights')
|
W2 = tf.Variable(tf.random_normal([512, 128],stddev=2.0/math.sqrt(512)),name='layer_2_weights')
|
||||||
b2 = tf.Variable(tf.random_normal([128]),name='bias_2_weights')
|
b2 = tf.Variable(tf.random_normal([128]),name='bias_2_weights')
|
||||||
|
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1,W2),b2))
|
||||||
|
|
||||||
W_o = tf.Variable(tf.random_normal([128, output_dim],stddev=2.0/math.sqrt(128)),name='layer_output_weights')
|
W_o = tf.Variable(tf.random_normal([128, output_dim],stddev=2.0/math.sqrt(128)),name='layer_output_weights')
|
||||||
b_o = tf.Variable(tf.random_normal([output_dim]),name='bias_output_weights')
|
b_o = tf.Variable(tf.random_normal([output_dim]),name='bias_output_weights')
|
||||||
layer_1 = tf.nn.relu(tf.add(tf.matmul(x,W1),b1))
|
|
||||||
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1,W2),b2))
|
|
||||||
#y_ = tf.nn.softmax(tf.add(tf.matmul(layer_2,W_o),b_o))+1e-6
|
#y_ = tf.nn.softmax(tf.add(tf.matmul(layer_2,W_o),b_o))+1e-6
|
||||||
y_ = tf.add(tf.matmul(layer_2,W_o),b_o)#tf.nn.relu()
|
y_ = tf.add(tf.matmul(layer_2,W_o),b_o)#tf.nn.relu()
|
||||||
|
|
||||||
@@ -115,7 +122,4 @@ def train_model(g,x,y,y_,train_step,accuracy,merged_summary):
|
|||||||
print(y_vals,t_batch_ys)
|
print(y_vals,t_batch_ys)
|
||||||
print("Accuracy on validation set {}".format(acc))#,'saved to ',save_path)
|
print("Accuracy on validation set {}".format(acc))#,'saved to ',save_path)
|
||||||
|
|
||||||
|
|
||||||
bb = np.array([[100,200,300,400], [100,200,300,400], [100,200,300,400], [100,200,300,400]])
|
|
||||||
bb[:,1]
|
|
||||||
train_model(g,x,y,y_,train_step,accuracy,merged_summary)
|
train_model(g,x,y,y_,train_step,accuracy,merged_summary)
|
||||||
|
|||||||
10
ThirdSunday/SiameseData.py
Normal file
10
ThirdSunday/SiameseData.py
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
import itertools
|
||||||
|
# def siamese_data(group1,group2):
|
||||||
|
def get_true_false(group1,group2):
|
||||||
|
f = [(g1,g2) for g2 in group2 for g1 in group1]
|
||||||
|
t = [i for i in itertools.combinations(group1,2)]+[i for i in itertools.combinations(group2,2)]
|
||||||
|
return (t,f)
|
||||||
|
|
||||||
|
group1 = ['a','b','c','d']
|
||||||
|
group2 = ['A','B','C','D']
|
||||||
|
get_true_false(group1,group2)
|
||||||
Reference in New Issue
Block a user