From 40c8675e4aac2bc69cf20a5228fbb6b4c79002c2 Mon Sep 17 00:00:00 2001
From: Malar Kannan
Date: Fri, 13 Oct 2017 17:59:53 +0530
Subject: [PATCH] included keras - mnist siamese example

---
 mnist_siamese.py                            | 140 ++++++++++++++++++++
 siamese_network.py => siamese_network_tf.py |   0
 2 files changed, 140 insertions(+)
 create mode 100644 mnist_siamese.py
 rename siamese_network.py => siamese_network_tf.py (100%)

diff --git a/mnist_siamese.py b/mnist_siamese.py
new file mode 100644
index 0000000..a5258ca
--- /dev/null
+++ b/mnist_siamese.py
@@ -0,0 +1,140 @@
+'''Train a Siamese MLP on pairs of digits from the MNIST dataset.
+
+It follows Hadsell-et-al.'06 [1] by computing the Euclidean distance on the
+output of the shared network and by optimizing the contrastive loss (see paper
+for more details).
+
+[1] "Dimensionality Reduction by Learning an Invariant Mapping"
+    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
+
+Gets to 97.2% test accuracy after 20 epochs.
+2 seconds per epoch on a Titan X Maxwell GPU
+'''
+from __future__ import absolute_import
+from __future__ import print_function
+import numpy as np
+
+import random
+from keras.datasets import mnist
+from keras.models import Model
+from keras.layers import Dense, Dropout, Input, Lambda
+from keras.optimizers import RMSprop
+from keras import backend as K
+
+num_classes = 10
+
+
+def euclidean_distance(vects):
+    x, y = vects
+    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
+
+
+def eucl_dist_output_shape(shapes):
+    shape1, shape2 = shapes
+    return (shape1[0], 1)
+
+
+def contrastive_loss(y_true, y_pred):
+    '''Contrastive loss from Hadsell-et-al.'06
+    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
+    '''
+    margin = 1
+    return K.mean(y_true * K.square(y_pred) +
+                  (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
+
+
+def create_pairs(x, digit_indices):
+    '''Positive and negative pair creation.
+    Alternates between positive and negative pairs.
+    '''
+    pairs = []
+    labels = []
+    n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1
+    for d in range(num_classes):
+        for i in range(n):
+            z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
+            pairs += [[x[z1], x[z2]]]
+            inc = random.randrange(1, num_classes)
+            dn = (d + inc) % num_classes
+            z1, z2 = digit_indices[d][i], digit_indices[dn][i]
+            pairs += [[x[z1], x[z2]]]
+            labels += [1, 0]
+    return np.array(pairs), np.array(labels)
+
+
+def create_base_network(input_dim):
+    '''Base network to be shared (eq. to feature extraction).
+    '''
+    input = Input(shape=(input_dim,))
+    x = Dense(128, activation='relu')(input)
+    x = Dropout(0.1)(x)
+    x = Dense(128, activation='relu')(x)
+    x = Dropout(0.1)(x)
+    x = Dense(128, activation='relu')(x)
+    return Model(input, x)
+
+
+def compute_accuracy(y_true, y_pred):
+    '''Compute classification accuracy with a fixed threshold on distances.
+    '''
+    pred = y_pred.ravel() < 0.5
+    return np.mean(pred == y_true)
+
+
+def accuracy(y_true, y_pred):
+    '''Compute classification accuracy with a fixed threshold on distances.
+    '''
+    return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))
+
+
+# the data, shuffled and split between train and test sets
+(x_train, y_train), (x_test, y_test) = mnist.load_data()
+x_train = x_train.reshape(60000, 784)
+x_test = x_test.reshape(10000, 784)
+x_train = x_train.astype('float32')
+x_test = x_test.astype('float32')
+x_train /= 255
+x_test /= 255
+input_dim = 784
+epochs = 20
+
+# create training+test positive and negative pairs
+digit_indices = [np.where(y_train == i)[0] for i in range(num_classes)]
+tr_pairs, tr_y = create_pairs(x_train, digit_indices)
+
+digit_indices = [np.where(y_test == i)[0] for i in range(num_classes)]
+te_pairs, te_y = create_pairs(x_test, digit_indices)
+
+# network definition
+base_network = create_base_network(input_dim)
+
+input_a = Input(shape=(input_dim,))
+input_b = Input(shape=(input_dim,))
+
+# because we re-use the same instance `base_network`,
+# the weights of the network
+# will be shared across the two branches
+processed_a = base_network(input_a)
+processed_b = base_network(input_b)
+
+distance = Lambda(euclidean_distance,
+                  output_shape=eucl_dist_output_shape)([processed_a, processed_b])
+
+model = Model([input_a, input_b], distance)
+
+# train
+rms = RMSprop()
+model.compile(loss=contrastive_loss, optimizer=rms, metrics=[accuracy])
+model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
+          batch_size=128,
+          epochs=epochs,
+          validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y))
+
+# compute final accuracy on training and test sets
+y_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
+tr_acc = compute_accuracy(tr_y, y_pred)
+y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
+te_acc = compute_accuracy(te_y, y_pred)

+print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
+print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
diff --git a/siamese_network.py b/siamese_network_tf.py
similarity index 100%
rename from siamese_network.py
rename to siamese_network_tf.py
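As a quick numeric sanity check on the contrastive loss the patch introduces, here is a
minimal plain-NumPy sketch of the Hadsell-et-al.'06 formula with margin 1. It is not part
of the commit, and the helper name contrastive_loss_np is ours:

    import numpy as np

    def contrastive_loss_np(y_true, d, margin=1.0):
        # y_true is 1 for similar pairs, 0 for dissimilar; d is the predicted distance
        return np.mean(y_true * d ** 2 +
                       (1 - y_true) * np.maximum(margin - d, 0) ** 2)

    # a similar pair at distance 0.1 costs almost nothing,
    # a dissimilar pair at the same distance costs (1 - 0.1)**2
    print(contrastive_loss_np(np.array([1.0]), np.array([0.1])))  # 0.01
    print(contrastive_loss_np(np.array([0.0]), np.array([0.1])))  # 0.81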