We talked about some examples of CNN application with Keras for image recognition, and a quick example of a CNN with Keras on the Iris data.
TensorFlow itself is mature enough for deep learning work in Python, but Keras is faster and simpler to build and train models with than raw TensorFlow alone.
You can find more details in Valentino Zocca, Gianmario Spacagna, and Daniel Slater's book Python Deep Learning.
############################
###DNN in TensorFlow Only###
############################
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

###1. Load the data set, and split it if necessary
mnist = input_data.read_data_sets("MNIST_data/")

###2. We create a placeholder, a container to place the computation activities
###in TensorFlow, identifying the dtype and the tensor's rows/columns;
###None means any batch size
VISIBLE_NODES = 784
HIDDEN_NODES = 400
x = tf.placeholder("float", shape=[None, VISIBLE_NODES])

###3. We define the weights (small random values) and the biases (starting at 0)
weights = tf.Variable(tf.random_normal((VISIBLE_NODES, HIDDEN_NODES),
                                       mean=0.0, stddev=1. / VISIBLE_NODES))
hidden_bias = tf.Variable(tf.zeros([HIDDEN_NODES]))
visible_bias = tf.Variable(tf.zeros([VISIBLE_NODES]))

###4. Set up the sigmoid model and multiply x and W with the matmul function,
###building the hidden layer and the reconstruction layer
hidden_activation = tf.nn.sigmoid(tf.matmul(x, weights) + hidden_bias)
visible_reconstruction = tf.nn.sigmoid(
    tf.matmul(hidden_activation, tf.transpose(weights)) + visible_bias)
final_hidden_activation = tf.nn.sigmoid(
    tf.matmul(visible_reconstruction, weights) + hidden_bias)

###5. This process can be understood as two phases of learning,
###positive and negative or, more poetically, waking and sleeping
positive_phase = tf.matmul(tf.transpose(x), hidden_activation)
negative_phase = tf.matmul(tf.transpose(visible_reconstruction),
                           final_hidden_activation)

LEARNING_RATE = 0.01
weight_update = weights.assign_add(
    LEARNING_RATE * (positive_phase - negative_phase))
visible_bias_update = visible_bias.assign_add(
    LEARNING_RATE * tf.reduce_mean(x - visible_reconstruction, 0))
hidden_bias_update = hidden_bias.assign_add(
    LEARNING_RATE * tf.reduce_mean(hidden_activation - final_hidden_activation, 0))

###6. Now we group the weight and bias updates into one training op and
###define the reconstruction error as the loss
train_op = tf.group(weight_update, visible_bias_update, hidden_bias_update)
loss_op = tf.reduce_sum(tf.square(x - visible_reconstruction))

###7. We start the session
session = tf.Session()
session.run(tf.global_variables_initializer())
current_epochs = 0

###8. Run the session
for i in range(20):
    total_loss = 0
    while mnist.train.epochs_completed == current_epochs:
        batch_inputs, batch_labels = mnist.train.next_batch(100)
        _, reconstruction_loss = session.run([train_op, loss_op],
                                             feed_dict={x: batch_inputs})
        total_loss += reconstruction_loss
    print("epochs %s loss %s" % (current_epochs, total_loss))
    current_epochs = mnist.train.epochs_completed
We only demonstrated the training process above; the network never sees the held-out test images. A quick reconstruction check is sketched next, and the following examples then show the same workflow in Keras.
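As a minimal sketch (not part of the original example), assuming the session, x, and loss_op defined above are still alive, one way to score the reconstruction on the held-out MNIST images is:

###Hypothetical sanity check: total squared reconstruction error on the test set
test_loss = session.run(loss_op, feed_dict={x: mnist.test.images})
print("test reconstruction loss %s" % test_loss)

Since loss_op sums the squared error over all rows fed in, the number is only comparable across runs with the same number of images.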
##############################
###DNN in TensorFlow Keras ###
##############################
###1. Load data and split data
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.utils import np_utils

(X_train, Y_train), (X_test, Y_test) = mnist.load_data()

###2. Preprocess: flatten the 28x28 images to 784-vectors, scale to [0, 1],
###and one-hot encode the labels
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
classes = 10
Y_train = np_utils.to_categorical(Y_train, classes)
Y_test = np_utils.to_categorical(Y_test, classes)

###3. Set up parameters
input_size = 784
batch_size = 100
hidden_neurons = 400
epochs = 30

###4. Build the model
model = Sequential()
model.add(Dense(hidden_neurons, input_dim=input_size))
model.add(Activation('relu'))
model.add(Dense(classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
              optimizer='adadelta')
model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs, verbose=1)

###5. Test
score = model.evaluate(X_test, Y_test, verbose=1)
print('\nTest accuracy:', score[1])
#Test accuracy: 0.983

##############################
###CNN in TensorFlow Keras ###
##############################
import numpy as np
np.random.seed(0)  # for reproducibility

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Dropout, Flatten
from keras.utils import np_utils

input_size = 784
batch_size = 100
hidden_neurons = 200
classes = 10
epochs = 8

(X_train, Y_train), (X_test, Y_test) = mnist.load_data()

###Keep the 2D structure this time: one 28x28x1 channel per image
X_train = X_train.reshape(60000, 28, 28, 1)
X_test = X_test.reshape(10000, 28, 28, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
Y_train = np_utils.to_categorical(Y_train, classes)
Y_test = np_utils.to_categorical(Y_test, classes)

###Two 3x3 convolution layers, max pooling, dropout, then a dense classifier
model = Sequential()
model.add(Convolution2D(32, (3, 3), input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(Convolution2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(hidden_neurons))
model.add(Activation('relu'))
model.add(Dense(classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
              optimizer='adadelta')
model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs,
          validation_split=0.1, verbose=1)

score = model.evaluate(X_test, Y_test, verbose=1)
print('Test accuracy:', score[1])
#Test accuracy: 0.9906
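As a usage sketch (assuming the trained CNN model and the preprocessed X_test/Y_test from above; the index 0 is arbitrary), classifying a single test digit looks like this:

###Hypothetical usage: predict the class of the first test image
probs = model.predict(X_test[:1])  # shape (1, 10): softmax probabilities
print('predicted digit:', np.argmax(probs[0]))
print('true digit:', np.argmax(Y_test[0]))

model.predict returns the softmax probabilities for each class, so np.argmax recovers the most likely digit.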