SNABSuite 0.x
Spiking Neural Architecture Benchmark Suite
mnist_cnn_pool.py
'''Trains a simple convnet on the MNIST dataset.

The original Keras example reaches 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning), at roughly
16 seconds per epoch on a GRID K520 GPU.
'''
# Adapted from
# https://raw.githubusercontent.com/keras-team/keras/master/examples/

from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K

batch_size = 128
num_classes = 10
epochs = 100

# input image dimensions
img_rows, img_cols = 28, 28

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# add an explicit single-channel axis in the position expected by the backend
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# scale pixel values to the range [0, 1]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
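# e.g. the integer label 3 becomes the one-hot row [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]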

# kernel_init = 'glorot_uniform'
kernel_init = 'he_uniform'
model = Sequential()
model.add(Conv2D(16, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape, use_bias=False,
                 kernel_initializer=kernel_init))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu', use_bias=False,
                kernel_initializer=kernel_init))
# model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax', use_bias=False))
# model.add(Dense(num_classes, activation='relu'))
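# model.summary()  # optionally print layer shapes and parameter counts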

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
# model.compile(loss='categorical_hinge',
#               optimizer=keras.optimizers.Adadelta(),
#               metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])


# save the trained weights and the model architecture for later reuse
model.save_weights('cnn_pool_he_100.h5')
json_string = model.to_json()
with open('cnn_pool_he_100.json', 'w') as file:
    file.write(json_string)
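
# Example sketch: restore the saved architecture and weights to verify the
# round trip (uses keras.models.model_from_json; the reloaded model has to be
# compiled again before it can be evaluated).
from keras.models import model_from_json
with open('cnn_pool_he_100.json') as file:
    restored_model = model_from_json(file.read())
restored_model.load_weights('cnn_pool_he_100.h5')
restored_model.compile(loss=keras.losses.categorical_crossentropy,
                       optimizer=keras.optimizers.Adadelta(),
                       metrics=['accuracy'])
print('Restored test accuracy:', restored_model.evaluate(x_test, y_test, verbose=0)[1])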