Monday, March 23, 2020

Beginning Deep Learning - Working MNIST and Convolutional Neural Networks

This code is all part of my deep learning journey and as always, is being placed here so I can always revisit it as I continue to expand on my learning of this topic.

#!/usr/bin/env python3


'''
 Continuing my deep learning journey
 File: dlMNIST-convnet.py
 Author: Nik Alleyne
 Author Blog: www.securitynik.com
 Date: 2020-02-26
'''

from time import sleep
from keras.datasets import mnist
from keras import models
from keras.layers import (Dense, Activation, Conv2D, MaxPooling2D, Flatten)
from keras.utils import to_categorical
from matplotlib import pyplot as plt

def main():
    """Train and evaluate a small convolutional network on the MNIST digits.

    Loads MNIST via Keras, reshapes/scales the images, one-hot encodes the
    labels, builds a 3-conv-layer network, trains for 10 epochs, and reports
    test loss/accuracy. No arguments; no return value (prints to stdout).
    """
    # MNIST ships pre-split: 60k training and 10k test 28x28 grayscale images.
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    print('[*] Your tensor has "{}" dimensions'.format(X_train.ndim))
    print('[*] The shape of X_train is:{}'.format(X_train.shape))
    print('[*] The shape of y_train is:{}'.format(y_train.shape))
    print('[*] The shape of X_test is:{}'.format(X_test.shape))
    print('[*] The shape of y_test is:{}'.format(y_test.shape))

    # Reshape to (samples, height, width, channels). Conv2D expects an
    # explicit channel axis; -1 derives the sample count instead of
    # hard-coding 60000/10000, so any split size works.
    X_train = X_train.reshape((-1, 28, 28, 1))
    X_test = X_test.reshape((-1, 28, 28, 1))

    # Scale pixel intensities from integer [0, 255] to float [0, 1].
    X_train = X_train.astype('float32') / 255
    X_test = X_test.astype('float32') / 255

    # One-hot encode the integer labels (digits 0-9 -> 10-wide vectors),
    # matching the categorical_crossentropy loss below.
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    '''
    Build the convolutional network.
    The first layer takes a feature map of shape (28, 28, 1),
    starts with depth 32 and ends with depth 64 (output depth).
    The patches (convolution windows) are 3x3 - (window height, window width).
    The (2, 2) is the max-pooling window, which halves each spatial dimension.
    '''
    conv_nn = models.Sequential()
    conv_nn.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
    conv_nn.add(MaxPooling2D((2, 2)))

    conv_nn.add(Conv2D(64, (3, 3), activation='relu'))
    conv_nn.add(MaxPooling2D((2, 2)))

    conv_nn.add(Conv2D(64, (3, 3), activation='relu'))

    # Flatten the 3D feature maps into a vector for the dense classifier.
    conv_nn.add(Flatten())
    conv_nn.add(Dense(64, activation='relu'))

    # Output layer: one softmax probability per digit class.
    conv_nn.add(Dense(10, activation='softmax'))

    # Compile the model.
    conv_nn.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    # summary() prints as a side effect and returns None, so call it
    # directly rather than passing its return value to print/format.
    print('[*] Here is your model summary')
    conv_nn.summary()

    # Fit on the training set
    conv_nn.fit(X_train, y_train, epochs=10, batch_size=64)

    # Evaluate the model on the test data set
    testing_loss, testing_accuracy = conv_nn.evaluate(X_test, y_test)

    # Report the metrics with the correct labels (the original printed the
    # loss as "accuracy" and the accuracy as "loss").
    print('\n[*] Your testing accuracy is: {}'.format(testing_accuracy))
    print('[*] Your testing loss is: {}'.format(testing_loss))

# Script entry point: run the training/evaluation only when executed
# directly, not when this module is imported.
if __name__ == '__main__':
    main()


'''
References:
https://www.manning.com/books/deep-learning-with-python
https://keras.io/getting-started/sequential-model-guide/
https://keras.io/optimizers/

'''



No comments:

Post a Comment