
Convolutional Neural Network on CIFAR-10


In [1]:
import tensorflow as tf

from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import numpy as np 
In [2]:
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()

# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
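A quick sanity check, added here and not part of the original notebook run, confirms the CIFAR-10 array shapes and the 0–1 pixel range after scaling:

# Sanity check: dataset shapes and normalized value range
print(train_images.shape, test_images.shape)   # (50000, 32, 32, 3) (10000, 32, 32, 3)
print(train_images.min(), train_images.max())  # 0.0 1.0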
In [3]:
# Hold out 10% of the training set as a validation set
val_prc = 0.1
random_inds = np.arange(train_images.shape[0])
np.random.shuffle(random_inds)
cut_ind = int(train_images.shape[0] * val_prc)

val_images = train_images[random_inds[:cut_ind]]
val_labels = train_labels[random_inds[:cut_ind]]
train_images = train_images[random_inds[cut_ind:]]
train_labels = train_labels[random_inds[cut_ind:]]
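Side note: the shuffle above is unseeded, so the train/validation split changes from run to run. A minimal sketch of a reproducible variant, assuming NumPy >= 1.17 for default_rng (the seed value 42 is arbitrary):

# Hypothetical reproducible split: seed the permutation before slicing
rng = np.random.default_rng(42)
random_inds = rng.permutation(train_images.shape[0])
# ...then take the first cut_ind indices for validation and the rest for training, as above.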
In [4]:
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    # The CIFAR labels happen to be arrays, 
    # which is why you need the extra index
    plt.xlabel(class_names[train_labels[i][0]])
plt.show()
In [5]:
model = models.Sequential()
# Convolutional base: three conv/pool stages over the 32x32x3 input images
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
# Classifier head: flatten the feature maps and map them to 10 class probabilities
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
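Because the last Dense layer applies softmax, pairing it with the string loss 'sparse_categorical_crossentropy' (used when compiling below) is correct. An equivalent and often slightly more numerically stable setup, shown only as a sketch and not used in this notebook (it assumes a reasonably recent tf.keras for the class-based loss), outputs raw logits and lets the loss apply the softmax internally:

# Sketch of an equivalent logits-based setup (not the model trained below)
logits_model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10)  # no softmax: raw logits
])
logits_model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])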
In [6]:
model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 30, 30, 32)        896       
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 15, 15, 32)        0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 13, 13, 64)        18496     
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 6, 6, 64)          0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 4, 4, 64)          36928     
_________________________________________________________________
flatten (Flatten)            (None, 1024)              0         
_________________________________________________________________
dense (Dense)                (None, 64)                65600     
_________________________________________________________________
dense_1 (Dense)              (None, 10)                650       
=================================================================
Total params: 122,570
Trainable params: 122,570
Non-trainable params: 0
_________________________________________________________________
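The parameter counts in the summary follow from (kernel_height * kernel_width * input_channels + 1) * filters for each Conv2D layer and (input_features + 1) * units for each Dense layer. A small check, added here for illustration:

# Recomputing the summary's parameter counts by hand
conv1 = (3 * 3 * 3 + 1) * 32     # 896
conv2 = (3 * 3 * 32 + 1) * 64    # 18496
conv3 = (3 * 3 * 64 + 1) * 64    # 36928
dense1 = (4 * 4 * 64 + 1) * 64   # 65600  (Flatten yields 4*4*64 = 1024 features)
dense2 = (64 + 1) * 10           # 650
print(conv1 + conv2 + conv3 + dense1 + dense2)  # 122570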
In [7]:
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(train_images, train_labels, epochs=10, 
                    validation_data=(val_images, val_labels))
Train on 45000 samples, validate on 5000 samples
Epoch 1/10
45000/45000 [==============================] - 9s 207us/sample - loss: 1.5365 - acc: 0.4377 - val_loss: 1.2597 - val_acc: 0.5468
Epoch 2/10
45000/45000 [==============================] - 7s 154us/sample - loss: 1.1537 - acc: 0.5914 - val_loss: 1.0451 - val_acc: 0.6314
Epoch 3/10
45000/45000 [==============================] - 7s 155us/sample - loss: 0.9964 - acc: 0.6501 - val_loss: 1.0021 - val_acc: 0.6524
Epoch 4/10
45000/45000 [==============================] - 7s 166us/sample - loss: 0.8991 - acc: 0.6836 - val_loss: 0.9369 - val_acc: 0.6694
Epoch 5/10
45000/45000 [==============================] - 7s 161us/sample - loss: 0.8259 - acc: 0.7114 - val_loss: 0.9123 - val_acc: 0.6776
Epoch 6/10
45000/45000 [==============================] - 7s 156us/sample - loss: 0.7669 - acc: 0.7318 - val_loss: 0.8803 - val_acc: 0.6954
Epoch 7/10
45000/45000 [==============================] - 7s 164us/sample - loss: 0.7095 - acc: 0.7530 - val_loss: 0.8826 - val_acc: 0.6992
Epoch 8/10
45000/45000 [==============================] - 8s 169us/sample - loss: 0.6638 - acc: 0.7658 - val_loss: 0.8947 - val_acc: 0.6966
Epoch 9/10
45000/45000 [==============================] - 8s 167us/sample - loss: 0.6210 - acc: 0.7818 - val_loss: 0.8986 - val_acc: 0.7054
Epoch 10/10
45000/45000 [==============================] - 7s 165us/sample - loss: 0.5803 - acc: 0.7950 - val_loss: 0.9445 - val_acc: 0.6956
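The validation loss bottoms out around epoch 6 (0.8803) and drifts upward afterwards while training accuracy keeps climbing, which points to mild overfitting. One common mitigation, sketched here but not run in this notebook (restore_best_weights assumes a reasonably recent tf.keras), is early stopping on the validation loss:

# Hypothetical variant: stop once val_loss stops improving and keep the best weights
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3,
                                              restore_best_weights=True)
# history = model.fit(train_images, train_labels, epochs=20,
#                     validation_data=(val_images, val_labels),
#                     callbacks=[early_stop])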
In [8]:
# Note: this TF version logs the metric as 'acc'/'val_acc';
# newer tf.keras versions use 'accuracy'/'val_accuracy' instead.
plt.plot(history.history['acc'], label='accuracy')
plt.plot(history.history['val_acc'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
Out[8]:
<matplotlib.legend.Legend at 0x1e249a0a160>
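A companion sketch, not part of the original run, plots the loss curves from the same history object (key names match this TF version's training log):

# Loss curves for the same training run
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.show()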
In [9]:
# Final evaluation on the held-out test set
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(test_acc)
 - 1s - loss: 0.9564 - acc: 0.6939
0.6939
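The test accuracy (about 69%) lands close to the final validation accuracy, as expected. A usage sketch, added here and not in the original notebook, turns the softmax outputs into readable predictions:

# Predict classes for the first few test images and compare with ground truth
probs = model.predict(test_images[:5])   # shape (5, 10): softmax scores per class
pred_classes = np.argmax(probs, axis=1)
for pred, true in zip(pred_classes, test_labels[:5]):
    # CIFAR labels are stored as length-1 arrays, hence true[0]
    print(f'predicted: {class_names[pred]:>10}  actual: {class_names[true[0]]}')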