In this project I implemented deep learning models for a typical satellite-imaging problem using a benchmark dataset. The homework was designed around a sequence of increasingly complex models.
Visit the EuroSAT data description page and download the data: https://github.com/phelber/eurosat
Split the data into training (50%) and testing sets (50%), stratified on class labels (equal percentage of each class type in train and test sets).
Convert each RGB image to grayscale and flatten the images into a data matrix (n x p: n = #samples, p = #pixels in each image)
import os
import glob
import numpy as np
import pandas as pd
import tifffile
import PIL
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.pyplot import imread, imshow, subplots, show
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score
import keras
from keras.models import Sequential, Model
from keras.layers import (Input, Dense, Activation, Dropout, Flatten,
                          Conv2D, MaxPooling2D, GlobalAveragePooling2D)
from keras.utils.vis_utils import plot_model
from keras import utils as np_utils
from tensorflow.keras.optimizers import RMSprop, Adam, SGD, Adadelta
files = []
filedir = r"EuroSAT/2750"
for file in glob.glob(filedir + os.sep + "*" + os.sep + "*.jpg"):
    files.append(file)
print(files[0])
plt.imshow(plt.imread(files[0]))
EuroSAT/2750\AnnualCrop\AnnualCrop_1.jpg
image = PIL.Image.open(files[0])
image_size = image.size
print(image_size)
(64, 64)
# test: convert a single RGB image to grayscale, then flatten it
def rgb2gray(rgb):
    # ITU-R 601 luma weights (note the blue coefficient is 0.114)
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
img = mpimg.imread(files[0])
gray = rgb2gray(img)
plt.imshow(gray, cmap = plt.get_cmap('gray'))
plt.show()
gray_img = gray.flatten()  # 64*64 = 4096 values per image
gray_img
array([132.858, 132.26 , 131.23 , ..., 102.291, 102.291, 102.878])
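As a sanity check (a sketch, not part of the assignment): PIL's convert('L') uses the same ITU-R 601 luma weights, so it should agree with rgb2gray up to uint8 rounding:
# compare against PIL's built-in grayscale conversion; PIL rounds to uint8,
# so a small tolerance is expected
pil_gray = np.asarray(Image.open(files[0]).convert('L'), dtype=float)
print(np.abs(pil_gray - gray).max())  # expected to be about 1 or less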
example = r'EuroSAT/2750\AnnualCrop\AnnualCrop_1.jpg'
temp = example.split('\\')  # Windows-style path: the class name is the second component
#label
temp[1]
'AnnualCrop'
# apply to the whole dataset
eurosat = []
eurosat_label = []
for img in files:
    img_arr = imread(img)
    gray_img = rgb2gray(img_arr).flatten()
    eurosat.append(gray_img)
    label = img.split('\\')[1]  # class name is the directory name
    eurosat_label.append(label)
# stack the list of arrays into an (n, p) data matrix
eurosat1 = np.vstack(eurosat)
eurosat1.shape
(27000, 4096)
eurosat_label1 = np.array(eurosat_label)
eurosat_label1
array(['AnnualCrop', 'AnnualCrop', 'AnnualCrop', ..., 'SeaLake', 'SeaLake', 'SeaLake'], dtype='<U20')
# replace class names with integer labels 0-9 (alphabetical order)
class_names = ['AnnualCrop', 'Forest', 'HerbaceousVegetation', 'Highway',
               'Industrial', 'Pasture', 'PermanentCrop', 'Residential',
               'River', 'SeaLake']
name_to_label = {name: i for i, name in enumerate(class_names)}
eurosat_label2 = np.array([name_to_label[name] for name in eurosat_label1])
eurosat_label2
array([0, 0, 0, ..., 9, 9, 9])
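The same integer coding can be obtained with scikit-learn's LabelEncoder, which assigns codes to the alphabetically sorted class names and therefore matches the mapping above; a minimal equivalent sketch:
# equivalent encoding via sklearn; LabelEncoder sorts the class names,
# so its codes match the manual alphabetical mapping
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
assert (le.fit_transform(eurosat_label1) == eurosat_label2).all()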
#Split the data into training (50%) and testing sets (50%)
#stratified on class labels (equal percentage of each class type in train and test sets)
X_train, X_test, y_train, y_test = train_test_split(eurosat1, eurosat_label2, stratify=eurosat_label2, train_size=0.5, random_state=10)
X_train.shape
(13500, 4096)
y_train.shape
(13500,)
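A quick check that the split is stratified: per-class counts should match between the train and test sets (EuroSAT classes contain between 2,000 and 3,000 images each, so the counts differ across classes but not across the two sets):
# per-class counts in each split; corresponding entries should be equal
print(np.bincount(y_train))
print(np.bincount(y_test))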
Q2.1: Calculate classification accuracy on the test data.
Answer: 17.41% (see the training log below).
batch_size = 1100
num_classes = 10
epochs = 10
def train_model(model, train, test, num_classes):
    # note: relies on the globals input_shape, batch_size, and epochs
    x_train = train[0].reshape((train[0].shape[0],) + input_shape)
    x_test = test[0].reshape((test[0].shape[0],) + input_shape)
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    # convert integer class vectors to one-hot (binary) class matrices
    y_train = keras.utils.np_utils.to_categorical(train[1], num_classes)
    y_test = keras.utils.np_utils.to_categorical(test[1], num_classes)
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
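For reference, to_categorical one-hot encodes the integer labels into binary class matrices; a minimal numpy sketch of what it produces:
# what to_categorical produces, sketched in plain numpy (illustrative only)
def one_hot(y, num_classes):
    out = np.zeros((len(y), num_classes), dtype='float32')
    out[np.arange(len(y)), y] = 1.0
    return out
print(one_hot(np.array([3]), 10))  # [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]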
# Implement a first deep learning model (M.1):
# a fully connected network with a single layer, i.e., input layer plus one
# fully connected softmax layer as the output.
input_shape=(4096,)
model = Sequential()
model.add(Dense(num_classes, activation='softmax'))
train_model(model,
            (X_train, y_train),
            (X_test, y_test), num_classes)
x_train shape: (13500, 4096)
13500 train samples
13500 test samples
Epoch  1/10 - loss: 2.4815 - accuracy: 0.1137 - val_loss: 2.3360 - val_accuracy: 0.1234
Epoch  2/10 - loss: 2.2906 - accuracy: 0.1410 - val_loss: 2.2651 - val_accuracy: 0.1309
Epoch  3/10 - loss: 2.2417 - accuracy: 0.1450 - val_loss: 2.2247 - val_accuracy: 0.1438
Epoch  4/10 - loss: 2.2158 - accuracy: 0.1533 - val_loss: 2.2185 - val_accuracy: 0.1453
Epoch  5/10 - loss: 2.2032 - accuracy: 0.1577 - val_loss: 2.2163 - val_accuracy: 0.1582
Epoch  6/10 - loss: 2.1980 - accuracy: 0.1612 - val_loss: 2.2235 - val_accuracy: 0.1405
Epoch  7/10 - loss: 2.2026 - accuracy: 0.1633 - val_loss: 2.2093 - val_accuracy: 0.1524
Epoch  8/10 - loss: 2.1882 - accuracy: 0.1684 - val_loss: 2.2070 - val_accuracy: 0.1566
Epoch  9/10 - loss: 2.1814 - accuracy: 0.1688 - val_loss: 2.2104 - val_accuracy: 0.1613
Epoch 10/10 - loss: 2.1849 - accuracy: 0.1667 - val_loss: 2.1994 - val_accuracy: 0.1741
Test score: 2.1994082927703857
Test accuracy: 0.17414814233779907
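As a cross-check (a sketch, not part of the original run), the same test accuracy can be recomputed with scikit-learn from predicted class labels; the inputs must be rescaled by /255 to match what train_model does internally:
# recompute test accuracy from predicted class labels (sketch)
y_pred_m1 = np.argmax(model.predict(X_test.astype('float32') / 255), axis=1)
print(accuracy_score(y_test, y_pred_m1))  # expected to be about 0.1741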
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) (None, 10) 40970 ================================================================= Total params: 40,970 Trainable params: 40,970 Non-trainable params: 0 _________________________________________________________________
# plot_model needs graphviz and a pydot package installed
import graphviz
import pydot_ng as pydot
pydot.find_graphviz()  # check that the graphviz executables are reachable
plot_model(model, show_shapes=True, show_layer_names=True)
Q3.1: Calculate classification accuracy on the test data.
Answer: 15.87% (see the training log below).
model2 = Sequential()
model2.add(Dense(900, activation='relu', input_shape=(4096,)))
model2.add(Dense(num_classes, activation='softmax'))
model2.summary()
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_1 (Dense) (None, 900) 3687300 dense_2 (Dense) (None, 10) 9010 ================================================================= Total params: 3,696,310 Trainable params: 3,696,310 Non-trainable params: 0 _________________________________________________________________
plot_model(model2, show_shapes=True, show_layer_names=True)
train_model(model2,
            (X_train, y_train),
            (X_test, y_test), num_classes)
x_train shape: (13500, 4096)
13500 train samples
13500 test samples
Epoch  1/10 - loss: 5.5126 - accuracy: 0.1046 - val_loss: 2.9482 - val_accuracy: 0.1056
Epoch  2/10 - loss: 2.6098 - accuracy: 0.1036 - val_loss: 2.4503 - val_accuracy: 0.1296
Epoch  3/10 - loss: 2.3274 - accuracy: 0.1287 - val_loss: 2.2960 - val_accuracy: 0.1685
Epoch  4/10 - loss: 2.2527 - accuracy: 0.1395 - val_loss: 2.2373 - val_accuracy: 0.1404
Epoch  5/10 - loss: 2.2256 - accuracy: 0.1530 - val_loss: 2.2229 - val_accuracy: 0.1530
Epoch  6/10 - loss: 2.2106 - accuracy: 0.1504 - val_loss: 2.2153 - val_accuracy: 0.1397
Epoch  7/10 - loss: 2.2018 - accuracy: 0.1543 - val_loss: 2.2059 - val_accuracy: 0.1394
Epoch  8/10 - loss: 2.1941 - accuracy: 0.1523 - val_loss: 2.1995 - val_accuracy: 0.1474
Epoch  9/10 - loss: 2.1845 - accuracy: 0.1600 - val_loss: 2.1967 - val_accuracy: 0.1458
Epoch 10/10 - loss: 2.1802 - accuracy: 0.1594 - val_loss: 2.1875 - val_accuracy: 0.1587
Test score: 2.187528133392334
Test accuracy: 0.15874074399471283
Q4.1: Calculate classification accuracy on the test data.
Answer: 28.42% (see the training log below).
Q4.2: Compare against previous models. Which model was the "best"? Why?
Model 3 performs best so far: its additional hidden layers give it more capacity, and the dropout layers act as regularization, making it less prone to overfitting.
model3 = Sequential()
model3.add(Dense(2700, activation='relu', input_shape=(4096,)))
model3.add(Dropout(0.2))
model3.add(Dense(1800, activation='relu'))
model3.add(Dropout(0.2))
model3.add(Dense(900, activation='relu'))
model3.add(Dropout(0.2))
model3.add(Dense(num_classes, activation='softmax'))
model3.summary()
Model: "sequential_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_3 (Dense) (None, 2700) 11061900 dropout (Dropout) (None, 2700) 0 dense_4 (Dense) (None, 1800) 4861800 dropout_1 (Dropout) (None, 1800) 0 dense_5 (Dense) (None, 900) 1620900 dropout_2 (Dropout) (None, 900) 0 dense_6 (Dense) (None, 10) 9010 ================================================================= Total params: 17,553,610 Trainable params: 17,553,610 Non-trainable params: 0 _________________________________________________________________
plot_model(model3, show_shapes=True, show_layer_names=True)
train_model(model3,
            (X_train, y_train),
            (X_test, y_test), num_classes)
x_train shape: (13500, 4096)
13500 train samples
13500 test samples
Epoch  1/10 - loss: 5.1000 - accuracy: 0.1016 - val_loss: 2.3043 - val_accuracy: 0.0926
Epoch  2/10 - loss: 2.2917 - accuracy: 0.1061 - val_loss: 2.2729 - val_accuracy: 0.1111
Epoch  3/10 - loss: 2.2772 - accuracy: 0.1067 - val_loss: 2.2659 - val_accuracy: 0.1111
Epoch  4/10 - loss: 2.2708 - accuracy: 0.1142 - val_loss: 2.2606 - val_accuracy: 0.1177
Epoch  5/10 - loss: 2.2578 - accuracy: 0.1250 - val_loss: 2.2420 - val_accuracy: 0.1254
Epoch  6/10 - loss: 2.2301 - accuracy: 0.1374 - val_loss: 2.3356 - val_accuracy: 0.1074
Epoch  7/10 - loss: 2.2285 - accuracy: 0.1339 - val_loss: 2.1959 - val_accuracy: 0.1939
Epoch  8/10 - loss: 2.1768 - accuracy: 0.1735 - val_loss: 2.1390 - val_accuracy: 0.2499
Epoch  9/10 - loss: 2.1257 - accuracy: 0.2261 - val_loss: 2.0773 - val_accuracy: 0.2736
Epoch 10/10 - loss: 2.0522 - accuracy: 0.2576 - val_loss: 1.9763 - val_accuracy: 0.2842
Test score: 1.9763363599777222
Test accuracy: 0.28422221541404724
Q5.1: Calculate classification accuracy on the test data.
Answer: 53.94% (see the training log below).
Q5.2: Compare against previous models. Which model was the "best"? Why?
Model 4 is the best: it is a CNN, and its convolutional layers exploit the 2-D spatial structure of the images (and the RGB channels) instead of treating pixels as independent inputs.
RGB_eurosat = []
for img in files:
    img_arr = imread(img)
    RGB_eurosat.append(img_arr)
RGB_eurosat1 = np.vstack(RGB_eurosat).reshape(27000, 64, 64, 3)
RGB_eurosat1.shape
(27000, 64, 64, 3)
eurosat_label2.shape
(27000,)
#Split the data into training (50%) and testing sets (50%)
#stratified on class labels (equal percentage of each class type in train and test sets)
X_train, X_test, y_train, y_test = train_test_split(RGB_eurosat1, eurosat_label2, stratify=eurosat_label2, train_size=0.5, random_state=10)
X_train.shape
(13500, 64, 64, 3)
#M4: CNN that includes the following layers: Conv2D, MaxPooling2D, Dropout, Flatten, Dense
# number of convolutional filters to use
filters = 32
# convolution kernel size
kernel_size = 3
# size of pooling area for max pooling
pool_size = 2
input_shape=(64,64,3)
feature_layers = [
    Conv2D(filters, kernel_size,
           padding='valid',
           input_shape=input_shape),
    MaxPooling2D(pool_size=pool_size),
    Dropout(0.25),
    Flatten(),
    Dense(num_classes, activation='softmax')
]
# create complete model
model4 = Sequential(feature_layers)
model4.summary()
Model: "sequential_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 62, 62, 32) 896 max_pooling2d (MaxPooling2D (None, 31, 31, 32) 0 ) dropout_3 (Dropout) (None, 31, 31, 32) 0 flatten (Flatten) (None, 30752) 0 dense_7 (Dense) (None, 10) 307530 ================================================================= Total params: 308,426 Trainable params: 308,426 Non-trainable params: 0 _________________________________________________________________
plot_model(model4, show_shapes=True, show_layer_names=True)
train_model(model4,
            (X_train, y_train),
            (X_test, y_test), num_classes)
x_train shape: (13500, 64, 64, 3)
13500 train samples
13500 test samples
Epoch  1/10 - loss: 2.4700 - accuracy: 0.2176 - val_loss: 1.8838 - val_accuracy: 0.3673
Epoch  2/10 - loss: 1.7449 - accuracy: 0.3303 - val_loss: 1.6360 - val_accuracy: 0.3516
Epoch  3/10 - loss: 1.5762 - accuracy: 0.4041 - val_loss: 1.5251 - val_accuracy: 0.4440
Epoch  4/10 - loss: 1.4836 - accuracy: 0.4670 - val_loss: 1.4678 - val_accuracy: 0.4998
Epoch  5/10 - loss: 1.4200 - accuracy: 0.4990 - val_loss: 1.4010 - val_accuracy: 0.4989
Epoch  6/10 - loss: 1.3651 - accuracy: 0.5250 - val_loss: 1.3810 - val_accuracy: 0.4860
Epoch  7/10 - loss: 1.3261 - accuracy: 0.5287 - val_loss: 1.3285 - val_accuracy: 0.5251
Epoch  8/10 - loss: 1.2841 - accuracy: 0.5455 - val_loss: 1.2983 - val_accuracy: 0.5273
Epoch  9/10 - loss: 1.2482 - accuracy: 0.5533 - val_loss: 1.2761 - val_accuracy: 0.5185
Epoch 10/10 - loss: 1.2210 - accuracy: 0.5624 - val_loss: 1.2394 - val_accuracy: 0.5394
Test score: 1.2394036054611206
Test accuracy: 0.5394074320793152
Q6.1: Describe the model you built, and why you chose it.
I applied a CNN with Conv2D, MaxPooling2D, Dropout, Flatten, and Dense layers, because convolutions capture local image features well. I also tried adding more layers, but the accuracy did not improve.
Q6.2: Calculate classification accuracy on the test data.
Answer: 67.64% (see the training log below).
Q6.3: Compare against previous models. Which model was the "best"? Why?
Model 5 is the best so far: test accuracy rose to 67.6%, versus 53.9% for Model 4, likely because the wider convolutional layer (256 filters with 'same' padding) extracts richer features.
Q6.4: What are the two classes with the highest labeling error? Explain using data and showing mis-classified examples.
Misclassifications concentrate on label 4 (Industrial): labels 2 (HerbaceousVegetation), 3 (Highway), and 6 (PermanentCrop) in particular are often predicted as Industrial, as the confusion matrix below shows.
feature_layers1 = [
    Conv2D(256, kernel_size=(3, 3),
           activation='relu',
           input_shape=input_shape,
           data_format='channels_last', padding="same"),
    MaxPooling2D(pool_size=(3, 3), padding="same"),
    Dropout(0.25),
    Flatten(),
    Dense(num_classes, activation='softmax')
]
# create complete model
model5 = Sequential(feature_layers1)
train_model(model5,
            (X_train, y_train),
            (X_test, y_test), num_classes)
x_train shape: (13500, 64, 64, 3)
13500 train samples
13500 test samples
Epoch  1/10 - loss: 2.4325 - accuracy: 0.1984 - val_loss: 1.9735 - val_accuracy: 0.2919
Epoch  2/10 - loss: 1.7743 - accuracy: 0.3401 - val_loss: 1.5661 - val_accuracy: 0.4024
Epoch  3/10 - loss: 1.4852 - accuracy: 0.4432 - val_loss: 1.4178 - val_accuracy: 0.4719
Epoch  4/10 - loss: 1.3432 - accuracy: 0.5063 - val_loss: 1.3010 - val_accuracy: 0.5274
Epoch  5/10 - loss: 1.2402 - accuracy: 0.5533 - val_loss: 1.2075 - val_accuracy: 0.5451
Epoch  6/10 - loss: 1.1532 - accuracy: 0.5886 - val_loss: 1.1481 - val_accuracy: 0.5836
Epoch  7/10 - loss: 1.0778 - accuracy: 0.6219 - val_loss: 1.0771 - val_accuracy: 0.5973
Epoch  8/10 - loss: 1.0176 - accuracy: 0.6510 - val_loss: 1.0544 - val_accuracy: 0.6187
Epoch  9/10 - loss: 0.9676 - accuracy: 0.6722 - val_loss: 0.9693 - val_accuracy: 0.6671
Epoch 10/10 - loss: 0.9343 - accuracy: 0.6761 - val_loss: 0.9595 - val_accuracy: 0.6764
Test score: 0.9595135450363159
Test accuracy: 0.67637038230896
model5.summary()
Model: "sequential_4" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_1 (Conv2D) (None, 64, 64, 256) 7168 max_pooling2d_1 (MaxPooling (None, 22, 22, 256) 0 2D) dropout_4 (Dropout) (None, 22, 22, 256) 0 flatten_1 (Flatten) (None, 123904) 0 dense_8 (Dense) (None, 10) 1239050 ================================================================= Total params: 1,246,218 Trainable params: 1,246,218 Non-trainable params: 0 _________________________________________________________________
plot_model(model5, show_shapes=True, show_layer_names=True)
# What are the two classes with the highest labeling error? Explain using data and showing mis-classified examples.
results = model5.predict(X_test.astype('float32') / 255)  # apply the same /255 scaling used inside train_model
results
array([[0., 0., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 0., 0., 0.],
       ...,
       [0., 0., 0., ..., 0., 0., 0.]], dtype=float32)
y_pred = np.argmax(results,axis=1)
import seaborn as sn
array = confusion_matrix(y_test, y_pred)
df_cm = pd.DataFrame(array, range(10), range(10))
plt.figure(figsize=(10, 10))
sn.set(font_scale=1)  # label size
sn.heatmap(df_cm, annot=True, annot_kws={"size": 15}, cbar=True, square=True, fmt='d')  # integer counts
plt.show()
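To identify the two classes with the highest error rate directly from the confusion matrix (a small sketch over the array computed above):
# per-class error rate: 1 - correct / total true samples for each class
per_class_err = 1.0 - np.diag(array) / array.sum(axis=1)
worst_two = np.argsort(per_class_err)[-2:][::-1]
print("highest-error classes:", worst_two, per_class_err[worst_two])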
# example: correctly predicted as label 3 (Highway)
# index into X_test, not files[]: the train/test split shuffled the data,
# so test-set indices no longer match the file order
idx3 = np.where((y_test == 3) & (y_pred == 3))[0][0]
imshow(X_test[idx3])
show()
# mis-classified example: true label 3 (Highway) predicted as 4 (Industrial)
idx34 = np.where((y_test == 3) & (y_pred == 4))[0][0]
imshow(X_test[idx34])
show()
Q7.1: Calculate classification accuracy on the test data.
Q7.2: Compare against results using RGB images.
The accuracy using four bands of the multispectral images is 63.59%, slightly lower than with the RGB images (67.64%).
# read the multispectral images
files1 = []
filedir1 = r"EuroSATallBands/ds/images/remote_sensing/otherDatasets/sentinel_2/tif"
for file in glob.glob(filedir1 + os.sep + "*" + os.sep + "*.tif"):
    files1.append(file)
img = tifffile.imread(files1[0])
img.shape
(64, 64, 13)
# apply to the whole dataset
multi_eurosat = []
for img in files1:
    img_arr = tifffile.imread(img)
    multi_eurosat.append(img_arr)
# stack the list of arrays into a (27000, 64, 64, 13) array
multi_eurosat1 = np.vstack(multi_eurosat).reshape(27000, 64, 64, 13)
multi_eurosat1.shape
(27000, 64, 64, 13)
# keep only the first four spectral bands
multi_eurosat2 = multi_eurosat1[:, :, :, :4]
multi_eurosat2.shape
(27000, 64, 64, 4)
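One caveat worth flagging: the Sentinel-2 tifs store uint16 reflectance values that typically run well above 255, so the fixed /255 scaling inside train_model leaves these inputs far outside [0, 1]; that would be consistent with the very large first-epoch loss below. A minimal per-band rescaling sketch (an alternative that would replace that /255 step, not something this notebook applies):
# hypothetical per-band max scaling to [0, 1]; this would replace the /255
# step in train_model, which assumes 8-bit inputs
band_max = multi_eurosat2.reshape(-1, 4).max(axis=0).astype('float32')
multi_scaled = multi_eurosat2.astype('float32') / band_max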
#Split the data into training (50%) and testing sets (50%)
#stratified on class labels (equal percentage of each class type in train and test sets)
X_train, X_test, y_train, y_test = train_test_split(multi_eurosat2, eurosat_label2, stratify=eurosat_label2, train_size=0.5, random_state=10)
input_shape=(64,64,4)
feature_layers1 = [
    Conv2D(256, kernel_size=(3, 3),
           activation='relu',
           input_shape=input_shape,
           data_format='channels_last', padding="same"),
    MaxPooling2D(pool_size=(3, 3), padding="same"),
    Dropout(0.25),
    Flatten(),
    Dense(num_classes, activation='softmax')
]
# create complete model
model6 = Sequential(feature_layers1)
train_model(model6,
            (X_train, y_train),
            (X_test, y_test), num_classes)
x_train shape: (13500, 64, 64, 4)
13500 train samples
13500 test samples
Epoch  1/10 - loss: 15.7357 - accuracy: 0.1756 - val_loss: 2.7092 - val_accuracy: 0.2718
Epoch  2/10 - loss: 2.1533 - accuracy: 0.2366 - val_loss: 1.9301 - val_accuracy: 0.2378
Epoch  3/10 - loss: 1.6905 - accuracy: 0.3535 - val_loss: 1.5656 - val_accuracy: 0.4126
Epoch  4/10 - loss: 1.4407 - accuracy: 0.4626 - val_loss: 1.3485 - val_accuracy: 0.4836
Epoch  5/10 - loss: 1.2326 - accuracy: 0.5670 - val_loss: 1.2807 - val_accuracy: 0.5078
Epoch  6/10 - loss: 1.1065 - accuracy: 0.6109 - val_loss: 1.1588 - val_accuracy: 0.5901
Epoch  7/10 - loss: 1.0020 - accuracy: 0.6600 - val_loss: 1.0548 - val_accuracy: 0.6363
Epoch  8/10 - loss: 0.9362 - accuracy: 0.6827 - val_loss: 1.0289 - val_accuracy: 0.6299
Epoch  9/10 - loss: 0.8689 - accuracy: 0.7130 - val_loss: 0.9113 - val_accuracy: 0.7005
Epoch 10/10 - loss: 0.8828 - accuracy: 0.6956 - val_loss: 1.0129 - val_accuracy: 0.6359
Test score: 1.0128611326217651
Test accuracy: 0.6359259486198425