Examples

Keras model sketch

from keras.layers import Dense, BatchNormalization, Dropout
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
import keras.backend as K

K.clear_session()

a = EarlyStopping(monitor='val_loss', min_delta=0, patience=1, verbose=1, mode='auto')  # stop training if val_loss fails to improve for 1 epoch (patience=1)
b = ModelCheckpoint(filepath='../models/course5.hdf5', verbose=1, save_best_only=True)  # save model weights after each epoch in which val_loss improves
c = TensorBoard(log_dir='./logs/')  # write a TensorBoard log file; save different runs to different subdirectories

callbacks = [a, b, c]

model = Sequential()
model.add(Dense(8, input_shape=(13,), activation='tanh', kernel_initializer='lecun_uniform'))
model.add(BatchNormalization())
model.add(Dense(5, activation='tanh', kernel_initializer='lecun_uniform'))
model.add(Dense(2, activation='tanh', kernel_initializer='lecun_uniform'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax', kernel_initializer='lecun_uniform'))

model.compile(optimizer=Adam(lr=0.04), loss='categorical_crossentropy', metrics=['accuracy'])

model.fit(X_train, y_train, epochs=100, validation_data=(X_test, y_test), callbacks=callbacks)  # starts training
result = model.evaluate(X_test, y_test)

print(result)
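
Since save_best_only=True keeps the weights from the best epoch, the checkpoint can be restored after training; a minimal sketch using the filepath from the ModelCheckpoint above:

model.load_weights('../models/course5.hdf5')  # restore the best weights saved by ModelCheckpoint
print(model.evaluate(X_test, y_test))  # re-evaluate with the restored weights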

Keras embeddings and LSTM

from keras.layers import Embedding, Dense, LSTM
from keras.models import Sequential

max_features = 20000  # assumed vocabulary size; set to the actual number of distinct token ids

model = Sequential()
model.add(Embedding(max_features, 128))  # learn a 128-dimensional embedding per token
model.add(LSTM(64, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
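
The Embedding layer expects equal-length sequences of integer token ids, so inputs are usually padded before training; a minimal sketch, assuming X_train is a list of token-id sequences and maxlen is a placeholder cutoff:

from keras.preprocessing.sequence import pad_sequences

maxlen = 80  # placeholder; pad/truncate every sequence to 80 tokens
X_train = pad_sequences(X_train, maxlen=maxlen)
model.fit(X_train, y_train, batch_size=32, epochs=3, validation_split=0.2)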

Keras CNN with Inception module

import keras
from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Model
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard

i = 0  # run index; increment between runs so checkpoints and logs get unique names

import os
os.makedirs('models', exist_ok=True)
os.makedirs('mnist-conv-logs', exist_ok=True)

a = EarlyStopping(monitor='val_acc', min_delta=0, patience=5, verbose=1, mode='auto')  # stop training if val_acc fails to improve for 5 consecutive epochs
b = ModelCheckpoint(monitor='val_acc', filepath='./models/course6-cifar10-inception-'+str(i)+'.hdf5', verbose=1, save_best_only=True)  # save weights whenever val_acc improves
c = TensorBoard(log_dir='./mnist-conv-logs/cifar10-inception-'+str(i))  # TensorBoard logs; keep each run in its own subdirectory
callbacks = [a, b, c]

epochs = 25
lrate = 0.01
decay = lrate/epochs  # old Keras applies decay per update: lr = lr0 / (1 + decay * iterations)
sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)

input_img = Input(shape=(32, 32, 3))

# Inception module: parallel convolution towers over the same input
tower_1 = Conv2D(9, (1, 1), padding='same', activation='relu')(input_img)
tower_1 = Conv2D(9, (3, 3), padding='same', activation='relu')(tower_1)

tower_2 = Conv2D(9, (1, 1), padding='same', activation='relu')(input_img)
tower_2 = Conv2D(9, (5, 5), padding='same', activation='relu')(tower_2)

tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input_img)
tower_3 = Conv2D(9, (1, 1), padding='same', activation='relu')(tower_3)

output = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=3)  # merge the towers along the channel axis

output = Flatten()(output)

drop = Dropout(0.5)(output)  # dropout rate is an assumed placeholder
dense = Dense(64)(drop)

out = Dense(10, activation='softmax')(dense)

model = Model(inputs = input_img, outputs = out)

model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

model.summary()
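
Training then uses the SGD optimizer and the callbacks defined above; a minimal sketch, assuming X_train/y_train hold the CIFAR-10 images and one-hot labels:

model.fit(X_train, y_train,
          epochs=epochs,
          batch_size=64,  # assumed batch size
          validation_data=(X_test, y_test),
          callbacks=callbacks)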

Augment image dataset

from keras.preprocessing.image import ImageDataGenerator

generator = ImageDataGenerator(rescale=1./255,
                               width_shift_range=0.1,
                               height_shift_range=0.1,
                               rotation_range=20,
                               shear_range=0.3,
                               zoom_range=0.3,
                               horizontal_flip=True)

train = generator.flow_from_directory('../data/generator',
                                      target_size=(128, 128),
                                      batch_size=32,
                                      class_mode='binary')

img, label = next(train)  # pull one augmented batch of 32 images and labels
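
The iterator can also drive training directly; a minimal sketch using the era-appropriate fit_generator API, with the epoch count as a placeholder:

model.fit_generator(train,
                    steps_per_epoch=len(train),  # one full pass over the directory per epoch
                    epochs=10)  # placeholder epoch count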

Load and save model

from keras.models import model_from_json

# save model: architecture as JSON, weights as HDF5
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("model.h5")
print("Saved model to disk")

# load model and weights
with open('model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("model.h5")
print("Loaded model from disk")

model = loaded_model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
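
Note that compile settings are not stored in the JSON, hence the recompile above. Alternatively, a single HDF5 file can hold architecture, weights, and optimizer state together:

from keras.models import load_model

model.save('full_model.h5')          # one file: architecture + weights + optimizer state
model = load_model('full_model.h5')  # comes back ready to use, no recompile needed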

Calculate memory usage of model

def get_model_memory_usage(batch_size, model):
    import numpy as np
    from keras import backend as K

    shapes_mem_count = 0
    for l in model.layers:
        single_layer_mem = 1
        for s in l.output_shape:
            if s is None:
                continue
            single_layer_mem *= s
        shapes_mem_count += single_layer_mem

    trainable_count = np.sum([K.count_params(p) for p in set(model.trainable_weights)])
    non_trainable_count = np.sum([K.count_params(p) for p in set(model.non_trainable_weights)])

    # float32 = 4 bytes; activation memory scales with batch size, parameter memory does not
    total_memory = 4.0 * (batch_size * shapes_mem_count + trainable_count + non_trainable_count)
    gbytes = np.round(total_memory / (1024.0 ** 3), 3)
    return gbytes

print("Memory usage (GB): ", get_model_memory_usage(128,model))

Image data visualization with Matplotlib

import numpy as np
import matplotlib.pyplot as plt
from random import randint

plt.figure(figsize=(16, 16))

# pick a random window of 16 consecutive samples
rnd = randint(0, len(X_orig) - 16)

for j, i in enumerate(range(rnd, rnd + 16)):
    img = np.array(X_orig.iloc[i]).reshape(28, 28)

    plt.subplot(4, 4, j + 1)
    plt.gca().set_title('L: ' + str(y_orig.iloc[i]))  # label as subplot title
    plt.imshow(img)

plt.tight_layout()
plt.show()