
Running Keras models with TensorBoard

Lab integrates into a typical Keras workflow.

WARNING: Model persistence in Keras can be complicated, especially when working with complex models. It is recommended to checkpoint each training epoch independently of Lab's log_model API.
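One way to do this is with the standard Keras ModelCheckpoint callback, which can write one file per epoch alongside whatever Lab stores. This is only a minimal sketch; the checkpoints/ directory and filename pattern are illustrative, not part of Lab's API.

import os
from keras.callbacks import ModelCheckpoint

# Make sure the (illustrative) output directory exists before training starts
os.makedirs('checkpoints', exist_ok=True)

# Write a separate checkpoint file after every epoch,
# e.g. checkpoints/mnist-mlp-01.h5, checkpoints/mnist-mlp-02.h5, ...
checkpoint = ModelCheckpoint('checkpoints/mnist-mlp-{epoch:02d}.h5',
                             save_weights_only=False)

# Add it to the callbacks passed to model.fit in the script below, e.g.
# model.fit(..., callbacks=[tensorboard, checkpoint])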

Begin by creating a new Lab Project:

>>> echo "keras" > requirements.txt
>>> lab init --name simple-keras

The script below trains a simple MNIST classifier, streams training progress to TensorBoard, and logs metrics, parameters, and the final model to a Lab Experiment:
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.callbacks import TensorBoard

import tempfile

from sklearn.metrics import accuracy_score, precision_score

from lab.experiment import Experiment

batch_size = 128
num_classes = 10
epochs = 20

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Flatten the 28x28 images into 784-dimensional vectors and scale to [0, 1]
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

e = Experiment()


@e.start_run
def train():

    # Create a temporary directory for tensorboard logs
    output_dir = tempfile.mkdtemp()
    print("Writing TensorBoard events locally to %s\n" % output_dir)
    tensorboard = TensorBoard(log_dir=output_dir)

    # During Experiment execution, tensorboard can be viewed through:
    # tensorboard --logdir=[output_dir]

    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test),
              callbacks=[tensorboard])

    # Convert predicted probabilities and one-hot labels back to class indices
    y_prob = model.predict(x_test)
    y_classes = y_prob.argmax(axis=-1)
    actual = y_test.argmax(axis=-1)

    accuracy = accuracy_score(y_true=actual, y_pred=y_classes)
    precision = precision_score(y_true=actual, y_pred=y_classes,
                                average='macro')

    # Log TensorBoard event files as an artifact
    e.log_artifacts('tensorboard', output_dir)

    # Log all metrics
    e.log_metric('accuracy_score', accuracy)
    e.log_metric('precision_score', precision)

    # Log parameters
    e.log_parameter('batch_size', batch_size)

    # Save model
    e.log_model('mnist-mlp', model)
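
Because the per-epoch checkpoints from the ModelCheckpoint sketch above are plain Keras files, any epoch can be restored later without going through Lab. A minimal sketch, assuming the illustrative checkpoints/ path used earlier and the x_test/y_test arrays from this script:

from keras.models import load_model

# Restore the checkpoint written after the final (20th) epoch
restored = load_model('checkpoints/mnist-mlp-20.h5')

# Sanity-check the restored model against the held-out test set
loss, acc = restored.evaluate(x_test, y_test, verbose=0)
print('restored model accuracy: %.4f' % acc)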
