#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 23 09:59:50 2020

@author: tanu
"""

#%%
# LIDO ML: tensorflow
#%%
# note: load_boston, Pipeline and MinMaxScaler are imported but not used below
from sklearn.datasets import load_boston
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
import jrpytensorflow
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow import keras
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.preprocessing import LabelBinarizer
import numpy as np

# %load_ext tensorboard  (IPython magic: run in a notebook to enable TensorBoard)
#%%
# def numModel(input_size, output_size):
#     model = tf.keras.models.Sequential([
#         tf.keras.layers.Dense(20,
#                               input_shape = (input_size,), activation = 'relu'),
#         tf.keras.layers.Dense(output_size, activation = 'softmax')
#     ])
#     model.compile(optimizer = 'sgd',
#                   loss = 'categorical_crossentropy',
#                   metrics = ['accuracy'])
#
#     return model

# load data
digits = load_digits()

X, y = digits.data, digits.target

# split into training and test
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size = 0.2
)

prep = LabelBinarizer()
y_train_bin = prep.fit_transform(y_train)
y_test_bin = prep.transform(y_test)
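
# Quick sanity check (illustrative, not in the original script): each row of
# y_train_bin is a one-hot vector over the 10 digit classes.
print('binarised shape:', y_train_bin.shape)      # expect (n_train, 10)
print('label', y_train[0], '->', y_train_bin[0])  # single 1 at that index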

#%%
# Practical 3

X_train, y_train, X_test, y_test, labels = jrpytensorflow.datasets.load_fashion_mnist()
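
# Optional step (a sketch; assumes the images come back as 0-255 pixel
# intensities, as with the standard Fashion-MNIST): rescaling to [0, 1]
# usually helps gradient descent converge.
# X_train, X_test = X_train / 255.0, X_test / 255.0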

# the images are 28 x 28; the Flatten layer below turns each into a 784-vector

def numModel(input_size, output_size):
    model = tf.keras.models.Sequential([
        keras.layers.Flatten(input_shape = (28, 28)),
        # 20 units in the hidden layer; the input shape is already fixed by
        # Flatten, so the redundant input_shape argument has been dropped
        # (input_size is kept only for the existing call signature)
        tf.keras.layers.Dense(20, activation = 'relu'),
        tf.keras.layers.Dense(output_size, activation = 'softmax')
    ])
    model.compile(optimizer = 'sgd',
                  loss = 'sparse_categorical_crossentropy',
                  metrics = ['accuracy'])

    return model

# call model
model = numModel(784, 10)

# call fit
model.fit(X_train, y_train, epochs = 10)

# check loss and accuracy
loss, acc = model.evaluate(X_test, y_test, verbose = 0)
print('test accuracy:', acc)
print('test loss:', loss)

# print predictions
pred = model.predict(X_test)
print('first prediction (class probabilities):', pred[0])

print('prediction:', np.argmax(pred[0]))
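
# Illustrative check (not in the original script): the softmax output is a
# probability distribution, so each row of pred sums to ~1.
print('probabilities sum to:', pred[0].sum())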

# change optimisers: swap plain SGD for Adam, which adapts the step size
# per parameter and typically converges faster here
def numModel(input_size, output_size):
    model = tf.keras.models.Sequential([
        keras.layers.Flatten(input_shape = (28, 28)),
        tf.keras.layers.Dense(20, activation = 'relu'),
        tf.keras.layers.Dense(output_size, activation = 'softmax')
    ])
    model.compile(optimizer = 'adam',
                  loss = 'sparse_categorical_crossentropy',
                  metrics = ['accuracy'])
    return model

# call model
model = numModel(784, 10)

# call fit
model.fit(X_train, y_train, epochs = 10)

# check loss and accuracy
loss, acc = model.evaluate(X_test, y_test, verbose = 0)
print('test accuracy:', acc)
print('test loss:', loss)

# print predictions
pred = model.predict(X_test)
print('first prediction (class probabilities):', pred[0])

print('prediction:', np.argmax(pred[0]))
print('truth:', y_test[0])

# save the full model (architecture + weights + optimiser state), then reload
model.save('entireModel.h5')
entireModel = tf.keras.models.load_model('entireModel.h5')
entireModel.summary()
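
# Sanity check (illustrative): the reloaded model should score exactly the
# same as the one still in memory, since everything was saved.
restored_loss, restored_acc = entireModel.evaluate(X_test, y_test, verbose = 0)
print('restored accuracy matches:', np.isclose(restored_acc, acc))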

# alternatively, save only the weights, then rebuild the architecture
# and load the weights back in
model.save_weights('./weights')

model = numModel(784, 10)
model.load_weights('./weights')

# check loss and accuracy
loss, acc = model.evaluate(X_test, y_test, verbose = 0)
print('restored test accuracy:', acc)
print('restored test loss:', loss)

# save weights automatically during training with a checkpoint callback
callback = tf.keras.callbacks.ModelCheckpoint(
    filepath = './checkpointPath',
    save_weights_only = True,
    verbose = 1
)

model.fit(X_train, y_train, epochs = 10,
          callbacks = [callback])

# fresh (untrained) model: evaluating it shows the baseline loss
model = numModel(784, 10)
loss, acc = model.evaluate(X_test, y_test, verbose = 0)
print('No checkpoint test loss:', loss)

# same architecture, but restored from the checkpoint
model = numModel(784, 10)
model.load_weights('./checkpointPath')
loss, acc = model.evaluate(X_test, y_test, verbose = 0)
print('With checkpoint test loss:', loss)
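
# A common variant (sketch; assumes validation data is passed to fit): keep
# only the best weights seen so far by monitoring validation loss.
bestCallback = tf.keras.callbacks.ModelCheckpoint(
    filepath = './bestWeights',
    save_weights_only = True,
    save_best_only = True,
    monitor = 'val_loss',
    verbose = 1
)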

#%%
# Visualisation
#%%

# %load_ext tensorboard  (IPython magic: run in a notebook)

tensorBoardCallback = keras.callbacks.TensorBoard(log_dir = "logs/fit")

model = numModel(784, 10)
model.fit(X_train, y_train, epochs = 10,
          validation_data = (X_test, y_test),
          callbacks = [tensorBoardCallback])

# to view: tensorboard --logdir logs

#%%
# Practical 4
#%%

walking = jrpytensorflow.datasets.load_walking()

# plot one 260-step accelerometer sample, one panel per axis
sub = walking[walking['sample'] == 400].copy()
sub['time'] = np.arange(260)

fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize = (18, 10))
sub.plot(x = 'time', y = 'acc_x', ax = ax1)
sub.plot(x = 'time', y = 'acc_y', ax = ax2)
sub.plot(x = 'time', y = 'acc_z', ax = ax3)
plt.show()

# reshape into one row per 260-step window, with the three accelerometer
# axes stacked along the last dimension; one 'person' label per window
dims = ['acc_x', 'acc_y', 'acc_z']
X = np.dstack([walking[[d]].values.reshape(-1, 260) for d in dims])
y = walking['person'].values[::260] - 1
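
# Shape check (illustrative): Conv1D below expects (steps, channels) per
# sample, i.e. X should be (n_windows, 260, 3).
print('X shape:', X.shape)
print('y shape:', y.shape, 'classes:', np.unique(y))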

# split into training and test
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size = 0.2
)

# binarise labels
prep = LabelBinarizer()
y_train_bin = prep.fit_transform(y_train)
y_test_bin = prep.transform(y_test)

# normalise (left commented: accelerometer readings are not 0-255 pixel values)
# X_train, X_test = X_train / 255.0, X_test / 255.0

# model architecture
from tensorflow.keras.optimizers import Adam

def convModel():
    model = tf.keras.models.Sequential([
        # 6 filters of width 4 sliding over the 260-step, 3-channel signal
        tf.keras.layers.Conv1D(6, 4,
                               activation = 'relu', input_shape = (260, 3)),
        tf.keras.layers.MaxPooling1D(2),
        tf.keras.layers.Conv1D(16, 3, activation = 'relu'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(150, activation = 'relu'),
        tf.keras.layers.Dense(50, activation = 'relu'),
        tf.keras.layers.Dense(15, activation = 'softmax')
    ])

    opt = Adam(learning_rate = 0.001)   # 'lr' is the deprecated spelling

    model.compile(optimizer = opt,
                  loss = 'categorical_crossentropy',  # use the sparse variant for non-binarised labels
                  metrics = ['accuracy'])

    return model

model = convModel()
history = model.fit(X_train, y_train_bin, epochs = 10,
                    validation_data = (X_test, y_test_bin))

historyDict = history.history
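
# historyDict holds the per-epoch metrics; a quick learning-curve sketch
# (key names assume TF 2.x metric naming, i.e. 'accuracy'/'val_accuracy').
plt.plot(historyDict['accuracy'], label = 'train')
plt.plot(historyDict['val_accuracy'], label = 'validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()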

# check loss and accuracy
loss, acc = model.evaluate(X_test, y_test_bin, verbose = 0)
print('test accuracy:', acc)
print('test loss:', loss)

# image for P4: refer to page 10 of the notes
## Notes: check the dimensions of your data
# the dimensionality of the convolution must match the data:
# Conv1D for 1-D sequences, Conv2D for images
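
# Illustrative contrast (sketch): the layer choice encodes the data's shape.
# tf.keras.layers.Conv1D(6, 4, input_shape = (260, 3))          # 1-D signals
# tf.keras.layers.Conv2D(6, (4, 4), input_shape = (28, 28, 1))  # 2-D images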