added tensorflow stuff
This commit is contained in:
parent
b4c86b4385
commit
aeb0bee7e1
6 changed files with 391 additions and 0 deletions
2
tensorflow/checkpoint
Normal file
2
tensorflow/checkpoint
Normal file
|
@ -0,0 +1,2 @@
|
|||
model_checkpoint_path: "weights"
|
||||
all_model_checkpoint_paths: "weights"
|
BIN
tensorflow/entireModel.h5
Normal file
BIN
tensorflow/entireModel.h5
Normal file
Binary file not shown.
128
tensorflow/lidoML_TF.py
Normal file
128
tensorflow/lidoML_TF.py
Normal file
|
@ -0,0 +1,128 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Thu Jan 23 09:59:50 2020
|
||||
|
||||
@author: tanu
|
||||
"""
|
||||
#%%
|
||||
# LIDO ML: tensorflow
|
||||
#%%
|
||||
|
||||
from sklearn.datasets import load_boston
|
||||
from sklearn.pipeline import Pipeline
|
||||
from sklearn.preprocessing import MinMaxScaler
|
||||
import jrpytensorflow
|
||||
import tensorflow as tf
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
# Load the Boston housing data for the regression exercise.
# NOTE(review): sklearn removed load_boston in 1.2+ — confirm the installed
# sklearn version, or switch to fetch_california_housing.
boston = load_boston()
# BUG FIX: this assignment was commented out, leaving X and y undefined
# when train_test_split below runs (NameError).
X, y = boston.data, boston.target

from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits

digits = load_digits()

# 80/20 train/test split of the Boston features/targets
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size = 0.2
)
|
||||
#%%
|
||||
# P1
|
||||
X,y = jrpytensorflow.datasets.load_circles()
|
||||
|
||||
plt.figure()
|
||||
plt.scatter(X[:,0], X[:,1], c = y, edgecolor = 'black')
|
||||
|
||||
#2)
|
||||
from sklearn.model_selection import train_test_split
|
||||
|
||||
X_train, X_test, y_train, y_test = train_test_split(
|
||||
X, y, test_size = 0.2
|
||||
)
|
||||
|
||||
|
||||
preprocess = Pipeline(
|
||||
steps = [
|
||||
('rescale', MinMaxScaler())
|
||||
]
|
||||
)
|
||||
|
||||
preprocess.fit(X_train)
|
||||
|
||||
X_train = preprocess.transform(X_train)
|
||||
X_test = preprocess.transform(X_test)
|
||||
|
||||
import tensorflow as tf
|
||||
logModel = tf.keras.models.Sequential([
|
||||
tf.keras.layers.Dense(1, activation = 'sigmoid')
|
||||
|
||||
])
|
||||
|
||||
logModel.compile(optimizer = 'sgd',
|
||||
loss = 'binary_crossentropy')
|
||||
history = logModel.fit(X, y, epochs = 100)
|
||||
|
||||
logModel.summary()
|
||||
|
||||
|
||||
# 4) predicted probability for the training inputs X
# BUG FIX: this section referenced `model`, which is not defined until much
# later in the script — the fitted logistic model here is `logModel`.
logModel(X)          # tf tensor of per-sample probabilities
logModel.predict(X)  # same values as a numpy array

# threshold the sigmoid outputs at 0.5 to get hard class predictions
logModel(X).numpy().ravel() > 0.5

# number of samples predicted positive
sum(logModel(X).numpy().ravel() > 0.5)
#101

# number of correct predictions on X
sum((logModel(X).numpy().ravel() > 0.5) == y)
#97
|
||||
|
||||
# 4*) predicted probability: for X_test
|
||||
|
||||
logModel.compile(optimizer = 'sgd',
|
||||
loss = 'binary_crossentropy')
|
||||
history = logModel.fit(X_test, y_test, epochs = 100)
|
||||
|
||||
logModel.summary()
|
||||
|
||||
|
||||
|
||||
# BUG FIX: as above, `model` is undefined at this point — use `logModel`.
logModel(X_test)          # tf tensor of per-sample probabilities
logModel.predict(X_test)  # same values as a numpy array

# hard class predictions on the test set
logModel(X_test).numpy().ravel() > 0.5

# number of test samples predicted positive
sum(logModel(X_test).numpy().ravel() > 0.5)
#22

# number of correct predictions on the test set
sum((logModel(X_test).numpy().ravel() > 0.5) == y_test)
#21
|
||||
|
||||
#%%
|
||||
# Practical 2
|
||||
#%%
|
||||
|
||||
from tensorflow import keras
|
||||
|
||||
def smallModel():
    """Build and compile a small dense binary classifier.

    Returns a compiled Keras Sequential model (SGD + binary cross-entropy,
    accuracy metric).
    """
    model = tf.keras.models.Sequential([
        tf.keras.layers.Dense(20, activation = 'relu'),
        tf.keras.layers.Dense(10, activation = 'relu'),
        # BUG FIX: a single-unit softmax always outputs 1.0 regardless of
        # input, so the model could never learn with binary_crossentropy.
        # A binary classifier's one-unit output layer must be sigmoid.
        tf.keras.layers.Dense(1, activation = 'sigmoid')
    ])
    model.compile(optimizer = 'sgd',
                  loss = 'binary_crossentropy',
                  metrics = ['accuracy'])
    return model
|
||||
|
||||
model = smallModel()
|
||||
model.summary()
|
||||
|
||||
|
261
tensorflow/lidoML_TF_D2.py
Executable file
261
tensorflow/lidoML_TF_D2.py
Executable file
|
@ -0,0 +1,261 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Thu Jan 23 09:59:50 2020
|
||||
|
||||
@author: tanu
|
||||
"""
|
||||
#%%
|
||||
# LIDO ML: tensorflow
|
||||
#%%
|
||||
from sklearn.datasets import load_boston
|
||||
from sklearn.pipeline import Pipeline
|
||||
from sklearn.preprocessing import MinMaxScaler
|
||||
import jrpytensorflow
|
||||
import tensorflow as tf
|
||||
from sklearn.model_selection import train_test_split
|
||||
from tensorflow import keras
|
||||
import matplotlib.pyplot as plt
|
||||
from sklearn.model_selection import train_test_split
|
||||
from sklearn.datasets import load_digits
|
||||
from sklearn.preprocessing import LabelBinarizer
|
||||
import numpy as np
|
||||
#load_ext tensorboard
|
||||
#%%
|
||||
#def numModel(input_size, output_size):
|
||||
# model = tf.keras.models.Sequential([
|
||||
# tf.keras.layers.Dense(20,
|
||||
# input_shape = (input_size,), activation = 'relu'),
|
||||
# tf.keras.layers.Dense(output_size, activation = 'softmax')
|
||||
# ])
|
||||
# model.compile(optimizer = 'sgd',
|
||||
# loss = 'categorical_crossentropy',
|
||||
# metrics = ['accuracy'])
|
||||
|
||||
# return model
|
||||
|
||||
# load data
|
||||
digits = load_digits()
|
||||
|
||||
X, y = digits.data, digits.target
|
||||
|
||||
# split into training and test
|
||||
X_train, X_test, y_train, y_test = train_test_split(
|
||||
X, y, test_size = 0.2
|
||||
)
|
||||
|
||||
prep = LabelBinarizer()
|
||||
y_train_bin = prep.fit_transform(y_train)
|
||||
y_test_bin = prep.transform(y_test)
|
||||
|
||||
#%%
|
||||
#Practical 3
|
||||
|
||||
X_train, y_train, X_test, y_test, labels = jrpytensorflow.datasets.load_fashion_mnist()
|
||||
|
||||
# flatten the array which currently is 60000, 28, 28
|
||||
|
||||
def numModel(input_size, output_size):
    """Build and compile a small dense classifier for 28x28 images.

    input_size:  kept for backward compatibility with existing callers
                 (the Flatten layer now fixes the input shape).
    output_size: number of classes (width of the softmax layer).
    """
    model = tf.keras.models.Sequential([
        # Flatten defines the input shape (28*28 = 784 features).
        keras.layers.Flatten(input_shape = (28, 28)),
        # FIX: dropped the redundant input_shape=(input_size,) kwarg —
        # Keras ignores input_shape on any layer after the first, so it
        # was misleading dead configuration.
        tf.keras.layers.Dense(20, activation = 'relu'),  # 20 perceptrons
        tf.keras.layers.Dense(output_size, activation = 'softmax')
    ])
    # sparse_* loss: expects integer labels, not one-hot vectors.
    model.compile(optimizer = 'sgd',
                  loss = 'sparse_categorical_crossentropy',
                  metrics = ['accuracy'])
    return model
|
||||
|
||||
|
||||
# call model
|
||||
model = numModel(784, 10)
|
||||
|
||||
# call fit
|
||||
model.fit(X_train, y_train, epochs = 10)
|
||||
|
||||
# check loss and accuracy
|
||||
loss, acc = model.evaluate(X_test, y_test, verbose = 0)
|
||||
print('test accuracy', acc)
|
||||
print('Loss',loss)
|
||||
|
||||
|
||||
# print predictions
|
||||
pred = model.predict(X_test)
|
||||
pred[0]
|
||||
|
||||
import numpy as np
|
||||
print('prediction:', np.argmax(pred[0]))
|
||||
|
||||
# change optimisers
|
||||
def numModel(input_size, output_size):
    """Build and compile the dense 28x28 classifier with the Adam optimiser.

    Same architecture as the earlier SGD version; redefined here to compare
    optimisers.

    input_size:  kept for backward compatibility with existing callers
                 (the Flatten layer now fixes the input shape).
    output_size: number of classes (width of the softmax layer).
    """
    model = tf.keras.models.Sequential([
        keras.layers.Flatten(input_shape = (28, 28)),
        # FIX: dropped the redundant input_shape=(input_size,) kwarg —
        # Keras ignores input_shape on any layer after the first.
        tf.keras.layers.Dense(20, activation = 'relu'),  # 20 perceptrons
        tf.keras.layers.Dense(output_size, activation = 'softmax')
    ])
    model.compile(optimizer = 'adam',
                  loss = 'sparse_categorical_crossentropy',
                  metrics = ['accuracy'])
    return model
|
||||
|
||||
|
||||
# call model
|
||||
model = numModel(784, 10)
|
||||
|
||||
# call fit
|
||||
model.fit(X_train, y_train, epochs = 10)
|
||||
|
||||
# check loss and accuracy
|
||||
loss, acc = model.evaluate(X_test, y_test, verbose = 0)
|
||||
print('test accuracy', acc)
|
||||
print('Loss',loss)
|
||||
|
||||
|
||||
# print predictions
|
||||
pred = model.predict(X_test)
|
||||
pred[0]
|
||||
|
||||
import numpy as np
|
||||
print('prediction:', np.argmax(pred[0]))
|
||||
print('truth:', y_test[0])
|
||||
|
||||
model.save('entireModel.h5')
|
||||
entireModel = tf.keras.models.load_model('entireModel.h5')
|
||||
entireModel.summary()
|
||||
|
||||
|
||||
model.save_weights('./weights')
|
||||
|
||||
model = numModel(784, 10)
|
||||
|
||||
model.load_weights('./weights')
|
||||
|
||||
# check loss and accuracy of the model after restoring saved weights
loss, acc = model.evaluate(X_test, y_test, verbose = 0)
print('Restored test accuracy', acc)
# FIX: corrected the "Reestored" typo in the printed label
print('Restored Loss', loss)
|
||||
|
||||
|
||||
callback = tf.keras.callbacks.ModelCheckpoint(
|
||||
filepath = './checkpointPath',
|
||||
save_weights_only = True,
|
||||
verbose = 1
|
||||
|
||||
)
|
||||
|
||||
model.fit(X_train, y_train, epochs = 10,
|
||||
callbacks = [callback])
|
||||
|
||||
|
||||
# Baseline: evaluate a fresh, untrained model (no checkpoint loaded).
# BUG FIX: the original built `modell` but then evaluated the already-trained
# `model`, so the "no checkpoint" baseline was never actually measured.
modell = numModel(784, 10)
loss, acc = modell.evaluate(X_test, y_test, verbose = 0)
print('No checkpoint Test loss :', loss)
|
||||
|
||||
|
||||
model = numModel(784, 10)
|
||||
model.load_weights('./checkpointPath')
|
||||
loss, acc = model.evaluate(X_test, y_test, verbose = 0)
|
||||
print('With checkpoint Test loss :', loss)
|
||||
|
||||
#%%
|
||||
# Visualisation
|
||||
#%%
|
||||
|
||||
#load_ext tensorboard
|
||||
|
||||
tensorBoardCallback = keras.callbacks.TensorBoard( log_dir = "logs/fit")
|
||||
|
||||
model = numModel(784, 10)
|
||||
model.fit(X_train, y_train, epochs = 10,
|
||||
validation_data = (X_test, y_test),
|
||||
callbacks = [tensorBoardCallback])
|
||||
|
||||
##tensorboard --logdir logs
|
||||
|
||||
#%%
|
||||
#%Practical 4
|
||||
#%%
|
||||
|
||||
walking = jrpytensorflow.datasets.load_walking()
|
||||
|
||||
#import matplotlib.pyplot as plt
|
||||
#import numpy as np
|
||||
|
||||
sub = walking[walking['sample'] == 400]
|
||||
sub['time'] = np.arange(260)
|
||||
|
||||
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize = (18, 10))
|
||||
sub.plot(x = 'time', y = 'acc_x', ax = ax1)
|
||||
sub.plot(x = 'time', y = 'acc_y', ax = ax2)
|
||||
sub.plot(x = 'time', y = 'acc_z', ax = ax3)
|
||||
plt.show()
|
||||
|
||||
dims = ['acc_x', 'acc_y', 'acc_z']
|
||||
#x = np.dstack([walking[[d]].values.reshape(-1,260) for d in dims])
|
||||
X = np.dstack([walking[[d]].values.reshape(-1,260) for d in dims])
|
||||
y = walking['person'].values[::260] - 1
|
||||
|
||||
|
||||
# load data
|
||||
# split into training and test
|
||||
# 80/20 train/test split of the walking accelerometer data
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size = 0.2
)

# Binarise labels
# FIX: removed a stray duplicated `X, y, test_size = 0.2` / `)` fragment
# left over from a copy-paste — it was a bare syntax error.
|
||||
prep = LabelBinarizer()
|
||||
y_train_bin = prep.fit_transform(y_train)
|
||||
y_test_bin = prep.transform(y_test)
|
||||
|
||||
# normalise
|
||||
#X_train, X_test = X_train / 255.0, X_test / 255.0
|
||||
|
||||
|
||||
# model architecture
|
||||
from tensorflow.keras.optimizers import Adam
|
||||
|
||||
def convModel():
    """Build and compile a 1-D CNN for the 15-class walking dataset.

    Expects inputs of shape (260, 3) — 260 time steps by 3 accelerometer
    axes — and one-hot (binarised) labels, matching the
    categorical_crossentropy loss.
    """
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv1D(6, 4,
                               activation = 'relu', input_shape = (260, 3)),
        tf.keras.layers.MaxPooling1D(2),
        tf.keras.layers.Conv1D(16, 3, activation = 'relu'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(150, activation = 'relu'),
        tf.keras.layers.Dense(50, activation = 'relu'),
        tf.keras.layers.Dense(15, activation = 'softmax')
    ])

    # FIX: `lr` is deprecated in tf.keras optimizers — use `learning_rate`.
    opt = Adam(learning_rate = 0.001)

    model.compile(optimizer = opt,
                  loss = 'categorical_crossentropy',  # use sparse_* for integer labels
                  metrics = ['accuracy'])

    return model
|
||||
|
||||
model = convModel()
|
||||
history = model.fit(X_train, y_train_bin, epochs = 10,
|
||||
validation_data = (X_test, y_test_bin))
|
||||
|
||||
historyDict = history.history
|
||||
|
||||
# check loss and accuracy
|
||||
loss, acc = model.evaluate(X_test, y_test_bin, verbose = 0)
|
||||
print('test accuracy', acc)
|
||||
print('Loss',loss)
|
||||
|
||||
# image for P4: Refer to Page 10 from notes
|
||||
## Notes: Check the dim of your data
|
||||
# dim of your filter MUST be the same as the dim of your data
|
||||
|
||||
|
||||
|
BIN
tensorflow/weights.data-00000-of-00001
Normal file
BIN
tensorflow/weights.data-00000-of-00001
Normal file
Binary file not shown.
BIN
tensorflow/weights.index
Normal file
BIN
tensorflow/weights.index
Normal file
Binary file not shown.
Loading…
Add table
Add a link
Reference in a new issue