added file containing model names and hyperparams to run for all models inc FS
This commit is contained in:
parent 9c07ad3ce8
commit 5d6dccfc09
6 changed files with 536 additions and 299 deletions

UQ_FS_fn.py (46 changes)
@@ -10,19 +10,26 @@ Created on Mon May 23 23:25:26 2022
 def fsgs(input_df
          , target
          , blind_test_df = pd.DataFrame()
+         , blind_test_target = pd.Series(dtype = 'int64')
          #, y_trueS = pd.Series()
          , estimator = LogisticRegression(**rs)
          , param_gridLd = {}
-         , cv_method = 10
+         , cv_method = StratifiedKFold(n_splits = 10
+                                       , shuffle = True, **rs)
          , var_type = ['numerical'
                        , 'categorical'
                        , 'mixed']
          , fs_estimator = [LogisticRegression(**rs)]
-         , fs = RFECV(DecisionTreeClassifier(**rs), cv = 10, scoring = 'matthews_corrcoef')
+         , fs = RFECV(DecisionTreeClassifier(**rs)
+                      , cv = StratifiedKFold(n_splits = 10
+                                             , shuffle = True, **rs)
+                      , scoring = 'matthews_corrcoef')
          ):
     '''
     returns
-    Dict containing results from FS and hyperparam tuning
+    Dict containing results from FS and hyperparam tuning for a given estimator
+    >>> ADD MORE <<<
+    optimised/selected based on mcc
     '''
     # Determine categorical and numerical features
     numerical_ix = input_df.select_dtypes(include=['int64', 'float64']).columns
@@ -68,11 +75,10 @@ def fsgs(input_df
     ############################################################################
     # Create Pipeline object
     pipe = Pipeline([
-        #('pre', MinMaxScaler()),
         ('pre', col_transform),
         ('fs', fs),
         #('clf', LogisticRegression(**rs))])
         ('clf', estimator)])

     ############################################################################
     # Define GridSearchCV
     gscv_fs = GridSearchCV(pipe
@@ -119,8 +125,8 @@ def fsgs(input_df
     #tp = gscv_fs.predict(X_bts)
     tp = gscv_fs.predict(blind_test_df)

-    print('\nMCC on Blind test:' , round(matthews_corrcoef(y_bts, tp),2))
-    print('\nAccuracy on Blind test:', round(accuracy_score(y_bts, tp),2))
+    print('\nMCC on Blind test:' , round(matthews_corrcoef(blind_test_target, tp),2))
+    print('\nAccuracy on Blind test:', round(accuracy_score(blind_test_target, tp),2))

     #=================
     # info extraction
@@ -191,9 +197,9 @@ def fsgs(input_df
     #bts_predict = gscv_fs.predict(X_bts)
     bts_predict = gscv_fs.predict(blind_test_df)

-    print('\nMCC on Blind test:' , round(matthews_corrcoef(y_bts, bts_predict),2))
-    print('\nAccuracy on Blind test:', round(accuracy_score(y_bts, bts_predict),2))
-    bts_mcc_score = round(matthews_corrcoef(y_bts, bts_predict),2)
+    print('\nMCC on Blind test:' , round(matthews_corrcoef(blind_test_target, bts_predict),2))
+    print('\nAccuracy on Blind test:', round(accuracy_score(blind_test_target, bts_predict),2))
+    bts_mcc_score = round(matthews_corrcoef(blind_test_target, bts_predict),2)

     # Diff b/w train and bts test scores
     train_test_diff = train_bscore - bts_mcc_score
@@ -213,12 +219,12 @@ def fsgs(input_df

     lr_btsD
     #lr_btsD['bts_mcc'] = bts_mcc_score
-    lr_btsD['bts_fscore'] = round(f1_score(y_bts, bts_predict),2)
-    lr_btsD['bts_precision'] = round(precision_score(y_bts, bts_predict),2)
-    lr_btsD['bts_recall'] = round(recall_score(y_bts, bts_predict),2)
-    lr_btsD['bts_accuracy'] = round(accuracy_score(y_bts, bts_predict),2)
-    lr_btsD['bts_roc_auc'] = round(roc_auc_score(y_bts, bts_predict),2)
-    lr_btsD['bts_jaccard'] = round(jaccard_score(y_bts, bts_predict),2)
+    lr_btsD['bts_fscore'] = round(f1_score(blind_test_target, bts_predict),2)
+    lr_btsD['bts_precision'] = round(precision_score(blind_test_target, bts_predict),2)
+    lr_btsD['bts_recall'] = round(recall_score(blind_test_target, bts_predict),2)
+    lr_btsD['bts_accuracy'] = round(accuracy_score(blind_test_target, bts_predict),2)
+    lr_btsD['bts_roc_auc'] = round(roc_auc_score(blind_test_target, bts_predict),2)
+    lr_btsD['bts_jaccard'] = round(jaccard_score(blind_test_target, bts_predict),2)
     lr_btsD

     #===========================
@@ -229,7 +235,7 @@ def fsgs(input_df
     fs_methodf = str(gscv_fs.best_estimator_.named_steps['fs'])
     all_featuresL = list(all_features)
     fs_res_arrayf = str(list( gscv_fs.best_estimator_.named_steps['fs'].get_support()))
-    fs_res_array_rankf = list( gscv_fs.best_estimator_.named_steps['fs'].ranking_)
+    fs_res_array_rankf = str(list( gscv_fs.best_estimator_.named_steps['fs'].ranking_))
     sel_featuresf = list(sel_features)
     n_sf = int(n_sf)
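The fsgs() signature above wires preprocessing, RFECV feature selection and the classifier into one GridSearchCV, then scores the best pipeline on a held-out blind test set with MCC. A minimal self-contained sketch of that pattern (the synthetic data and the rs = {'random_state': 42} convention are assumptions for illustration, not repo code):

# Sketch of the pre -> fs -> clf pattern that fsgs() wraps; assumes
# rs = {'random_state': 42} as in the repo's other scripts.
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import matthews_corrcoef
from sklearn.model_selection import GridSearchCV, StratifiedKFold, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeClassifier

rs = {'random_state': 42}
X, y = make_classification(n_samples = 200, n_features = 10, **rs)
X, X_bts, y, y_bts = train_test_split(X, y, test_size = 0.25, stratify = y, **rs)

cv = StratifiedKFold(n_splits = 10, shuffle = True, **rs)
pipe = Pipeline([('pre', MinMaxScaler())
                 , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
                 , ('clf', LogisticRegression(**rs))])

gscv = GridSearchCV(pipe, {'clf__C': [0.1, 1, 10]}, cv = cv, scoring = 'matthews_corrcoef')
gscv.fit(X, y)
print('MCC on Blind test:', round(matthews_corrcoef(y_bts, gscv.predict(X_bts)), 2))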
@@ -6,9 +6,27 @@ Created on Tue May 24 08:11:05 2022
 @author: tanu
 """

-import fsgs from
-fsgs(X,y,param_gridLd=param_grid_abc, blind_test_df = X_bts, estimator=AdaBoostClassifier(**rs), var_type = 'mixed')
+# my function
+#import fsgs from UQ_FS_fn
+
+fsgs(input_df = X
+     , target = y
+     , param_gridLd = param_grid_abc
+     , blind_test_df = X_bts
+     , blind_test_target = y_bts
+     , estimator = AdaBoostClassifier(**rs)
+     , var_type = 'mixed')
+
+ds_lrD = fsgs(input_df = X
+              , target = y
+              , param_gridLd = param_grid_lr
+              , blind_test_df = X_bts
+              , blind_test_target = y_bts
+              , estimator = LogisticRegression(**rs)
+              , var_type = 'mixed')

@@ -17,13 +35,13 @@ fsgs(X,y,param_gridLd=param_grid_abc, blind_test_df = X_bts, estimator=AdaBoostClassifier(**rs), var_type = 'mixed')
 # Write final output file
 # https://stackoverflow.com/questions/19201290/how-to-save-a-dictionary-to-a-file
 #========================================
-#output final dict as a json
-outFile = 'LR_FS.json'
-with open(outFile, 'w') as f:
-    f.write(json.dumps(output_modelD,cls=NpEncoder))
+# #output final dict as a json
+# outFile = 'LR_FS.json'
+# with open(outFile, 'w') as f:
+#     f.write(json.dumps(output_modelD,cls=NpEncoder))

-# read json
-file = 'LR_FS.json'
-with open(file, 'r') as f:
-    data = json.load(f)
+# # read json
+# file = 'LR_FS.json'
+# with open(file, 'r') as f:
+#     data = json.load(f)
 ##############################################################################
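The commented-out dump above passes cls=NpEncoder to json.dumps. NpEncoder itself is not shown in this diff; judging by the linked StackOverflow thread it is presumably the usual numpy-aware json.JSONEncoder subclass, along these lines:

# Presumed shape of NpEncoder (not in this diff): converts numpy scalars and
# arrays so that dicts of sklearn results can be serialised with json.dumps().
import json
import numpy as np

class NpEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)

# usage: json.dumps({'bts_mcc': np.float64(0.71)}, cls=NpEncoder)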
@@ -1,266 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Mar 18 09:47:48 2022
-
-@author: tanu
-"""
-
-#%% Useful links
-# https://stackoverflow.com/questions/41844311/list-of-all-classification-algorithms
-# https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html
-# https://github.com/davidsbatista/machine-learning-notebooks/blob/master/hyperparameter-across-models.ipynb
-# https://scikit-learn.org/stable/modules/svm.html#classification
-# https://machinelearningmastery.com/hyperparameters-for-classification-machine-learning-algorithms/ # [params]
-# https://uk.mathworks.com/help/stats/hyperparameter-optimization-in-classification-learner-app.html [algo]
-# As a general rule of thumb, it is required to run baseline models on the dataset. I know H2O-AutoML and other AutoML packages do this. But I want to try using Scikit-learn Pipeline,
-# https://codereview.stackexchange.com/questions/256934/model-pipeline-to-run-multiple-classifiers-for-ml-classification
-# https://uk.mathworks.com/help/stats/hyperparameter-optimization-in-classification-learner-app.html
-# QDA: https://www.geeksforgeeks.org/quadratic-discriminant-analysis/
-
-names = [
-    "Nearest Neighbors",
-    "Linear SVM",
-    "RBF SVM",
-    "Gaussian Process",
-    "Decision Tree",
-    "Random Forest",
-    "Neural Net",
-    "AdaBoost",
-    "Naive Bayes",
-    "QDA",
-]
-
-classifiers = [
-    KNeighborsClassifier(5),
-    SVC(kernel="linear", C=0.025),
-    SVC(gamma=2, C=1),
-    GaussianProcessClassifier(1.0 * RBF(1.0)),
-    DecisionTreeClassifier(max_depth=5),
-    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
-    MLPClassifier(alpha=1, max_iter=1000),
-    AdaBoostClassifier(),
-    GaussianNB(),
-    QuadraticDiscriminantAnalysis(),
-]
-
-# NOTE Logistic regression
-# The choice of the algorithm depends on the penalty chosen. Supported penalties by solver:
-# 'newton-cg' - ['l2', 'none']
-# 'lbfgs'     - ['l2', 'none']
-# 'liblinear' - ['l1', 'l2']
-# 'sag'       - ['l2', 'none']
-# 'saga'      - ['elasticnet', 'l1', 'l2', 'none']
-
-# SVR?
-# estimator=SVR(kernel='rbf')
-# param_grid={
-#     'C': [1.1, 5.4, 170, 1001],
-#     'epsilon': [0.0003, 0.007, 0.0109, 0.019, 0.14, 0.05, 8, 0.2, 3, 2, 7],
-#     'gamma': [0.7001, 0.008, 0.001, 3.1, 1, 1.3, 5]
-# }
-
-#%% Classification algorithms param grid
-#%% LogisticRegression()
-# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
-gs_lr = Pipeline((
-    ('pre' , MinMaxScaler())
-    ,('clf', LogisticRegression(**rs
-                                , **njobs))
-))
-gs_lr_params = {
-    'clf__C' : [0.0001, 0.001, 0.01, 0.1, 1, 10, 100]
-    #'C': np.logspace(-4, 4, 50)
-    , 'clf__penalty': ['l1', 'l2', 'elasticnet', 'none']
-    , 'clf__solver' : ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']
-}
-#%% DecisionTreeClassifier()
-
-gs_dt = Pipeline((
-    ('pre' , MinMaxScaler())
-    , ('clf', DecisionTreeClassifier(**rs
-                                     , **njobs))
-))
-gs_dt_params = {
-    'clf__max_depth': [2, 4, 6, 8, 10]
-    , 'clf__criterion': ['gini', 'entropy']
-    , "clf__max_features": ["auto", None]
-    , "clf__max_leaf_nodes": [10, 20, 30, 40]
-}
-#%% KNeighborsClassifier()
-# https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
-gs_knn = Pipeline((
-    ('pre' , MinMaxScaler())
-    ,('clf', KNeighborsClassifier(**rs
-                                  , **njobs))
-))
-
-gs_knn_params = {
-    'clf__n_neighbors': [5, 7, 11]
-    #, 'clf__n_neighbors': range(1, 21, 2)
-    , 'clf__metric' : ['euclidean', 'manhattan', 'minkowski']
-    , 'clf__weights' : ['uniform', 'distance']
-}
-#%% RandomForestClassifier()
-
-gs_rf = Pipeline((
-    ('pre' , MinMaxScaler())
-    ,('clf', RandomForestClassifier(**rs
-                                    , **njobs
-                                    , bootstrap = True
-                                    , oob_score = True))
-))
-gs_rf_params = {
-    'clf__max_depth': [4, 6, 8, 10, 12, 16, 20, None]
-    , 'clf__class_weight': ['balanced', 'balanced_subsample']
-    , 'clf__n_estimators': [10, 100, 1000]
-    , 'clf__criterion': ['gini', 'entropy']
-    , 'clf__max_features': ['auto', 'sqrt']
-    , 'clf__min_samples_leaf': [2, 4, 8, 50]
-    , 'clf__min_samples_split': [10, 20]
-}
-#%% XGBClassifier() # SPNT
-# https://stackoverflow.com/questions/34674797/xgboost-xgbclassifier-defaults-in-python
-gs_xgb = Pipeline((
-    ('pre' , MinMaxScaler())
-    ,('clf', XGBClassifier(**rs
-                           , **njobs))
-))
-
-gs_xgb_params = {
-    'clf__learning_rate': [0.01, 0.05, 0.1, 0.2]
-    , 'clf__max_depth': [4, 6, 8, 10, 12, 16, 20]
-    , 'clf__min_samples_leaf': [4, 8, 12, 16, 20]
-    , 'clf__max_features': ['auto', 'sqrt']
-}
-
-#%% MLPClassifier()
-# https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html
-gs_mlp = Pipeline((
-    ('pre' , MinMaxScaler())
-    ,('clf', MLPClassifier(**rs
-                           , **njobs
-                           , max_iter = 500))
-))
-
-gs_mlp_params = {
-    'clf__hidden_layer_sizes': [(1), (2), (3)]
-    , 'clf__max_features': ['auto', 'sqrt']
-    , 'clf__min_samples_leaf': [2, 4, 8]
-    , 'clf__min_samples_split': [10, 20]
-}
-#%% RidgeClassifier()
-# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeClassifier.html
-gs_rc = Pipeline((
-    ('pre' , MinMaxScaler()) # CHECK if it wants -1 to 1
-    ,('clf', RidgeClassifier(**rs
-                             , **njobs))
-))
-
-gs_rc_params = {
-    'clf__alpha': [0.1, 0.2, 0.5, 0.8, 1.0]
-}
-
-#%% SVC()
-# https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
-gs_svc = Pipeline((
-    ('pre' , MinMaxScaler()) # CHECK if it wants -1 to 1
-    ,('clf', SVC(**rs
-                 , **njobs))
-))
-
-gs_svc_params = {
-    'clf__kernel': ['linear', 'poly', 'rbf', 'sigmoid', 'precomputed']
-    , 'clf__C' : [50, 10, 1.0, 0.1, 0.01]
-    , 'clf__gamma': ['scale', 'auto'] }
-
-#%% BaggingClassifier()
-# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html
-gs_bdt = Pipeline((
-    ('pre' , MinMaxScaler()) # CHECK if it wants -1 to 1
-    ,('clf', BaggingClassifier(**rs
-                               , **njobs
-                               , bootstrap = True
-                               , oob_score = True))
-))
-
-gs_bdt_params = {
-    'clf__n_estimators' : [10, 100, 1000]
-    # If None, then the base estimator is a DecisionTreeClassifier.
-    , 'clf__base_estimator' : ['None', 'SVC()', 'KNeighborsClassifier()'] # if none, DT is used
-    , 'clf__gamma': ['scale', 'auto'] }
-
-#%% GradientBoostingClassifier()
-# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
-gs_gb = Pipeline((
-    ('pre' , MinMaxScaler()) # CHECK if it wants -1 to 1
-    ,('clf', GradientBoostingClassifier(**rs))
-))
-
-gs_bdt_params = {
-    'clf__n_estimators' : [10, 100, 200, 500, 1000]
-    , 'clf__n_estimators' : [10, 100, 1000]
-    , 'clf__learning_rate': [0.001, 0.01, 0.1]
-    , 'clf__subsample' : [0.5, 0.7, 1.0]
-    , 'clf__max_depth' : [3, 7, 9]
-}
-#%% AdaBoostClassifier()
-# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html#sklearn.ensemble.AdaBoostClassifier
-gs_gb = Pipeline((
-    ('pre' , MinMaxScaler()) # CHECK if it wants -1 to 1
-    ,('clf', AdaBoostClassifier(**rs))
-))
-
-gs_bdt_params = {
-    'clf__n_estimators': [None, 1, 2]
-    , 'clf__base_estimator' : ['None', 1*SVC(), 1*KNeighborsClassifier()]
-    #, 'clf___splitter' : ["best", "random"]
-}
-#%% GaussianProcessClassifier()
-# https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessClassifier.html
-#GaussianProcessClassifier(1.0 * RBF(1.0)),
-gs_gpc = Pipeline((
-    ('pre' , MinMaxScaler()) # CHECK if it wants -1 to 1
-    ,('clf', GaussianProcessClassifier(**rs))
-))
-
-gs_gpc_params = {
-    'clf__kernel': [1*RBF(), 1*DotProduct(), 1*Matern(), 1*RationalQuadratic(), 1*WhiteKernel()]
-}
-
-#%% GaussianNB()
-# https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.GaussianNB.html
-gs_gnb = Pipeline((
-    ('pre' , MinMaxScaler())
-    , ('pca', PCA()) # CHECK if it wants -1 to 1
-    ,('clf', GaussianNB(**rs))
-))
-
-gs_gnb_params = {
-    'clf__priors': [None]
-    , 'clf__var_smoothing': np.logspace(0, -9, num=100)
-}
-
-#%% QuadraticDiscriminantAnalysis()
-# https://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis.html
-
-gs_qda = Pipeline((
-    ('pre' , MinMaxScaler())
-    #, ('pca', PCA()) # CHECK if it wants -1 to 1
-    ,('clf', QuadraticDiscriminantAnalysis())
-))
-#%% BernoulliNB()
-# https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.BernoulliNB.html
-gs_gnb = Pipeline((
-    ('pre' , MinMaxScaler())
-    ,('clf', BernoulliNB())
-))
-
-BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
-gs_gnb_params = {
-    'clf__alpha': [0, 1]
-    , 'clf__binarize': ['None', 0]
-    , 'clf__fit_prior': [True]
-    , 'clf__class_prior': ['None']
-}
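The solver/penalty support note in the removed file above is what motivates the split param_grid_lr in the new classification_params_FS.py below: GridSearchCV accepts a list of grids, and each dict only pairs a solver with penalties it supports, so no invalid combination is ever fit. A minimal sketch of the idea (variable names here are illustrative, not repo code; 'none' is the pre-1.2 sklearn spelling, newer releases use penalty=None):

# Illustrative only: encode LogisticRegression's solver/penalty support as
# separate grids so GridSearchCV never tries an unsupported combination.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

valid_grids = [
    {'solver': ['saga'], 'penalty': ['elasticnet'], 'l1_ratio': [0.25, 0.5, 0.75]},
    {'solver': ['saga'], 'penalty': ['l1', 'l2', 'none']},
    {'solver': ['newton-cg', 'lbfgs', 'sag'], 'penalty': ['l2', 'none']},
    {'solver': ['liblinear'], 'penalty': ['l1', 'l2']},
]
search = GridSearchCV(LogisticRegression(max_iter = 500), valid_grids
                      , scoring = 'matthews_corrcoef', cv = 10)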
classification_params_FS.py (new file, 480 lines)

@@ -0,0 +1,480 @@
+########################################################################
+#======================
+# AdaBoostClassifier()
+#======================
+estimator = AdaBoostClassifier(**rs)
+
+# Define pipeline with steps
+pipe_abc = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('clf', AdaBoostClassifier(**rs))])
+    , ('clf', estimator)
+])
+
+# Define hyperparameter space to search
+param_grid_abc = [
+    {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+
+    {
+        # 'clf': [AdaBoostClassifier(**rs)],
+        'clf__n_estimators': [1, 2, 5, 10]
+        # , 'clf__base_estimator' : ['SVC']
+        # , 'clf__splitter' : ["best", "random"]
+    }
+]
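Note on how these two-dict grids behave: GridSearchCV treats a list of dicts as a union of independent grids, so as written the fs__min_features_to_select values are searched with the classifier at its defaults, and the clf__n_estimators values with the selector at its defaults; keys must share one dict to be crossed. A hedged usage sketch (cv, rs, X and y are assumed to be defined elsewhere in the repo, as in its other scripts):

# Sketch: running one of these model/grid pairs, assuming rs = {'random_state': 42},
# cv = StratifiedKFold(...), and X, y prepared as in the repo's other scripts.
gscv_abc = GridSearchCV(pipe_abc
                        , param_grid_abc
                        , cv = cv
                        , scoring = 'matthews_corrcoef'
                        , return_train_score = True)
gscv_abc.fit(X, y)
print(gscv_abc.best_params_, round(gscv_abc.best_score_, 2))

# Crossing selector and classifier settings would need a single dict instead:
# param_grid_abc = [{'fs__min_features_to_select': [1, 2],
#                    'clf__n_estimators': [1, 2, 5, 10]}]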
+########################################################################
+#======================
+# BaggingClassifier()
+#======================
+estimator = BaggingClassifier(**rs
+                              , **njobs
+                              , bootstrap = True
+                              , oob_score = True)
+
+# Define pipeline with steps
+pipe_bc = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+# Define hyperparameter space to search
+param_grid_bc = [
+    {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+
+    {
+        # 'clf': [BaggingClassifier(**rs, **njobs, bootstrap = True, oob_score = True)],
+        'clf__n_estimators' : [10, 25, 50, 100, 150, 200, 500, 700, 1000]
+        # , 'clf__base_estimator' : ['None', 'SVC()', 'KNeighborsClassifier()'] # if none, DT is used
+    }
+]
+########################################################################
+#======================
+# BernoulliNB()
+#======================
+# Define estimator
+estimator = BernoulliNB()
+
+# Define pipeline with steps
+pipe_bnb = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+# Define hyperparameter space to search
+param_grid_bnb = [
+    {'fs__min_features_to_select' : [1,2]
+     # , 'fs__cv': [cv]
+    },
+
+    {
+        # 'clf': [BernoulliNB()],
+        'clf__alpha': [1, 0]
+        , 'clf__binarize': [None, 0]
+        , 'clf__fit_prior': [True]
+        , 'clf__class_prior': [None]
+    }
+]
+########################################################################
+#===========================
+# DecisionTreeClassifier()
+#===========================
+
+# Define estimator
+estimator = DecisionTreeClassifier(**rs)
+
+# Define pipeline with steps
+pipe_dt = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+# Define hyperparameter space to search
+param_grid_dt = [
+    {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+
+    {
+        # 'clf': [DecisionTreeClassifier(**rs)],
+        'clf__max_depth': [None, 2, 4, 6, 8, 10, 12, 16, 20]
+        , 'clf__class_weight': ['balanced']
+        , 'clf__criterion': ['gini', 'entropy', 'log_loss']
+        , 'clf__max_features': [None, 'sqrt', 'log2']
+        , 'clf__min_samples_leaf': [1, 2, 3, 4, 5, 10]
+        , 'clf__min_samples_split': [2, 5, 15, 20]
+    }
+]
+
+#########################################################################
+#==============================
+# GradientBoostingClassifier()
+#==============================
+# Define estimator
+estimator = GradientBoostingClassifier(**rs)
+
+# Define pipeline with steps
+pipe_gbc = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+# Define hyperparameter space to search
+param_grid_gbc = [
+    {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+    {
+        # 'clf': [GradientBoostingClassifier(**rs)],
+        'clf__n_estimators' : [10, 100, 200, 500, 1000]
+        , 'clf__n_estimators' : [10, 100, 1000]
+        , 'clf__learning_rate': [0.001, 0.01, 0.1]
+        , 'clf__subsample' : [0.5, 0.7, 1.0]
+        , 'clf__max_depth' : [3, 7, 9]
+    }
+]
+
+#########################################################################
+#===========================
+# GaussianNB()
+#===========================
+# Define estimator
+estimator = GaussianNB()
+
+# Define pipeline with steps
+pipe_gnb = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+# Define hyperparameter space to search
+param_grid_gnb = [
+    {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+    {
+        # 'clf': [GaussianNB()],
+        'clf__priors': [None]
+        , 'clf__var_smoothing': np.logspace(0, -9, num=100)
+    }
+]
+
+#########################################################################
+#===========================
+# GaussianProcessClassifier()
+#===========================
+# Define estimator
+estimator = GaussianProcessClassifier(**rs)
+
+# Define pipeline with steps
+pipe_gbc = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+# Define hyperparameter space to search
+param_grid_gbc = [
+    {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+
+    {
+        # 'clf': [GaussianProcessClassifier(**rs)],
+        'clf__kernel': [1*RBF(), 1*DotProduct(), 1*Matern(), 1*RationalQuadratic(), 1*WhiteKernel()]
+    }
+]
+
+#########################################################################
+#===========================
+# KNeighborsClassifier()
+#===========================
+# Define estimator
+estimator = KNeighborsClassifier(**njobs)
+
+# Define pipeline with steps
+pipe_knn = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+# Define hyperparameter space to search
+param_grid_knn = [
+    {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+
+    {
+        # 'clf': [KNeighborsClassifier(**njobs)],
+        'clf__n_neighbors': range(21, 51, 2)
+        #, 'clf__n_neighbors': [5, 7, 11]
+        , 'clf__metric' : ['euclidean', 'manhattan', 'minkowski']
+        , 'clf__weights' : ['uniform', 'distance']
+    }
+]
+#########################################################################
+#===========================
+# LogisticRegression()
+#===========================
+# Define estimator
+estimator = LogisticRegression(**rs)
+
+# Define pipeline with steps
+pipe_lr = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(LogisticRegression(**rs), cv = rskf_cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)])
+
+# Define hyperparameter space to search
+param_grid_lr = [
+
+    {'fs__min_features_to_select' : [1,2]
+     # , 'fs__cv': [rskf_cv]
+    },
+
+    {
+        # 'clf': [LogisticRegression(**rs)],
+        'clf__C': np.logspace(0, 4, 10),
+        'clf__penalty': ['none', 'l1', 'l2', 'elasticnet'],
+        'clf__max_iter': list(range(100,800,100)),
+        'clf__solver': ['saga']
+    },
+    {
+        # 'clf': [LogisticRegression(**rs)],
+        'clf__C': np.logspace(0, 4, 10),
+        'clf__penalty': ['l2', 'none'],
+        'clf__max_iter': list(range(100,800,100)),
+        'clf__solver': ['newton-cg', 'lbfgs', 'sag']
+    },
+    {
+        # 'clf': [LogisticRegression(**rs)],
+        'clf__C': np.logspace(0, 4, 10),
+        'clf__penalty': ['l1', 'l2'],
+        'clf__max_iter': list(range(100,800,100)),
+        'clf__solver': ['liblinear']
+    }
+
+]
+#########################################################################
+#==================
+# MLPClassifier()
+#==================
+# Define estimator
+estimator = MLPClassifier(**rs)
+
+# Define pipeline with steps
+pipe_mlp = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+param_grid_mlp = [ {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+
+    {
+        # 'clf': [MLPClassifier(**rs, max_iter = 1000)],
+        'clf__max_iter': [1000, 2000]
+        , 'clf__hidden_layer_sizes': [(1), (2), (3), (5), (10)]
+        , 'clf__solver': ['lbfgs', 'sgd', 'adam']
+        , 'clf__learning_rate': ['constant', 'invscaling', 'adaptive']
+        #, 'clf__learning_rate': ['constant']
+    }
+]
+
+#########################################################################
+#==================================
+# QuadraticDiscriminantAnalysis()
+#==================================
+# Define estimator
+estimator = QuadraticDiscriminantAnalysis(**rs)
+
+# Define pipeline with steps
+pipe_qda = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+# Define hyperparameter space to search
+param_grid_qda = [
+    {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+
+    {
+        # 'clf': [QuadraticDiscriminantAnalysis()],
+        'clf__priors': [None]
+    }
+]
+
+#########################################################################
+#====================
+# RidgeClassifier()
+#====================
+
+# Define estimator
+estimator = RidgeClassifier(**rs)
+
+# Define pipeline with steps
+pipe_abc = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+param_grid_rc = [
+    {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+
+    {
+        #'clf' : [RidgeClassifier(**rs)],
+        'clf__alpha': [0.1, 0.2, 0.5, 0.8, 1.0]
+    }
+]
+#######################################################################
+#===========================
+# RandomForestClassifier()
+#===========================
+# Define estimator
+estimator = RandomForestClassifier(**rs, **njobs, bootstrap = True, oob_score = True)
+
+# Define pipeline with steps
+pipe_rf = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+# Define hyperparameter space to search
+param_grid_rf = [
+    {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+
+    {
+        # 'clf': [RandomForestClassifier(**rs, **njobs, bootstrap = True, oob_score = True)],
+        'clf__max_depth': [4, 6, 8, 10, 12, 16, 20, None]
+        , 'clf__class_weight': ['balanced', 'balanced_subsample']
+        , 'clf__n_estimators': [10, 25, 50, 100, 200, 300] # go up to 100
+        , 'clf__criterion': ['gini', 'entropy', 'log_loss']
+        , 'clf__max_features': ['sqrt', 'log2', None] # default is sqrt
+        , 'clf__min_samples_leaf': [1, 2, 3, 4, 5, 10]
+        , 'clf__min_samples_split': [2, 5, 15, 20]
+    }
+]
+#######################################################################
+#========
+# SVC()
+#========
+
+estimator = SVC(**rs)
+
+# Define pipeline with steps
+pipe_svc = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+# Define hyperparameter space to search
+param_grid_svc = [
+    {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+
+    {
+        # 'clf': [SVC(**rs)],
+        'clf__kernel': ['poly', 'rbf', 'sigmoid']
+        #, 'clf__kernel': ['linear']
+        , 'clf__C' : [50, 10, 1.0, 0.1, 0.01]
+        , 'clf__gamma': ['scale', 'auto']
+    }
+]
+
+#######################################################################
+#=================
+# XGBClassifier()
+#=================
+
+# Define estimator
+# https://www.datatechnotes.com/2019/07/classification-example-with.html
+# XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
+#               colsample_bynode=1, colsample_bytree=1, gamma=0, learning_rate=0.1,
+#               max_delta_step=0, max_depth=3, min_child_weight=1, missing=None,
+#               n_estimators=100, n_jobs=1, nthread=None,
+#               objective='multi:softprob', random_state=0, reg_alpha=0,
+#               reg_lambda=1, scale_pos_weight=1, seed=None, silent=None,
+#               subsample=1, verbosity=1)
+estimator = XGBClassifier(**rs, **njobs, verbose = 3)
+
+# Define pipeline with steps
+pipe_xgb = Pipeline([
+    ('pre', MinMaxScaler())
+    , ('fs', RFECV(DecisionTreeClassifier(**rs), cv = cv, scoring = 'matthews_corrcoef'))
+    # , ('fs', RFECV(estimator, cv = cv, scoring = 'matthews_corrcoef'))
+    , ('clf', estimator)
+])
+
+param_grid_xgb = [
+    {
+        'fs__min_features_to_select' : [1,2]
+        # , 'fs__cv': [cv]
+    },
+    {
+        # 'clf': [XGBClassifier(**rs, **njobs, verbose = 3)],
+        'clf__learning_rate': [0.01, 0.05, 0.1, 0.2]
+        , 'clf__max_depth' : [4, 6, 8, 10, 12, 16, 20]
+        , 'clf__n_estimators': [10, 25, 50, 100, 200, 300]
+        #, 'clf__min_samples_leaf': [4, 8, 12, 16, 20]
+        #, 'clf__max_features': ['auto', 'sqrt']
+    }
+]
+
+#######################################################################
@@ -16,7 +16,6 @@ Created on Tue Mar 15 11:09:50 2022
 cv = skf_cv
-
 # LogisticRegression: Feature Selection + GridSearch CV + Pipeline

 ###############################################################################
 # Define estimator
 estimator = LogisticRegression(**rs)

@@ -8,7 +8,7 @@ Created on Wed May 18 06:03:24 2022
 #cv = rskf_cv
 cv = skf_cv

-# AdaBoostClassifier: Feature Selection + GridSearch CV + Pipeline
+# RandomForestClassifier: Feature Selection + GridSearch CV + Pipeline
 ###############################################################################
 # Define estimator
 estimator = RandomForestClassifier(**rs, **njobs, bootstrap = True, oob_score = True)

@@ -190,4 +190,4 @@ output_modelD
 # json.dump(output_modelD, f)
 # #
 # with open(file, 'r') as f:
 #     data = json.load(f)
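skf_cv and rskf_cv are referenced throughout these files but defined elsewhere in the repo; by their names they are presumably the stratified splitters used for every grid search, along the lines of this sketch (the exact n_splits/n_repeats values are assumptions):

# Presumed definitions (not in this diff) matching the names used above:
from sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold
rs = {'random_state': 42}
skf_cv  = StratifiedKFold(n_splits = 10, shuffle = True, **rs)
rskf_cv = RepeatedStratifiedKFold(n_splits = 10, n_repeats = 3, **rs)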