fs: cut down the number of iterations

parent 7ba838b493
commit 9071a87056

1 changed file with 74 additions and 70 deletions
@@ -42,6 +42,7 @@ ml_gene_drugD = {'pncA' : 'pyrazinamide'
 # , 'gid' : 'streptomycin'
 }
 gene_dataD={}
+# NOTE: for gid, run 'actual' on 80/20 and sl only
 split_types = ['70_30', '80_20', 'sl']
 split_data_types = ['actual', 'complete']
 #split_types = ['70_30']
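The NOTE added above restricts which split_type / data_type combinations are meant to run for gid, but this diff does not show where that restriction is enforced. A small illustrative filter, with a hypothetical helper name that is not part of the commit, just to make the rule concrete:

# Illustrative only: the keep_combination() helper is an assumption, not code
# from this repository. It expresses the NOTE: for gid, only the 'actual'
# data type on the 80/20 and sl splits should run.
split_types = ['70_30', '80_20', 'sl']
split_data_types = ['actual', 'complete']

def keep_combination(gene, split_type, data_type):
    if gene.lower() == 'gid':
        return data_type == 'actual' and split_type in ('80_20', 'sl')
    return True

for st in split_types:
    for dt in split_data_types:
        print('gid', st, dt, keep_combination('gid', st, dt))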
@@ -49,24 +50,25 @@ split_data_types = ['actual', 'complete']
 
 #fs_models = [('Logistic Regression' , LogisticRegression(**rs) )]
 
-fs_models = [('AdaBoost Classifier' , AdaBoostClassifier(**rs) )
-, ('Decision Tree' , DecisionTreeClassifier(**rs) )
+fs_models = [
+('Logistic Regression' , LogisticRegression(**rs) )
+, ('Ridge Classifier' , RidgeClassifier(**rs) )
+#, ('AdaBoost Classifier' , AdaBoostClassifier(**rs) )
+#, ('Decision Tree' , DecisionTreeClassifier(**rs) )
 #, ('Extra Tree' , ExtraTreeClassifier(**rs) )
 #, ('Extra Trees' , ExtraTreesClassifier(**rs) )
 #, ('Gradient Boosting' , GradientBoostingClassifier(**rs) )
 #, ('LDA' , LinearDiscriminantAnalysis() )
-#, ('Logistic Regression' , LogisticRegression(**rs) )
 #, ('Logistic RegressionCV' , LogisticRegressionCV(cv = 3, **rs))
 #, ('Passive Aggresive' , PassiveAggressiveClassifier(**rs, **njobs) )
 #, ('Random Forest' , RandomForestClassifier(**rs, n_estimators = 1000 ) )
-#, ('Ridge Classifier' , RidgeClassifier(**rs) )
 #, ('Ridge ClassifierCV' , RidgeClassifierCV(cv = 3) )
 #, ('Stochastic GDescent' , SGDClassifier(**rs, **njobs) )
 ]
 
 for gene, drug in ml_gene_drugD.items():
-print ('\nGene:', gene
-, '\nDrug:', drug)
+#print ('\nGene:', gene
+# , '\nDrug:', drug)
 gene_low = gene.lower()
 gene_dataD[gene_low] = getmldata(gene, drug
 , data_combined_model = False # this means it doesn't include 'gene_name' as a feauture as a single gene-target shouldn't have it.
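For context (not part of the commit): fs_models is a list of (name, estimator) pairs, and every active entry is one more pass of the feature-selection loop further down, so trimming this list is the first of the iteration cuts. A minimal, self-contained sketch, assuming rs is a shared keyword-argument dict such as a fixed random_state (the real script defines it elsewhere):

# Illustrative sketch of the (name, estimator) structure the fs loop consumes.
# 'rs' here is an assumption; the script defines its own shared kwargs.
from sklearn.linear_model import LogisticRegression, RidgeClassifier

rs = {'random_state': 42}

fs_models = [
    ('Logistic Regression', LogisticRegression(**rs)),
    ('Ridge Classifier', RidgeClassifier(**rs)),
]

# Each entry triggers one full sweep of the downstream loop.
for index, (model_name, model_fn) in enumerate(fs_models, start=1):
    print(index, model_name, type(model_fn).__name__)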
@@ -92,26 +94,22 @@ for gene, drug in ml_gene_drugD.items():
 , 'target' : tempD['y']
 , 'var_type' : 'mixed'
 , 'resampling_type': 'none'}
-
-, 'smnc_paramD' : { 'input_df' : tempD['X_smnc']
-, 'target' : tempD['y_smnc']
-, 'var_type' : 'mixed'
-, 'resampling_type' : 'smnc'}
-
-, 'ros_paramD' : { 'input_df' : tempD['X_ros']
-, 'target' : tempD['y_ros']
-, 'var_type' : 'mixed'
-, 'resampling_type' : 'ros'}
-
-, 'rus_paramD' : { 'input_df' : tempD['X_rus']
-, 'target' : tempD['y_rus']
-, 'var_type' : 'mixed'
-, 'resampling_type' : 'rus'}
-
-, 'rouC_paramD' : { 'input_df' : tempD['X_rouC']
-, 'target' : tempD['y_rouC']
-, 'var_type' : 'mixed'
-, 'resampling_type': 'rouC'}
+#, 'smnc_paramD' : { 'input_df' : tempD['X_smnc']
+# , 'target' : tempD['y_smnc']
+# , 'var_type' : 'mixed'
+# , 'resampling_type' : 'smnc'}
+#, 'ros_paramD' : { 'input_df' : tempD['X_ros']
+# , 'target' : tempD['y_ros']
+# , 'var_type' : 'mixed'
+# , 'resampling_type' : 'ros'}
+#, 'rus_paramD' : { 'input_df' : tempD['X_rus']
+# , 'target' : tempD['y_rus']
+# , 'var_type' : 'mixed'
+# , 'resampling_type' : 'rus'}
+#, 'rouC_paramD' : { 'input_df' : tempD['X_rouC']
+# , 'target' : tempD['y_rouC']
+# , 'var_type' : 'mixed'
+# , 'resampling_type': 'rouC'}
 }
 
 out_fsD = {}
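Not part of the diff, but this hunk is where most of the iteration cut comes from: the inner loop below runs fsgs_rfecv once per key in paramD, so commenting out the four resampled variants leaves a single unresampled pass per model. A rough sketch of that relationship; the key name and the tempD stand-in are placeholders, since the baseline entry sits outside the hunk:

# Placeholder illustration only: tempD and 'baseline_paramD' are assumptions,
# the real values come from getmldata(). The point is that every key left in
# paramD is one full fsgs_rfecv run per model.
tempD = {'X': None, 'y': None}  # stand-in for the real data dict

paramD = {
    'baseline_paramD': {'input_df': tempD['X'],
                        'target': tempD['y'],
                        'var_type': 'mixed',
                        'resampling_type': 'none'},
    # 'smnc_paramD': {...},  # each re-enabled entry adds another full run
    # 'ros_paramD' : {...},
    # 'rus_paramD' : {...},
    # 'rouC_paramD': {...},
}

print(len(paramD), 'feature-selection run(s) per model')  # 1 instead of 5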
@@ -124,13 +122,16 @@ for gene, drug in ml_gene_drugD.items():
 index = index+1
 #out_fsD[model_name] = {}
 current_model = {}
 
 for k, v in paramD.items():
-out_filename = (gene.lower() + '_' + split_type + '_' + data_type + '_' + model_name + '_' + k + '.json')
+out_filename = gene.lower() + '_' + split_type + '_' + data_type + '_' + model_name + '_' + k + '.json'
 fsD_params=paramD[k]
 
 #out_fsD[model_name][k] = fsgs_rfecv(
-thingg = foo(
-)
+#current_model[k] = v
+# NOTE: this will silently fail with a syntax error if you don't have all the necessary libraries installed.
+# Python will NOT warn you of the missing lib!
 current_model[k] = fsgs_rfecv(
 **fsD_params
 , param_gridLd = [{'fs__min_features_to_select': [1]}]
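The two new NOTE lines warn that a missing library only surfaces as an obscure failure deep inside the sweep. One defensive option, not part of this commit and with an illustrative module list, is to probe the required imports up front and fail loudly:

# Fail fast if a required third-party package is missing, instead of dying
# mid-sweep with a confusing error. The module names here are an assumption
# based on what the script appears to use.
import importlib
import sys

for module in ('numpy', 'pandas', 'sklearn', 'imblearn'):
    try:
        importlib.import_module(module)
    except ImportError as err:
        sys.exit(f'Missing required library: {module} ({err})')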
@@ -138,9 +139,12 @@ for gene, drug in ml_gene_drugD.items():
 , blind_test_target = tempD['y_bts']
 , estimator = model_fn
 , use_fs = False # uses estimator as the RFECV parameter for fs. Set to TRUE if you want to supply custom_fs as shown below
-# NOTE: IS THIS CORRECT?!?
 , custom_fs = RFECV(DecisionTreeClassifier(**rs), cv = skf_cv, scoring = 'matthews_corrcoef')
 , cv_method = skf_cv
 )
-with open(out_filename, 'w') as f:
-f.write(json.dumps(current_model)
+# write current model to disk
+#print(current_model)
+out_json = json.dumps(current_model)
+with open(out_filename, 'w', encoding="utf-8") as file:
+file.write(out_json)
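One caveat on the new write-out block, not raised in the commit: json.dumps() raises a TypeError if current_model contains numpy scalars or arrays, which scikit-learn results often do. A hedged workaround is a default= fallback; the helper and placeholder dict below are illustrative, not part of the script:

import json

def to_jsonable(obj):
    """Best-effort fallback for objects json cannot serialize natively."""
    if hasattr(obj, 'tolist'):   # numpy arrays and numpy scalars
        return obj.tolist()
    return str(obj)

current_model = {'mcc': 0.87}  # placeholder result dict
out_json = json.dumps(current_model, default=to_jsonable)

with open('example_output.json', 'w', encoding='utf-8') as file:
    file.write(out_json)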