fs: cut down the number of iterations

Tanushree Tunstall 2022-07-02 11:12:39 +01:00
parent 7ba838b493
commit 9071a87056


@@ -42,6 +42,7 @@ ml_gene_drugD = {'pncA' : 'pyrazinamide'
                  # , 'gid' : 'streptomycin'
                  }
 gene_dataD={}
+# NOTE: for gid, run 'actual' on 80/20 and sl only
 split_types = ['70_30', '80_20', 'sl']
 split_data_types = ['actual', 'complete']
 #split_types = ['70_30']
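The iterations being cut here are the product of genes × split types × data types × models × resampling set-ups. A quick sketch of the run count after this commit (the gene count is an assumption, since ml_gene_drugD is only partially visible in this hunk; the model and resampling counts come from the changes in the next hunk):

# Hypothetical iteration count; n_genes is assumed, not taken from this diff.
n_genes          = 6
split_types      = ['70_30', '80_20', 'sl']
split_data_types = ['actual', 'complete']
n_models         = 2   # Logistic Regression + Ridge Classifier kept in fs_models
n_resampling     = 1   # only 'baseline_paramD' left active in paramD

print('runs:', n_genes * len(split_types) * len(split_data_types) * n_models * n_resampling)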
@@ -49,98 +50,101 @@ split_data_types = ['actual', 'complete']
 #fs_models = [('Logistic Regression' , LogisticRegression(**rs) )]
-fs_models = [('AdaBoost Classifier' , AdaBoostClassifier(**rs) )
-             , ('Decision Tree' , DecisionTreeClassifier(**rs) )
-             #, ('Extra Tree' , ExtraTreeClassifier(**rs) )
-             #, ('Extra Trees' , ExtraTreesClassifier(**rs) )
-             #, ('Gradient Boosting' , GradientBoostingClassifier(**rs) )
-             #, ('LDA' , LinearDiscriminantAnalysis() )
-             #, ('Logistic Regression' , LogisticRegression(**rs) )
-             #, ('Logistic RegressionCV' , LogisticRegressionCV(cv = 3, **rs))
-             #, ('Passive Aggresive' , PassiveAggressiveClassifier(**rs, **njobs) )
-             #, ('Random Forest' , RandomForestClassifier(**rs, n_estimators = 1000 ) )
-             #, ('Ridge Classifier' , RidgeClassifier(**rs) )
-             #, ('Ridge ClassifierCV' , RidgeClassifierCV(cv = 3) )
-             #, ('Stochastic GDescent' , SGDClassifier(**rs, **njobs) )
-             ]
+fs_models = [
+             ('Logistic Regression' , LogisticRegression(**rs) )
+             , ('Ridge Classifier' , RidgeClassifier(**rs) )
+             #, ('AdaBoost Classifier' , AdaBoostClassifier(**rs) )
+             #, ('Decision Tree' , DecisionTreeClassifier(**rs) )
+             #, ('Extra Tree' , ExtraTreeClassifier(**rs) )
+             #, ('Extra Trees' , ExtraTreesClassifier(**rs) )
+             #, ('Gradient Boosting' , GradientBoostingClassifier(**rs) )
+             #, ('LDA' , LinearDiscriminantAnalysis() )
+             #, ('Logistic RegressionCV' , LogisticRegressionCV(cv = 3, **rs))
+             #, ('Passive Aggresive' , PassiveAggressiveClassifier(**rs, **njobs) )
+             #, ('Random Forest' , RandomForestClassifier(**rs, n_estimators = 1000 ) )
+             #, ('Ridge ClassifierCV' , RidgeClassifierCV(cv = 3) )
+             #, ('Stochastic GDescent' , SGDClassifier(**rs, **njobs) )
+             ]
 for gene, drug in ml_gene_drugD.items():
-    print ('\nGene:', gene
-           , '\nDrug:', drug)
+    #print ('\nGene:', gene
+    #       , '\nDrug:', drug)
     gene_low = gene.lower()
     gene_dataD[gene_low] = getmldata(gene, drug
                                      , data_combined_model = False # this means it doesn't include 'gene_name' as a feauture as a single gene-target shouldn't have it.
                                      , use_or = False
                                      , omit_all_genomic_features = False
                                      , write_maskfile = False
                                      , write_outfile = False)
     for split_type in split_types:
         for data_type in split_data_types:
             # unused per-split outfile
             #out_filename = outdir + gene.lower() + '_'+split_type+'_' + data_type + '.json'
             tempD=split_tts(gene_dataD[gene_low]
                             , data_type = data_type
                             , split_type = split_type
                             , oversampling = True # TURN IT ON TO RUN THE OTHERS BIS
                             , dst_colname = 'dst'
                             , target_colname = 'dst_mode'
                             , include_gene_name = True
                             )
             paramD = {
                       'baseline_paramD': { 'input_df' : tempD['X']
                                          , 'target' : tempD['y']
                                          , 'var_type' : 'mixed'
                                          , 'resampling_type': 'none'}
-                      , 'smnc_paramD' : { 'input_df' : tempD['X_smnc']
-                                        , 'target' : tempD['y_smnc']
-                                        , 'var_type' : 'mixed'
-                                        , 'resampling_type' : 'smnc'}
-                      , 'ros_paramD' : { 'input_df' : tempD['X_ros']
-                                       , 'target' : tempD['y_ros']
-                                       , 'var_type' : 'mixed'
-                                       , 'resampling_type' : 'ros'}
-                      , 'rus_paramD' : { 'input_df' : tempD['X_rus']
-                                       , 'target' : tempD['y_rus']
-                                       , 'var_type' : 'mixed'
-                                       , 'resampling_type' : 'rus'}
-                      , 'rouC_paramD' : { 'input_df' : tempD['X_rouC']
-                                        , 'target' : tempD['y_rouC']
-                                        , 'var_type' : 'mixed'
-                                        , 'resampling_type': 'rouC'}
+                      #, 'smnc_paramD' : { 'input_df' : tempD['X_smnc']
+                      #                  , 'target' : tempD['y_smnc']
+                      #                  , 'var_type' : 'mixed'
+                      #                  , 'resampling_type' : 'smnc'}
+                      #, 'ros_paramD' : { 'input_df' : tempD['X_ros']
+                      #                 , 'target' : tempD['y_ros']
+                      #                 , 'var_type' : 'mixed'
+                      #                 , 'resampling_type' : 'ros'}
+                      #, 'rus_paramD' : { 'input_df' : tempD['X_rus']
+                      #                 , 'target' : tempD['y_rus']
+                      #                 , 'var_type' : 'mixed'
+                      #                 , 'resampling_type' : 'rus'}
+                      #, 'rouC_paramD' : { 'input_df' : tempD['X_rouC']
+                      #                  , 'target' : tempD['y_rouC']
+                      #                  , 'var_type' : 'mixed'
+                      #                  , 'resampling_type': 'rouC'}
                       }
             out_fsD = {}
             index = 1
             for model_name, model_fn in fs_models:
                 print('\nRunning classifier with FS:', index
                       , '\nModel_name:' , model_name
                       , '\nModel func:' , model_fn)
                       #, '\nList of models:', models)
                 index = index+1
                 #out_fsD[model_name] = {}
                 current_model = {}
                 for k, v in paramD.items():
-                    out_filename = (gene.lower() + '_' + split_type + '_' + data_type + '_' + model_name + '_' + k + '.json')
+                    out_filename = gene.lower() + '_' + split_type + '_' + data_type + '_' + model_name + '_' + k + '.json'
                     fsD_params=paramD[k]
                     #out_fsD[model_name][k] = fsgs_rfecv(
                     #current_model[k] = v
                     current_model[k] = fsgs_rfecv(
                                         **fsD_params
                                         , param_gridLd = [{'fs__min_features_to_select': [1]}]
                                         , blind_test_df = tempD['X_bts']
                                         , blind_test_target = tempD['y_bts']
                                         , estimator = model_fn
                                         , use_fs = False # uses estimator as the RFECV parameter for fs. Set to TRUE if you want to supply custom_fs as shown below
-                                        # NOTE: IS THIS CORRECT?!?
                                         , custom_fs = RFECV(DecisionTreeClassifier(**rs), cv = skf_cv, scoring = 'matthews_corrcoef')
                                         , cv_method = skf_cv
                                         )
-                    with open(out_filename, 'w') as f:
-                        f.write(json.dumps(current_model))
+                    # write current model to disk
+                    #print(current_model)
+                    out_json = json.dumps(current_model)
+                    with open(out_filename, 'w', encoding="utf-8") as file:
+                        file.write(out_json)
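For orientation, each inner call above performs RFECV-driven feature selection for one model on one (split type, data type, resampling) combination and writes the result to JSON. getmldata, split_tts and fsgs_rfecv are project-internal helpers not shown in this diff; the sketch below only approximates the general shape of one such run with plain scikit-learn on synthetic data, and is not the project's implementation:

# Standalone sketch (assumed workflow, synthetic data): RFECV feature selection
# scored by MCC, then evaluation on a held-out "blind" test split, saved as JSON.
import json
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import matthews_corrcoef
from sklearn.model_selection import StratifiedKFold, train_test_split

rs = {'random_state': 42}
skf_cv = StratifiedKFold(n_splits = 10, shuffle = True, **rs)

X, y = make_classification(n_samples = 300, n_features = 25, n_informative = 8, **rs)
X_tr, X_bts, y_tr, y_bts = train_test_split(X, y, test_size = 0.2, stratify = y, **rs)

fs = RFECV(LogisticRegression(max_iter = 1000, **rs)
           , cv = skf_cv
           , scoring = 'matthews_corrcoef'
           , min_features_to_select = 1)
fs.fit(X_tr, y_tr)                  # cross-validated recursive feature elimination
y_pred_bts = fs.predict(X_bts)      # predictions on the untouched blind test set

results = {'n_features_selected': int(fs.n_features_)
           , 'selected_mask': fs.support_.tolist()
           , 'bts_mcc': float(matthews_corrcoef(y_bts, y_pred_bts))}
with open('example_fs_result.json', 'w', encoding = 'utf-8') as f:
    f.write(json.dumps(results))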