#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 16 16:55:06 2022

@author: tanu
"""
# References:
# https://stackoverflow.com/questions/57248072/gridsearchcv-gives-different-result
# https://stackoverflow.com/questions/44947574/what-is-the-meaning-of-mean-test-score-in-cv-result
# https://stackoverflow.com/questions/47257952/how-to-get-average-score-of-k-fold-cross-validation-with-sklearn

# If you only want accuracy, you can simply use cross_val_score():
#   kf = KFold(n_splits = 10)
#   clf_tree = DecisionTreeClassifier()
#   scores = cross_val_score(clf_tree, X, y, cv = kf)
#   avg_score = np.mean(scores)
#   print(avg_score)
# Here cross_val_score takes the original X and y (without splitting into train
# and test). It splits them itself, fits the model on each training fold and
# scores on the corresponding test fold, returning one score per fold.
# So with 10 folds, 10 scores are returned in `scores`; their mean is the CV score.

import numpy as np
import pandas as pd

from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import make_scorer, matthews_corrcoef
from sklearn.model_selection import GridSearchCV, StratifiedKFold

# NOTE: X and y are expected to be defined already (e.g. in the data-loading script).

# Scorers: MCC needs a custom scorer; recall and precision are built-in strings.
mcc_score_fn = {'mcc': make_scorer(matthews_corrcoef)}

scoring_refit_recall    = {'scoring': 'recall'     , 'refit': 'recall'}
scoring_refit_precision = {'scoring': 'precision'  , 'refit': 'precision'}
scoring_refit_mcc       = {'scoring': mcc_score_fn , 'refit': 'mcc'}

njobs = {'n_jobs': 10}  # my desktop has 12 cores
skf_cv = StratifiedKFold(n_splits = 10, shuffle = True)

#%% GSCV: RandomForest
gs_rf = GridSearchCV(estimator = RandomForestClassifier(n_jobs = -1
                                                        , oob_score = True
                                                        #, class_weight = {1: 10/11, 0: 1/11}
                                                        )
                     , param_grid = [{'max_depth': [4, 6, 8, 10, None]
                                      , 'max_features': ['auto', 'sqrt']
                                      , 'min_samples_leaf': [2, 4, 8]
                                      , 'min_samples_split': [10, 20]}]
                     , cv = skf_cv
                     , **njobs
                     , **scoring_refit_recall
                     #, **scoring_refit_mcc
                     )

gs_rf_fit = gs_rf.fit(X, y)
gs_rf_res = gs_rf_fit.cv_results_

print('Best model:\n', gs_rf.best_params_)
print("Best model's score:\n", gs_rf.best_score_)
print('Mean of mean_test_score over all param combinations (sanity check):\n'
      , np.mean(gs_rf_res['mean_test_score']))
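#%% Alternative sketch (not used above): GridSearchCV also accepts several
# scorers at once, with `refit` naming the metric used to pick best_params_,
# instead of swapping the scoring_refit_* dicts in and out. This is only an
# illustration under the same assumptions as above (X, y and skf_cv defined);
# the grid below is deliberately small to keep the extra search quick.
from sklearn.metrics import precision_score, recall_score

multi_score_fn = {'recall'     : make_scorer(recall_score)
                  , 'precision': make_scorer(precision_score)
                  , 'mcc'      : make_scorer(matthews_corrcoef)}

gs_rf_multi = GridSearchCV(estimator = RandomForestClassifier(n_jobs = -1, oob_score = True)
                           , param_grid = [{'max_depth': [4, None]
                                            , 'min_samples_leaf': [2, 8]}]
                           , cv = skf_cv
                           , scoring = multi_score_fn
                           , refit = 'mcc'  # best_params_/best_score_ follow MCC
                           , **njobs)
gs_rf_multi_fit = gs_rf_multi.fit(X, y)
# cv_results_ now holds split0_test_recall, mean_test_precision, mean_test_mcc, etc.
print('Multi-metric best params:\n', gs_rf_multi_fit.best_params_)
print('Multi-metric best score (mcc):\n', gs_rf_multi_fit.best_score_)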
#%% Proof of concept: manual inspection to see how the best score is calculated!
# SATISFIED!
# Best model example: recall, best score: 0.8059288537549408
#   {'max_depth': 4, 'max_features': 'sqrt', 'min_samples_leaf': 2, 'min_samples_split': 10}
# Best model example: mcc, best score: 0.42504894661702863
#   {'max_depth': 4, 'max_features': 'auto', 'min_samples_leaf': 4, 'min_samples_split': 20}
# Best model example: precision, best score: 0.7144745254745255
#   {'max_depth': 6, 'max_features': 'sqrt', 'min_samples_leaf': 8, 'min_samples_split': 10}

best_model = [{'max_depth': 6
               , 'max_features': 'sqrt'
               , 'min_samples_leaf': 8
               , 'min_samples_split': 10}]

gs_results_df = pd.DataFrame(gs_rf_res)
gs_results_df.shape

# Keep only the row(s) whose params match the best model
gs_best_df = gs_results_df.loc[gs_results_df['params'].isin(best_model)]
gs_best_df.shape

# Keep only the test-score columns for that row...
gs_best_df_test = gs_best_df.filter(like = 'test_', axis = 1)
gs_best_df_test.shape

gs_best_df_test_recall = gs_best_df_test.filter(like = '_score', axis = 1)
gs_best_df_test_recall.shape

# ...and then only the per-split columns
f = gs_best_df_test_recall.filter(like = 'split', axis = 1)
f.shape
#gs_best_df_test_mcc = gs_best_df_test.filter(like = '_mcc', axis = 1)
#f = gs_best_df_test_mcc.filter(like = 'split', axis = 1)

# Mean of the 10 per-split test scores for the best params
f.iloc[:, 0:10].mean(axis = 1)
# recall:    0.801186 vs 0.8059288537549408
# mcc:       0.425049 vs 0.42504894661702863
# precision: 0.714475 vs 0.7144745254745255

#%% Check the scores: fold sizes and full CV results
print([(len(train), len(test)) for train, test in skf_cv.split(X, y)])
gs_rf_fit.cv_results_
# Is it a fold-size-weighted average? No: the manual means above match
# mean_test_score, so best_score_ is the plain mean of the per-split scores.
#%%
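#%% The same check can be done directly from the fitted object, without the
# DataFrame filtering above. This is a sketch under the same assumptions
# (single-metric scoring, so the split columns are named 'split<i>_test_score';
# with a multi-metric scoring dict the suffix is the metric name instead).
best_ix = gs_rf_fit.best_index_
split_scores = np.array([gs_rf_fit.cv_results_[f'split{i}_test_score'][best_ix]
                         for i in range(skf_cv.get_n_splits())])
print('Per-split test scores for best params:', split_scores)
print('Manual mean :', split_scores.mean())
print('best_score_ :', gs_rf_fit.best_score_)  # should agree with the manual mean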