diff --git a/MultClassPipe.py b/MultClassPipe.py index 592c193..217bbe9 100644 --- a/MultClassPipe.py +++ b/MultClassPipe.py @@ -23,6 +23,7 @@ from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, roc_auc_score, roc_curve, f1_score #%% rs = {'random_state': 42} +# TODO: add preprocessing step with one hot encoder # Multiple Classification - Model Pipeline def MultClassPipeline(X_train, X_test, y_train, y_test): @@ -35,6 +36,15 @@ def MultClassPipeline(X_train, X_test, y_train, y_test): dt = DecisionTreeClassifier(**rs) et = ExtraTreesClassifier(**rs) rf = RandomForestClassifier(**rs) + rf2 = RandomForestClassifier( + min_samples_leaf=50, + n_estimators=150, + bootstrap=True, + oob_score=True, + n_jobs=-1, + random_state=42, + max_features='auto') + xgb = XGBClassifier(**rs, verbosity=0) clfs = [ @@ -46,6 +56,7 @@ def MultClassPipeline(X_train, X_test, y_train, y_test): ('Decision Tree', dt), ('Extra Trees', et), ('Random Forest', rf), + ('Random Forest2', rf2), ('XGBoost', xgb) ] diff --git a/__pycache__/MultClassPipe.cpython-37.pyc b/__pycache__/MultClassPipe.cpython-37.pyc index 3e4d465..2156ad9 100644 Binary files a/__pycache__/MultClassPipe.cpython-37.pyc and b/__pycache__/MultClassPipe.cpython-37.pyc differ diff --git a/my_data9.py b/my_data9.py index 6265051..314ed91 100644 --- a/my_data9.py +++ b/my_data9.py @@ -7,7 +7,12 @@ Created on Sat Mar 5 12:57:32 2022 """ #%% # data, etc for now comes from my_data6.py and/or my_data5.py +#%% +homedir = os.path.expanduser("~") +os.chdir(homedir + "/git/ML_AI_training/") +# my function +from MultClassPipe2 import MultClassPipeline2 #%% try combinations #import sys, os #os.system("imports.py") @@ -45,11 +50,19 @@ X_train, X_test, y_train, y_test = train_test_split(all_features_df, preprocessor = ColumnTransformer( transformers=[ ('num', MinMaxScaler() , numerical_features_names) - #,('cat', OneHotEncoder(), 
categorical_features_names) - ]) + ,('cat', OneHotEncoder(), categorical_features_names) + ], remainder = 'passthrough') + +f = preprocessor.fit(numerical_features_df) +f2 = preprocessor.transform(numerical_features_df) + +f3 = preprocessor.fit_transform(numerical_features_df) +(f3==f2).all() + +f4 = preprocessor.fit_transform(all_features_df) +f4 +preprocessor.fit_transform(numerical_features_df) -preprocessor.fit(numerical_features_df) -preprocessor.transform(numerical_features_df) #%% model_log = Pipeline(steps = [ ('preprocess', preprocessor) @@ -90,21 +103,30 @@ output = cross_validate(model, X_trainN, y_trainN , cv = 10 , return_train_score = False) pd.DataFrame(output).mean() +#%% Run multiple models using MultClassPipeline +# only good for numerical features as categ features is not supported yet! +t1_res = MultClassPipeline2(X_trainN, X_testN, y_trainN, y_testN, input_df = all_features_df) +t1_res - +t2_res = MultClassPipeline2(X_train, X_test, y_train, y_test, input_df = all_features_df) +t2_res #%% +# https://machinelearningmastery.com/columntransformer-for-numerical-and-categorical-data/ +#Each transformer is a three-element tuple that defines the name of the transformer, the transform to apply, and the column indices to apply it to. 
For example: +# (Name, Object, Columns) -selector_logistic = RFECV(estimator = model - , cv = 10 - , step = 1) +# Determine categorical and numerical features +numerical_ix = all_features_df.select_dtypes(include=['int64', 'float64']).columns +numerical_ix +categorical_ix = all_features_df.select_dtypes(include=['object', 'bool']).columns +categorical_ix -X_trainN, X_testN, y_trainN, y_testN = train_test_split(numerical_features_df - , target1 - , test_size = 0.33 - , random_state = 42) +# Define the data preparation for the columns +t = [('cat', OneHotEncoder(), categorical_ix) + , ('num', MinMaxScaler(), numerical_ix)] +col_transform = ColumnTransformer(transformers=t + , remainder='passthrough') +# create pipeline (unlike example above where the col transfer was a preprocess step and it was fit_transformed) -selector_logistic_xtrain = selector_logistic.fit_transform(X_trainN, y_trainN) -print(sel_rfe_logistic.get_support()) -X_trainN.columns - -print(sel_rfe_logistic.ranking_) \ No newline at end of file +pipeline = Pipeline(steps=[('prep', col_transform) + , ('classifier', clf)]) \ No newline at end of file diff --git a/p_jr_d1.py b/p_jr_d1.py index 602681d..add37e2 100644 --- a/p_jr_d1.py +++ b/p_jr_d1.py @@ -351,6 +351,7 @@ pred # make a pipeline # PCA(Dimension reduction to two) -> Scaling the data -> DecisionTreeClassification +#https://www.geeksforgeeks.org/pipelines-python-and-scikit-learn/ pipe1 = Pipeline([('pca', PCA(n_components = 2)) , ('std', StandardScaler()) , ('decision_tree', DecisionTreeClassifier())]