
Boosting Ensemble

  • AdaBoost
  • Gradient Boosting
  • XGBoost
    • [objective] reg:squarederror, reg:squaredlogerror, reg:logistic, reg:pseudohubererror, reg:absoluteerror, binary:logistic, binary:logitraw, binary:hinge, count:poisson, survival:cox, survival:aft, multi:softmax, multi:softprob, rank:pairwise, rank:ndcg, rank:map, reg:gamma, reg:tweedie (selected via the objective argument; see the sketch after this list)
  • LightGBM
  • CatBoost
  • NGBoost
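
Each objective above is passed to the booster through the objective constructor argument; a minimal sketch, assuming xgboost is installed:

from xgboost import XGBClassifier, XGBRegressor

classifier = XGBClassifier(objective='binary:logistic')  # binary classification
regressor = XGBRegressor(objective='reg:squarederror')   # squared-error regression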

Overview

from sklearn.datasets import make_classification, make_regression
from sklearn import linear_model, ensemble, naive_bayes, tree, neighbors, discriminant_analysis, svm, neural_network
#import xgboost as xgb
#import lightgbm as lgb
#import catboost as cb
#import ngboost as ngb

# classification
X, y = make_classification(n_samples=3000, n_features=10, n_classes=3, n_clusters_per_class=1, weights=[.6, .3, .1], flip_y=0)
classifier = ensemble.AdaBoostClassifier(estimator=tree.DecisionTreeClassifier())  # AdaBoost
classifier = ensemble.GradientBoostingClassifier()  # or gradient boosting (overrides the line above)
classifier.fit(X, y)
classifier.predict(X)
classifier.predict_proba(X)

# regression
X, y = make_regression(n_samples=3000, n_features=10, n_informative=5, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None)
regressor = ensemble.AdaBoostRegressor(estimator=tree.DecisionTreeRegressor())  # AdaBoost
regressor = ensemble.GradientBoostingRegressor()  # or gradient boosting (overrides the line above)
regressor.fit(X, y)
regressor.predict(X)


Task: Classification

Validation: AdaBoostClassifier
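
A minimal validation sketch in the style of the GradientBoostingClassifier example below; the parameter grid is illustrative, not a tuned recommendation.

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier

X, y = make_classification(n_samples=1000, n_features=10, n_classes=2, weights=[0.6, 0.4], flip_y=0)

cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=None) # cross validation & randomness control
classifier = GridSearchCV(
    estimator=AdaBoostClassifier(estimator=DecisionTreeClassifier(max_depth=1)),
    cv=cv,
    scoring='recall',
    param_grid={
        'n_estimators': [10, 50, 100], # illustrative values
        'learning_rate': [0.5, 1.0],   # illustrative values
    },
    return_train_score=True,
)
classifier.fit(X, y)
pd.DataFrame(classifier.cv_results_).sort_values('rank_test_score')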

Validation: GradientBoostingClassifier: binary classification

# https://scikit-learn.org/stable/modules/model_evaluation.html

import joblib
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.preprocessing import PowerTransformer, QuantileTransformer, StandardScaler, Normalizer, RobustScaler, MinMaxScaler, MaxAbsScaler
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.model_selection import cross_val_score, cross_validate, GridSearchCV, StratifiedKFold
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=1000, n_features=10, n_classes=2, weights=[0.6, 0.4], flip_y=0)

#binary_class_scoring = ['accuracy', 'balanced_accuracy', 'recall', 'average_precision', 'precision', 'f1', 'jaccard', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo']
#multi_class_scoring = ['accuracy', 'balanced_accuracy', 'recall_micro', 'recall_macro', 'recall_weighted', 'precision_micro', 'precision_macro', 'precision_weighted', 'f1_micro', 'f1_macro', 'f1_weighted', 'jaccard_micro', 'jaccard_macro', 'jaccard_weighted', 'roc_auc_ovr_weighted', 'roc_auc_ovo_weighted']
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=None) # cross validation & randomness control
classifier = make_pipeline(PowerTransformer(method='yeo-johnson', standardize=True), Normalizer(), GradientBoostingClassifier())
classifier = GridSearchCV(
    estimator=classifier, cv=cv, 
    scoring=['accuracy', 'recall', 'precision', 'f1'][1], 
    param_grid={
        'powertransformer__standardize':[True, False],
        'gradientboostingclassifier__loss':['log_loss', 'exponential'][0:1],  # 'deviance' is a deprecated alias of 'log_loss'
        'gradientboostingclassifier__learning_rate':[.1, .2], 
        'gradientboostingclassifier__criterion':['friedman_mse', 'squared_error'][0:1],
        'gradientboostingclassifier__n_estimators':[10, 30, 50], 
        'gradientboostingclassifier__subsample':[.7,.8, 1], 
        'gradientboostingclassifier__min_samples_split':[2], 
        'gradientboostingclassifier__min_impurity_decrease':[0.0],
#        'gradientboostingclassifier__min_samples_leaf':[1], 
#        'gradientboostingclassifier__min_weight_fraction_leaf':[0.0],
#        'gradientboostingclassifier__max_depth':[3],
#        'gradientboostingclassifier__init':[None],
#        'gradientboostingclassifier__random_state':[None], 
#        'gradientboostingclassifier__max_features':[None],
#        'gradientboostingclassifier__max_leaf_nodes':[None],
#        'gradientboostingclassifier__warm_start':[False],
#        'gradientboostingclassifier__validation_fraction':[0.1], 
#        'gradientboostingclassifier__n_iter_no_change':[None],
#        'gradientboostingclassifier__tol':[0.0001],
#        'gradientboostingclassifier__ccp_alpha':[0.0],
    },
    return_train_score=True, 
)
classifier.fit(X, y) ; joblib.dump(classifier, 'classifier.joblib')
classifier = joblib.load('classifier.joblib')
classifier.cv_results_



# Evaluation: reshape cv_results_ into a (parameter combination x metric) score table
train_scores = pd.DataFrame(list(filter(lambda score: score[0].endswith('train_score') , classifier.cv_results_.items())))
train_scores = train_scores[1].apply(lambda x: pd.Series(x)).T.rename(columns=train_scores[0].to_dict())
train_scores.columns = pd.MultiIndex.from_tuples(map(lambda column: ('train_score', column.replace('_train_score', '')), train_scores.columns))

test_scores = pd.DataFrame(list(filter(lambda score: score[0].endswith('test_score') , classifier.cv_results_.items())))
test_scores = test_scores[1].apply(lambda x: pd.Series(x)).T.rename(columns=test_scores[0].to_dict())
test_scores.columns = pd.MultiIndex.from_tuples(map(lambda column: ('test_score', column.replace('_test_score', '')), test_scores.columns))

time_scores = pd.DataFrame(list(filter(lambda score: score[0].endswith('time') , classifier.cv_results_.items())))
time_scores = time_scores[1].apply(lambda x: pd.Series(x)).T.rename(columns=time_scores[0].to_dict())
time_scores.columns = pd.MultiIndex.from_tuples(map(lambda column: ('time', column.replace('_time', '')), time_scores.columns))

scores = pd.concat([train_scores, test_scores, time_scores], axis=1)
scores.index = pd.MultiIndex.from_frame(pd.DataFrame(classifier.cv_results_['params']))
scores.sort_values(('test_score', 'rank'))
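
After the search, the refit best model and the winning configuration are available directly on the GridSearchCV object:

classifier.best_params_    # winning parameter combination
classifier.best_score_     # mean cross-validated recall of the winner
classifier.best_estimator_ # pipeline refit on the full data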

Preprocessing effect: GradientBoostingClassifier: binary classification

import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.preprocessing import PowerTransformer, QuantileTransformer, StandardScaler, Normalizer, RobustScaler, MinMaxScaler, MaxAbsScaler
from sklearn.preprocessing import OneHotEncoder, Binarizer, KBinsDiscretizer, PolynomialFeatures, SplineTransformer

def scoring(classifier, X, y, preprocessor_name, task_type, random_state=None):
    from sklearn.model_selection import cross_validate, RepeatedStratifiedKFold, RepeatedKFold
    
    if task_type == 'binary':
        scoring = ['accuracy', 'balanced_accuracy', 'recall', 'average_precision', 'precision', 'f1', 'jaccard', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo']
        cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=random_state) # StratifiedKFold(n_splits=5, shuffle=False, random_state=None)  # cross validation & randomness control
    elif task_type == 'multi':
        scoring = ['accuracy', 'balanced_accuracy', 'recall_micro', 'recall_macro', 'recall_weighted', 'precision_micro', 'precision_macro', 'precision_weighted', 'f1_micro', 'f1_macro', 'f1_weighted', 'jaccard_micro', 'jaccard_macro', 'jaccard_weighted', 'roc_auc_ovr_weighted', 'roc_auc_ovo_weighted']
        cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=random_state) # StratifiedKFold(n_splits=5, shuffle=False, random_state=None)  # cross validation & randomness control
    elif task_type == 'reg':
        scoring = ['r2', 'explained_variance', 'max_error', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error', 'neg_median_absolute_error', 'neg_mean_absolute_percentage_error']
        cv = RepeatedKFold(n_splits=5, n_repeats=3, random_state=random_state) # KFold(n_splits=5, shuffle=False, random_state=None)
    
    scores = pd.DataFrame(cross_validate(classifier, X, y, cv=cv, scoring=scoring, return_train_score=True)).mean()
    scores.name = preprocessor_name
    return scores

def scoring_summary(scores):
    # summary
    train_scores = scores[list(filter(lambda column: column.startswith('train'), scores.columns))]
    test_scores = scores[list(filter(lambda column: column.startswith('test'), scores.columns))]
    time_scores = scores[list(filter(lambda column: column.endswith('time'), scores.columns))]
    train_scores.columns = pd.MultiIndex.from_tuples(map(lambda column: ('train', '_'.join(column.split('_')[1:])), train_scores.columns))
    test_scores.columns = pd.MultiIndex.from_tuples(map(lambda column: ('test', '_'.join(column.split('_')[1:])), test_scores.columns))
    time_scores.columns = pd.MultiIndex.from_tuples(map(lambda column: ('time', '_'.join(column.split('_')[:-1])), time_scores.columns))
    scores = pd.concat([train_scores, test_scores, time_scores], axis=1).swaplevel(0,1,axis=1)
    return scores 

random_state = None; task_type = 'binary'
X, y = make_classification(n_samples=1000, n_features=10, n_classes=2, weights=[0.9, 0.1], flip_y=0, random_state=random_state)

scores = list()
# transform of measure
params = dict(loss='log_loss', learning_rate=0.1, n_estimators=30, subsample=1.0, criterion='friedman_mse', random_state=random_state)
scores.append(scoring(GradientBoostingClassifier(**params), X, y, preprocessor_name='baseline', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), PCA(n_components=None).fit_transform(X), y, preprocessor_name='PCA', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), FactorAnalysis(n_components=None, rotation='varimax').fit_transform(X), y, preprocessor_name='FactorAnalysis', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), QuantileTransformer(output_distribution='normal').fit_transform(X), y, preprocessor_name='QuantileTransformer', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), PowerTransformer(method='yeo-johnson', standardize=True).fit_transform(X), y, preprocessor_name='PowerTransform', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), Normalizer().fit_transform(X), y, preprocessor_name='Normalizer', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), StandardScaler().fit_transform(X), y, preprocessor_name='StandardScaler', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), MinMaxScaler().fit_transform(X), y, preprocessor_name='MinMaxScaler', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), MaxAbsScaler().fit_transform(X), y, preprocessor_name='MaxAbsScaler', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), RobustScaler().fit_transform(X), y, preprocessor_name='RobustScaler', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), KBinsDiscretizer(n_bins=10, encode='ordinal').fit_transform(X), y, preprocessor_name='KBinsDiscretizer', task_type=task_type, random_state=random_state))

# transform of sigma-algebra
scores.append(scoring(GradientBoostingClassifier(**params), Binarizer(threshold=0).fit_transform(X), y, preprocessor_name='Binarizer', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), KBinsDiscretizer(n_bins=10, encode='onehot').fit_transform(X), y, preprocessor_name='KBinsDiscretizer(onehot)', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), PolynomialFeatures(degree=2, interaction_only=False, include_bias=True).fit_transform(X), y, preprocessor_name='PolynomialFeatures', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), SplineTransformer(degree=2, n_knots=3).fit_transform(X), y, preprocessor_name='SplineTransformer', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), PolynomialFeatures(degree=2, interaction_only=False, include_bias=True).fit_transform(SplineTransformer(degree=3, n_knots=5).fit_transform(X)), y, preprocessor_name='SplineTransformer&PolynomialFeatures', task_type=task_type, random_state=random_state))
scores.append(scoring(GradientBoostingClassifier(**params), SplineTransformer(degree=3, n_knots=5).fit_transform(PolynomialFeatures(degree=2, interaction_only=False, include_bias=True).fit_transform(X)), y, preprocessor_name='PolynomialFeatures&SplineTransformer', task_type=task_type, random_state=random_state))
scores = pd.concat(scores, axis=1).T

# summary
scores = scoring_summary(scores)
scores[['accuracy', 'recall', 'precision', 'f1']]

Validation: XGBClassifier: binary classification

# https://scikit-learn.org/stable/modules/model_evaluation.html

import joblib
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.preprocessing import PowerTransformer, QuantileTransformer, StandardScaler, Normalizer, RobustScaler, MinMaxScaler, MaxAbsScaler
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.model_selection import cross_val_score, cross_validate, GridSearchCV, StratifiedKFold
from xgboost import XGBClassifier

X, y = make_classification(n_samples=1000, n_features=10, n_classes=2, weights=[0.6, 0.4], flip_y=0)

#binary_class_scoring = ['accuracy', 'balanced_accuracy', 'recall', 'average_precision', 'precision', 'f1', 'jaccard', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo']
#multi_class_scoring = ['accuracy', 'balanced_accuracy', 'recall_micro', 'recall_macro', 'recall_weighted', 'precision_micro', 'precision_macro', 'precision_weighted', 'f1_micro', 'f1_macro', 'f1_weighted', 'jaccard_micro', 'jaccard_macro', 'jaccard_weighted', 'roc_auc_ovr_weighted', 'roc_auc_ovo_weighted']
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=None) # cross validation & randomness control
classifier = make_pipeline(PowerTransformer(method='yeo-johnson', standardize=True), Normalizer(), XGBClassifier(objective='binary:logistic'))
classifier = GridSearchCV(
    estimator=classifier, cv=cv, 
    scoring=['accuracy', 'recall', 'precision', 'f1'][1], 
    param_grid={
        'powertransformer__standardize':[True, False],
        'normalizer__norm':['l1', 'l2', 'max'],
        'xgbclassifier__n_estimators': [10, 50],
#        'xgbclassifier__grow_policy': ['depthwise', 'lossguide'],
#        'xgbclassifier__reg_alpha': [0, .1],
#        'xgbclassifier__reg_lambda': [0, .1],        
    },
    return_train_score=True, 
)
classifier.fit(X, y) ; joblib.dump(classifier, 'classifier.joblib')
classifier = joblib.load('classifier.joblib')
classifier.cv_results_



# Evaluation
train_scores = pd.DataFrame(list(filter(lambda score: score[0].endswith('train_score') , classifier.cv_results_.items())))
train_scores = train_scores[1].apply(lambda x: pd.Series(x)).T.rename(columns=train_scores[0].to_dict())
train_scores.columns = pd.MultiIndex.from_tuples(map(lambda column: ('train_score', column.replace('_train_score', '')), train_scores.columns))

test_scores = pd.DataFrame(list(filter(lambda score: score[0].endswith('test_score') , classifier.cv_results_.items())))
test_scores = test_scores[1].apply(lambda x: pd.Series(x)).T.rename(columns=test_scores[0].to_dict())
test_scores.columns = pd.MultiIndex.from_tuples(map(lambda column: ('test_score', column.replace('_test_score', '')), test_scores.columns))

time_scores = pd.DataFrame(list(filter(lambda score: score[0].endswith('time') , classifier.cv_results_.items())))
time_scores = time_scores[1].apply(lambda x: pd.Series(x)).T.rename(columns=time_scores[0].to_dict())
time_scores.columns = pd.MultiIndex.from_tuples(map(lambda column: ('time', column.replace('_time', '')), time_scores.columns))

scores = pd.concat([train_scores, test_scores, time_scores], axis=1)
scores.index = pd.MultiIndex.from_frame(pd.DataFrame(classifier.cv_results_['params']))
scores.sort_values(('test_score', 'rank'))

Task: Regression

Validation: AdaBoostRegressor
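
A minimal validation sketch in the style of the GradientBoostingRegressor example below; the parameter grid is illustrative.

import pandas as pd
from sklearn.datasets import make_regression
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor

X, y = make_regression(n_samples=3000, n_features=10, n_informative=5, noise=0.0)

cv = KFold(n_splits=10, shuffle=False, random_state=None)
regressor = GridSearchCV(
    estimator=AdaBoostRegressor(estimator=DecisionTreeRegressor(max_depth=3)),
    cv=cv,
    scoring='r2',
    param_grid={
        'n_estimators': [10, 50, 100],               # illustrative values
        'learning_rate': [0.5, 1.0],                 # illustrative values
        'loss': ['linear', 'square', 'exponential'], # all supported losses
    },
    return_train_score=True,
)
regressor.fit(X, y)
pd.DataFrame(regressor.cv_results_).sort_values('rank_test_score')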

Validation: GradientBoostingRegressor

# https://scikit-learn.org/stable/modules/model_evaluation.html

import joblib
import pandas as pd
from sklearn.datasets import make_regression
from sklearn.preprocessing import PowerTransformer, QuantileTransformer, StandardScaler, Normalizer, RobustScaler, MinMaxScaler, MaxAbsScaler
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.model_selection import cross_val_score, cross_validate, GridSearchCV, KFold
from sklearn.ensemble import GradientBoostingRegressor

X, y = make_regression(n_samples=3000, n_features=10, n_informative=5, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None)
cv = KFold(n_splits=10, shuffle=False, random_state=None)

regressor = make_pipeline(PowerTransformer(method='yeo-johnson', standardize=True), Normalizer(), GradientBoostingRegressor())
regressor = GridSearchCV(
    estimator=regressor, cv=cv, 
    scoring=['r2', 'explained_variance', 'max_error', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error', 'neg_median_absolute_error', 'neg_mean_absolute_percentage_error'][1],    
    param_grid={
        'gradientboostingregressor__loss':['squared_error', 'absolute_error', 'huber', 'quantile'],
        'gradientboostingregressor__learning_rate':[.1, .2], 
#        'gradientboostingregressor__n_estimators':[10, 50, 100], 
        'gradientboostingregressor__subsample':[1.0], 
        'gradientboostingregressor__criterion':['friedman_mse', 'squared_error'], 
#        'gradientboostingregressor__min_samples_split':[2], 
#        'gradientboostingregressor__min_samples_leaf':[1], 
#        'gradientboostingregressor__min_weight_fraction_leaf':[0.0], 
#        'gradientboostingregressor__max_depth':[3], 
#        'gradientboostingregressor__min_impurity_decrease':[0.0], 
#        'gradientboostingregressor__init':[None], 
#        'gradientboostingregressor__random_state':[None], 
#        'gradientboostingregressor__max_features':[None], 
#        'gradientboostingregressor__alpha':[0.9], 
#        'gradientboostingregressor__max_leaf_nodes':[None], 
#        'gradientboostingregressor__warm_start':[False], 
#        'gradientboostingregressor__validation_fraction':[0.1], 
#        'gradientboostingregressor__n_iter_no_change':[None], 
#        'gradientboostingregressor__tol':[0.0001], 
#        'gradientboostingregressor__ccp_alpha':[0.0]        
    }, 
    return_train_score=True)
regressor.fit(X, y); joblib.dump(regressor, 'regressor.joblib')
regressor = joblib.load('regressor.joblib')

# Evaluation
train_scores = pd.DataFrame(list(filter(lambda score: score[0].endswith('train_score') , regressor.cv_results_.items())))
train_scores = train_scores[1].apply(lambda x: pd.Series(x)).T.rename(columns=train_scores[0].to_dict())
train_scores.columns = pd.MultiIndex.from_tuples(map(lambda column: ('train_score', column.replace('_train_score', '')), train_scores.columns))

test_scores = pd.DataFrame(list(filter(lambda score: score[0].endswith('test_score') , regressor.cv_results_.items())))
test_scores = test_scores[1].apply(lambda x: pd.Series(x)).T.rename(columns=test_scores[0].to_dict())
test_scores.columns = pd.MultiIndex.from_tuples(map(lambda column: ('test_score', column.replace('_test_score', '')), test_scores.columns))

time_scores = pd.DataFrame(list(filter(lambda score: score[0].endswith('time') , regressor.cv_results_.items())))
time_scores = time_scores[1].apply(lambda x: pd.Series(x)).T.rename(columns=time_scores[0].to_dict())
time_scores.columns = pd.MultiIndex.from_tuples(map(lambda column: ('time', column.replace('_time', '')), time_scores.columns))

scores = pd.concat([train_scores, test_scores, time_scores], axis=1)
scores.index = pd.MultiIndex.from_frame(pd.DataFrame(regressor.cv_results_['params']))
scores.sort_values(('test_score', 'rank'))


Bagging Ensemble

Overview

from sklearn.datasets import make_classification, make_regression
from sklearn import linear_model, ensemble, naive_bayes, tree, neighbors, discriminant_analysis, svm, neural_network

# classification
X, y = make_classification(n_samples=3000, n_features=10, n_classes=3, n_clusters_per_class=1, weights=[.6, .3, .1], flip_y=0)
classifier = ensemble.BaggingClassifier(estimator=tree.DecisionTreeClassifier())
classifier.fit(X, y)
classifier.predict(X)
classifier.predict_proba(X)

# regression
X, y = make_regression(n_samples=3000, n_features=10, n_informative=5, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None)
regressor = ensemble.BaggingRegressor(estimator=tree.DecisionTreeRegressor())
regressor.fit(X, y)
regressor.predict(X)

Task: Classification

Validation: BaggingClassifier
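
A minimal validation sketch over the bagging-specific parameters; grid values are illustrative.

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier

X, y = make_classification(n_samples=1000, n_features=10, n_classes=2, weights=[0.6, 0.4], flip_y=0)

cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=None)
classifier = GridSearchCV(
    estimator=BaggingClassifier(estimator=DecisionTreeClassifier()),
    cv=cv,
    scoring='recall',
    param_grid={
        'n_estimators': [10, 50],   # illustrative values
        'max_samples': [0.5, 1.0],  # row fraction per bootstrap sample
        'max_features': [0.5, 1.0], # column fraction per base estimator
        'bootstrap': [True, False],
    },
    return_train_score=True,
)
classifier.fit(X, y)
pd.DataFrame(classifier.cv_results_).sort_values('rank_test_score')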

Task: Regression

Validation: BaggingRegressor
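
A minimal validation sketch, mirroring the classifier version with KFold and r2 scoring; grid values are illustrative.

import pandas as pd
from sklearn.datasets import make_regression
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import BaggingRegressor

X, y = make_regression(n_samples=3000, n_features=10, n_informative=5, noise=0.0)

cv = KFold(n_splits=10, shuffle=False, random_state=None)
regressor = GridSearchCV(
    estimator=BaggingRegressor(estimator=DecisionTreeRegressor()),
    cv=cv,
    scoring='r2',
    param_grid={
        'n_estimators': [10, 50],   # illustrative values
        'max_samples': [0.5, 1.0],
        'max_features': [0.5, 1.0],
    },
    return_train_score=True,
)
regressor.fit(X, y)
pd.DataFrame(regressor.cv_results_).sort_values('rank_test_score')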


Voting Ensemble

Task: Classification

Overview

from sklearn.datasets import make_classification, make_regression
from sklearn import linear_model, ensemble, naive_bayes, tree, neighbors, discriminant_analysis, svm, neural_network

#X, y = make_regression(n_samples=3000, n_features=10, n_informative=5, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None)
X, y = make_classification(n_samples=3000, n_features=10, n_classes=3, n_clusters_per_class=1, weights=[.6, .3, .1], flip_y=0)

estimators = [('Base1', tree.DecisionTreeClassifier()),
              ('Base2', ensemble.RandomForestClassifier()),
              ('Base3', discriminant_analysis.LinearDiscriminantAnalysis())]
classifier = ensemble.VotingClassifier(estimators=estimators, voting='soft', weights=[2,1,1])
classifier.fit(X, y)
classifier.predict(X)
classifier.predict_proba(X)

Validation: VotingClassifier
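
A minimal validation sketch of the soft-voting ensemble above, assuming repeated stratified folds and macro-averaged metrics for the three-class task:

import pandas as pd
from sklearn.datasets import make_classification
from sklearn import tree, ensemble, discriminant_analysis
from sklearn.model_selection import cross_validate, RepeatedStratifiedKFold

X, y = make_classification(n_samples=3000, n_features=10, n_classes=3, n_clusters_per_class=1, weights=[.6, .3, .1], flip_y=0)

estimators = [('Base1', tree.DecisionTreeClassifier()),
              ('Base2', ensemble.RandomForestClassifier()),
              ('Base3', discriminant_analysis.LinearDiscriminantAnalysis())]
classifier = ensemble.VotingClassifier(estimators=estimators, voting='soft', weights=[2,1,1])

cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=None)
scores = pd.DataFrame(cross_validate(classifier, X, y, cv=cv, scoring=['accuracy', 'recall_macro', 'precision_macro', 'f1_macro'], return_train_score=True)).mean()
scores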

Task: Regression

Overview

from sklearn.datasets import make_classification, make_regression
from sklearn import linear_model, ensemble, naive_bayes, tree, neighbors, discriminant_analysis, svm, neural_network

#X, y = make_classification(n_samples=3000, n_features=10, n_classes=3, n_clusters_per_class=1, weights=[.6, .3, .1], flip_y=0)
X, y = make_regression(n_samples=3000, n_features=10, n_informative=5, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None)

estimators = [('Base1', tree.DecisionTreeRegressor()),
              ('Base2', ensemble.RandomForestRegressor()),
              ('Base3', svm.SVR(kernel='rbf', max_iter=-1))]
regressor = ensemble.VotingRegressor(estimators=estimators, weights=[2,1,1])
regressor.fit(X, y)
regressor.predict(X)

Validation: VotingRegressor
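
A minimal validation sketch of the voting regressor above, assuming repeated folds and standard regression metrics:

import pandas as pd
from sklearn.datasets import make_regression
from sklearn import tree, ensemble, svm
from sklearn.model_selection import cross_validate, RepeatedKFold

X, y = make_regression(n_samples=3000, n_features=10, n_informative=5, noise=0.0)

estimators = [('Base1', tree.DecisionTreeRegressor()),
              ('Base2', ensemble.RandomForestRegressor()),
              ('Base3', svm.SVR(kernel='rbf', max_iter=-1))]
regressor = ensemble.VotingRegressor(estimators=estimators, weights=[2,1,1])

cv = RepeatedKFold(n_splits=5, n_repeats=3, random_state=None)
scores = pd.DataFrame(cross_validate(regressor, X, y, cv=cv, scoring=['r2', 'neg_mean_squared_error', 'neg_mean_absolute_error'], return_train_score=True)).mean()
scores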


Stacking Ensemble

Task: Classification

Overview

from sklearn.datasets import make_classification, make_regression
from sklearn import linear_model, ensemble, naive_bayes, tree, neighbors, discriminant_analysis, svm, neural_network

#X, y = make_regression(n_samples=3000, n_features=10, n_informative=5, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None)
X, y = make_classification(n_samples=3000, n_features=10, n_classes=3, n_clusters_per_class=1, weights=[.6, .3, .1], flip_y=0)

estimators = [('Base1', tree.DecisionTreeClassifier()),
              ('Base2', ensemble.RandomForestClassifier()),
              ('Base3', discriminant_analysis.LinearDiscriminantAnalysis())]
meta_classifier = svm.SVC(kernel='poly', probability=True, max_iter=-1)
classifier = ensemble.StackingClassifier(estimators=estimators, final_estimator=meta_classifier)
classifier.fit(X, y)
classifier.predict(X)
classifier.predict_proba(X)

A fuller variant with per-base-estimator preprocessing pipelines and cross-validated scoring:

import pandas as pd
from sklearn.datasets import make_classification, make_regression
from sklearn.preprocessing import Binarizer, KBinsDiscretizer, OrdinalEncoder, LabelBinarizer, LabelEncoder, OneHotEncoder
from sklearn.preprocessing import FunctionTransformer, PowerTransformer, QuantileTransformer, StandardScaler, Normalizer, RobustScaler, MinMaxScaler, MaxAbsScaler
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn import linear_model, ensemble, naive_bayes, tree, neighbors, discriminant_analysis, svm, neural_network
from sklearn.pipeline import FeatureUnion, make_union, Pipeline, make_pipeline 
from sklearn.model_selection import cross_validate, RepeatedStratifiedKFold

X, y = make_classification(n_samples=3000, n_features=10, n_classes=2, weights=[0.6, 0.4], flip_y=0, random_state=None)

estimators = [('BernoulliNB', make_pipeline(StandardScaler(), Binarizer(), naive_bayes.BernoulliNB())),
              ('MultinomialNB', make_pipeline(PowerTransformer(method='yeo-johnson', standardize=True), KBinsDiscretizer(n_bins=[3]*X.shape[1], encode='ordinal'), OneHotEncoder(min_frequency=10, max_categories=5, drop='if_binary', sparse_output=False), naive_bayes.MultinomialNB())),
              ('GaussianNB', make_pipeline(QuantileTransformer(output_distribution='normal'), PCA(n_components=None), naive_bayes.GaussianNB())),
              ('LinearDiscriminantAnalysis', make_pipeline(QuantileTransformer(output_distribution='normal'), discriminant_analysis.LinearDiscriminantAnalysis())),              
              ]
meta_classifier = svm.SVC(kernel='rbf', probability=True, max_iter=-1)
classifier = ensemble.StackingClassifier(estimators=estimators, final_estimator=meta_classifier)
classifier.fit(X, y)

cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=None) # StratifiedKFold(n_splits=5, shuffle=False, random_state=None)  # cross validation & randomness control
scores = pd.DataFrame(cross_validate(classifier, X, y, cv=cv, scoring=['accuracy', 'recall', 'precision', 'f1'], return_train_score=True)).mean()
scores

Validation: StackingClassifier
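
A minimal validation sketch; parameters of the named base estimators and of the final estimator are reachable with the usual double-underscore syntax, and the grid values are illustrative.

import pandas as pd
from sklearn.datasets import make_classification
from sklearn import tree, ensemble, discriminant_analysis, svm
from sklearn.model_selection import GridSearchCV, StratifiedKFold

X, y = make_classification(n_samples=3000, n_features=10, n_classes=3, n_clusters_per_class=1, weights=[.6, .3, .1], flip_y=0)

estimators = [('Base1', tree.DecisionTreeClassifier()),
              ('Base2', ensemble.RandomForestClassifier()),
              ('Base3', discriminant_analysis.LinearDiscriminantAnalysis())]
classifier = ensemble.StackingClassifier(estimators=estimators, final_estimator=svm.SVC(kernel='poly', probability=True, max_iter=-1))

cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=None)
classifier = GridSearchCV(
    estimator=classifier, cv=cv, scoring='f1_macro',
    param_grid={
        'Base1__max_depth': [3, None],    # base-estimator parameter
        'final_estimator__C': [0.1, 1.0], # meta-estimator parameter
    },
    return_train_score=True,
)
classifier.fit(X, y)
pd.DataFrame(classifier.cv_results_).sort_values('rank_test_score')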

Task: Regression

Overview

from sklearn.datasets import make_classification, make_regression
from sklearn import linear_model, ensemble, naive_bayes, tree, neighbors, discriminant_analysis, svm, neural_network

#X, y = make_classification(n_samples=3000, n_features=10, n_classes=3, n_clusters_per_class=1, weights=[.6, .3, .1], flip_y=0)
X, y = make_regression(n_samples=3000, n_features=10, n_informative=5, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None)

estimators = [('Base1', tree.DecisionTreeRegressor()),
              ('Base2', ensemble.RandomForestRegressor()),
              ('Base3', svm.SVR(kernel='rbf', max_iter=-1))]
meta_regressor = linear_model.LinearRegression()
regressor = ensemble.StackingRegressor(estimators=estimators, final_estimator=meta_regressor)
regressor.fit(X, y)
regressor.predict(X)

Validation: StackingRegressor
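
A minimal validation sketch, mirroring the classifier version; grid values are illustrative.

import pandas as pd
from sklearn.datasets import make_regression
from sklearn import tree, ensemble, svm, linear_model
from sklearn.model_selection import GridSearchCV, KFold

X, y = make_regression(n_samples=3000, n_features=10, n_informative=5, noise=0.0)

estimators = [('Base1', tree.DecisionTreeRegressor()),
              ('Base2', ensemble.RandomForestRegressor()),
              ('Base3', svm.SVR(kernel='rbf', max_iter=-1))]
regressor = ensemble.StackingRegressor(estimators=estimators, final_estimator=linear_model.LinearRegression())

cv = KFold(n_splits=10, shuffle=False, random_state=None)
regressor = GridSearchCV(
    estimator=regressor, cv=cv, scoring='r2',
    param_grid={
        'Base1__max_depth': [3, None],                   # base-estimator parameter
        'final_estimator__fit_intercept': [True, False], # meta-estimator parameter
    },
    return_train_score=True,
)
regressor.fit(X, y)
pd.DataFrame(regressor.cv_results_).sort_values('rank_test_score')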

