
Information Quantitation

Model-based Information

Variance Inflation Factor (continuous variable; explanatory)
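For each explanatory variable $X_j$, the VIF is derived from the $R_j^2$ of regressing $X_j$ on the remaining explanatory variables; values above roughly 5–10 are commonly read as a multicollinearity warning:

$$\mathrm{VIF}_j = \frac{1}{1 - R_j^2}$$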

import numpy as np
import pandas as pd
from sklearn.datasets import make_regression, make_classification
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor

X, y, coef = make_regression(n_samples=300, n_features=5, n_informative=2, n_targets=1, bias=100, effective_rank=None, tail_strength=0.5, noise=50.0, shuffle=True, coef=True, random_state=np.random.randint(1000))
#X, y = make_classification(n_samples=300, n_features=5, n_informative=2, n_redundant=2, n_repeated=0, n_classes=2, weights=[.1, .9], n_clusters_per_class=1, random_state=np.random.randint(1000)) # alternative dataset (would overwrite the regression data above)

data = pd.DataFrame(data=np.c_[X, y], columns=list(map(lambda x: 'X'+str(x), range(1,6))) + ['Y'])
exog = sm.add_constant(X) # add an intercept column; VIFs computed on the raw, uncentered design matrix are inflated
vif = pd.Series(map(lambda i: variance_inflation_factor(exog, i), range(1, exog.shape[1])), index=data.columns[:-1])
vif

Information Values (discrete variable & binary variable; target)

Information Value   Variable Predictiveness
------------------  ----------------------------
< 0.02              Not useful for prediction
0.02 to 0.1         Weak predictive power
0.1 to 0.3          Medium predictive power
0.3 to 0.5          Strong predictive power
> 0.5               Suspicious predictive power
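The code below implements the standard definitions over the bins $i$ of a candidate variable (the 0.5 floor on cell counts is a continuity correction against empty bins):

$$\mathrm{WOE}_i = \ln\frac{\%\,\mathrm{NonEvents}_i}{\%\,\mathrm{Events}_i}, \qquad \mathrm{IV} = \sum_i \left(\%\,\mathrm{NonEvents}_i - \%\,\mathrm{Events}_i\right)\,\mathrm{WOE}_i$$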
import numpy as np
from scipy import stats
import pandas as pd

def information_values(targets, data):
    data = data.copy()
    data['criterion'] = np.random.normal(size=data.shape[0]) # random-noise benchmark column: variables ranked at or below it carry little usable signal
    
    def variable_information_values(targets, column, data):
        WOE_IV = pd.crosstab(index=[data[column]], columns=[data[targets['column']]], margins=False)
        targets['non-target-instance'] = pd.Index(data[targets['column']].unique()).difference([targets['instance']])[0]
        
        # np.maximum(x, 0.5) guards against division by zero and log(0) for empty bins
        WOE_IV['EventRatio'] = WOE_IV[targets['instance']].apply(lambda x: np.maximum(x, 0.5)/WOE_IV[targets['instance']].sum())
        WOE_IV['NonEventRatio'] = WOE_IV[targets['non-target-instance']].apply(lambda x: np.maximum(x, 0.5)/WOE_IV[targets['non-target-instance']].sum())
        WOE_IV['WOE'] = (WOE_IV['NonEventRatio'] / WOE_IV['EventRatio']).apply(np.log)
        WOE_IV['IV'] = (WOE_IV['NonEventRatio'] - WOE_IV['EventRatio']) * WOE_IV['WOE']
        WOE_IV['RANK'] = WOE_IV['IV'].rank(ascending=False)
        #WOE_IV['IV'].plot(kind='barh', figsize=(30,5))
        return WOE_IV

    def summarize(column): # compute each variable's WOE/IV table once, then reduce to sum and mean
        iv = variable_information_values(targets, column, data)['IV']
        return column, [iv.sum(), iv.mean()]

    iv_summary = pd.DataFrame(
        data=dict(map(summarize, data.columns.difference([targets['column']]))),
        index=['IV_SUM', 'IV_MEAN']
    ).T
    iv_summary['SUM_RANK'] = iv_summary['IV_SUM'].rank(ascending=False)
    iv_summary['MEAN_RANK'] = iv_summary['IV_MEAN'].rank(ascending=False)
    return iv_summary

size = 1000
data = pd.DataFrame(columns=['X0', 'X1', 'X2', 'Y'],
    data = np.c_[
    stats.poisson.rvs(mu=5, size=size),
    stats.poisson.rvs(mu=10, size=size),
    stats.poisson.rvs(mu=30, size=size),
    stats.bernoulli.rvs(p=0.3, size=size),    
])

information_values(targets=dict(column='Y', instance=1), data=data)

Feature importance and coefficient by model-specific information

discrete variable; target

import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier     # feature selector estimator: feature importances
from sklearn.linear_model import LogisticRegression # feature selector estimator: coefficient

X, y = make_classification(n_samples=3000, n_features=20, n_classes=2, weights=[0.6, 0.4], flip_y=0)
data = pd.DataFrame(data=np.c_[X, y], columns=list(map(lambda column: 'X'+ str(column), range(X.shape[1]))) + ['y'])
X = data.loc[:, data.columns != 'y'].values
y = data.loc[:, data.columns == 'y'].values.ravel()

feature_importance = DecisionTreeClassifier().fit(X, y).feature_importances_.squeeze()
coefficient = LogisticRegression().fit(X, y).coef_.squeeze()

feature_information = pd.DataFrame(np.c_[feature_importance, coefficient], columns=['feature_importance', 'coefficient'], index=data.columns[data.columns != 'y'])
feature_information
import numpy as np
import pandas as pd
import seaborn as sns
from copy import deepcopy
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score, cross_validate, GridSearchCV, StratifiedKFold, RepeatedStratifiedKFold
from sklearn.pipeline import FeatureUnion, make_union, Pipeline, make_pipeline 
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=3000, n_features=30, n_classes=2, weights=[0.6, 0.4], flip_y=0)
data = pd.DataFrame(data=np.c_[X, y], columns=list(map(lambda column: 'X'+ str(column), range(X.shape[1]))) + ['y'])
X = data.loc[:, data.columns != 'y'].values
y = data.loc[:, data.columns == 'y'].values.ravel()

pipelines = dict()
pipelines['feature_importance'] = make_pipeline(
    DecisionTreeClassifier()
) 
pipelines['coefficient'] = make_pipeline(
    LogisticRegression()
) 

#binary_class_scoring = ['accuracy', 'balanced_accuracy', 'recall', 'average_precision', 'precision', 'f1', 'jaccard', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo']
#multi_class_scoring = ['accuracy', 'balanced_accuracy', 'recall_micro', 'recall_macro', 'recall_weighted', 'precision_micro', 'precision_macro', 'precision_weighted', 'f1_micro', 'f1_macro', 'f1_weighted', 'jaccard_micro', 'jaccard_macro', 'jaccard_weighted', 'roc_auc_ovr_weighted', 'roc_auc_ovo_weighted']
cv_results = dict()
cross_validation = RepeatedStratifiedKFold(n_splits=5, n_repeats=3,random_state=None) # StratifiedKFold(n_splits=5, shuffle=False, random_state=None)
for name, pipeline in deepcopy(pipelines).items():
    cv_results[name] = cross_validate(pipeline, X, y, scoring=['accuracy', 'recall', 'precision', 'f1'], cv=cross_validation, return_train_score=True, fit_params=None, return_estimator=False, n_jobs=-1, verbose=0)    
    pipeline.fit(X, y)
    pipelines.update({name:pipeline})
    
feature_importance = pipelines['feature_importance']['decisiontreeclassifier'].feature_importances_.squeeze()    
coefficient = pipelines['coefficient']['logisticregression'].coef_.squeeze()    



# visualization: feature importance and coefficient
feature_informations = pd.DataFrame(np.c_[feature_importance, coefficient], columns=['feature_importance', 'coefficient'], index=data.columns[data.columns != 'y'].values)
feature_informations = feature_informations.stack(0).reset_index().rename(columns={'level_0':'feature_name', 'level_1':'feature_information_type', 0:'score'})

g = sns.FacetGrid(feature_informations, col="feature_information_type",  row=None, aspect=4, height=4) 
g.map_dataframe(sns.barplot, x="score", y="feature_name", orient='h') # x: numerical variable, y: numerical variable, hue: categorical variable
g.fig.suptitle('Feature Information')
g.axes[0][0].set_title('Feature Importance')
g.axes[0][1].set_title('Coefficient')
g.add_legend()
g.tight_layout()



# visualization: cross-validation results    
cv_results = pd.DataFrame(cv_results)
cv_1st_normalization = list()
for name in cv_results.keys():
    cv_result = cv_results[name].apply(lambda x: pd.Series(x)).stack(0).to_frame().reset_index().rename(columns={0:'score', 'level_0':'score_name', 'level_1':'fold'})
    cv_result['scenario'] = name
    cv_1st_normalization.append(cv_result)

cv_result = pd.concat(cv_1st_normalization, axis=0).reset_index(drop=True)
cv_result['domain'] = cv_result['score_name'].apply(lambda x: 'target' if x.startswith('train') or x.startswith('test') else 'nontarget')
cv_result = cv_result.loc[lambda x: x['domain'] == 'target'].copy().reset_index(drop=True) 
cv_result['domain'] = cv_result['score_name'].apply(lambda x: 'train' if x.startswith('train') else 'test')
cv_result['scoring_type'] = cv_result['score_name'].apply(lambda x: '_'.join(x.split('_')[1:]))

sns.set_theme(style="ticks") # white, dark, whitegrid, darkgrid, ticks
g = sns.FacetGrid(cv_result, col="scoring_type",  row="domain", aspect=2, height=3) 
g.map_dataframe(sns.boxplot, x="scenario", y="score", hue='fold') # x: numerical variable, y: numerical variable, hue: categorical variable
g.fig.suptitle('Scenario Evaluation')
g.add_legend()
g.tight_layout()

continuous variable; target

import numpy as np
import pandas as pd
from sklearn.datasets import make_regression
from sklearn.tree import DecisionTreeRegressor     # feature selector estimator: feature importances
from sklearn.linear_model import LinearRegression # feature selector estimator: coefficient

X, y = make_regression(n_samples=3000, n_features=20, n_informative=5, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None)
data = pd.DataFrame(data=np.c_[X, y], columns=list(map(lambda column: 'X'+ str(column), range(X.shape[1]))) + ['y'])
X = data.loc[:, data.columns != 'y'].values
y = data.loc[:, data.columns == 'y'].values.ravel()

feature_importance = DecisionTreeRegressor().fit(X, y).feature_importances_.squeeze()
coefficient = LinearRegression().fit(X, y).coef_.squeeze()

feature_information = pd.DataFrame(np.c_[feature_importance, coefficient], columns=['feature_importance', 'coefficient'], index=data.columns[data.columns != 'y'])
feature_information
import numpy as np
import pandas as pd
import seaborn as sns
from copy import deepcopy
from sklearn.datasets import make_regression
from sklearn.model_selection import cross_val_score, cross_validate, KFold, RepeatedKFold
from sklearn.pipeline import FeatureUnion, make_union, Pipeline, make_pipeline 
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression

X, y = make_regression(n_samples=3000, n_features=20, n_informative=5, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None)
data = pd.DataFrame(data=np.c_[X, y], columns=list(map(lambda column: 'X'+ str(column), range(X.shape[1]))) + ['y'])
X = data.loc[:, data.columns != 'y'].values
y = data.loc[:, data.columns == 'y'].values.ravel()

pipelines = dict()
pipelines['feature_importance'] = make_pipeline(
    DecisionTreeRegressor()
) 
pipelines['coefficient'] = make_pipeline(
    LinearRegression()
) 

# regression_scoring: ['r2', 'explained_variance', 'max_error', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error', 'neg_median_absolute_error', 'neg_mean_absolute_percentage_error']
cv_results = dict()
cross_validation = RepeatedKFold(n_splits=10, n_repeats=3, random_state=None) # KFold(n_splits=10, shuffle=False, random_state=None)
for name, pipeline in deepcopy(pipelines).items():
    cv_results[name] = cross_validate(pipeline, X, y, scoring=['r2', 'explained_variance'], cv=cross_validation, return_train_score=True, fit_params=None, return_estimator=False, n_jobs=-1, verbose=0)    
    pipeline.fit(X, y)
    pipelines.update({name:pipeline})
    
feature_importance = pipelines['feature_importance']['decisiontreeregressor'].feature_importances_.squeeze()    
coefficient = pipelines['coefficient']['linearregression'].coef_.squeeze()    

# visualization: feature importance and coefficient
feature_informations = pd.DataFrame(np.c_[feature_importance, coefficient], columns=['feature_importance', 'coefficient'], index=data.columns[data.columns != 'y'].values)
feature_informations = feature_informations.stack(0).reset_index().rename(columns={'level_0':'feature_name', 'level_1':'feature_information_type', 0:'score'})

g = sns.FacetGrid(feature_informations, col="feature_information_type",  row=None, aspect=4, height=4) 
g.map_dataframe(sns.barplot, x="score", y="feature_name", orient='h') # x: numerical variable, y: numerical variable, hue: categorical variable
g.fig.suptitle('Feature Information')
g.axes[0][0].set_title('Feature Importance')
g.axes[0][1].set_title('Coefficient')
g.add_legend()
g.tight_layout()

# visualization: cross-validation results    
cv_results = pd.DataFrame(cv_results)
cv_1st_normalization = list()
for name in cv_results.keys():
    cv_result = cv_results[name].apply(lambda x: pd.Series(x)).stack(0).to_frame().reset_index().rename(columns={0:'score', 'level_0':'score_name', 'level_1':'fold'})
    cv_result['scenario'] = name
    cv_1st_normalization.append(cv_result)

cv_result = pd.concat(cv_1st_normalization, axis=0).reset_index(drop=True)
cv_result['domain'] = cv_result['score_name'].apply(lambda x: 'target' if x.startswith('train') or x.startswith('test') else 'nontarget')
cv_result = cv_result.loc[lambda x: x['domain'] == 'target'].copy().reset_index(drop=True) 
cv_result['domain'] = cv_result['score_name'].apply(lambda x: 'train' if x.startswith('train') else 'test')
cv_result['scoring_type'] = cv_result['score_name'].apply(lambda x: '_'.join(x.split('_')[1:]))

sns.set_theme(style="ticks") # white, dark, whitegrid, darkgrid, ticks
g = sns.FacetGrid(cv_result, col="scoring_type",  row="domain", aspect=4, height=4) 
g.map_dataframe(sns.boxplot, x="scenario", y="score", hue='fold') # x: numerical variable, y: numerical variable, hue: categorical variable
g.fig.suptitle('Scenario Evaluation')
g.add_legend()
g.tight_layout()

Permutation importance by model-agnostic information
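With scoring function $s$, fitted model $f$, and $K$ shuffles (n_repeats), the importance of feature $j$ is the mean score drop when column $j$ is randomly permuted; importances_mean in the code below reports exactly this average:

$$\mathrm{PI}_j = s(f, X, y) - \frac{1}{K}\sum_{k=1}^{K} s\big(f, X^{(\pi_{j,k})}, y\big)$$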

discrete variable; target

import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.inspection import permutation_importance # model-agnostic
from sklearn.svm import SVC

X, y = make_classification(n_samples=3000, n_features=20, n_classes=2, weights=[0.6, 0.4], flip_y=0)
data = pd.DataFrame(data=np.c_[X, y], columns=list(map(lambda column: 'X'+ str(column), range(X.shape[1]))) + ['y'])
X = data.loc[:, data.columns != 'y'].values
y = data.loc[:, data.columns == 'y'].values.ravel()

classifier = SVC().fit(X, y)
importance = permutation_importance(classifier, X, y, scoring='accuracy', n_repeats=3).importances_mean
importance = pd.DataFrame(importance, columns=['permutation_importance'], index=data.columns[data.columns != 'y'])
importance

continuous variable; target

import numpy as np
import pandas as pd
from sklearn.datasets import make_regression
from sklearn.inspection import permutation_importance # model-agnostic
from sklearn.svm import SVR

X, y = make_regression(n_samples=3000, n_features=20, n_informative=5, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None)
data = pd.DataFrame(data=np.c_[X, y], columns=list(map(lambda column: 'X'+ str(column), range(X.shape[1]))) + ['y'])
X = data.loc[:, data.columns != 'y'].values
y = data.loc[:, data.columns == 'y'].values.ravel()

regressor = SVR().fit(X,y)
importance = permutation_importance(regressor, X, y, scoring='neg_mean_squared_error', n_repeats=3).importances_mean
importance = pd.DataFrame(importance, columns=['permutation_importance'], index=data.columns[data.columns != 'y'])
importance

Statistic-based Information

Variance threshold (binary variables)
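For a Bernoulli variable with success probability $p$, the variance is maximal at $p = 1/2$, so thresholding at $p(1-p)$ keeps only binary variables whose minority class is frequent enough (below, $p = 0.7$ keeps variables with a minority share above 30%):

$$\mathrm{Var}[X] = p\,(1-p) \le \tfrac{1}{4}$$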

import numpy as np
import pandas as pd
from scipy import stats

size = 100
data = pd.DataFrame(np.c_[
     stats.bernoulli.rvs(p=2.0/3, size=size),
     stats.bernoulli.rvs(p=2.5/3, size=size),
     stats.bernoulli.rvs(p=2.7/3, size=size),
], columns=['Binary1', 'Binary2', 'Binary3'])
data.var()

from sklearn.feature_selection import VarianceThreshold
selector = VarianceThreshold(threshold=(.7 * (1 - .7))).fit(data) # Threshold: Var[X] = p*(1-p)  # select binary variables being close to p=1/2  (symmetry)
selector.variances_, selector.threshold

Chi-squared statistics (positive continuous variables & discrete variable; target)
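With observed per-class totals $O_i$, expected totals $E_i$ under independence, and $k$ classes, the statistic and its p-value (the survival function of a $\chi^2_{k-1}$ distribution, special.chdtrc below) are:

$$\chi^2 = \sum_{i=1}^{k} \frac{(O_i - E_i)^2}{E_i}, \qquad p = P\big(\chi^2_{k-1} \ge \chi^2\big)$$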

import numpy as np
import pandas as pd
from scipy import stats, special
from sklearn.feature_selection import SelectKBest, chi2

size = 100
data = pd.DataFrame(np.c_[
     stats.bernoulli.rvs(p=2/3, size=size),
     stats.binom.rvs(n=5, p=2/3, size=size),
     stats.poisson.rvs(mu=5, size=size),
     stats.multinomial.rvs(n=5, p=[.3, .3, .3], size=size),
     np.random.normal(0,1, size=size) + 3
], columns=['Binary', 'Discrete_B', 'Discrete_P', 'Category_1', 'Category_2', 'Category_3', 'Continuous'])

probability = data['Discrete_B'].value_counts()/data['Discrete_B'].count()
expected = data['Continuous'].sum()*probability
observed = data.groupby('Discrete_B')['Continuous'].sum()
chi2_statistic = ((observed - expected)**2 / expected).sum() # renamed: assigning to `chi2` would shadow the imported scoring function used below
p = special.chdtrc(observed.shape[0] - 1, chi2_statistic)
chi2_statistic, p

selector = SelectKBest(chi2, k=1).fit(data[['Continuous']], data['Discrete_B'])
selector.scores_, selector.pvalues_

F-statistics (continuous variables & discrete variable; target)
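One-way ANOVA with $a$ groups and $n$ observations compares between-group to within-group mean squares, matching msb/msw in the code below:

$$F = \frac{\mathrm{MSB}}{\mathrm{MSW}} = \frac{\sum_{g=1}^{a} n_g\,(\bar{x}_g - \bar{x})^2 \,/\, (a-1)}{\sum_{g=1}^{a}\sum_{i \in g} (x_i - \bar{x}_g)^2 \,/\, (n-a)}, \qquad p = P\big(F_{a-1,\,n-a} \ge F\big)$$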

import numpy as np
import pandas as pd
from scipy import stats
from sklearn.feature_selection import SelectKBest, f_classif

size = 100
data = pd.DataFrame(np.c_[
     stats.bernoulli.rvs(p=2/3, size=size),
     stats.binom.rvs(n=5, p=2/3, size=size),
     stats.poisson.rvs(mu=5, size=size),
     stats.multinomial.rvs(n=5, p=[.3, .3, .3], size=size),
     np.random.normal(0,1, size=size) + 3
], columns=['Binary', 'Discrete_B', 'Discrete_P', 'Category_1', 'Category_2', 'Category_3', 'Continuous'])

n = data.shape[0]
a = data['Discrete_P'].unique().size
msb = data.groupby('Discrete_P')['Continuous'].apply(lambda x: x.size*(x.mean() - data['Continuous'].mean())**2).sum() / (a-1)
msw = data.groupby('Discrete_P')['Continuous'].apply(lambda x: ((x - x.mean())**2).sum()).sum() / (n-a)
f = msb/msw
p = 1 - stats.f.cdf(f, a-1, n-a)

selector = SelectKBest(f_classif, k=1).fit(data[['Continuous']], data['Discrete_P'])
selector.scores_, selector.pvalues_

R-squared & F-statistics (continuous variables & continuous variable; target)
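For simple linear regression with an intercept (one regressor, $n$ samples), the correlation, coefficient of determination, and overall F-test are related by:

$$r = \mathrm{corr}(x, y), \qquad R^2 = r^2, \qquad F = \frac{R^2}{1 - R^2}\,(n - 2), \qquad p = P\big(F_{1,\,n-2} \ge F\big)$$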

import numpy as np
import pandas as pd
from scipy import stats
import statsmodels.api as sm
from sklearn.feature_selection import SelectKBest, r_regression, f_regression

size = 100
data = pd.DataFrame(np.c_[
     stats.bernoulli.rvs(p=2/3, size=size),
     stats.binom.rvs(n=5, p=2/3, size=size),
     stats.poisson.rvs(mu=5, size=size),
     stats.multinomial.rvs(n=5, p=[.3, .3, .3], size=size),
     np.random.normal(0,1, size=size) + 3,
     np.random.normal(0,1, size=size) 
], columns=['Binary', 'Discrete_B', 'Discrete_P', 'Category_1', 'Category_2', 'Category_3', 'Continuous1', 'Continuous2'])

X = np.c_[np.ones((data.shape[0], 1)), data['Continuous1'].values]
y = data['Continuous2'].values.squeeze()
model = sm.OLS(y, X).fit()
r = np.sqrt(model.rsquared)
f = model.fvalue
p = model.f_pvalue

selector = SelectKBest(r_regression, k=1).fit(data[['Continuous1']], data['Continuous2'])
selector.scores_
selector = SelectKBest(f_regression, k=1).fit(data[['Continuous1']], data['Continuous2'])
selector.scores_, selector.pvalues_

Mutual Information (discrete variables & discrete variable; target)
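Mutual information measures how far the joint distribution departs from the product of the marginals (sklearn's mutual_info_score uses the natural logarithm); the normalized and adjusted variants rescale it against the entropies and the chance level:

$$I(X;Y) = \sum_{x}\sum_{y} p(x,y)\,\ln\frac{p(x,y)}{p(x)\,p(y)}$$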

import numpy as np
import pandas as pd
from scipy import stats
import statsmodels.api as sm
from sklearn.metrics import mutual_info_score, adjusted_mutual_info_score, normalized_mutual_info_score

size = 100
data = pd.DataFrame(np.c_[
     stats.binom.rvs(n=5, p=2/3, size=size),
     stats.poisson.rvs(mu=5, size=size),    
     stats.multinomial.rvs(n=5, p=[.5, .5], size=size),
], columns=['Discrete_B', 'Discrete_P', 'Category_1', 'Category_2'])
data.loc[data.index[data['Category_1'] == 1].to_series().sample(frac=.8).index, ['Category_2']] = 1

mutual_info_score(data['Category_1'], data['Category_2'])
adjusted_mutual_info_score(data['Category_1'], data['Category_2'])
normalized_mutual_info_score(data['Category_1'], data['Category_2'])


Data Visualization

RC params

import matplotlib as mpl

mpl.rc('xtick', labelsize=20) 
mpl.rc('ytick', labelsize=20)
import matplotlib.pyplot as plt

SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12

plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title

plt.cla()   # Clear axis
plt.clf()   # Clear figure
plt.close() # Close a figure window
# Korean Font D/U
# - Download1 : sudo apt-get install -y fonts-nanum fonts-nanum-coding fonts-nanum-extra
# - Download2 : https://hangeul.naver.com/font
# - Upload : site-packages/matplotlib/mpl-data/fonts/ttf
# - Check : head ~/.cache/matplotlib/fontList.json

import matplotlib as mpl

mpl.matplotlib_fname()
mpl.get_cachedir()
mpl.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
mpl.font_manager.get_font_names()


# Korean Font Setup
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = 'NanumBarunGothic'"""

Basic Options

figure

import numpy as np
import matplotlib.pyplot as plt

xdata = np.arange(1000)
ydata = np.random.normal(size=1000).cumsum()

fig = plt.figure()
axes = fig.add_subplot(1,1,1)
lines = axes.plot(xdata, ydata)

plt.gcf() # id(fig), id(plt.gcf())
plt.gca() # id(axes), id(plt.gca())
fig.get_axes() # id(axes), id(fig.get_axes()[0])
axes.get_lines() # id(lines[0]), id(axes.get_lines()[0])
lines[0].get_xdata()
lines[0].get_ydata()
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
subfigs = fig.subfigures(1, 1, wspace=0.07)
subsubfigs = subfigs.subfigures(1, 1, wspace=0.07)
axes = subsubfigs.add_subplot(1,1,1)
lines = axes.plot(np.random.normal(size=1000).cumsum())
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
subfigs = fig.subfigures(1, 2, wspace=0.07)
subsubfigs1 = subfigs[0].subfigures(2, 1, wspace=0.07)
subsubfigs2 = subfigs[1].subfigures(2, 1, wspace=0.07)
subsubfigs1_axes1 = subsubfigs1[0].add_subplot(1,1,1)
subsubfigs1_axes2 = subsubfigs1[1].add_subplot(1,1,1)
subsubfigs2_axes1 = subsubfigs2[0].add_subplot(1,1,1)
subsubfigs2_axes2 = subsubfigs2[1].add_subplot(1,1,1)
lines1 = subsubfigs1_axes1.plot(np.random.normal(size=1000).cumsum())
lines2 = subsubfigs1_axes2.plot(np.random.normal(size=1000).cumsum())
lines3 = subsubfigs2_axes1.plot(np.random.normal(size=1000).cumsum())
lines4 = subsubfigs2_axes2.plot(np.random.normal(size=1000).cumsum())
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
subfigs = fig.subfigures(1, 2, wspace=0.07)
subfigs1_axes = subfigs[0].subplots(2,1)
subfigs2_axes = subfigs[1].subplots(2,1)
lines1 = subfigs1_axes[0].plot(np.random.normal(size=1000).cumsum())
lines2 = subfigs1_axes[1].plot(np.random.normal(size=1000).cumsum())
lines3 = subfigs2_axes[0].plot(np.random.normal(size=1000).cumsum())
lines4 = subfigs2_axes[1].plot(np.random.normal(size=1000).cumsum())
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
axes = fig.subplots(2,2)
lines1 = axes[0][0].plot(np.random.normal(size=1000).cumsum())
lines2 = axes[0][1].plot(np.random.normal(size=1000).cumsum())
lines3 = axes[1][0].plot(np.random.normal(size=1000).cumsum())
lines4 = axes[1][1].plot(np.random.normal(size=1000).cumsum())

 

subfigures

import matplotlib.pyplot as plt
fig = plt.figure(layout='constrained', figsize=(30, 5))
fig.suptitle('Main Figure')

subfigs = fig.subfigures(1, 2, wspace=0.07) # fig.subfigs

# subfigs[0].figure
subfigs[0].add_subplot(2, 2, 1, sharex=None, sharey=None)
subfigs[0].add_subplot(2, 2, 2, sharex=None, sharey=None)
subfigs[0].add_subplot(2, 2, 3, sharex=None, sharey=None)
subfigs[0].add_subplot(2, 2, 4, sharex=None, sharey=None)
subfigs[0].suptitle('sub-figure0')
subfigs[0].supxlabel('sub-figure0 x-axis label')
subfigs[0].supylabel('sub-figure0 y-axis label')
subfigs[0].align_xlabels()
subfigs[0].align_ylabels()

# subfigs[1].figure 
subfigs[1].add_subplot(2, 2, 1, sharex=None, sharey=None)
subfigs[1].add_subplot(2, 2, 2, sharex=None, sharey=None)
subfigs[1].add_subplot(2, 2, 3, sharex=None, sharey=None)
subfigs[1].add_subplot(2, 2, 4, sharex=None, sharey=None)
subfigs[1].suptitle('sub-figure1')
subfigs[1].supxlabel('sub-figure1 x-axis label')
subfigs[1].supylabel('sub-figure1 y-axis label')
subfigs[1].align_xlabels()
subfigs[1].align_ylabels()

#fig.subplots_adjust(top=1, right=1, hspace=0, wspace=0)
import matplotlib.pyplot as plt
fig = plt.figure(layout='constrained', figsize=(30, 5))
fig.suptitle('Main Figure')

subfigs = fig.subfigures(1, 2, wspace=0.07)
subsubfigs = subfigs[0].subfigures(1, 2, wspace=0.07)
subsubfigs[0].add_subplot(2, 2, 1, sharex=None, sharey=None)
subsubfigs[0].add_subplot(2, 2, 2, sharex=None, sharey=None)
subsubfigs[0].add_subplot(2, 2, 3, sharex=None, sharey=None)
subsubfigs[0].add_subplot(2, 2, 4, sharex=None, sharey=None)
subsubfigs[0].suptitle('sub-figure0')

subsubfigs[1].add_subplot(2, 2, 1, sharex=None, sharey=None)
subsubfigs[1].add_subplot(2, 2, 2, sharex=None, sharey=None)
subsubfigs[1].add_subplot(2, 2, 3, sharex=None, sharey=None)
subsubfigs[1].add_subplot(2, 2, 4, sharex=None, sharey=None)
subsubfigs[1].suptitle('sub-figure1')

subfigs[1].add_subplot(2, 2, 1, sharex=None, sharey=None)
subfigs[1].add_subplot(2, 2, 2, sharex=None, sharey=None)
subfigs[1].add_subplot(2, 2, 3, sharex=None, sharey=None)
subfigs[1].add_subplot(2, 2, 4, sharex=None, sharey=None)
subfigs[1].suptitle('sub-figure2')

axes

 

import matplotlib.pyplot as plt

mainax = plt.gca()
subax = plt.axes([.2, .5, .25, .25], frame_on=True, adjustable='box', alpha=.5) # (left, bottom, width, height)
import matplotlib.pyplot as plt
plt.figure(figsize=(30,5)) # or, after the fact: plt.gcf().set_size_inches(30, 5)

plt.title('title', position = (0.5, 1.0+0.05))
plt.title('title', loc = ['left', 'right', 'center'][2])
plt.xlabel('x-axis label')
plt.ylabel('y-axis label')
plt.tick_params(axis='x', rotation=45)
plt.tick_params(axis='y', rotation=45)
plt.tick_params(top=True, bottom=False, left=True, right=False, labeltop=True, labelbottom=False, labelleft=True, labelright=False)
plt.gca().spines['bottom'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.gca().get_xaxis().set_visible(True)
plt.gca().get_yaxis().set_visible(True)
plt.gca().xaxis.set_label_position(['bottom', 'top'][0])

plt.gca().get_xticks() # xtick location
plt.gca().get_yticks() # ytick location
plt.gca().get_xticklabels() # xtick label
plt.gca().get_yticklabels() # ytick label
plt.gca().yaxis.set_label_position(['left', 'right'][0])

plt.gca().set_xticks(plt.gca().get_xticks())
plt.gca().set_yticks(plt.gca().get_yticks())
plt.gca().set_xticklabels(plt.gca().get_xticklabels())
plt.gca().set_yticklabels(plt.gca().get_yticklabels())

#from cycler import cycler
#import cmasher as cmr
#default_cycler = cycler(color=cmr.take_cmap_colors('viridis', None, return_fmt='hex')) # plt.colormaps()
#plt.gca().set_prop_cycle(default_cycler)

plt.grid(True)
#plt.legend(loc='best', ncol=2, fontsize=8, frameon=False, shadow=False) # upper/lower, left/right

plt.tight_layout()
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(30,5))
fig.subplots_adjust(top=.9, bottom=.8, right=.9, left=.8)
plt.suptitle('Main Title', position = (0.5, 1.0+0.05))
ax0 = fig.add_subplot(2, 2, 1, sharex=None, sharey=None)
ax1 = fig.add_subplot(2, 2, 2, sharex=None, sharey=None)
ax2 = fig.add_subplot(2, 2, 3, sharex=None, sharey=None)
ax3 = fig.add_subplot(2, 2, 4, sharex=None, sharey=None)

ax0.set_title('title')
ax0.set_xlabel('x-axis label')
ax0.set_ylabel('y-axis label')
ax0.tick_params(axis='x', rotation=45)
ax0.tick_params(axis='y', rotation=45)
ax0.tick_params(top=True, bottom=False, left=True, right=False, labeltop=True, labelbottom=False, labelleft=True, labelright=False)
ax0.spines['bottom'].set_visible(False)
ax0.spines['right'].set_visible(False)
ax0.get_xaxis().set_visible(True)
ax0.get_yaxis().set_visible(True)
ax0.grid(True)
#ax0.legend(loc='best', ncol=2, fontsize=8, frameon=False, shadow=False)

ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)

ax2.title.set_fontsize(20)
ax2.xaxis.label.set_fontsize(20)
ax2.yaxis.label.set_fontsize(20)
list(map(lambda xticklabel: xticklabel.set_fontsize(20), ax2.get_xticklabels()))
list(map(lambda yticklabel: yticklabel.set_fontsize(20), ax2.get_yticklabels()))

fig.tight_layout()
fig.savefig('figure.png', bbox_inches="tight")
import matplotlib.pyplot as plt
import matplotlib as mpl
fig = plt.figure(figsize=(30, 10))
fig.subplots_adjust(top=0.8)
plt.suptitle('Main Title', position = (0.5, 1.0+0.05))
gs = mpl.gridspec.GridSpec(4, 4)
ax0 = fig.add_subplot(gs[0, 0], sharex=None, sharey=None)
ax1 = fig.add_subplot(gs[1, 1], sharex=None, sharey=None)
ax2 = fig.add_subplot(gs[2, 2], sharex=None, sharey=None)
ax3 = fig.add_subplot(gs[3, 3], sharex=None, sharey=None)
ax4 = fig.add_subplot(gs[0, 1:], sharex=None, sharey=None)
ax5 = fig.add_subplot(gs[1:, 0], sharex=None, sharey=None)
ax6 = fig.add_subplot(gs[1, 2:], sharex=None, sharey=None)
ax7 = fig.add_subplot(gs[2:, 1], sharex=None, sharey=None)
ax8 = fig.add_subplot(gs[2, 3], sharex=None, sharey=None)
ax9 = fig.add_subplot(gs[3, 2], sharex=None, sharey=None)

ax0.set_title('title')
ax0.set_xlabel('x-axis label')
ax0.set_ylabel('y-axis label')
ax0.tick_params(axis='x', rotation=45)
ax0.tick_params(axis='y', rotation=45)
ax0.tick_params(top=True, bottom=False, left=True, right=False, labeltop=True, labelbottom=False, labelleft=True, labelright=False)
ax0.spines['bottom'].set_visible(False)
ax0.spines['right'].set_visible(False)
ax0.get_xaxis().set_visible(True)
ax0.get_yaxis().set_visible(True)
ax0.grid(True)
plt.tight_layout()
import matplotlib.pyplot as plt
fig, axes = plt.subplots(2,4, figsize=(30,5), sharex=True, sharey=True)
fig.subplots_adjust(top=0.8)
plt.suptitle('Main Title', position = (0.5, 1.0+0.05))

axes[0][0].set_title('title')
axes[0][0].set_xlabel('x-axis label')
axes[0][0].set_ylabel('y-axis label')
axes[0][0].tick_params(axis='x', rotation=45)
axes[0][0].tick_params(axis='y', rotation=45)
axes[0][0].tick_params(top=True, bottom=False, left=True, right=False, labeltop=True, labelbottom=False, labelleft=True, labelright=False)
axes[0][0].spines['bottom'].set_visible(False)
axes[0][0].spines['right'].set_visible(False)
axes[0][0].get_xaxis().set_visible(True)
axes[0][0].get_yaxis().set_visible(True)
axes[0][0].grid(True)
plt.tight_layout()
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(30,10))
fig.subplots_adjust(top=0.8)
plt.suptitle('Main Title', position = (0.5, 1.0+0.05))
ax0 = plt.subplot2grid((5,2), (0,0), colspan=2, sharex=None, sharey=None)
ax1 = plt.subplot2grid((5,2), (1,0), rowspan=2, sharex=None, sharey=None)
ax2 = plt.subplot2grid((5,2), (1,1), rowspan=2, sharex=None, sharey=None)
ax3 = plt.subplot2grid((5,2), (3,0), rowspan=2, colspan=2, sharex=None, sharey=None)

ax0.set_title('title')
ax0.set_xlabel('x-axis label')
ax0.set_ylabel('y-axis label')
ax0.tick_params(axis='x', rotation=45)
ax0.tick_params(axis='y', rotation=45)
ax0.tick_params(top=True, bottom=False, left=True, right=False, labeltop=True, labelbottom=False, labelleft=True, labelright=False)
ax0.spines['bottom'].set_visible(False)
ax0.spines['right'].set_visible(False)
ax0.get_xaxis().set_visible(True)
ax0.get_yaxis().set_visible(True)
ax0.grid(True)
plt.tight_layout()
import numpy as np
import pandas as pd
import seaborn as sns
g = sns.FacetGrid(data=pd.DataFrame(data=np.random.randint(0, 3, size=(100,2)), columns=['A', 'B']) , col="A",  row="B") 
g.map_dataframe # e.g. g.map_dataframe(sns.histplot, x="A")

g.fig.suptitle('Main Title', position = (0.5, 1.0+0.05))
g.fig.supxlabel('x-axis label')
g.fig.supylabel('y-axis label')
g.add_legend()
g.tight_layout()

text & annotation

import matplotlib.pyplot as plt

plt.text(.9,.9, r'$X_{\alpha}$')
plt.annotate(r'$X_{\alpha}$', xy=(0,0), xytext=(+.8, +.8))
plt.annotate(r'$X_{\alpha}$', xy=(0,0), xytext=(+.2, +.1), arrowprops=dict(arrowstyle="->"))
plt.annotate(r'$X_{\alpha}$', xy=(0,0), xytext=(+.3, +.3), arrowprops=dict(arrowstyle="->", connectionstyle="arc3, rad=.5"))
plt.annotate(r'$X_{\alpha}$', xy=(0,0), xytext=(+.5, +.5), fontsize=20, family="serif", arrowprops=dict(arrowstyle="->", connectionstyle="arc3, rad=.5"))
import matplotlib.pyplot as plt

text = '\n'.join([
    '%-10s' % ('contents', ),
    '%10s' % ('contents', ),
    r'$%s$' % ('contents', ),
    r'$%.2f$' % (0.347, ),
    r'$%.2f$' % (0, ),])
plt.text(0.05, 0.95, text, transform=plt.gca().transAxes, horizontalalignment='left', verticalalignment='top', bbox=dict(alpha=.1))
plt.text(0.50, 0.95, text, transform=plt.gca().transAxes, horizontalalignment='center', verticalalignment='top', bbox=dict(alpha=.1))
plt.text(0.95, 0.95, text, transform=plt.gca().transAxes, horizontalalignment='right', verticalalignment='top', bbox=dict(alpha=.1))
plt.text(0.05, 0.50, text, transform=plt.gca().transAxes, horizontalalignment='left', verticalalignment='center', bbox=dict(alpha=.1))
plt.text(0.50, 0.50, text, transform=plt.gca().transAxes, horizontalalignment='center', verticalalignment='center', bbox=dict(alpha=.1))
plt.text(0.95, 0.50, text, transform=plt.gca().transAxes, horizontalalignment='right', verticalalignment='center', bbox=dict(alpha=.1))
plt.text(0.05, 0.05, text, transform=plt.gca().transAxes, horizontalalignment='left', verticalalignment='bottom', bbox=dict(alpha=.1))
plt.text(0.50, 0.05, text, transform=plt.gca().transAxes, horizontalalignment='center', verticalalignment='bottom', bbox=dict(alpha=.1))
plt.text(0.95, 0.05, text, transform=plt.gca().transAxes, horizontalalignment='right', verticalalignment='bottom', bbox=dict(alpha=.1))

lines

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

data = pd.DataFrame(data=np.random.normal(size=(100,3)), columns=['TS0', 'TS1', 'TS2']).cumsum()
ax = data.plot()
lines = ax.lines

# re-rendering
for line in lines:
    plt.plot(line.get_xdata(), line.get_ydata(), label=line.get_label(),
            color=line.get_color(), linestyle=line.get_linestyle(),
            linewidth=line.get_linewidth(), marker=line.get_marker(),
            markersize=line.get_markersize())
plt.show()

palettes

import seaborn as sns

#sns.choose_colorbrewer_palette(data_type='diverging') # 'sequential', 'diverging', 'qualitative'
#sns.choose_light_palette()
#sns.choose_diverging_palette()
#sns.choose_dark_palette()
#sns.choose_cubehelix_palette()

# available palettes
sns.palettes.color_palette
sns.palettes.crayon_palette
sns.palettes.cubehelix_palette
sns.palettes.dark_palette
sns.palettes.diverging_palette
sns.palettes.hls_palette
sns.palettes.husl_palette
sns.palettes.light_palette
sns.palettes.mpl_palette

list of styles

import matplotlib.lines as mlines
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt

mlines.lineMarkers
mlines.lineStyles
mcolors.CSS4_COLORS

plt.style.available
plt.colormaps()

colormap

# https://matplotlib.org/stable/tutorials/colors/colormaps.html
# https://matplotlib.org/stable/tutorials/intermediate/color_cycle.html
# https://matplotlib.org/2.0.2/examples/color/named_colors.html
# https://matplotlib.org/2.0.2/examples/lines_bars_and_markers/marker_reference.html
# https://matplotlib.org/2.0.2/examples/lines_bars_and_markers/line_styles_reference.html

from cycler import cycler
import cmasher as cmr
import matplotlib.pyplot as plt

# colormap to color-code
plt.colormaps() # list of color-map
colormap = 'viridis'
colors = cmr.take_cmap_colors(colormap, None, cmap_range=(0.2, 0.8), return_fmt='hex')
colors = cmr.take_cmap_colors(colormap, 5, return_fmt='hex')

# custom color-cycler
default_cycler = (cycler(color=['r', 'g', 'b', 'y']) + cycler(linestyle=['-', '--', ':', '-.']))
plt.rcParams['axes.prop_cycle'] = default_cycler
# https://matplotlib.org/stable/users/explain/artists/color_cycle.html
import numpy as np
import pandas as pd

from cycler import cycler
import cmasher as cmr
import matplotlib.pyplot as plt

# data
df = pd.DataFrame(data=np.random.normal(0, 1, size=(500, 10)).cumsum(axis=0), columns= list('ABCDEFGHIJ'))

# cyclic-colormap
default_cycler = cycler(color=cmr.take_cmap_colors('viridis', 10, return_fmt='hex')) # plt.colormaps()
plt.rcParams['axes.prop_cycle'] = default_cycler # method1 
plt.rc('axes', prop_cycle=default_cycler) # method2
plt.gca().set_prop_cycle(default_cycler) # method3

# visualization with lines with cyclic-color
plt.plot(df)
# https://matplotlib.org/stable/users/explain/artists/color_cycle.html
import numpy as np
import pandas as pd

from cycler import cycler
import cmasher as cmr
import matplotlib.pyplot as plt

# data
df = pd.DataFrame(data=np.random.normal(0, 1, size=(500, 12)).cumsum(axis=0))

cycler_components = pd.MultiIndex.from_product([['r', 'g', 'b'], ['-', '--'], ['1', '2']]).to_frame() # 3 * 2 * 2 = 12
colors = cycler_components[0].tolist() # ['r', 'g', 'b', 'y', ...] >>> https://matplotlib.org/2.0.2/examples/color/named_colors.html
lines = cycler_components[1].tolist() # ['-', '--', ':', '-.', ...] >>> https://matplotlib.org/2.0.2/examples/lines_bars_and_markers/line_styles_reference.html
markers = cycler_components[2].tolist() # ['1', '2', '3', '4', ...] >>> https://matplotlib.org/2.0.2/examples/lines_bars_and_markers/marker_reference.html

default_cycler = (cycler(color=colors) + cycler(linestyle=lines) + cycler(marker=markers))
plt.gca().set_prop_cycle(default_cycler)
plt.plot(df)

1-Dimensional Visualization 

Categorical Variables

  • Pie Plot
  • Bar Plot

Continuous Variables

  • Line Plot
  • Box Plot
  • Hist Plot
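A compact sketch of the 1-dimensional plot types listed above, using pandas' plotting API on synthetic data (column names are illustrative):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({'Category': np.random.choice(list('ABC'), size=300),
                   'Continuous': np.random.normal(size=300).cumsum()})

fig, axes = plt.subplots(1, 5, figsize=(30, 5))
df['Category'].value_counts().plot.pie(ax=axes[0])                    # pie plot
df['Category'].value_counts().plot.bar(ax=axes[1])                    # bar plot
df['Continuous'].plot.line(grid=True, ax=axes[2])                     # line plot
df['Continuous'].plot.box(ax=axes[3])                                 # box plot
df['Continuous'].plot.hist(bins=30, edgecolor='white', ax=axes[4])    # hist plot
plt.tight_layout()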

High-Dimensional Visualization

Categorical Variables & Categorical Variables

  • Heatmap using a Cross Table (Frequency Analysis; joint probability) or an Association Table (Association Analysis; conditional probability)
  • Clustermap using a Cross Table (Frequency Analysis; joint probability) or an Association Table (Association Analysis; conditional probability)
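A minimal sketch of the heatmap variant, assuming two synthetic categorical columns: the cross table holds joint frequencies, and normalizing by row turns it into an association table of conditional probabilities (the clustermap example follows):

import numpy as np
import pandas as pd
import seaborn as sns

df = pd.DataFrame(np.random.randint(0, 5, size=(200, 2)), columns=['Cat1', 'Cat2'])
cross = pd.crosstab(index=df['Cat1'], columns=df['Cat2'])                            # frequency analysis: joint counts
association = pd.crosstab(index=df['Cat1'], columns=df['Cat2'], normalize='index')   # association analysis: P(Cat2 | Cat1)
sns.heatmap(cross, cmap='Blues', annot=True, fmt='d')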
import numpy as np
import pandas as pd
import seaborn as sns

matrix = pd.DataFrame(np.random.randint(0,5, size=(50,4)))

#Create additional row_colors1 here
labels1 = np.random.randint(0,5, size=50)
lut1 = dict(zip(set(labels1), sns.hls_palette(len(set(labels1)), l=0.5, s=0.8)))
row_colors1 = pd.DataFrame(labels1)[0].map(lut1)

#Create additional row_colors2 here
labels2 = np.random.randint(0, 2, size=50) # two label values; randint(0, 1) would yield a single constant label
lut2 = dict(zip(set(labels2), sns.hls_palette(len(set(labels2)), l=0.5, s=0.8)))
row_colors2 = pd.DataFrame(labels2)[0].map(lut2)

sns.clustermap(matrix, row_colors=[row_colors1, row_colors2], col_cluster=False, linewidths=0.1, cmap='coolwarm')

 

Categorical Variables & Continuous Variables

  • Box Plot by Category
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns

size = 100
categorical_columns = ['Binary', 'Discrete_B', 'Discrete_P', 'Category_1', 'Category_2', 'Category_3']
numerical_columns = ['Continuous1', 'Continuous2', 'Continuous3']
data = pd.DataFrame(np.c_[
     stats.bernoulli.rvs(p=2/3, size=size),
     stats.binom.rvs(n=5, p=2/3, size=size),
     stats.poisson.rvs(mu=5, size=size),
     stats.multinomial.rvs(n=5, p=[.3, .3, .3], size=size),
     stats.norm.rvs(0,1, size=size),
     stats.norm.rvs(0,1, size=size),
     stats.norm.rvs(0,1, size=size),    
], columns= categorical_columns + numerical_columns)

# grid by categorical variables
sns.set_theme(style="ticks") # white, dark, whitegrid, darkgrid, ticks
g = sns.FacetGrid(data, col="Discrete_B",  row="Binary") 

# sns.scatterplot, sns.lineplot
# sns.boxplot(outlier), sns.violinplot(variance), sns.boxenplot(variance), sns.stripplot(distribution), sns.pointplot(mean, variance), sns.barplot(mean)
g.map_dataframe(sns.boxplot, x="Discrete_P", y="Continuous1", hue='Category_1') # x: numerical variable, y: numerical variable, hue: categorical variable
g.fig.suptitle('Seaborn Plot')
g.add_legend()
g.tight_layout()
  • Bar Plot by Category
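A minimal sketch for the bar-plot bullet above, on the same kind of synthetic data (sns.barplot draws the per-category mean of the numerical variable with a confidence interval):

import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns

size = 100
df = pd.DataFrame({'Discrete_B': stats.binom.rvs(n=5, p=2/3, size=size),
                   'Binary': stats.bernoulli.rvs(p=2/3, size=size),
                   'Continuous1': stats.norm.rvs(0, 1, size=size)})
sns.barplot(data=df, x='Discrete_B', y='Continuous1', hue='Binary')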

Continuous Variables & Continuous Variables

  • Scatter Plot
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.datasets import make_classification

X, y = make_classification(
    n_samples=300,
    n_features=5,
    n_informative=2,
    n_redundant=1,
    n_repeated=0,
    n_classes=2,
    n_clusters_per_class=2,
    random_state=np.random.randint(1000))
data = pd.DataFrame(np.c_[X,y], columns=[ 'X'+str(i) for i in range(X.shape[1])] + ['y'])

# grid by categorical variables
sns.set_theme(style="ticks") # white, dark, whitegrid, darkgrid, ticks
sns.pairplot(data, hue='y')
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.datasets import make_regression

X, y, coef = make_regression(n_samples=1000, n_features=5, n_informative=2, n_targets=1, bias=100, effective_rank=None, tail_strength=0.5, noise=50.0, shuffle=True, coef=True, random_state=np.random.randint(1000))
data = pd.DataFrame(np.c_[X,y], columns=[ 'X'+str(i) for i in range(X.shape[1])] + ['y'], index=pd.date_range('00:00:00', periods=1000, freq='d'))
data['year'] = data.index.year
data['month'] = data.index.month
data['date'] = data.index.year + data.index.month/12 + data.index.day/365

# grid by categorical variables
sns.set_theme(style="ticks") # white, dark, whitegrid, darkgrid, ticks

g = sns.FacetGrid(data, col="month",  row="year") 
g.map_dataframe(sns.regplot, x="date", y="y") # x: numerical variable, y: numerical variable, hue: categorical variable
# sns.lmplot
  • Contour Plot (a minimal sketch follows the heatmap/clustermap example below)
  • Heatmap & Clustermap for Correlation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_classification
plt.figure(figsize=(10,10))

X, y = make_classification(
    n_samples=300,
    n_features=5,
    n_informative=2,
    n_redundant=1,
    n_repeated=0,
    n_classes=1,
    n_clusters_per_class=1,
    random_state=np.random.randint(1000))
corr = pd.DataFrame(X).corr().values

sns.heatmap(
    data=corr,
    mask=np.triu(np.ones_like(corr, dtype=bool)),
    cmap="RdBu",
    center=0,
    square=True,
    linewidths=0.5,
    cbar_kws={"shrink": 0.5},
    annot=True, fmt='.3g' 
)

sns.clustermap(
    data=corr, 
    cmap='Blues', 
    row_colors=None, 
    col_colors=None, 
    row_cluster=False, 
    col_cluster=False, 
    dendrogram_ratio=0.05, 
    cbar_pos=None, 
    annot=True, 
    fmt='.3g'
)
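For the contour-plot bullet above, a minimal sketch using a bivariate KDE over two synthetic correlated variables (plt.contour over a meshgrid is the pure-matplotlib alternative):

import numpy as np
import pandas as pd
import seaborn as sns

df = pd.DataFrame(np.random.multivariate_normal([0, 0], [[1, .6], [.6, 1]], size=500), columns=['X0', 'X1'])
sns.kdeplot(data=df, x='X0', y='X1', levels=8)   # contour lines of the estimated joint density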

 


Analysis Visualization

  • Data Exploration
    • Descriptive Statistics
    • Conditional probability 
    • Dependence between variables
    • Decision boundary and decision path
  • Data wrangling
    • Preprocessing
      • Model scenario: modeling assumption for linearity
        • Normality / Homoscedasticity / Vectorization
      • Data scenario
        • Missing-value preprocessing
        • Sampling for imbalanced data
        • Outlier removal
        • Derived variables
  • Modeling
    • Models (generative/discriminative/function)
      • Parameters 
    • Scoring types
      • classification: accuracy, recall, precision, f1
      • regression: r-squared, mse
    • Evaluation domain
      • train, validation, test
        • splitting

pandas

import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns


size = 100
categorical_columns = ['Binary', 'Discrete_B', 'Discrete_P', 'Discrete_M1', 'Discrete_M2', 'Discrete_M3', 'Discrete_U', 'Discrete_G', 'Discrete_N']
numerical_columns = ['Continuous_N', 'Continuous_U', 'Continuous_E']
data = pd.DataFrame(np.c_[
     stats.bernoulli.rvs(p=2/3, size=size),
     stats.binom.rvs(n=5, p=2/3, size=size),
     stats.poisson.rvs(mu=5, size=size),
     stats.multinomial.rvs(n=5, p=[.3, .3, .3], size=size),
     stats.randint.rvs(0, 10, size=size),
     stats.geom.rvs(p=.2, size=size),
     stats.nbinom.rvs(n=5, p=.2, size=size),
     stats.norm.rvs(0,1, size=size),
     stats.uniform.rvs(0,10, size=size),
     stats.expon.rvs(0, 1, size=size),
], columns= categorical_columns + numerical_columns)
data['Response'] = stats.bernoulli.rvs(p=2/3, size=size)


# frequency analysis
#data[numerical_columns].hist(bins=30, edgecolor='white', grid=True, figsize=(30,10))
#data[numerical_columns].plot.kde(grid=True, figsize=(30,10), subplots=True)
#data['Discrete_B'].value_counts().plot.pie()
#data['Discrete_P'].value_counts().plot.bar() # rank sorting
#data['Discrete_U'].value_counts().plot.barh().invert_yaxis() # rank sorting
#data['Continuous_U'].plot.hist(bins=30, edgecolor='white', grid=True, figsize=(30,10))
#data['Continuous_E'].plot.kde(grid=True, figsize=(30,10))

# relationship analysis
#data[numerical_columns + ['Discrete_P']].boxplot(by='Discrete_P', figsize=(30,10))
#data.boxplot(column='Continuous_E', by='Discrete_P', figsize=(30,10))
#data.plot.scatter(x='Continuous_U', y='Continuous_N', figsize=(30,10)) 
axes = data[numerical_columns].plot.line(grid=True, color='DarkRed', figsize=(30,10), subplots=True) # with .set_index()

list(map(lambda ax: ax.legend(loc='upper right'), axes))
list(map(lambda ax: ax.spines['top'].set_visible(False), axes))
list(map(lambda ax: ax.spines['right'].set_visible(False), axes))
plt.tight_layout()

seaborn

import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns


size = 100
categorical_columns = ['Binary', 'Discrete_B', 'Discrete_P', 'Discrete_M1', 'Discrete_M2', 'Discrete_M3', 'Discrete_U', 'Discrete_G', 'Discrete_N']
numerical_columns = ['Continuous_N', 'Continuous_U', 'Continuous_E']
data = pd.DataFrame(np.c_[
     stats.bernoulli.rvs(p=2/3, size=size),
     stats.binom.rvs(n=5, p=2/3, size=size),
     stats.poisson.rvs(mu=5, size=size),
     stats.multinomial.rvs(n=5, p=[.3, .3, .3], size=size),
     stats.randint.rvs(0, 10, size=size),
     stats.geom.rvs(p=.2, size=size),
     stats.nbinom.rvs(n=5, p=.2, size=size),
     stats.norm.rvs(0,1, size=size),
     stats.uniform.rvs(0,10, size=size),
     stats.expon.rvs(0, 1, size=size),
], columns= categorical_columns + numerical_columns)
data['Response'] = stats.bernoulli.rvs(p=2/3, size=size)


plt.figure(figsize=(30,10))
sns.set(style="darkgrid")
sns.countplot(data=data, x='Discrete_B' , hue='Response', ax=plt.subplot2grid((3,2), (0,0)))
sns.histplot(data=data, x='Continuous_U' , hue='Response', kde=True, ax=plt.subplot2grid((3,2), (0,1)))
sns.barplot(data=data, x='Discrete_B', y='Continuous_E' , hue='Response', ax=plt.subplot2grid((3,2), (1,0)))
sns.boxplot(data=data, x='Discrete_B', y='Continuous_E', hue='Response', ax=plt.subplot2grid((3,2), (1,1)))
sns.scatterplot(data=data, x='Continuous_U', y='Continuous_N', hue='Response', ax=plt.subplot2grid((3,2), (2,0)))
sns.kdeplot(data=data, x='Continuous_U', y='Continuous_E', hue='Response', ax=plt.subplot2grid((3,2), (2,1)))
sns.pairplot(data=data[numerical_columns + ['Response']], hue='Response', kind='kde') # kind : {'scatter', 'kde', 'hist', 'reg'}
plt.tight_layout()

Descriptive Statistics

Task: classification

#
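The placeholder above could be filled along these lines: class balance plus per-class summaries of the explanatory variables (a minimal sketch on synthetic data):

import pandas as pd
from scipy import stats

size = 100
data = pd.DataFrame({'Continuous': stats.norm.rvs(0, 1, size=size),
                     'Response': stats.bernoulli.rvs(p=2/3, size=size)})
data['Response'].value_counts(normalize=True)   # class balance
data.groupby('Response').describe()             # per-class descriptive statistics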

Task: regression

#
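Likewise for regression, a minimal sketch: location/dispersion of the target and its linear dependence on the explanatory variables:

import pandas as pd
from scipy import stats

size = 100
data = pd.DataFrame({'Continuous': stats.norm.rvs(0, 1, size=size),
                     'Response': stats.norm.rvs(0, 1, size=size)})
data['Response'].describe()   # location and dispersion of the target
data.corr()                   # linear dependence between variables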

(axis:0) Conditional probability distribution

Task: classification

import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns


size = 1000
categorical_columns = ['Binary', 'Discrete_B', 'Discrete_P', 'Discrete_M1', 'Discrete_M2', 'Discrete_M3', 'Discrete_U', 'Discrete_G', 'Discrete_N']
numerical_columns = ['Continuous_N', 'Continuous_U', 'Continuous_E']
data = pd.DataFrame(np.c_[
     stats.bernoulli.rvs(p=2/3, size=size),
     stats.binom.rvs(n=5, p=2/3, size=size),
     stats.poisson.rvs(mu=5, size=size),
     stats.multinomial.rvs(n=5, p=[.3, .3, .3], size=size),
     stats.randint.rvs(0, 10, size=size),
     stats.geom.rvs(p=.2, size=size),
     stats.nbinom.rvs(n=5, p=.2, size=size),
     stats.norm.rvs(0,1, size=size),
     stats.uniform.rvs(0,10, size=size),
     stats.expon.rvs(0, 1, size=size),
], columns= categorical_columns + numerical_columns)
data['Response'] = stats.bernoulli.rvs(p=2/3, size=size)


plt.figure(figsize=(30,10))
sns.set(style="darkgrid")
g = sns.FacetGrid(data=data, row='Binary', col='Discrete_P')
g.map_dataframe(sns.countplot, x='Response', hue='Discrete_B')
g.add_legend()
g.tight_layout()

Task: regression

import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns


size = 1000
categorical_columns = ['Binary', 'Discrete_B', 'Discrete_P', 'Discrete_M1', 'Discrete_M2', 'Discrete_M3', 'Discrete_U', 'Discrete_G', 'Discrete_N']
numerical_columns = ['Continuous_N', 'Continuous_U', 'Continuous_E']
data = pd.DataFrame(np.c_[
     stats.bernoulli.rvs(p=2/3, size=size),
     stats.binom.rvs(n=5, p=2/3, size=size),
     stats.poisson.rvs(mu=5, size=size),
     stats.multinomial.rvs(n=5, p=[.3, .3, .3], size=size),
     stats.randint.rvs(0, 10, size=size),
     stats.geom.rvs(p=.2, size=size),
     stats.nbinom.rvs(n=5, p=.2, size=size),
     stats.norm.rvs(0,1, size=size),
     stats.uniform.rvs(0,10, size=size),
     stats.expon.rvs(0, 1, size=size),
], columns= categorical_columns + numerical_columns)
data['Response'] = stats.norm.rvs(0,1, size=size)


plt.figure(figsize=(30,10))
sns.set(style="darkgrid")
g = sns.FacetGrid(data=data, row='Discrete_B', col='Discrete_P')
g.map_dataframe(sns.histplot, x='Response', hue='Binary')
g.add_legend()
g.tight_layout()

 

(axis:0) Conditional dependence for the response variable; explanatory variable distributions by class condition

Task: Classification

classification for numerical explanatory variables

checking points: informative, redundant, repeated, clusters by class, correlation

import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns


size = 100
categorical_columns = ['Binary', 'Discrete_B', 'Discrete_P', 'Discrete_M1', 'Discrete_M2', 'Discrete_M3', 'Discrete_U', 'Discrete_G', 'Discrete_N']
numerical_columns = ['Continuous_N', 'Continuous_U', 'Continuous_E']
data = pd.DataFrame(np.c_[
     stats.bernoulli.rvs(p=2/3, size=size),
     stats.binom.rvs(n=5, p=2/3, size=size),
     stats.poisson.rvs(mu=5, size=size),
     stats.multinomial.rvs(n=5, p=[.3, .3, .3], size=size),
     stats.randint.rvs(0, 10, size=size),
     stats.geom.rvs(p=.2, size=size),
     stats.nbinom.rvs(n=5, p=.2, size=size),
     stats.norm.rvs(0,1, size=size),
     stats.uniform.rvs(0,10, size=size),
     stats.expon.rvs(0, 1, size=size),
], columns= categorical_columns + numerical_columns)
data['Response'] = stats.bernoulli.rvs(p=2/3, size=size)



sns.set(style="darkgrid")
g = sns.pairplot(data=data[numerical_columns + ['Response']], hue='Response', kind='scatter') # kind : {'scatter', 'kde', 'hist', 'reg'}
g.map_lower(sns.kdeplot, levels=4, color=".2")
g.tight_layout()

classification for categorical explanatory variables

checking points: informative, redundant, repeated, clusters by class, correlation

import numpy as np
import pandas as pd
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns


size = 100
categorical_columns = ['Binary', 'Discrete_B', 'Discrete_P', 'Discrete_M1', 'Discrete_M2', 'Discrete_M3', 'Discrete_U', 'Discrete_G', 'Discrete_N']
numerical_columns = ['Continuous_N', 'Continuous_U', 'Continuous_E']
data = pd.DataFrame(np.c_[
     stats.bernoulli.rvs(p=2/3, size=size),
     stats.binom.rvs(n=5, p=2/3, size=size),
     stats.poisson.rvs(mu=5, size=size),
     stats.multinomial.rvs(n=5, p=[.3, .3, .3], size=size),
     stats.randint.rvs(0, 10, size=size),
     stats.geom.rvs(p=.2, size=size),
     stats.nbinom.rvs(n=5, p=.2, size=size),
     stats.norm.rvs(0,1, size=size),
     stats.uniform.rvs(0,10, size=size),
     stats.expon.rvs(0, 1, size=size),
], columns= categorical_columns + numerical_columns)
data['Response'] = stats.bernoulli.rvs(p=2/3, size=size)


sns.set(style="darkgrid")
gs = mpl.gridspec.GridSpec(len(categorical_columns), 4)
fig = plt.figure(figsize=(30, 30))
for row, categorical_column in enumerate(categorical_columns):
    sns.countplot(data=data, x=categorical_column, ax=fig.add_subplot(gs[row, 0]))
    sns.countplot(data=data, x=categorical_column, hue='Response', ax=fig.add_subplot(gs[row, 1:]))
plt.suptitle('Informative Characteristics')
plt.tight_layout()

 

from tqdm import tqdm
from itertools import combinations
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns


size = 100
categorical_columns = ['Binary', 'Discrete_B', 'Discrete_P', 'Discrete_M1', 'Discrete_M2', 'Discrete_M3', 'Discrete_U', 'Discrete_G', 'Discrete_N']
numerical_columns = ['Continuous_N', 'Continuous_U', 'Continuous_E']
data = pd.DataFrame(np.c_[
     stats.bernoulli.rvs(p=2/3, size=size),
     stats.binom.rvs(n=5, p=2/3, size=size),
     stats.poisson.rvs(mu=5, size=size),
     stats.multinomial.rvs(n=5, p=[.3, .3, .3], size=size),
     stats.randint.rvs(0, 10, size=size),
     stats.geom.rvs(p=.2, size=size),
     stats.nbinom.rvs(n=5, p=.2, size=size),
     stats.norm.rvs(0,1, size=size),
     stats.uniform.rvs(0,10, size=size),
     stats.expon.rvs(0, 1, size=size),
], columns= categorical_columns + numerical_columns)
data['Response'] = stats.bernoulli.rvs(p=2/3, size=size)

paired_features = list(combinations(categorical_columns, 2))
num_paired_features = len(paired_features)

target_classes = data['Response'].unique()
sns.set(style="darkgrid")
gs = mpl.gridspec.GridSpec(int(2*num_paired_features), 7)
fig = plt.figure(figsize=(30, 100))
#fig.subplots_adjust(top=0.8)
plt.suptitle('Redundant/Repeated/Correlation Characteristics', position = (0.5, 1.0+0.0005))
for row, paired_categorical_column in tqdm(enumerate(paired_features), total=num_paired_features):
    sns.countplot(data=data, x=paired_categorical_column[0], hue='Response', ax=fig.add_subplot(gs[int(2*row):int(2*row)+target_classes.size, 0]))
    sns.countplot(data=data, x=paired_categorical_column[1], hue='Response', ax=fig.add_subplot(gs[int(2*row):int(2*row)+target_classes.size, 1]))
    joint = pd.crosstab(index=[data['Response'], data[paired_categorical_column[0]]], columns=data[paired_categorical_column[1]], margins=False)
    for i, target_class in enumerate(target_classes):
        ax = fig.add_subplot(gs[int(2*row)+i, 2:])
        ax.set_title('Class: '+str(target_class))
        sns.heatmap(joint.xs(target_class, level=0, axis=0), cmap='Blues', ax=ax)
plt.tight_layout()

Task: Regression

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px

y = pd.Series(data=np.random.normal(size=1000).cumsum(), index=pd.date_range(end='00:00:00', periods=1000, freq='B'))

plt.figure(figsize=(30,5))
plt.plot(y)
plt.fill_between(x=y[y.index.year == 2022].index, y1=y.max(), y2=y.min(), alpha=.3)
px.line(y, width=2000, height=400)

(axis:1) Dependence between explanatory variables

import numpy as np
import pandas as pd
import seaborn as sns

size=1000
data = pd.DataFrame(np.c_[
    np.r_[np.random.normal( 0, 1, size=size), np.random.normal(0, 1, size=size)], 
    np.r_[np.random.normal(0, 1, size=size), np.random.normal(0, 1, size=size)]
], columns=['X0', 'X1'])
data['X2'] = np.random.normal( 0, 1, size=int(2*size)) 

# dependency
sampling_index = data.index[(data['X2'] < 0 )].to_series().sample(frac=.8).index
data.loc[sampling_index, 'X2'] = data.loc[sampling_index, 'X0']

sns.set_theme(style="ticks") # white, dark, whitegrid, darkgrid, ticks
sns.pairplot(data)

 

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA

X, y = make_classification(
    n_samples=300,
    n_features=5,
    n_informative=2,
    n_redundant=3,
    n_repeated=0,
    n_classes=2,
    weights=[.1, .9],
    n_clusters_per_class=2,
    random_state=np.random.randint(1000))

X_new = PCA().fit_transform(X)

plt.figure(figsize=(20,10))
sns.heatmap(
    pd.DataFrame(X).corr(),
    cmap="RdBu",
    center=0,
    square=True,
    linewidths=0.5,
    cbar_kws={"shrink": 0.5},
    annot=True, fmt='.4g',
    ax=plt.subplot2grid((1,2), (0,0))
)
sns.heatmap(
    pd.DataFrame(X_new).corr(),
    cmap="RdBu",
    center=0,
    square=True,
    linewidths=0.5,
    cbar_kws={"shrink": 0.5},
    annot=True, fmt='.4g',
    ax=plt.subplot2grid((1,2), (0,1))
)

Decision boundary and decision path

Task: Classification

import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.datasets import make_classification, make_regression
from sklearn.preprocessing import PowerTransformer, QuantileTransformer, StandardScaler, Normalizer, RobustScaler, MinMaxScaler, MaxAbsScaler
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, export_graphviz
import graphviz

X, y = make_classification(n_samples=1000, n_features=5, n_informative=2, n_redundant=1, n_repeated=0, n_classes=2, n_clusters_per_class=1, weights=None, flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, shuffle=True, random_state=None)
data = pd.DataFrame(np.c_[X, y], columns=['X0', 'X1', 'X2', 'X3', 'X4', 'y'])
numerical_columns = ['X0', 'X1', 'X2', 'X3', 'X4']

# decision boundary
#sns.pairplot(data, hue='y')
#sns.pairplot(pd.DataFrame(np.c_[StandardScaler().fit_transform(data[numerical_columns]), data['y']], columns=numerical_columns + ['y']), hue='y')
#sns.pairplot(pd.DataFrame(np.c_[MaxAbsScaler().fit_transform(data[numerical_columns]), data['y']], columns=numerical_columns + ['y']), hue='y')
#sns.pairplot(pd.DataFrame(np.c_[PowerTransformer(method='yeo-johnson').fit_transform(data[numerical_columns]), data['y']], columns=numerical_columns + ['y']), hue='y')
sns.pairplot(pd.DataFrame(np.c_[QuantileTransformer(output_distribution='normal').fit_transform(data[numerical_columns]), data['y']], columns=numerical_columns + ['y']), hue='y')

# decision path
model = DecisionTreeClassifier(criterion='gini', min_impurity_decrease=0.01).fit(QuantileTransformer(output_distribution='normal').fit_transform(data[numerical_columns]), data['y'])
dot_data = export_graphviz(model, out_file=None, filled=True, rounded=True, special_characters=True,
                           feature_names=list(map(lambda x: 'X'+str(x), range(X.shape[1]))),
                           class_names=model.classes_.astype(str).tolist()) # class_names must follow model.classes_ ordering
display(graphviz.Source(dot_data))

 

Task: Regression

import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.datasets import make_classification, make_regression
from sklearn.preprocessing import PowerTransformer, QuantileTransformer, StandardScaler, Normalizer, RobustScaler, MinMaxScaler, MaxAbsScaler
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, export_graphviz
import graphviz

X, y, coef = make_regression(n_samples=300, n_features=5, n_informative=2, n_targets=1, bias=100, effective_rank=None, tail_strength=0.5, noise=50.0, shuffle=True, coef=True, random_state=np.random.randint(1000))
data = pd.DataFrame(np.c_[X, y], columns=['X0', 'X1', 'X2', 'X3', 'X4', 'y'])
data['hue'] = data['y'].diff().fillna(data['y'].iloc[0]).apply(lambda diff_y: 'increasing' if diff_y > 0 else 'decreasing')
numerical_columns = ['X0', 'X1', 'X2', 'X3', 'X4']

# decision boundary
#sns.pairplot(data, hue='hue')
#sns.pairplot(pd.DataFrame(np.c_[StandardScaler().fit_transform(data[numerical_columns]), data['hue']], columns=numerical_columns + ['hue']), hue='hue')
#sns.pairplot(pd.DataFrame(np.c_[MaxAbsScaler().fit_transform(data[numerical_columns]), data['hue']], columns=numerical_columns + ['hue']), hue='hue')
#sns.pairplot(pd.DataFrame(np.c_[PowerTransformer(method='yeo-johnson').fit_transform(data[numerical_columns]), data['hue']], columns=numerical_columns + ['hue']), hue='hue')
sns.pairplot(pd.DataFrame(np.c_[QuantileTransformer(output_distribution='normal').fit_transform(data[numerical_columns]), data['hue']], columns=numerical_columns + ['hue']), hue='hue')
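For symmetry with the classification case, a decision-path sketch reusing the variables above (DecisionTreeRegressor with a depth cap so the rendered tree stays readable; class_names does not apply to regression):

# decision path
model = DecisionTreeRegressor(max_depth=3).fit(QuantileTransformer(output_distribution='normal').fit_transform(data[numerical_columns]), data['y'])
dot_data = export_graphviz(model, out_file=None, filled=True, rounded=True, special_characters=True,
                           feature_names=numerical_columns)
display(graphviz.Source(dot_data))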
