本文基于sklearn进行数据的特征工程
0.数据的导入
from sklearn.datasets import load_iris

# Load the classic iris dataset: 150 samples, 4 numeric features, 3 classes.
iris = load_iris()
print(iris.data[:5], len(iris.data))
print(iris.target[:5], len(iris.target))
[[ 5.1 3.5 1.4 0.2]
[ 4.9 3. 1.4 0.2]
[ 4.7 3.2 1.3 0.2]
[ 4.6 3.1 1.5 0.2]
[ 5. 3.6 1.4 0.2]] 150
[0 0 0 0 0] 150
1.数据预处理
1.1无量纲化
1.1.1标准化
from sklearn.preprocessing import StandardScaler

# Standardize each feature to zero mean and unit variance (z-score).
iris_standar = StandardScaler().fit_transform(iris.data)
print(iris_standar[:5])
[[-0.90068117 1.03205722 -1.3412724 -1.31297673]
[-1.14301691 -0.1249576 -1.3412724 -1.31297673]
[-1.38535265 0.33784833 -1.39813811 -1.31297673]
[-1.50652052 0.10644536 -1.2844067 -1.31297673]
[-1.02184904 1.26346019 -1.3412724 -1.31297673]]
1.1.2区间缩放
from sklearn.preprocessing import MinMaxScaler

# Rescale each feature linearly into the [0, 1] interval.
iris_minmax = MinMaxScaler().fit_transform(iris.data)
print(iris_minmax[:5])
[[ 0.22222222 0.625 0.06779661 0.04166667]
[ 0.16666667 0.41666667 0.06779661 0.04166667]
[ 0.11111111 0.5 0.05084746 0.04166667]
[ 0.08333333 0.45833333 0.08474576 0.04166667]
[ 0.19444444 0.66666667 0.06779661 0.04166667]]
1.2对定量特征进行二值化
from sklearn.preprocessing import Binarizer

# Binarize quantitative features: values > 3 become 1, otherwise 0.
iris_binarizer = Binarizer(threshold=3).fit_transform(iris.data)
print(iris_binarizer[:5])
[[ 1. 1. 0. 0.]
[ 1. 0. 0. 0.]
[ 1. 1. 0. 0.]
[ 1. 1. 0. 0.]
[ 1. 1. 0. 0.]]
1.3对定性特征进行哑编码
from sklearn.preprocessing import OneHotEncoder

# One-hot (dummy) encode the class labels. The labels must be reshaped to a
# column vector because OneHotEncoder expects a 2-D array; the result is a
# sparse matrix (hence the "(row, col) value" print format below).
iris_onehotencoder = OneHotEncoder().fit_transform(iris.target.reshape((-1, 1)))
print(iris.target[-5:])
print(iris.target.reshape((-1, 1))[-5:])
print(iris_onehotencoder[-5:])
[2 2 2 2 2]
[[2]
[2]
[2]
[2]
[2]]
(0, 2) 1.0
(1, 2) 1.0
(2, 2) 1.0
(3, 2) 1.0
(4, 2) 1.0
1.4缺失值计算
from numpy import vstack, array, nan
# NOTE: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22;
# SimpleImputer is its replacement (default strategy='mean' matches the
# old Imputer behaviour and the output shown below).
from sklearn.impute import SimpleImputer

# Prepend one all-NaN row, then fill missing values with each column's mean.
iris_imputer = SimpleImputer().fit_transform(vstack((array([nan, nan, nan, nan]), iris.data)))
print(iris_imputer[:5], len(iris_imputer))
[[ 5.84333333 3.054 3.75866667 1.19866667]
[ 5.1 3.5 1.4 0.2 ]
[ 4.9 3. 1.4 0.2 ]
[ 4.7 3.2 1.3 0.2 ]
[ 4.6 3.1 1.5 0.2 ]] 151
1.5数据变换
from sklearn.preprocessing import PolynomialFeatures

# Expand to degree-2 polynomial features: bias term, the 4 originals,
# their squares and pairwise products (4 -> 15 columns).
iris_pol = PolynomialFeatures().fit_transform(iris.data)
print(iris_pol[:5])
[[ 1. 5.1 3.5 1.4 0.2 26.01 17.85 7.14 1.02 12.25
4.9 0.7 1.96 0.28 0.04]
[ 1. 4.9 3. 1.4 0.2 24.01 14.7 6.86 0.98 9. 4.2
0.6 1.96 0.28 0.04]
[ 1. 4.7 3.2 1.3 0.2 22.09 15.04 6.11 0.94 10.24
4.16 0.64 1.69 0.26 0.04]
[ 1. 4.6 3.1 1.5 0.2 21.16 14.26 6.9 0.92 9.61
4.65 0.62 2.25 0.3 0.04]
[ 1. 5. 3.6 1.4 0.2 25. 18. 7. 1. 12.96
5.04 0.72 1.96 0.28 0.04]]
from numpy import log1p
from sklearn.preprocessing import FunctionTransformer

# Apply an arbitrary element-wise transform; log1p(x) = log(1 + x).
iris_ftf = FunctionTransformer(log1p).fit_transform(iris.data)
print(iris_ftf[:5])
[[ 1.80828877 1.5040774 0.87546874 0.18232156]
[ 1.77495235 1.38629436 0.87546874 0.18232156]
[ 1.74046617 1.43508453 0.83290912 0.18232156]
[ 1.7227666 1.41098697 0.91629073 0.18232156]
[ 1.79175947 1.5260563 0.87546874 0.18232156]]
2.特征选择
2.1Filter
2.1.1方差选择法
from sklearn.feature_selection import VarianceThreshold

# Variance-based filter: keep only features whose variance exceeds 3.
# For iris only one column survives this threshold (see output below).
iris_vt = VarianceThreshold(threshold=3).fit_transform(iris.data)
print(iris_vt, len(iris_vt))
[[ 1.4]
[ 1.4]
[ 1.3]
[ 1.5]
[ 1.4]
[ 1.7]
[ 1.4]
.......
2.1.2相关系数法(此处使用第二篇博客进行修改)
from sklearn.feature_selection import SelectKBest, chi2
from scipy.stats import pearsonr  # imported for the correlation method; chi2 is used below instead (per the referenced blog's fix)

# Keep the 2 best features according to the chi-squared score.
iris_pear = SelectKBest(chi2, k=2).fit_transform(iris.data, iris.target)
print(iris_pear, len(iris_pear))
[[ 1.4 0.2]
[ 1.4 0.2]
[ 1.3 0.2]
[ 1.5 0.2]
[ 1.4 0.2]
[ 1.7 0.4]
[ 1.4 0.3]
[ 1.5 0.2]
[ 1.4 0.2]
..........
2.1.3卡方检验
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2

# Chi-squared test between each feature and the labels; keep the top 2.
iris_chi2 = SelectKBest(chi2, k=2).fit_transform(iris.data, iris.target)
print(iris_chi2[:5], len(iris_chi2))
[[ 1.4 0.2]
[ 1.4 0.2]
[ 1.3 0.2]
[ 1.5 0.2]
[ 1.4 0.2]] 150
2.1.4互信息法
from numpy import array
from sklearn.feature_selection import SelectKBest
from minepy import MINE  # third-party package; the original run failed here with ImportError


# MINE's API is not functional-style; wrap it so it returns a
# (score, p-value) pair as SelectKBest expects. MINE provides no
# p-value, so a fixed 0.5 is used.
def mic(x, y):
    m = MINE()
    m.compute_score(x, y)
    return (m.mic(), 0.5)


# Keep the 2 features with the highest maximal information coefficient.
# NOTE: in Python 3, map() returns an iterator, so it must be wrapped in
# list() before building the array — the original Python-2-style
# array(map(...)) would produce a 0-d object array here.
SelectKBest(lambda X, Y: array(list(map(lambda x: mic(x, Y), X.T))).T, k=2).fit_transform(iris.data, iris.target)
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-47-807ad1fcacee> in <module>()
1 from sklearn.feature_selection import SelectKBest
----> 2 from minepy import MINE
3
4 #由于MINE的设计不是函数式的,定义mic方法将其为函数式的,返回一个二元组,二元组的第2项设置成固定的P值0.5
5 def mic(x, y):
ImportError: No module named 'minepy'
2.2Wrapper
2.2.1 递归特征消除法
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression

# Recursive feature elimination: repeatedly fit the estimator and drop
# the weakest feature until only 2 remain.
iris_pfe = RFE(estimator=LogisticRegression(), n_features_to_select=2).fit_transform(iris.data, iris.target)
print(iris_pfe[:5])
[[ 3.5 0.2]
[ 3. 0.2]
[ 3.2 0.2]
[ 3.1 0.2]
[ 3.6 0.2]]
2.3 Embedded
2.3.1 基于惩罚项的特征选择法
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression

# L1 penalty drives some coefficients to exactly zero, so the fitted model
# doubles as a feature selector. solver="liblinear" is required for
# penalty="l1" in modern scikit-learn (the default 'lbfgs' only supports l2).
iris_sfm = SelectFromModel(LogisticRegression(penalty="l1", C=0.1, solver="liblinear")).fit_transform(iris.data, iris.target)
print(iris_sfm[:5])
[[ 5.1 3.5 1.4]
[ 4.9 3. 1.4]
[ 4.7 3.2 1.3]
[ 4.6 3.1 1.5]
[ 5. 3.6 1.4]]
from sklearn.linear_model import LogisticRegression


class LR(LogisticRegression):
    """L1-penalised logistic regression with correlated-feature recovery.

    Fits an L1 model (self) and a companion L2 model. After fitting, any
    feature zeroed out by the L1 penalty whose L2 coefficient lies within
    ``threshold`` of a surviving feature's L2 coefficient is treated as
    equivalent to it, and the surviving weight is spread evenly over the
    group. This lets SelectFromModel keep correlated features the plain
    L1 fit would have discarded arbitrarily.
    """

    def __init__(self, threshold=0.01, dual=False, tol=1e-4, C=1.0,
                 fit_intercept=True, intercept_scaling=1, class_weight=None,
                 random_state=None, solver='liblinear', max_iter=100,
                 multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
        # Tolerance on the gap between L2 coefficients used to decide that
        # two features carry (nearly) the same information.
        self.threshold = threshold
        # Self is the L1 model; all other hyper-parameters pass through.
        LogisticRegression.__init__(
            self, penalty='l1', dual=dual, tol=tol, C=C,
            fit_intercept=fit_intercept, intercept_scaling=intercept_scaling,
            class_weight=class_weight, random_state=random_state,
            solver=solver, max_iter=max_iter, multi_class=multi_class,
            verbose=verbose, warm_start=warm_start, n_jobs=n_jobs)
        # Companion L2 model: its coefficients reveal features that were
        # zeroed by L1 only because they were correlated with a kept one.
        self.l2 = LogisticRegression(
            penalty='l2', dual=dual, tol=tol, C=C,
            fit_intercept=fit_intercept, intercept_scaling=intercept_scaling,
            class_weight=class_weight, random_state=random_state,
            solver=solver, max_iter=max_iter, multi_class=multi_class,
            verbose=verbose, warm_start=warm_start, n_jobs=n_jobs)

    def fit(self, X, y, sample_weight=None):
        """Fit both models, then merge coefficients of correlated features."""
        super(LR, self).fit(X, y, sample_weight=sample_weight)
        self.coef_old_ = self.coef_.copy()  # keep the raw L1 coefficients
        self.l2.fit(X, y, sample_weight=sample_weight)

        cntOfRow, cntOfCol = self.coef_.shape
        for i in range(cntOfRow):
            for j in range(cntOfCol):
                coef = self.coef_[i][j]
                # A non-zero L1 weight marks a kept feature.
                if coef != 0:
                    idx = [j]
                    coef1 = self.l2.coef_[i][j]
                    for k in range(cntOfCol):
                        coef2 = self.l2.coef_[i][k]
                        # Group L1-zeroed features whose L2 weight is close
                        # to the kept feature's L2 weight.
                        if abs(coef1 - coef2) < self.threshold and j != k and self.coef_[i][k] == 0:
                            idx.append(k)
                    # Spread the kept weight evenly across the whole group.
                    mean = coef / len(idx)
                    self.coef_[i][idx] = mean
        return self
from sklearn.feature_selection import SelectFromModel

# Use the custom LR (L1 model with correlated-feature merging) as the
# selector; ``threshold`` here is LR's grouping tolerance, not
# SelectFromModel's selection threshold.
iris_sfm2 = SelectFromModel(LR(threshold=0.5, C=0.1)).fit_transform(iris.data, iris.target)
print(iris_sfm2[:5])
[[ 5.1 3.5 1.4 0.2]
[ 4.9 3. 1.4 0.2]
[ 4.7 3.2 1.3 0.2]
[ 4.6 3.1 1.5 0.2]
[ 5. 3.6 1.4 0.2]]
2.3.2 基于树模型的特征选择法
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import GradientBoostingClassifier

# Tree ensembles expose feature_importances_ after fitting;
# SelectFromModel keeps the features above the importance threshold.
iris_sfm3 = SelectFromModel(GradientBoostingClassifier()).fit_transform(iris.data, iris.target)
print(iris_sfm3[:5])
[[ 1.4 0.2]
[ 1.4 0.2]
[ 1.3 0.2]
[ 1.5 0.2]
[ 1.4 0.2]]
4 降维
4.1 主成分分析法(PCA)
from sklearn.decomposition import PCA

# Unsupervised linear projection onto the 2 directions of maximum variance.
iris_pca = PCA(n_components=2).fit_transform(iris.data)
print(iris_pca[:5])
[[-2.68420713 0.32660731]
[-2.71539062 -0.16955685]
[-2.88981954 -0.13734561]
[-2.7464372 -0.31112432]
[-2.72859298 0.33392456]]
4.2 线性判别分析法(LDA)
# NOTE: the sklearn.lda module was removed from scikit-learn (hence the
# "No module named 'sklearn.lda'" traceback below); LDA now lives in
# sklearn.discriminant_analysis.
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA

# Supervised dimensionality reduction: project onto 2 linear discriminants
# using the class labels.
LDA(n_components=2).fit_transform(iris.data, iris.target)
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-56-21fd5d727aec> in <module>()
----> 1 from sklearn.lda import LDA
2
3 #线性判别分析法,返回降维后的数据
4 #参数n_components为降维后的维数
5 LDA(n_components=2).fit_transform(iris.data, iris.target)
ImportError: No module named 'sklearn.lda'
参考文章
1.使用sklearn做单机特征工程
2.利用scikit-learn进行FeatureSelection