sklearn Machine Learning: Pipelines and Composite Estimators - FeatureUnion
A detailed guide to FeatureUnion (combined features) in sklearn
1. Introduction to FeatureUnion
FeatureUnion is a tool in scikit-learn for combining the outputs of multiple feature extractors in parallel. It lets you **concatenate** the results of different feature extraction methods (text vectorization, numeric feature scaling, custom feature engineering, and so on) column-wise into one larger feature matrix.
Core ideas:
- Parallel processing: each feature extractor processes the raw data independently.
- Feature concatenation: the outputs of all extractors are concatenated along the feature (column) axis, as illustrated in the sketch right after this list.
- Unified interface: the union exposes the same fit, transform, and fit_transform interface as a single transformer.
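To make the concatenation idea concrete, here is a minimal sketch (my addition, using the iris data as a stand-in): the union's output has as many columns as its parts combined and, under these assumptions, matches a plain column-wise np.hstack of the individual transforms.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler

X, _ = load_iris(return_X_y=True)

# FeatureUnion fits PCA and StandardScaler on the same input
union = FeatureUnion([('pca', PCA(n_components=2)), ('scaler', StandardScaler())])
X_union = union.fit_transform(X)

# Fitting the parts separately and stacking their outputs column-wise
# should give the same matrix: 2 PCA columns + 4 scaled columns = 6
X_manual = np.hstack([PCA(n_components=2).fit_transform(X),
                      StandardScaler().fit_transform(X)])

print(X_union.shape, X_manual.shape)   # (150, 6) (150, 6)
print(np.allclose(X_union, X_manual))  # expected: True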
Typical use cases:
- Using several feature extraction methods at once (e.g. TF-IDF + bag-of-words + custom statistical features).
- Handling heterogeneous data (e.g. text + numeric + categorical features).
- Building complex feature engineering pipelines.
2. Basic FeatureUnion Syntax
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.preprocessing import StandardScaler

# Create several feature extractors
tfidf = TfidfVectorizer()
bow = CountVectorizer()
scaler = StandardScaler()

# Combine them with FeatureUnion
combined_features = FeatureUnion([
    ('tfidf', tfidf),
    ('bow', bow),
    # ('scaler', scaler)  # note: a scaler is meant for numeric features and cannot be applied directly to text
])

# Usage
X_transformed = combined_features.fit_transform(X)
Note: all feature extractors in a FeatureUnion must accept the same input data format (e.g. all text, or all numeric arrays).
3. Complete Code Examples
Example 1: Combining text features (TF-IDF + bag-of-words)
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report

# Load data
categories = ['alt.atheism', 'soc.religion.christian']
newsgroups = fetch_20newsgroups(subset='all', categories=categories, remove=('headers', 'footers', 'quotes'))
X, y = newsgroups.data, newsgroups.target

# Train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Build the FeatureUnion
feature_union = FeatureUnion([
    ('tfidf', TfidfVectorizer(max_features=1000, stop_words='english')),
    ('bow', CountVectorizer(max_features=500, ngram_range=(1, 2), stop_words='english'))
])

# Fit the feature extractors
X_train_features = feature_union.fit_transform(X_train)
X_test_features = feature_union.transform(X_test)

print(f"Training feature matrix shape: {X_train_features.shape}")  # (n_samples, 1500) = 1000 (TF-IDF) + 500 (BOW)
print(f"Test feature matrix shape: {X_test_features.shape}")

# Train a classifier (max_iter raised to avoid convergence warnings on the sparse text features)
clf = LogisticRegression(random_state=42, max_iter=1000)
clf.fit(X_train_features, y_train)

# Predict
y_pred = clf.predict(X_test_features)
print("\nClassification report:")
print(classification_report(y_test, y_pred, target_names=newsgroups.target_names))
Example 2: Combining numeric features (PCA + original features)
from sklearn.pipeline import FeatureUnion
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import numpy as np

# Load data
iris = load_iris()
X, y = iris.data, iris.target

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Build the FeatureUnion
feature_union = FeatureUnion([
    ('pca', PCA(n_components=2)),     # reduce to 2 principal components
    ('scaler', StandardScaler())      # standardize the original features
])

# Fit and transform
X_train_combined = feature_union.fit_transform(X_train)
X_test_combined = feature_union.transform(X_test)

print(f"Original number of features: {X_train.shape[1]}")           # 4
print(f"Combined number of features: {X_train_combined.shape[1]}")  # 6 = 2 (PCA) + 4 (standardized)

# Train a classifier
clf = RandomForestClassifier(n_estimators=100, random_state=42)
clf.fit(X_train_combined, y_train)

# Evaluate
accuracy = clf.score(X_test_combined, y_test)
print(f"\nTest set accuracy: {accuracy:.4f}")
Example 3: Custom feature extractor + built-in extractor
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np

class TextStatsExtractor(BaseEstimator, TransformerMixin):
    """Custom extractor for simple text statistics."""

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        # Extract statistics such as text length, word count, uppercase ratio, etc.
        features = []
        for text in X:
            length = len(text)
            word_count = len(text.split())
            uppercase_ratio = sum(1 for c in text if c.isupper()) / (len(text) + 1e-8)
            exclamation_count = text.count('!')
            question_count = text.count('?')
            features.append([length, word_count, uppercase_ratio, exclamation_count, question_count])
        return np.array(features)

# Example data
texts = [
    "This is a GREAT product!!!",
    "I hate this item... it's terrible.",
    "Average quality, nothing special.",
    "AMAZING!!! Best purchase ever!!!",
    "Not bad, but could be better?"
]

# Build the FeatureUnion
feature_union = FeatureUnion([
    ('tfidf', TfidfVectorizer(max_features=50, stop_words='english')),
    ('stats', TextStatsExtractor())
])

# Transform the data
X_combined = feature_union.fit_transform(texts)

print("Feature names:")
feature_names = []
# TF-IDF feature names
tfidf_names = feature_union.transformer_list[0][1].get_feature_names_out()
feature_names.extend([f"tfidf_{name}" for name in tfidf_names])
# Names of the statistical features
stat_names = ['length', 'word_count', 'uppercase_ratio', 'exclamation_count', 'question_count']
feature_names.extend(stat_names)

print(f"Total number of features: {X_combined.shape[1]}")
print("First 5 feature names:", feature_names[:5])
print("Last 5 feature names:", feature_names[-5:])

print("\nTransformed feature matrix:")
print(X_combined.toarray())
4. Advanced Usage
4.1 Setting weights (transformer_weights)
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer

# Assign different weights to the feature extractors
feature_union = FeatureUnion([
    ('tfidf', TfidfVectorizer(max_features=100)),
    ('bow', CountVectorizer(max_features=100))
], transformer_weights={
    'tfidf': 1.0,
    'bow': 0.5   # the bag-of-words features are multiplied by 0.5
})

# The weights are applied after transform (`texts` from Example 3)
X_weighted = feature_union.fit_transform(texts)
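A quick way to see the effect of the weights (a small check I am adding here, assuming `texts` from Example 3 is still in scope): the 'bow' block of the weighted output should equal 0.5 times the raw CountVectorizer output.

import numpy as np

# The first block of columns comes from 'tfidf' (weight 1.0),
# the remaining columns from 'bow' (weight 0.5).
n_tfidf = feature_union.transformer_list[0][1].transform(texts).shape[1]
bow_raw = feature_union.transformer_list[1][1].transform(texts).toarray()

print(np.allclose(X_weighted.toarray()[:, n_tfidf:], 0.5 * bow_raw))  # expected: True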
4.2 Combining with Pipeline
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.datasets import fetch_20newsgroups

# Build a more complex pipeline
pipeline = Pipeline([
    ('features', FeatureUnion([
        ('tfidf', TfidfVectorizer(max_features=1000)),
        ('stats', TextStatsExtractor())   # assumes the class from Example 3 is defined
    ])),
    ('scaler', StandardScaler(with_mean=False)),  # with_mean=True is not allowed on sparse matrices
    ('classifier', SVC(kernel='linear'))
])

# Use the pipeline
newsgroups = fetch_20newsgroups(subset='train', categories=['alt.atheism', 'soc.religion.christian'])
pipeline.fit(newsgroups.data, newsgroups.target)
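Because the union lives inside a Pipeline, nested parameters can be tuned with the usual step__name__param naming. A minimal sketch (my addition; the grid values are illustrative only):

from sklearn.model_selection import GridSearchCV

# 'features' is the Pipeline step holding the FeatureUnion,
# 'tfidf' the transformer inside it, 'max_features' its parameter.
param_grid = {
    'features__tfidf__max_features': [500, 1000],
    'classifier__C': [0.1, 1.0, 10.0],
}

grid = GridSearchCV(pipeline, param_grid, cv=3)
grid.fit(newsgroups.data, newsgroups.target)
print(grid.best_params_)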
4.3 The make_union shortcut
from sklearn.pipeline import make_union
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest

# Shorthand syntax: transformer names are generated automatically
union = make_union(
    PCA(n_components=2),
    SelectKBest(k=3),
    n_jobs=1   # number of parallel jobs
)
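As a quick usage check (my addition, using the iris data as a stand-in): the auto-generated names are simply the lower-cased class names, and SelectKBest needs the target y when fitting.

from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)

# SelectKBest requires y, which the union forwards to each transformer
X_combined = union.fit_transform(X, y)
print(X_combined.shape)                              # expected: (150, 5) = 2 PCA + 3 selected
print([name for name, _ in union.transformer_list])  # expected: ['pca', 'selectkbest']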
5. Important Caveats
5.1 Input data consistency
# WRONG: mixing transformers that expect different kinds of input
feature_union = FeatureUnion([
    ('tfidf', TfidfVectorizer()),   # expects raw text
    ('scaler', StandardScaler())    # expects numeric arrays - this will fail!
])
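One way to make this kind of mixed setup work (a sketch pointing ahead to Section 7, with a hypothetical DataFrame layout) is to route each transformer only to the columns it can actually handle:

import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler

# Hypothetical data with one text column and one numeric column
df = pd.DataFrame({
    'review': ['great phone', 'battery died fast', 'works as expected'],
    'price': [699.0, 199.0, 349.0],
})

# Each transformer only sees the column(s) it understands
preprocessor = ColumnTransformer([
    ('tfidf', TfidfVectorizer(), 'review'),    # a single column name for text
    ('scaler', StandardScaler(), ['price']),   # a list of columns for numeric data
])

X_mixed = preprocessor.fit_transform(df)
print(X_mixed.shape)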
5.2 Handling sparse matrices
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler

# TF-IDF produces a sparse matrix
tfidf = TfidfVectorizer()
X_sparse = tfidf.fit_transform(texts)

# StandardScaler cannot center a sparse matrix directly
# scaler = StandardScaler()  # would raise an error on X_sparse!

# Solution: use with_mean=False
scaler = StandardScaler(with_mean=False)  # works with sparse matrices
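An alternative worth considering (my addition, not in the original): MaxAbsScaler is designed for sparse data and preserves sparsity, because it only divides each feature by its maximum absolute value and never centers.

from sklearn.preprocessing import MaxAbsScaler

# Scales each feature into [-1, 1] without centering,
# so the sparse structure of X_sparse is preserved.
scaler = MaxAbsScaler()
X_scaled = scaler.fit_transform(X_sparse)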
5.3 Parallel processing (n_jobs)
feature_union = FeatureUnion([
    ('tfidf1', TfidfVectorizer(max_features=1000)),
    ('tfidf2', TfidfVectorizer(max_features=1000, ngram_range=(2, 2)))
], n_jobs=-1)  # fit the transformers in parallel on all CPU cores
6. Debugging and Inspection
6.1 Inspecting each component's output shape
feature_union = FeatureUnion([
    ('tfidf', TfidfVectorizer(max_features=500)),
    ('bow', CountVectorizer(max_features=300))
])

X_combined = feature_union.fit_transform(texts)

# Inspect the output of each fitted component
for name, transformer in feature_union.transformer_list:
    X_part = transformer.transform(texts)
    print(f"{name}: {X_part.shape}")

print(f"Combined: {X_combined.shape}")
6.2 Retrieving feature names
def get_feature_names(feature_union):
    """Collect all feature names from a fitted FeatureUnion."""
    feature_names = []
    for name, transformer in feature_union.transformer_list:
        if hasattr(transformer, 'get_feature_names_out'):
            names = transformer.get_feature_names_out()
        elif hasattr(transformer, 'get_feature_names'):
            names = transformer.get_feature_names()
        else:
            # Custom transformers may need manually defined names
            n_features = transformer.transform([texts[0]]).shape[1]
            names = [f"{name}_feature_{i}" for i in range(n_features)]
        feature_names.extend([f"{name}__{n}" for n in names])
    return feature_names

# Usage
feature_names = get_feature_names(feature_union)
print(f"Total number of features: {len(feature_names)}")
print("First 10 feature names:", feature_names[:10])
7. Alternative: ColumnTransformer
For heterogeneous data (different columns with different types), ColumnTransformer is the recommended choice:
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd

# Build an example DataFrame
df = pd.DataFrame({
    'text': ['good product', 'bad service', 'excellent quality'],
    'price': [100, 200, 150],
    'category': ['A', 'B', 'A']
})

# Use ColumnTransformer to process each column with a different transformer
preprocessor = ColumnTransformer([
    ('text', TfidfVectorizer(), 'text'),
    ('num', StandardScaler(), ['price']),
    ('cat', OneHotEncoder(), ['category'])
])

X_transformed = preprocessor.fit_transform(df)
print(f"Transformed shape: {X_transformed.shape}")
8. Summary
Advantages of FeatureUnion:
- ✅ Simplifies combining multiple feature extractors
- ✅ Provides a unified interface
- ✅ Supports parallel processing
- ✅ Integrates seamlessly with Pipeline
Recommendations:
- Use it when you need to combine several feature extraction methods on data of the same type
- For heterogeneous data, prefer ColumnTransformer
- Mind how sparse matrices are handled
- Set transformer weights deliberately
- Use make_union to keep the code concise
Best practice:
# Recommended end-to-end workflow
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import cross_val_score

# 1. Define the feature combination
features = FeatureUnion([
    ('tfidf', TfidfVectorizer(max_features=1000)),
    ('custom', CustomFeatureExtractor())   # your own transformer
])

# 2. Build the full pipeline
pipeline = Pipeline([
    ('features', features),
    ('classifier', LogisticRegression())
])

# 3. Evaluate with cross-validation
scores = cross_val_score(pipeline, X, y, cv=5)
print(f"Mean accuracy: {scores.mean():.4f} (+/- {scores.std() * 2:.4f})")
Used well, FeatureUnion lets you build powerful feature engineering systems and can noticeably improve the performance of your machine learning models.