Feature Engineering
Processing Continuous Values
0. Binarizer / Binarization
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.ml.feature import Binarizer
spark = SparkSession\
    .builder\
    .appName("BinarizerExample")\
    .getOrCreate()
# Create a DataFrame
continuousDataFrame = spark.createDataFrame([
    (0, 1.1),
    (1, 8.5),
    (2, 5.2)
], ["id", "feature"])
# Binarize the continuous feature column at the given threshold:
# values strictly greater than the threshold map to 1.0, all others to 0.0.
binarizer = Binarizer(threshold=5.1, inputCol="feature", outputCol="binarized_feature")
binarizedDataFrame = binarizer.transform(continuousDataFrame)
print("Binarizer output with Threshold = %f" % binarizer.getThreshold())
binarizedDataFrame.show()
spark.stop()
Binarizer output with Threshold = 5.100000
+---+-------+-----------------+
| id|feature|binarized_feature|
+---+-------+-----------------+
| 0| 1.1| 0.0|
| 1| 8.5| 1.0|
| 2| 5.2| 1.0|
+---+-------+-----------------+
1. Bucketizer / Discretization by Given Boundaries
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.ml.feature import Bucketizer
spark = SparkSession\
    .builder\
    .appName("BucketizerExample")\
    .getOrCreate()
splits = [-float("inf"), -0.5, 0.0, 0.5, float("inf")]
data = [(-999.9,), (-0.5,), (-0.3,), (0.0,), (0.2,), (999.9,)]
dataFrame = spark.createDataFrame(data, ["features"])
# Map a column of continuous features to a column of feature buckets.
bucketizer = Bucketizer(splits=splits, inputCol="features", outputCol="bucketedFeatures")
# Bucket the values according to the given boundaries
bucketedData = bucketizer.transform(dataFrame)
print("Bucketizer output with %d buckets" % (len(bucketizer.getSplits())-1))
bucketedData.show()
spark.stop()
Bucketizer output with 4 buckets
+--------+----------------+
|features|bucketedFeatures|
+--------+----------------+
| -999.9| 0.0|
| -0.5| 1.0|
| -0.3| 1.0|
| 0.0| 2.0|
| 0.2| 2.0|
| 999.9| 3.0|
+--------+----------------+
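Values outside the split range (or NaN) raise an error by default. If such values are expected, Bucketizer's handleInvalid parameter controls the behaviour; a minimal sketch (keepBucketizer is an illustrative name, with splits that deliberately do not cover the whole real line):
# "keep" places invalid values in an extra bucket; "skip" drops those rows
keepBucketizer = Bucketizer(splits=[0.0, 0.5, 1.0], inputCol="features",
                            outputCol="bucketedFeatures", handleInvalid="keep")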
2. QuantileDiscretizer / Discretization by Quantiles
from __future__ import print_function
from pyspark.ml.feature import QuantileDiscretizer
from pyspark.sql import SparkSession
spark = SparkSession\
    .builder\
    .appName("QuantileDiscretizerExample")\
    .getOrCreate()
data = [(0, 18.0), (1, 19.0), (2, 8.0), (3, 5.0), (4, 2.2), (5, 9.2), (6, 14.4)]
df = spark.createDataFrame(data, ["id", "hour"])
df = df.repartition(1)
# Discretize into 3 buckets
discretizer = QuantileDiscretizer(numBuckets=3, inputCol="hour", outputCol="result")
result = discretizer.fit(df).transform(df)
result.show()
spark.stop()
+---+----+------+
| id|hour|result|
+---+----+------+
| 0|18.0| 2.0|
| 1|19.0| 2.0|
| 2| 8.0| 1.0|
| 3| 5.0| 0.0|
| 4| 2.2| 0.0|
| 5| 9.2| 1.0|
| 6|14.4| 2.0|
+---+----+------+
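The bucket boundaries come from approximate quantiles; the relativeError parameter trades precision for speed. A minimal sketch of a variant forcing exact quantiles (exactDiscretizer is an illustrative name):
exactDiscretizer = QuantileDiscretizer(numBuckets=3, inputCol="hour", outputCol="result",
                                       relativeError=0.0)  # exact quantiles, at higher cost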
3. MaxAbsScaler / Scaling by Maximum Absolute Value
from __future__ import print_function
from pyspark.ml.feature import MaxAbsScaler
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
spark = SparkSession\
    .builder\
    .appName("MaxAbsScalerExample")\
    .getOrCreate()
dataFrame = spark.createDataFrame([
    (0, Vectors.dense([1.0, 0.1, -8.0]),),
    (1, Vectors.dense([2.0, 1.0, -4.0]),),
    (2, Vectors.dense([4.0, 10.0, 8.0]),)
], ["id", "features"])
# Rescale each feature individually to the range [-1, 1] by dividing by
# each feature's maximum absolute value.
scaler = MaxAbsScaler(inputCol="features", outputCol="scaledFeatures")
# Compute the maximum absolute value of each feature, for scaling
scalerModel = scaler.fit(dataFrame)
# Rescale to within [-1, 1]
scaledData = scalerModel.transform(dataFrame)
scaledData.select("features", "scaledFeatures").show()
spark.stop()
+--------------+----------------+
| features| scaledFeatures|
+--------------+----------------+
|[1.0,0.1,-8.0]|[0.25,0.01,-1.0]|
|[2.0,1.0,-4.0]| [0.5,0.1,-0.5]|
|[4.0,10.0,8.0]| [1.0,1.0,1.0]|
+--------------+----------------+
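MaxAbsScaler only divides by the maximum absolute value, so it preserves sparsity. For true min-max scaling to a fixed range, Spark also provides MinMaxScaler; a minimal sketch on the same dataFrame (mmScaler is an illustrative name):
from pyspark.ml.feature import MinMaxScaler
# Rescale each feature to [0, 1] using the column-wise min and max
mmScaler = MinMaxScaler(inputCol="features", outputCol="scaledFeatures")
mmModel = mmScaler.fit(dataFrame)
mmModel.transform(dataFrame).show()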
4. StandardScaler / Standardization
from __future__ import print_function
from pyspark.ml.feature import StandardScaler
from pyspark.sql import SparkSession
spark = SparkSession\
    .builder\
    .appName("StandardScalerExample")\
    .getOrCreate()
dataFrame = spark.read.format("libsvm").load("sample_libsvm_data.txt")
# Standardize features using column summary statistics of the training
# samples: remove the mean and scale to unit variance.
scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures", withStd=True, withMean=False)
# Compute the mean, variance, and other summary statistics
scalerModel = scaler.fit(dataFrame)
# Standardize
scaledData = scalerModel.transform(dataFrame)
scaledData.show()
spark.stop()
+-----+--------------------+--------------------+
|label| features| scaledFeatures|
+-----+--------------------+--------------------+
| 0.0|(692,[127,128,129...|(692,[127,128,129...|
| 1.0|(692,[158,159,160...|(692,[158,159,160...|
| 1.0|(692,[124,125,126...|(692,[124,125,126...|
| 1.0|(692,[152,153,154...|(692,[152,153,154...|
| 1.0|(692,[151,152,153...|(692,[151,152,153...|
| 0.0|(692,[129,130,131...|(692,[129,130,131...|
| 1.0|(692,[158,159,160...|(692,[158,159,160...|
| 1.0|(692,[99,100,101,...|(692,[99,100,101,...|
| 0.0|(692,[154,155,156...|(692,[154,155,156...|
| 0.0|(692,[127,128,129...|(692,[127,128,129...|
| 1.0|(692,[154,155,156...|(692,[154,155,156...|
| 0.0|(692,[153,154,155...|(692,[153,154,155...|
| 0.0|(692,[151,152,153...|(692,[151,152,153...|
| 1.0|(692,[129,130,131...|(692,[129,130,131...|
| 0.0|(692,[154,155,156...|(692,[154,155,156...|
| 1.0|(692,[150,151,152...|(692,[150,151,152...|
| 0.0|(692,[124,125,126...|(692,[124,125,126...|
| 0.0|(692,[152,153,154...|(692,[152,153,154...|
| 1.0|(692,[97,98,99,12...|(692,[97,98,99,12...|
| 1.0|(692,[124,125,126...|(692,[124,125,126...|
+-----+--------------------+--------------------+
only showing top 20 rows
from __future__ import print_function
from pyspark.ml.feature import StandardScaler
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
spark = SparkSession\
    .builder\
    .appName("StandardScalerExample")\
    .getOrCreate()
dataFrame = spark.createDataFrame([
    (0, Vectors.dense([1.0, 0.1, -8.0]),),
    (1, Vectors.dense([2.0, 1.0, -4.0]),),
    (2, Vectors.dense([4.0, 10.0, 8.0]),)
], ["id", "features"])
# Scale to unit standard deviation, as in the previous example
scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures", withStd=True, withMean=False)
# Compute the mean, variance, and other summary statistics
scalerModel = scaler.fit(dataFrame)
# Standardize
scaledData = scalerModel.transform(dataFrame)
scaledData.show()
spark.stop()
+---+--------------+--------------------+
| id| features| scaledFeatures|
+---+--------------+--------------------+
| 0|[1.0,0.1,-8.0]|[0.65465367070797...|
| 1|[2.0,1.0,-4.0]|[1.30930734141595...|
| 2|[4.0,10.0,8.0]|[2.61861468283190...|
+---+--------------+--------------------+
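Because the vectors here are dense, the mean can be removed as well; a minimal sketch (withMean=True densifies sparse input, which is why it defaults to False):
scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures",
                        withStd=True, withMean=True)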
5. PolynomialExpansion / Adding Polynomial Features
from __future__ import print_function
from pyspark.ml.feature import PolynomialExpansion
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
spark = SparkSession\
    .builder\
    .appName("PolynomialExpansionExample")\
    .getOrCreate()
df = spark.createDataFrame([
    (Vectors.dense([2.0, 1.0]),),
    (Vectors.dense([0.0, 0.0]),),
    (Vectors.dense([3.0, -1.0]),)
], ["features"])
# Expand the features into a polynomial space.
polyExpansion = PolynomialExpansion(degree=3, inputCol="features", outputCol="polyFeatures")
polyDF = polyExpansion.transform(df)
polyDF.show(truncate=False)
spark.stop()
+----------+------------------------------------------+
|features |polyFeatures |
+----------+------------------------------------------+
|[2.0,1.0] |[2.0,4.0,8.0,1.0,2.0,4.0,1.0,2.0,1.0] |
|[0.0,0.0] |[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0] |
|[3.0,-1.0]|[3.0,9.0,27.0,-1.0,-3.0,-9.0,1.0,3.0,-1.0]|
+----------+------------------------------------------+
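For a two-feature vector (x, y), degree=3 expansion produces, in this order, (x, x^2, x^3, y, x*y, x^2*y, y^2, x*y^2, y^3); for the first row [2.0, 1.0] this yields exactly the nine values shown above.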
Processing Categorical Values
0. OneHotEncoder / One-Hot Encoding
from __future__ import print_function
from pyspark.ml.feature import OneHotEncoder, StringIndexer
from pyspark.sql import SparkSession
spark = SparkSession\
    .builder\
    .appName("OneHotEncoderExample")\
    .getOrCreate()
df = spark.createDataFrame([
    (0, "a"),
    (1, "b"),
    (2, "c"),
    (3, "a"),
    (4, "a"),
    (5, "c")
], ["id", "category"])
# Convert the categorical values into category indices
stringIndexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
model = stringIndexer.fit(df)
indexed = model.transform(df)
# One-hot encode the category indices (by default the last category is
# dropped, which is why "b" maps to the all-zero vector below)
encoder = OneHotEncoder(inputCol="categoryIndex", outputCol="categoryVec")
encoded = encoder.transform(indexed)
encoded.show()
spark.stop()
+---+--------+-------------+-------------+
| id|category|categoryIndex| categoryVec|
+---+--------+-------------+-------------+
| 0| a| 0.0|(2,[0],[1.0])|
| 1| b| 2.0| (2,[],[])|
| 2| c| 1.0|(2,[1],[1.0])|
| 3| a| 0.0|(2,[0],[1.0])|
| 4| a| 0.0|(2,[0],[1.0])|
| 5| c| 1.0|(2,[1],[1.0])|
+---+--------+-------------+-------------+
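The snippet above uses the Spark 2.x API, where OneHotEncoder is a plain transformer. In Spark 3.x it became an estimator that must be fitted and takes list-valued column parameters; a minimal sketch:
encoder = OneHotEncoder(inputCols=["categoryIndex"], outputCols=["categoryVec"])
encoded = encoder.fit(indexed).transform(indexed)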
Processing Text
0. StopWordsRemover / Stop-Word Removal
from __future__ import print_function
from pyspark.ml.feature import StopWordsRemover
from pyspark.sql import SparkSession
spark = SparkSession\
    .builder\
    .appName("StopWordsRemoverExample")\
    .getOrCreate()
sentenceData = spark.createDataFrame([
    (0, ["I", "saw", "the", "red", "balloon"]),
    (1, ["Mary", "had", "a", "little", "lamb"])
], ["id", "raw"])
# Remove stop words (uses the default English stop-word list)
remover = StopWordsRemover(inputCol="raw", outputCol="filtered")
remover.transform(sentenceData).show(truncate=False)
spark.stop()
+---+----------------------------+--------------------+
|id |raw |filtered |
+---+----------------------------+--------------------+
|0 |[I, saw, the, red, balloon] |[saw, red, balloon] |
|1 |[Mary, had, a, little, lamb]|[Mary, little, lamb]|
+---+----------------------------+--------------------+
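The default list can be replaced or extended through the stopWords parameter; a minimal sketch adding a custom word (customRemover and customStopWords are illustrative names):
customStopWords = StopWordsRemover.loadDefaultStopWords("english") + ["saw"]
customRemover = StopWordsRemover(inputCol="raw", outputCol="filtered",
                                 stopWords=customStopWords)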
1. Tokenizer / Tokenization
from __future__ import print_function
from pyspark.ml.feature import Tokenizer, RegexTokenizer
from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType
from pyspark.sql import SparkSession
spark = SparkSession\
    .builder\
    .appName("TokenizerExample")\
    .getOrCreate()
sentenceDataFrame = spark.createDataFrame([
    (0, "Hi I heard about Spark"),
    (1, "I wish Java could use case classes"),
    (2, "Logistic,regression,models,are,neat")
], ["id", "sentence"])
# Tokenize on whitespace
tokenizer = Tokenizer(inputCol="sentence", outputCol="words")
# Split the text using the supplied regular expression pattern
regexTokenizer = RegexTokenizer(inputCol="sentence", outputCol="words", pattern="\\W")
countTokens = udf(lambda words: len(words), IntegerType())
tokenized = tokenizer.transform(sentenceDataFrame)
tokenized.select("sentence", "words").withColumn("tokens", countTokens(col("words"))).show(truncate=False)
regexTokenized = regexTokenizer.transform(sentenceDataFrame)
regexTokenized.select("sentence", "words").withColumn("tokens", countTokens(col("words"))).show(truncate=False)
spark.stop()
+-----------------------------------+------------------------------------------+------+
|sentence |words |tokens|
+-----------------------------------+------------------------------------------+------+
|Hi I heard about Spark |[hi, i, heard, about, spark] |5 |
|I wish Java could use case classes |[i, wish, java, could, use, case, classes]|7 |
|Logistic,regression,models,are,neat|[logistic,regression,models,are,neat] |1 |
+-----------------------------------+------------------------------------------+------+
+-----------------------------------+------------------------------------------+------+
|sentence |words |tokens|
+-----------------------------------+------------------------------------------+------+
|Hi I heard about Spark |[hi, i, heard, about, spark] |5 |
|I wish Java could use case classes |[i, wish, java, could, use, case, classes]|7 |
|Logistic,regression,models,are,neat|[logistic, regression, models, are, neat] |5 |
+-----------------------------------+------------------------------------------+------+
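By default RegexTokenizer treats the pattern as a separator (gap) matcher. Setting gaps=False makes the pattern match the tokens themselves instead; a minimal sketch that keeps runs of word characters (wordMatcher is an illustrative name):
wordMatcher = RegexTokenizer(inputCol="sentence", outputCol="words",
                             pattern="\\w+", gaps=False)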
2. CountVectorizer / Count Vectorization
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.ml.feature import CountVectorizer
spark = SparkSession\
    .builder\
    .appName("CountVectorizerExample")\
    .getOrCreate()
df = spark.createDataFrame([
    (0, "a b c".split(" ")),
    (1, "a b b c a".split(" "))
], ["id", "words"])
# Extract a vocabulary from the document collection and produce a CountVectorizerModel
cv = CountVectorizer(inputCol="words", outputCol="features", vocabSize=3, minDF=2.0)
model = cv.fit(df)
result = model.transform(df)
result.show(truncate=False)
spark.stop()
+---+---------------+-------------------------+
|id |words |features |
+---+---------------+-------------------------+
|0 |[a, b, c] |(3,[0,1,2],[1.0,1.0,1.0])|
|1 |[a, b, b, c, a]|(3,[0,1,2],[2.0,2.0,1.0])|
+---+---------------+-------------------------+
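The fitted CountVectorizerModel exposes the learned vocabulary; index i of the feature vector counts occurrences of model.vocabulary[i]:
print(model.vocabulary)  # e.g. ['a', 'b', 'c'], ordered by corpus term frequency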
3. TF-IDF Weighting
from __future__ import print_function
from pyspark.ml.feature import HashingTF, IDF, Tokenizer
from pyspark.sql import SparkSession
spark = SparkSession\
    .builder\
    .appName("TfIdfExample")\
    .getOrCreate()
sentenceData = spark.createDataFrame([
    (0.0, "Hi I heard about Spark"),
    (0.0, "I wish Java could use case classes"),
    (1.0, "Logistic regression models are neat")
], ["label", "sentence"])
# Tokenize
tokenizer = Tokenizer(inputCol="sentence", outputCol="words")
wordsData = tokenizer.transform(sentenceData)
# Map each sequence of terms to its term frequencies using the hashing trick.
hashingTF = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=20)
featurizedData = hashingTF.transform(wordsData)
# Compute the inverse document frequency (IDF) over the document collection.
idf = IDF(inputCol="rawFeatures", outputCol="features")
idfModel = idf.fit(featurizedData)
rescaledData = idfModel.transform(featurizedData)
rescaledData.select("label", "features").show()
spark.stop()
+-----+--------------------+
|label| features|
+-----+--------------------+
| 0.0|(20,[0,5,9,17],[0...|
| 0.0|(20,[2,7,9,13,15]...|
| 1.0|(20,[4,6,13,15,18...|
+-----+--------------------+
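numFeatures=20 is kept small here only so the vectors stay readable; with so few hash buckets, distinct terms can collide. In practice a power of two such as the default 2^18 (262,144) is typical; a minimal sketch:
hashingTF = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=1 << 18)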
4. NGram / n-grams
from __future__ import print_function
from pyspark.ml.feature import NGram
from pyspark.sql import SparkSession
spark = SparkSession\
    .builder\
    .appName("NGramExample")\
    .getOrCreate()
wordDataFrame = spark.createDataFrame([
    (0, ["Hi", "I", "heard", "about", "Spark"]),
    (1, ["I", "wish", "Java", "could", "use", "case", "classes"]),
    (2, ["Logistic", "regression", "models", "are", "neat"])
], ["id", "words"])
# A feature transformer that converts an input array of strings into an array of n-grams.
ngram = NGram(n=2, inputCol="words", outputCol="ngrams")
ngramDataFrame = ngram.transform(wordDataFrame)
ngramDataFrame.select("ngrams").show(truncate=False)
spark.stop()
+------------------------------------------------------------------+
|ngrams |
+------------------------------------------------------------------+
|[Hi I, I heard, heard about, about Spark] |
|[I wish, wish Java, Java could, could use, use case, case classes]|
|[Logistic regression, regression models, models are, are neat] |
+------------------------------------------------------------------+
Advanced Transformations
0. SQLTransformer / SQL Transformations
from __future__ import print_function
from pyspark.ml.feature import SQLTransformer
from pyspark.sql import SparkSession
spark = SparkSession\
    .builder\
    .appName("SQLTransformerExample")\
    .getOrCreate()
df = spark.createDataFrame([
    (0, 1.0, 3.0),
    (2, 2.0, 5.0)
], ["id", "v1", "v2"])
# Apply a transformation defined by a SQL statement; __THIS__ refers to the input DataFrame.
sqlTrans = SQLTransformer(statement="SELECT *, (v1 + v2) AS v3, (v1 * v2) AS v4 FROM __THIS__")
sqlTrans.transform(df).show()
spark.stop()
+---+---+---+---+----+
| id| v1| v2| v3| v4|
+---+---+---+---+----+
| 0|1.0|3.0|4.0| 3.0|
| 2|2.0|5.0|7.0|10.0|
+---+---+---+---+----+
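Any Spark SQL statement that selects from the __THIS__ placeholder works, including filters; a minimal sketch that drops rows instead of adding columns:
sqlTrans = SQLTransformer(statement="SELECT * FROM __THIS__ WHERE v1 > 1.0")
sqlTrans.transform(df).show()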
1. RFormula / R Formula Transformations
from __future__ import print_function
from pyspark.ml.feature import RFormula
from pyspark.sql import SparkSession
spark = SparkSession\
    .builder\
    .appName("RFormulaExample")\
    .getOrCreate()
dataset = spark.createDataFrame(
    [(7, "US", 18, 1.0),
     (8, "CA", 12, 0.0),
     (9, "NZ", 15, 0.0)],
    ["id", "country", "hour", "clicked"])
# Apply the transformations needed to fit the dataset to an R model formula:
# string columns are indexed and one-hot encoded, numeric columns pass through.
formula = RFormula(
    formula="clicked ~ country + hour",
    featuresCol="features",
    labelCol="label")
output = formula.fit(dataset).transform(dataset)
output.select("features", "label").show()
spark.stop()
+--------------+-----+
| features|label|
+--------------+-----+
|[0.0,0.0,18.0]| 1.0|
|[0.0,1.0,12.0]| 0.0|
|[1.0,0.0,15.0]| 0.0|
+--------------+-----+
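Here country is string-indexed and one-hot encoded with one reference level dropped (hence two of the three feature slots), the numeric hour passes through, and clicked is copied to label. Other R formula operators are also supported, e.g. '.' for all remaining columns and '-' for exclusion; a minimal sketch:
formula = RFormula(formula="clicked ~ . - id",
                   featuresCol="features", labelCol="label")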