Python深體驗,常見的數據處理方式(必需要懂的)

1.缺失值處理 - 拉格朗日插值法

input_file數據文件內容(存在部分缺失值):

from scipy.interpolate import lagrange
import pandas as pd
import numpy as np


input_file = './data/catering_sale.xls'
output_file = './data/sales.xls'

data = pd.read_excel(input_file)
# Sales below 400 or above 5000 are treated as outliers and blanked out.
# Use .loc instead of chained indexing (data['銷量'][mask] = ...): the
# chained form raises SettingWithCopyWarning and may not write back at all.
data.loc[(data['銷量'] < 400) | (data['銷量'] > 5000), '銷量'] = None
# 自定義列向量插值函數
# 問題:當n<k時,list(range(n-k, n))會出現負數,致使y的值出現空值,會影響最終的插值結果,這個問題還未解決。。。
def ployinterp_column(s, n, k=5):
    """Interpolate the missing value at position *n* of column *s*.

    Fits a Lagrange polynomial through up to *k* non-null neighbours on
    each side of position *n* and evaluates it at *n*.

    Parameters
    ----------
    s : pd.Series
        Column vector, assumed to carry a 0..len(s)-1 integer index.
    n : int
        Position of the value to interpolate.
    k : int, default 5
        Number of neighbours taken on each side of *n*.

    Returns
    -------
    The interpolated scalar value at position *n*.
    """
    # Clamp the neighbour window to the valid index range. This fixes the
    # known bug: when n < k, range(n-k, n) produced negative labels that
    # injected NaNs into the fit (and near the end of the series the
    # window ran past the last row).
    window = list(range(max(0, n - k), n)) + list(range(n + 1, min(len(s), n + k + 1)))
    y = s[window]
    y = y[y.notnull()]  # drop neighbours that are themselves missing
    # With only valid labels in the window, the polynomial can always be
    # evaluated at n itself; the old special case that evaluated at (k - n)
    # for leading NaNs was incorrect.
    return lagrange(y.index, list(y))(n)

# 逐個元素判斷是否須要插值
# Walk the column element by element and interpolate wherever it is missing.
# The mask is computed once: interpolation only fills values, never creates
# new NaNs, so the set of positions to fill does not change mid-loop.
missing = data['銷量'].isnull()
for j in range(len(data)):
    if missing[j]:
        # .loc avoids the chained-assignment pattern data['銷量'][j] = ...,
        # which raises SettingWithCopyWarning and may not persist the write.
        data.loc[j, '銷量'] = ployinterp_column(data['銷量'], j)

data.to_excel(output_file)

 

output_file結果:

 

 

# np.where()
a = pd.Series([np.nan, 2.5, np.nan, 3.5, 4.5, np.nan], index=['f', 'e', 'd', 'c', 'b', 'a'])
b = pd.Series(np.arange(len(a), dtype=np.float64), index=['f', 'e', 'd', 'c', 'b', 'a'])

# Where a is missing, take the value at the same position from b;
# otherwise keep the original element of a.
print(np.where(pd.isnull(a), b, a))

# result
[ 0.   2.5  2.   3.5  4.5  5. ]

 

# df.combine_first()
df1 = pd.DataFrame({'a': [1., np.nan, 5., np.nan],
                 'b': [np.nan, 2., np.nan, 6.],
                 'c': range(2, 18, 4)})
df2 = pd.DataFrame({'a': [5., 4., np.nan, 3., 7.],
                 'b': [np.nan, 3., 4., 6., 8.]})

# Fill the holes in df1 with the values at the same label in df2;
# existing (non-null) df1 values are kept as-is.
df1.combine_first(df2)

# result
     a    b     c
0  1.0  NaN   2.0
1  4.0  2.0   6.0
2  5.0  4.0  10.0
3  3.0  6.0  14.0
4  7.0  8.0   NaN

 

# 異常值處理
data = pd.DataFrame(np.random.randn(1000, 4))
print(data.describe())
# result
                 0            1            2            3
count  1000.000000  1000.000000  1000.000000  1000.000000
mean     -0.012809     0.007609    -0.002442     0.027889
std       1.026971     0.985884     0.999810     1.006344
min      -3.174895    -2.970125    -3.011063    -3.440525
25%      -0.723649    -0.657574    -0.642299    -0.647432
50%      -0.019972     0.021018    -0.015020     0.012603
75%       0.707184     0.678987     0.674781     0.707672
max       3.076159     3.890196     2.869127     3.089114


col = data[3]
# Values whose absolute magnitude exceeds 3 are treated as outliers.
col[np.abs(col) > 3]
# Rows containing at least one outlier. axis=1 is spelled out explicitly:
# the bare positional form .any(1) is deprecated and removed in modern pandas.
data[(np.abs(data) > 3).any(axis=1)]
# Clip outliers to +/-3 while preserving sign (np.sign yields +1/-1/0).
data[np.abs(data) > 3] = np.sign(data) * 3
print(data.describe())
# result
                 0            1            2            3
count  1000.000000  1000.000000  1000.000000  1000.000000
mean     -0.012763     0.006719    -0.002428     0.028545
std       1.026062     0.982772     0.999768     1.003687
min      -3.000000    -2.970125    -3.000000    -3.000000
25%      -0.723649    -0.657574    -0.642299    -0.647432
50%      -0.019972     0.021018    -0.015020     0.012603
75%       0.707184     0.678987     0.674781     0.707672
max       3.000000     3.000000     2.869127     3.000000

 

2.數據合併:

 

# pd.merge()
# Join multiple DataFrame objects on columns or indexes, in the style of
# a database join.
df1 = pd.DataFrame({'key': ['b', 'b', 'a', 'c', 'a', 'a', 'b'], 'data1': range(7)})
df2 =  pd.DataFrame({'key': ['a', 'b', 'd'], 'data2': range(3)})

print(pd.merge(df1, df2))  # join column inferred automatically; inner join by default
print(pd.merge(df1, df2, on='key'))    # same join, with the key column made explicit

# result

 data1 key data2
0 0 b 1
1 1 b 1
2 6 b 1
3 2 a 0
4 4 a 0
5 5 a 0

df3 = pd.DataFrame({'lkey': ['b', 'b', 'a', 'c', 'a', 'a', 'b'], 'data1': range(7)})
df4 = pd.DataFrame({'rkey': ['a', 'b', 'd'], 'data2': range(3)})
print(pd.merge(df3, df4, left_on='lkey', right_on='rkey'))    # 當不存在相同column時,須要分別指定鏈接列名
# result

data1 lkey data2 rkey
0 0 b 1 b
1 1 b 1 b
2 6 b 1 b
3 2 a 0 a
4 4 a 0 a
5 5 a 0 a

 

## 指定鏈接方式
# 外鏈接
print(pd.merge(df1, df2, how='outer'))

# result

   data1 key  data2
0    0.0   b    1.0
1    1.0   b    1.0
2    6.0   b    1.0
3    2.0   a    0.0
4    4.0   a    0.0
5    5.0   a    0.0
6    3.0   c    NaN
7    NaN   d    2.0

 

# Left join: keep every row of df1, matching df2 rows where possible.
df1 = pd.DataFrame({'key': ['b', 'b', 'a', 'c', 'a', 'b'], 'data1': range(6)})
df2 = pd.DataFrame({'key': ['a', 'b', 'a', 'b', 'd'] ,'data2': range(5)})

print(pd.merge(df1, df2, how='left'))

# result

    data1 key  data2
0       0   b    1.0
1       0   b    3.0
2       1   b    1.0
3       1   b    3.0
4       2   a    0.0
5       2   a    2.0
6       3   c    NaN
7       4   a    0.0
8       4   a    2.0
9       5   b    1.0
10      5   b    3.0

 

# Merge on multiple key columns at once.
left = pd.DataFrame({'key1': ['foo', 'foo', 'bar'],
                  'key2': ['one', 'two', 'one'],
                  'lval': [1, 2, 3]})
right = pd.DataFrame({'key1': ['foo', 'foo', 'bar', 'bar'],
                   'key2': ['one', 'one', 'one', 'two'],
                   'rval': [4, 5, 6, 7]})

print(pd.merge(left, right, on=['key1', 'key2']))  # inner join by default

# result
  key1 key2  lval  rval
0  foo  one     1     4
1  foo  one     1     5
2  bar  one     3     6


print(pd.merge(left, right, on=['key1', 'key2'], how='outer')) # 外鏈接

# result
  key1 key2  lval  rval
0  foo  one   1.0   4.0
1  foo  one   1.0   5.0
2  foo  two   2.0   NaN
3  bar  one   3.0   6.0
4  bar  two   NaN   7.0

 

# 只以其中一個列鏈接,會出現冗餘列
pd.merge(left, right, on='key1')

# result
  key1 key2_x  lval key2_y  rval
0  foo    one     1    one     4
1  foo    one     1    one     5
2  foo    two     2    one     4
3  foo    two     2    one     5
4  bar    one     3    one     6
5  bar    one     3    two     7


print(pd.merge(left, right, on='key1', suffixes=('_left', '_right')))  # 給冗餘列增長後綴

# result
  key1 key2_left  lval key2_right  rval
0  foo       one     1        one     4
1  foo       one     1        one     5
2  foo       two     2        one     4
3  foo       two     2        one     5
4  bar       one     3        one     6
5  bar       one     3        two     7

 

# 使用索引與列進行合併
left1 = pd.DataFrame({'key': ['a', 'b', 'a', 'a', 'b', 'c'],'value': range(6)})
right1 = pd.DataFrame({'group_val': [3.5, 7]}, index=['a', 'b'])

print(pd.merge(left1, right1, left_on='key', right_index=True))    # left1使用key列鏈接,right1使用index列鏈接

# result
  key  value  group_val
0   a      0        3.5
2   a      2        3.5
3   a      3        3.5
1   b      1        7.0
4   b      4        7.0

 

# 多列索引鏈接
lefth = pd.DataFrame({'key1': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],
                   'key2': [2000, 2001, 2002, 2001, 2002],
                   'data': np.arange(5.)})
righth = pd.DataFrame(np.arange(12).reshape((6, 2)),
                   index=[['Nevada', 'Nevada', 'Ohio', 'Ohio', 'Ohio', 'Ohio'],
                          [2001, 2000, 2000, 2000, 2001, 2002]],
                   columns=['event1', 'event2'])

print(pd.merge(lefth, righth, left_on=['key1', 'key2'], right_index=True))

# result

data key1 key2 event1 event2
0 0.0 Ohio 2000 4 5
0 0.0 Ohio 2000 6 7
1 1.0 Ohio 2001 8 9
2 2.0 Ohio 2002 10 11
3 3.0 Nevada 2001 0 1

 

# pd.join()
# pd.join()可使用index或key合併兩個及以上的DataFrame(列方向上的合併)

left2 = pd.DataFrame([[1., 2.], [3., 4.], [5., 6.]], index=['a', 'c', 'e'],
                 columns=['Ohio', 'Nevada'])
right2 = pd.DataFrame([[7., 8.], [9., 10.], [11., 12.], [13, 14]],
                   index=['b', 'c', 'd', 'e'], columns=['Missouri', 'Alabama'])

print(left2.join(right2, how='outer'))

# result

   Ohio  Nevada  Missouri  Alabama
a   1.0     2.0       NaN      NaN
b   NaN     NaN       7.0      8.0
c   3.0     4.0       9.0     10.0
d   NaN     NaN      11.0     12.0
e   5.0     6.0      13.0     14.0

 

# 合併多個DataFrame
another = pd.DataFrame([[7., 8.], [9., 10.], [11., 12.], [16., 17.]],
                    index=['a', 'c', 'e', 'f'], columns=['New York', 'Oregon'])

left2.join([right2, another], how='outer')

# result
   Ohio  Nevada  Missouri  Alabama  New York  Oregon
a   1.0     2.0       NaN      NaN       7.0     8.0
b   NaN     NaN       7.0      8.0       NaN     NaN
c   3.0     4.0       9.0     10.0       9.0    10.0
d   NaN     NaN      11.0     12.0       NaN     NaN
e   5.0     6.0      13.0     14.0      11.0    12.0
f   NaN     NaN       NaN      NaN      16.0    17.0

 

# 軸向鏈接# np.concatenate()
arr = np.arange(12).reshape((3,4))
print(np.concatenate([arr, arr], axis=1))  # 在column方向上鍊接

# result

array([[ 0,  1,  2, ...,  1,  2,  3],
       [ 4,  5,  6, ...,  5,  6,  7],
       [ 8,  9, 10, ...,  9, 10, 11]])

 

# pd.concat()
s1 = pd.Series([0,1], index=['a', 'b'])
s2 = pd.Series([2, 3, 4], index=['c', 'd', 'e'])
s3 = pd.Series([5, 6], index=['f', 'g'])

print(pd.concat([s1, s2, s3]))    # axis defaults to 0: stack along rows
# result
a    0
b    1
c    2
d    3
e    4
f    5
g    6
dtype: int64

print(pd.concat([s1, s2, s3], axis=1)) # column方向合併,值若是不存在則記爲NaN
# result
     0    1    2
a  0.0  NaN  NaN
b  1.0  NaN  NaN
c  NaN  2.0  NaN
d  NaN  3.0  NaN
e  NaN  4.0  NaN
f  NaN  NaN  5.0
g  NaN  NaN  6.0


s4 = pd.concat([s1 * 5, s3])
s5 = pd.concat([s1, s4], axis=1)
s5.columns = ['s1', 's4']
print(s5)

# result
    s1  s4
a  0.0   0
b  1.0   5
f  NaN   5
g  NaN   6

print(pd.concat([s1, s4], axis=1, join='inner'))   # join參數指定鏈接方式
# result
   0  1
a  0  0
b  1  5

print(pd.concat([s1, s4], axis=1, join_axes=[['a', 'c', 'b', 'e']]))    # 手動指定要鏈接的index  
# result
     0    1
a  0.0  0.0
c  NaN  NaN
b  1.0  5.0
e  NaN  NaN

 

# 使用keys參數對索引進行分級
result = pd.concat([s1, s2, s3], keys=['one', 'two', 'three'])  # 在row方向合併時,keys對應每一個Series的一級index,每一個Series原有的index則做爲二級index

print(result)

# result
one    a    0
       b    1
two    c    2
       d    3
       e    4
three  f    5
       g    6
dtype: int64

 

# Series.unstack() 將Seris格式轉換爲DataFrame格式
print(result.unstack()) # 一級索引將做爲index,二級索引做爲columns

# result
         a    b    c    d    e    f    g
one    0.0  1.0  NaN  NaN  NaN  NaN  NaN
two    NaN  NaN  2.0  3.0  4.0  NaN  NaN
three  NaN  NaN  NaN  NaN  NaN  5.0  6.0

 

# 在列合併時使用keys參數指定column名稱
print(pd.concat([s1, s2, s3], axis=1, keys=['one', 'two', 'three']))   # 在column方向合併時,keys對應每一個合併的Series的column

# result
   one  two  three
a  0.0  NaN    NaN
b  1.0  NaN    NaN
c  NaN  2.0    NaN
d  NaN  3.0    NaN
e  NaN  4.0    NaN
f  NaN  NaN    5.0
g  NaN  NaN    6.0

 

# 指定分級column
df1 = pd.DataFrame(np.arange(6).reshape(3, 2), index=['a', 'b', 'c'], columns=['one', 'two'])
df2 = pd.DataFrame(5 + np.arange(4).reshape(2, 2), index=['a', 'c'], columns=['three', 'four'])

# 由於DataFrame對象已經有了column,因此keys參數會設置新的一級column, df原有的column則做爲二級column
df3 = pd.concat([df1, df2], axis=1, keys=['level1', 'level2'])
print(df3)
print(df3.columns)
# result
  level1     level2     
     one two  three four
a      0   1    5.0  6.0
b      2   3    NaN  NaN
c      4   5    7.0  8.0

MultiIndex(levels=[['level1', 'level2'], ['four', 'one', 'three', 'two']],
           labels=[[0, 0, 1, 1], [1, 3, 2, 0]])

# 使用字典實現相同的功能
print(pd.concat({'level1': df1, 'level2': df2}, axis=1))
#result
  level1     level2     
     one two  three four
a      0   1    5.0  6.0
b      2   3    NaN  NaN
c      4   5    7.0  8.0

# 指定分級column名稱
df = pd.concat([df1, df2], axis=1, keys=['level1', 'level2'], names=['levels', 'number'])
print(df)
print(df.columns)

# result
levels level1     level2     
number    one two  three four
a           0   1    5.0  6.0
b           2   3    NaN  NaN
c           4   5    7.0  8.0

MultiIndex(levels=[['level1', 'level2'], ['four', 'one', 'three', 'two']],
           labels=[[0, 0, 1, 1], [1, 3, 2, 0]],
           names=['levels', 'number'])

 

# ignore_index
df1 = pd.DataFrame(np.random.randn(3, 4), columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.random.randn(2, 3), columns=['b', 'd', 'a'])

# row方向忽略索引
print(pd.concat([df1, df2], ignore_index=True))
# result
          a         b         c         d
0  1.261208  0.022188 -2.489475 -1.098245
1  0.618618 -1.179827  1.475738  0.334444
2 -0.319088 -0.153492  0.029245  0.336055
3 -0.999023 -0.502154       NaN  0.722256
4  1.428007 -0.726810       NaN  0.432440

# column方向忽略列名
print(pd.concat([df1, df2], axis=1, ignore_index=True))
# result
          0         1         2         3         4         5         6
0  1.261208  0.022188 -2.489475 -1.098245 -0.502154  0.722256 -0.999023
1  0.618618 -1.179827  1.475738  0.334444 -0.726810  0.432440  1.428007
2 -0.319088 -0.153492  0.029245  0.336055       NaN       NaN       NaN

3.重塑層次化索引

data = pd.DataFrame(np.arange(6).reshape((2, 3)),
                    index=pd.Index(['Ohio', 'Colorado'], name='state'),
                    columns=pd.Index(['one', 'two', 'three'], name='number'))

# 軸向旋轉
result = data.stack()
print(result)
# result
state     number
Ohio      one       0
          two       1
          three     2
Colorado  one       3
          two       4
          three     5

# 還原操做
print(result.unstack())
# result
number    one  two  three
state                    
Ohio        0    1      2
Colorado    3    4      5

# 行列轉置
print(result.unstack(0))
# result
state   Ohio  Colorado
number                
one        0         3
two        1         4
three      2         5

# 指定要轉置的索引名
print(result.unstack('number'))
# result
number    one  two  three
state                    
Ohio        0    1      2
Colorado    3    4      5

 

# 例1:
s1 = pd.Series([0, 1, 2, 3], index=['a', 'b', 'c', 'd'])
s2 =  pd.Series([4, 5, 6], index=['c', 'd', 'e'])
data2 = pd.concat([s1, s2], keys=['one', 'two'])

print(data2.unstack())
# result
       a    b    c    d    e
one  0.0  1.0  2.0  3.0  NaN
two  NaN  NaN  4.0  5.0  6.0

print(data2.unstack().stack())
# result
one  a    0.0
     b    1.0
     c    2.0
     d    3.0
two  c    4.0
     d    5.0
     e    6.0
dtype: float64

# 不dropnan值
print(data2.unstack().stack(dropna=False))
# result
one  a    0.0
     b    1.0
     c    2.0
     d    3.0
     e    NaN
two  a    NaN
     b    NaN
     c    4.0
     d    5.0
     e    6.0
dtype: float64

 

# 例2:
df = pd.DataFrame({'left': result, 'right': result + 5},
                  columns=pd.Index(['left', 'right'], name='side'))

print(df.unstack('state'))
# result
side   left          right         
state  Ohio Colorado  Ohio Colorado
number                             
one       0        3     5        8
two       1        4     6        9
three     2        5     7       10

print(df.unstack('state').stack('side'))
# result
state         Colorado  Ohio
number side                 
one    left          3     0
       right         8     5
two    left          4     1
       right         9     6
three  left          5     2
       right        10     7

 

4.長寬格式的轉換:

所謂長格式,即相關屬性都集中在同一個列中,另有一個VALUE列對應相應的屬性值;

而寬格式, 就是各個屬性自成一列,不須要單獨的VALUE列。

# 導入寬格式數據
data = pd.read_csv('./data/macrodata.csv')
# pd.PeriodIndex 用來存放表示週期性日期的數組,數組元素是不可更改的。例如:年、季度、月、天等。
periods = pd.PeriodIndex(year=data.year, quarter=data.quarter, name='date')
data = pd.DataFrame(data.to_records(),  # to_records() 將DF轉換成numpy record數組
                    columns=pd.Index(['realgdp', 'infl', 'unemp'], name='item'),
                    index=periods.to_timestamp('D', 'end'))

print(data.head())
# result
item         realgdp  infl  unemp
date                             
1959-03-31  2710.349  0.00    5.8
1959-06-30  2778.801  2.34    5.1
1959-09-30  2775.488  2.74    5.3
1959-12-31  2785.204  0.27    5.6
1960-03-31  2847.699  2.31    5.2

 

# 將寬格式轉換爲長格式
# 軸向旋轉 -> 重置索引 -> rename列名
long_data = data.stack().reset_index().rename(columns={0: 'value'})

print(long_data.head())
# result
        date     item     value
0 1959-03-31  realgdp  2710.349
1 1959-03-31     infl     0.000
2 1959-03-31    unemp     5.800
3 1959-06-30  realgdp  2778.801
4 1959-06-30     infl     2.340

 

# 將長格式轉換爲寬格式
""" pd.pivot()
基於index/column的值從新調整DataFrame的座標軸。不支持數據聚合,重複值會致使重複記錄
語法格式: df.pivot(index(optional), columns, values) """

wide_data = long_data.pivot('date', 'item', 'value')

print(wide_data.head())
# result
item        infl   realgdp  unemp
date                             
1959-03-31  0.00  2710.349    5.8
1959-06-30  2.34  2778.801    5.1
1959-09-30  2.74  2775.488    5.3
1959-12-31  0.27  2785.204    5.6
1960-03-31  2.31  2847.699    5.2

 

# 增長一列value2
long_data['value2'] = np.random.rand(len(long_data))

print(long_data.head())
# result
        date     item     value    value2
0 1959-03-31  realgdp  2710.349  0.155924
1 1959-03-31     infl     0.000  0.340776
2 1959-03-31    unemp     5.800  0.615475
3 1959-06-30  realgdp  2778.801  0.417256
4 1959-06-30     infl     2.340  0.845293


# 轉換時若是不指定values,會將剩餘的列都做爲values列
pivoted = long_data.pivot('date', 'item')   # data爲index,item爲columns

print(pivoted.head())
# result
           value                    value2                    
item        infl   realgdp unemp      infl   realgdp     unemp
date                                                          
1959-03-31  0.00  2710.349   5.8  0.340776  0.155924  0.615475
1959-06-30  2.34  2778.801   5.1  0.845293  0.417256  0.825615
1959-09-30  2.74  2775.488   5.3  0.413700  0.512401  0.874806
1959-12-31  0.27  2785.204   5.6  0.081047  0.358632  0.790962
1960-03-31  2.31  2847.699   5.2  0.833500  0.395999  0.329820

5. 刪除重複數據:

data = pd.DataFrame({'k1': ['one'] * 3 + ['two'] * 4,
                     'k2': [1, 1, 2, 3, 3, 4, 4]})

print(data)
# result
    k1  k2
0  one   1
1  one   1
2  one   2
3  two   3
4  two   3
5  two   4
6  two   4


# 判斷當前行與前一行是否相同
print(data.duplicated())
# result
0    False
1     True
2    False
3    False
4     True
5    False
6     True
dtype: bool

# drop重複行
print(data.drop_duplicates())
# result
    k1  k2
0  one   1
2  one   2
3  two   3
5  two   4

 

# 新增v1列
data['v1'] = range(7)

# 只以k1列爲標準刪除重複行
print(data.drop_duplicates(['k1']))
# result
    k1  k2  v1
0  one   1   0
3  two   3   3

# 以k1,k2爲準,而且取最後一行的值
print(data.drop_duplicates(['k1', 'k2'], keep='last'))
# result
    k1  k2  v1
1  one   1   1
2  one   2   2
4  two   3   4
6  two   4   6

6.利用函數及映射進行轉換

# 使用字典映射進行轉換
data = pd.DataFrame({'food': ['bacon', 'pulled pork', 'bacon', 'Pastrami',
                           'corned beef', 'Bacon', 'pastrami', 'honey ham',
                           'nova lox'],
                     'ounces': [4, 3, 12, 6, 7.5, 8, 3, 5, 6]})
print(data)
# result
          food  ounces
0        bacon     4.0
1  pulled pork     3.0
2        bacon    12.0
3     Pastrami     6.0
4  corned beef     7.5
5        Bacon     8.0
6     pastrami     3.0
7    honey ham     5.0
8     nova lox     6.0

# Lookup table: food name (lower-case) -> source animal.
meat_to_animal = {
  'bacon': 'pig',
  'pulled pork': 'pig',
  'pastrami': 'cow',
  'corned beef': 'cow',
  'honey ham': 'pig',
  'nova lox': 'salmon'
  }

# Lower-case first so 'Bacon' / 'Pastrami' also match the lowercase keys.
data['animal'] = data['food'].map(str.lower).map(meat_to_animal)
print(data)
# result
          food  ounces  animal
0        bacon     4.0     pig
1  pulled pork     3.0     pig
2        bacon    12.0     pig
3     Pastrami     6.0     cow
4  corned beef     7.5     cow
5        Bacon     8.0     pig
6     pastrami     3.0     cow
7    honey ham     5.0     pig
8     nova lox     6.0  salmon

 

# 使用lambda匿名函數進行轉換

data['animal2'] = data.food.map(lambda x:meat_to_animal[x.lower()])

print(data)
# result
          food  ounces  animal animal2
0        bacon     4.0     pig     pig
1  pulled pork     3.0     pig     pig
2        bacon    12.0     pig     pig
3     Pastrami     6.0     cow     cow
4  corned beef     7.5     cow     cow
5        Bacon     8.0     pig     pig
6     pastrami     3.0     cow     cow
7    honey ham     5.0     pig     pig
8     nova lox     6.0  salmon  salmon

 

7.數據標準化

有時候因爲量綱(數據單位)不一致,致使數據的差別很大,沒法進行比較,須要進行數據標準化,將數據進行必定範圍的壓縮,以便進行數據比對等後續操做。

datafile = './data/normalization_data.xls'
data = pd.read_excel(datafile, header=None)
print(data)
# result
     0    1    2     3
0   78  521  602  2863
1  144 -600 -521  2245
2   95 -457  468 -1283
3   69  596  695  1054
4  190  527  691  2051
5  101  403  470  2487
6  146  413  435  2571


# 最小-最大規範化
data1 = (data - data.min()) / (data.max() - data.min())
print(data1)
# result
          0         1         2         3
0  0.074380  0.937291  0.923520  1.000000
1  0.619835  0.000000  0.000000  0.850941
2  0.214876  0.119565  0.813322  0.000000
3  0.000000  1.000000  1.000000  0.563676
4  1.000000  0.942308  0.996711  0.804149
5  0.264463  0.838629  0.814967  0.909310
6  0.636364  0.846990  0.786184  0.929571

# Z-score standardisation: zero mean and unit (sample) standard deviation.
data2 = (data - data.mean()) / data.std()
print(data2)
# result
          0         1         2         3
0 -0.905383  0.635863  0.464531  0.798149
1  0.604678 -1.587675 -2.193167  0.369390
2 -0.516428 -1.304030  0.147406 -2.078279
3 -1.111301  0.784628  0.684625 -0.456906
4  1.657146  0.647765  0.675159  0.234796
5 -0.379150  0.401807  0.152139  0.537286
6  0.650438  0.421642  0.069308  0.595564

# Decimal scaling: divide each column by the smallest power of 10 that
# brings its largest absolute value into [-1, 1]; np.ceil rounds up.
data3 = data/10**np.ceil(np.log10(data.abs().max()))
print(data3)
# result
       0      1      2       3
0  0.078  0.521  0.602  0.2863
1  0.144 -0.600 -0.521  0.2245
2  0.095 -0.457  0.468 -0.1283
3  0.069  0.596  0.695  0.1054
4  0.190  0.527  0.691  0.2051
5  0.101  0.403  0.470  0.2487
6  0.146  0.413  0.435  0.2571

 

8.replace替換

data = pd.Series([1., -999., 2., -999., -1000., 3.])
print(data)
# result
0       1.0
1    -999.0
2       2.0
3    -999.0
4   -1000.0
5       3.0
dtype: float64

# 基本替換方式
print(data.replace(-999, np.nan))
# result
0       1.0
1       NaN
2       2.0
3       NaN
4   -1000.0
5       3.0
dtype: float64

# 使用列表分別替換對應位置的元素
print(data.replace([-999, -1000], [np.nan, 0]))
# result
0    1.0
1    NaN
2    2.0
3    NaN
4    0.0
5    3.0
dtype: float64

# 使用字典進行更明確的替換
print(data.replace({-999: np.nan, -1000: 0}))
# result
0    1.0
1    NaN
2    2.0
3    NaN
4    0.0
5    3.0
dtype: float64

 

9.重命名軸索引:

data = pd.DataFrame(np.arange(12).reshape((3, 4)),
                 index=['Ohio', 'Colorado', 'New York'],
                 columns=['one', 'two', 'three', 'four'])
data.index = data.index.map(str.upper)

print(data)
# result
          one  two  three  four
OHIO        0    1      2     3
COLORADO    4    5      6     7
NEW YORK    8    9     10    11

# 重命名索引及列名
print(data.rename(index=str.title, columns=str.upper))
# result
          ONE  TWO  THREE  FOUR
Ohio        0    1      2     3
Colorado    4    5      6     7
New York    8    9     10    11

# 使用字典映射新索引及新列名
print(data.rename(index={'OHIO': 'INDIANA'}, columns={'three': 'peekaboo'}))
# result
          one  two  peekaboo  four
INDIANA     0    1         2     3
COLORADO    4    5         6     7
NEW YORK    8    9        10    11

 

10.數據離散化與面元劃分

ages = [20, 22, 25, 27, 21, 23, 37, 31, 61, 45, 41, 32]
bins = [18, 25, 35, 60, 100]

# Bin each age into the half-open intervals defined by bins:
# (18, 25], (25, 35], (35, 60], (60, 100].
cats = pd.cut(ages, bins)
print(cats)
# result
[(18, 25], (18, 25], (18, 25], (25, 35], (18, 25], ..., (25, 35], (60, 100], (35, 60], (35, 60], (25, 35]]
Length: 12
Categories (4, interval[int64]): [(18, 25] < (25, 35] < (35, 60] < (60, 100]]

# 查看元素屬於哪一個區間
print(cats.labels)    # python2用法
print(cats.codes)     # python3用法
# result
[0 0 0 ..., 2 2 1]

# 統計元素分佈狀況
print(pd.value_counts(cats))
# result
(18, 25]     5
(35, 60]     3
(25, 35]     3
(60, 100]    1
dtype: int64

 

# 默認的區間訪問爲左開右閉,指定right=False後,變成左閉右開
print(pd.cut(ages, [18, 26, 36, 61, 100], right=False))
# result
[[18, 26), [18, 26), [18, 26), [26, 36), [18, 26), ..., [26, 36), [61, 100), [36, 61), [36, 61), [26, 36)]
Length: 12
Categories (4, interval[int64]): [[18, 26) < [26, 36) < [36, 61) < [61, 100)]

# 手動設置標籤,用來替換默認的區間
group_names = ['Youth', 'YoungAdult', 'MiddleAged', 'Senior']
cat2 = pd.cut(ages, bins, labels=group_names)   
print(cat2.value_counts())
# result
MiddleAged    3
Senior        1
YoungAdult    3
Youth         5
dtype: int64

 

# 指定區間的劃分精度
data = np.random.rand(20)
print(pd.cut(data, 4, precision=2))
# result
[(0.054, 0.27], (0.71, 0.93], (0.27, 0.49], (0.27, 0.49], (0.054, 0.27], ..., (0.71, 0.93], (0.71, 0.93], (0.71, 0.93], (0.054, 0.27], (0.71, 0.93]]
Length: 20
Categories (4, interval[float64]): [(0.054, 0.27] < (0.27, 0.49] < (0.49, 0.71] < (0.71, 0.93]]

 

# 自定義分位點
print(pd.qcut(data, [0, 0.1, 0.5, 0.9, 1]))
# result
[(0.0953, 0.431], (0.893, 0.929], (0.431, 0.893], (0.0953, 0.431], (0.0953, 0.431], ..., (0.431, 0.893], (0.431, 0.893], (0.431, 0.893], (0.0536, 0.0953], (0.431, 0.893]]
Length: 20
Categories (4, interval[float64]): [(0.0536, 0.0953] < (0.0953, 0.431] < (0.431, 0.893] < (0.893, 0.929]]

 

11.排列與隨機採樣

 

# np.random.permutation()
df = pd.DataFrame(np.arange(5 * 4).reshape((5, 4)))
print(df)
# result
    0   1   2   3
0   0   1   2   3
1   4   5   6   7
2   8   9  10  11
3  12  13  14  15
4  16  17  18  19

# 隨機取5個數組成一個排列
sampler = np.random.permutation(5)
print(sampler)
# result
[0 1 2 4 3]

# 按照排列獲取df中的數據
print(df.take(sampler))
# result
    0   1   2   3
0   0   1   2   3
1   4   5   6   7
2   8   9  10  11
4  16  17  18  19
3  12  13  14  15

# 只取排列中的後三行數據
print(df.take(np.random.permutation(len(df))[:3]))
# result
    0   1   2   3
1   4   5   6   7
4  16  17  18  19
0   0   1   2   3

 

# np.random.randint()
bag = np.array([5, 7, -1, 6, 4])

# 從0到5中隨機取10個數
sampler = np.random.randint(0, len(bag), size=10)
print(sampler)
# result
[4 0 0 3 3 4 3 0 1 1]

# 將sampler做爲索引值,獲取bag的對應元素
draws = bag.take(sampler)
print(draws)
print(bag[sampler])  # 簡化寫法,可得一樣結果
# result
[4 5 5 6 6 4 6 5 7 7]

 

12.啞向量的使用

啞向量一般用來表示一組彼此間相互獨立的屬性,也稱爲因子。將他們的關係用只有0和1的向量表示,就叫作啞向量。

 

# 對某列取啞向量
df = pd.DataFrame({'key': ['b', 'b', 'a', 'c', 'a', 'b'], 'data1': range(6), 'data2': [1, 3, 5, 7, 9, 11]})
print(pd.get_dummies(df['key']))
# result
   a  b  c
0  0  1  0
1  0  1  0
2  1  0  0
3  0  0  1
4  1  0  0
5  0  1  0

print(pd.get_dummies(df['data2']))
# result
   1   3   5   7   9   11
0   1   0   0   0   0   0
1   0   1   0   0   0   0
2   0   0   1   0   0   0
3   0   0   0   1   0   0
4   0   0   0   0   1   0
5   0   0   0   0   0   1

 

# 對列名加前綴
dummies = pd.get_dummies(df['key'], prefix='key')
# 將啞向量與df[data1]鏈接在一塊兒
df_with_dummy = df[['data1']].join(dummies)
print(df_with_dummy)
# result
   data1  key_a  key_b  key_c
0      0      0      1      0
1      1      0      1      0
2      2      1      0      0
3      3      0      0      1
4      4      1      0      0
5      5      0      1      0

 

# 啞向量例子
# 讀入影評數據
# Dummy-variable example: load the MovieLens movies table.
# The column names must be defined BEFORE read_table references them
# (the original snippet used mnames prior to its definition).
mnames = ['movie_id', 'title', 'genres']
# engine='python' is required: the default C parser does not support the
# multi-character separator '::'.
movies = pd.read_table('./data/movies.dat', sep='::', header=None,
                       names=mnames, engine='python')

# Collect every genre that appears: split each pipe-delimited string into
# a set, then union all the sets, de-duplicate and sort.
genre_iter = (set(x.split('|')) for x in movies.genres)
genres = sorted(set.union(*genre_iter))

# Build the dummy matrix: start from all zeros, then flip the matching
# genre columns to 1 row by row.
dummies = pd.DataFrame(np.zeros((len(movies), len(genres))), columns=genres)
for i, gen in enumerate(movies.genres):
    dummies.loc[i, gen.split('|')] = 1   # mark this movie's genres

# Join the prefixed dummy-variable frame back onto the original DataFrame.
movies_windic = movies.join(dummies.add_prefix('Genre_'))
# Inspect the first row (returned as a Series).
print(movies_windic.iloc[0])
# result
movie_id                                       1
title                           Toy Story (1995)
genres               Animation|Children's|Comedy
Genre_Action                                   0
Genre_Adventure                                0
Genre_Animation                                1
Genre_Children's                               1
Genre_Comedy                                   1
Genre_Crime                                    0
Genre_Documentary                              0
Genre_Drama                                    0
Genre_Fantasy                                  0
Genre_Film-Noir                                0
Genre_Horror                                   0
Genre_Musical                                  0
Genre_Mystery                                  0
Genre_Romance                                  0
Genre_Sci-Fi                                   0
Genre_Thriller                                 0
Genre_War                                      0
Genre_Western                                  0
Name: 0, dtype: object

 

# 使用pd.cut()進行分類,而後轉換成啞向量
values = np.random.rand(10)
bins = [0, 0.2, 0.4, 0.6, 0.8, 1]
pd.get_dummies(pd.cut(values, bins))
# result
   (0.0, 0.2]  (0.2, 0.4]  (0.4, 0.6]  (0.6, 0.8]  (0.8, 1.0]
0           1           0           0           0           0
1           0           0           0           1           0
2           0           0           0           0           1
3           0           0           1           0           0
4           1           0           0           0           0
5           0           0           0           1           0
6           0           0           1           0           0
7           0           0           0           0           1
8           0           1           0           0           0
9           0           1           0           0           0
相關文章
相關標籤/搜索