################## pd / tf usage tips ##################
Python functions all have to live inside a package/module before they can be imported...
Python's `with` is roughly the equivalent of Go's `defer`.
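A minimal sketch of the analogy (my own illustration using contextlib, not from the original notes):
from contextlib import contextmanager

@contextmanager
def managed(path):
    f = open(path, "w")
    try:
        yield f        # the with-block body runs here
    finally:
        f.close()      # always runs on exit, like a deferred call in Go

with managed("./tmp.txt") as f:
    f.write("hello")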
Python modules must not import each other circularly: if test1 imports test2 and test2 imports test1, the calls will fail.
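A hypothetical two-module layout that triggers the problem (module names are illustrative, not from the notes):
# test1.py
import test2              # starts loading test2 ...
def f1():
    return test2.g2()

# test2.py
import test1              # ... which re-enters the still-initializing test1
def g2():
    return test1.f1()     # fails if used before test1 finishes loading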
## List/dict checks, IO, exception handling ##
# Checking whether a list or dict is empty:
if l == []:   # more idiomatic: if not l:
if m == {}:   # more idiomatic: if not m:
# Check whether a key exists in a dict:
if 'key' in test.keys():   # .keys() is redundant: if 'key' in test:
# Ordered/default dicts (OrderedDict is the analogue of Java's LinkedHashMap)
from collections import OrderedDict, defaultdict
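A quick usage sketch (my own illustration):
od = OrderedDict()
od["b"] = 1
od["a"] = 2
print(list(od.keys()))   # ['b', 'a'] -- preserves insertion order

dd = defaultdict(int)    # missing keys default to int() == 0
for ch in "aab":
    dd[ch] += 1
print(dict(dd))          # {'a': 2, 'b': 1}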
# IO
with open("./sql.txt", 'w') as fw:
    l = map(lambda x: x + "\n", l)
    fw.writelines(list(l))
with open("./sql.txt", 'r') as fr:
    lines = fr.readlines()
    print(lines)
os.system(ex)  # run a shell command (requires import os)
# Get the current date
datetime.today().strftime('%Y%m%d')  # from datetime import datetime
# Build a file path:
path = os.getcwd()
file_path = os.path.join(path, 'prod.cfg')
# String slicing: just use []
if line.find("#") != -1:
    line = line[0:line.find('#')]  # strip a trailing comment
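An equivalent one-liner (my own phrasing), splitting once and keeping the head:
line = line.split('#', 1)[0]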
# Exception handling:
try:
    pass  # work that may raise
except Exception as e:
    raise e  # a bare `raise` also works and preserves the traceback
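A fuller skeleton for reference (my addition; names are illustrative):
def read_config(path):
    try:
        with open(path) as f:
            return f.read()
    except FileNotFoundError as e:
        print("missing config:", e)
        raise                      # re-raise without resetting the traceback
    finally:
        print("attempted:", path)  # runs whether or not an exception occurred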
## Anonymous functions ##
# map sorted filter
from typing import Any, Tuple, Iterator
# map
l = [1, 2, 3, 4, 5]
t1: Iterator[Tuple[Any, int]] = map(lambda x: (x, 1), l)
# sorted
m = {"a": 1, "b": 0}
t = sorted(m.items(), key=lambda d: d[1], reverse=False)  # sort by value, ascending
# filter
f = list(filter(lambda x: x.find('b') == -1, m))  # keep dict keys that do not contain 'b'
## Enumeration, iteration, deletion ##
# for enumerate
some_string = "wtf"
some_dict = {}
for i, some_dict[i] in enumerate(some_string):  # the loop target can be a dict item
    pass
print(some_dict)  # {0: 'w', 1: 't', 2: 'f'}
# for zip
index = [1, 2, 3]
words = ['a', 'b', 'c']
for i, w in zip(index, words):
    pass
# Reverse a list
for i in reversed(index):
    pass
for i in index[::-1]:
    pass
# all / any: test whether all / any values in a list match a condition
r = any(i != 1 for i in index)
print(r)  # True
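The `all` counterpart, for completeness (my addition):
r2 = all(i >= 1 for i in index)
print(r2)  # True: every element of [1, 2, 3] is >= 1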
## Deleting from a list while iterating requires a copy ##
list_3 = [1, 2, 3, 4]
for idx, item in enumerate(list_3[:]):  # iterate over a slice copy
    list_3.remove(item)
print(list_3)  # []
list_3 = [1, 2, 3, 4]
list_temp = list_3.copy()  # an explicit copy works the same way
for idx, item in enumerate(list_temp):
    list_3.remove(item)
print(list_3)  # []
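The usual idiomatic alternative (my addition) is to build a new list instead of mutating in place:
list_3 = [1, 2, 3, 4]
list_3 = [x for x in list_3 if x % 2 == 0]  # keep only what you want
print(list_3)  # [2, 4]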
## Pandas operations ##
import pandas as pd
data = {'a': [1, 2, 3],
        'c': [4, 5, 6],
        'b': [7, 8, 9]}
# Create a DataFrame
frame = pd.DataFrame(data, index=['1', '2', '3'])
# group by
d = df.groupby("vin")  # df is assumed to have vin / mileage columns
for key, group_data in d:
    # key is the group key; group_data is that group's rows as a DataFrame
    for i in range(len(group_data)):
        group_data.iloc[i]["mileage"]  # row i via iloc, then the mileage column
# You cannot assign through an iloc slice directly; copy the group and collect the copies into a list.
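A sketch of that copy-and-collect pattern (my own illustration; the mileage scaling is a made-up transformation):
frames = []
for key, group_data in df.groupby("vin"):
    g = group_data.copy()            # writable copy, leaves the original intact
    g["mileage"] = g["mileage"] * 2  # illustrative transformation only
    frames.append(g)
result = pd.concat(frames)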
# Select multiple columns:
result = df[["task_name", "task_name_en"]]
# Select multiple rows:
result.iloc[[0, 1, 2, 3]]
# DataFrame concatenation
# With pd.DataFrame([c1, c2]) or pd.concat([p1, p2]), first make sure every df has the same columns; if that still fails, rebuild from columns:
t = {"task_name": result["task_name"].to_list(), "table_name": result["table_name"].to_list(), "content_crt": l_crt, "content_ist": l_ist}
f = pd.DataFrame(t)
# numpy:
np.random.randint(-1, 1, size=(5, 5))  # or np.random.uniform for continuous (effectively non-repeating) values
np.take(m, 1, axis=1)  # take column 1 from every row of m (axis=1 is needed; without it the array is flattened)
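A quick check of the axis behaviour (my addition):
import numpy as np
m = np.arange(12).reshape(3, 4)
print(np.take(m, 1, axis=1))  # [1 5 9] -- column 1 of each row
print(np.take(m, 1))          # 1      -- flattened indexing when axis is omitted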
## Replacing column values in pandas via map
gender_map = {'F': 0, 'M': 1}
users['Gender'] = users['Gender'].map(gender_map)
age_map = {val: ii for ii, val in enumerate(set(users['Age']))}  # replacement dict: { original value : new value }
users['Age'] = users['Age'].map(age_map)
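One gotcha worth noting (my addition): values missing from the dict map to NaN.
s = pd.Series(['F', 'M', 'X'])
print(s.map({'F': 0, 'M': 1}))  # 'X' becomes NaN; chain .fillna(...) if that matters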
pattern = re.compile(r'^(.*)\((\d+)\)$')  # backslash escapes the '(' ; Toy Story (1995) -> Toy Story
title_map = {val: pattern.match(val).group(1) for ii, val in enumerate(set(movies['Title']))}
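The second capture group holds the year (my addition, using the same pattern):
import re
pattern = re.compile(r'^(.*)\((\d+)\)$')
m2 = pattern.match('Toy Story (1995)')
print(m2.group(1).strip(), m2.group(2))  # Toy Story 1995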
#### TensorFlow experiments ####
1. Components
   graph -> represents the computation; session/context -> executes it;
   tensor -> represents the data (each Tensor is a multi-dimensional ndarray, e.g. [batch, height, width, channels]);
   Variable -> maintains state; feed / fetch -> set and read values.
2. Using the components
# A constant is a node (op); create two nodes:
matrix = tf.constant([[2., 1.], [2., 1.]])  # constants need no initialization, variables do
product = tf.matmul(matrix, matrix)
# Run the computation; `with` releases resources automatically, replacing sess.close():
with tf.Session() as sess:
    # with tf.device("/gpu:1"):
    rs = sess.run(product)
    print(rs)
# Variables must be initialized. In an InteractiveSession(), Tensor.eval() and Operation.run()
# replace Session.run(), letting you insert ops while the graph is running:
sess = tf.InteractiveSession()
x = tf.Variable([[1.0, 2.0], [2.0, 4.0]])
x.initializer.run()  # outside interactive mode use tf.initialize_all_variables()
sub = tf.subtract(x, matrix)
print(sub.eval())
# Changing a node's state (a counter):
state = tf.Variable(0, name="count")
one = tf.constant(1)
add_op = tf.add(state, one)
update = tf.assign(state, add_op)  # update the node's state
3. Fetch and Feed
input_1 = tf.placeholder(tf.dtypes.float32)  # fed via run(feed_dict=...)
input_2 = tf.placeholder(tf.dtypes.float32)
out_1 = tf.add(input_1, input_2)       # fetched via run
out_2 = tf.subtract(input_1, input_2)
with tf.Session() as sess:
    o1, o2 = sess.run([out_1, out_2], feed_dict={input_1: [7.], input_2: [5.]})
    print(o1, o2)
4. Visualization and saving
# Training visualization:
# summary_op = tf.merge_all_summaries()
# summary_writer = tf.train.SummaryWriter("train_dir", graph_def=sess.graph_def)
# summary_str = sess.run(summary_op, feed_dict=feed_dict)
# summary_writer.add_summary(summary_str, step)
# Saving parameters:
# saver = tf.train.Saver()
# saver.save(sess, FLAGS.train_dir, global_step=step)
# saver.restore(sess, FLAGS.train_dir)
# Launching TensorBoard:
# python tensorflow/tensorboard/tensorboard.py --logdir=path/to/log-directory
# tensorboard --logdir=/path/to/log-directory
# Tensor.get_shape()[1]
# These two must go together:
# sess.run(tf.initialize_all_variables())
# d1 = sess.run(h_gen, feed_dict={x: x_data, z: z_d})
# Initializing variables inside the session:
init_op = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init_op)
    for _ in range(3):
        sess.run(update)
        print(state.eval())
import tensorflow as tf
import numpy as np
uid_max = 500
batch_size = 10
embed_dim = 32
filter_num = 8
feature_num = 20
data = np.zeros((batch_size, feature_num), dtype=np.int32)  # user-ID inputs must be integers for embedding_lookup
uid_data = np.reshape(data, [batch_size, feature_num])
sess = tf.InteractiveSession()
uid = tf.placeholder(tf.int32, [None, feature_num], name="uid")
uid_embed_matrix = tf.Variable(tf.random_uniform([uid_max, embed_dim], -1, 1),
                               name="uid_embed_matrix")
# Look up the embedding vectors for the given user IDs
uid_embed_layer = tf.nn.embedding_lookup(uid_embed_matrix, uid,
name="uid_embed_layer")
new_layer = tf.reduce_sum(uid_embed_layer, axis=1, keep_dims=True)  # sum over axis 1, keeping the dimension
new_layer = tf.expand_dims(uid_embed_layer, -1)  # for the convolution: overwrites the line above, reshaping to (batch_size, feature_num, 32, 1)
filter_weights = tf.Variable(tf.truncated_normal([2, embed_dim, 1, filter_num], stddev=0.1), name="filter_weights")  # convolution filters
filter_bias = tf.Variable(tf.constant(0.1, shape=[filter_num]), name="filter_bias")
conv_layer = tf.nn.conv2d(new_layer, filter_weights, [1,1,1,1], padding="VALID", name="conv_layer")
relu_layer = tf.nn.relu(tf.nn.bias_add(conv_layer,filter_bias), name ="relu_layer")
maxpool_layer = tf.nn.max_pool(relu_layer, [1, feature_num - 2 + 1, 1, 1], [1, 1, 1, 1], padding="VALID", name="maxpool_layer")  # pool window spans the conv output height (feature_num - 2 + 1)
sess.run(tf.initialize_all_variables())
feed_dict = {uid: uid_data}
layer = uid_embed_layer.eval(feed_dict)
print(layer.shape)  # (10, 20, 32): (batch_size, feature_num, embed_dim)