BPR 是一種採用 pairwise 訓練的個性化推薦算法,由 Rendle 等人於 2009 年首次提出。其損失函數表示:當給定訓練數據中的偏序集合時,求最可能的用戶向量表示矩陣和 item 向量表示矩陣,推導如下。
總體訓練過程:
把模型參數 θ 用矩陣分解的形式表示爲:
求各自的偏導,這裏的 f 是 (u, i, j) 三元組的關係 $x_{uij}$,能夠簡單地有 $x_{uij} = x_{ui} - x_{uj}$。
利用上述訓練過程的推導,訓練代碼實現如下:
# One BPR-SGD step for a sampled triple (u, i, j):
# r_ui / r_uj are the model's scores for the positive item i and the
# negative item j; loss_func is the shared gradient coefficient
# -sigmoid(-(r_ui - r_uj)), applied below with L2 regularization.
r_ui = np.dot(self.U[u], self.V[i].T) + self.biasV[i]
r_uj = np.dot(self.U[u], self.V[j].T) + self.biasV[j]
r_uij = r_ui - r_uj
loss_func = -1.0 / (1 + np.exp(r_uij))
# update U and V
self.U[u] += -self.lr * (loss_func * (self.V[i] - self.V[j]) + self.reg * self.U[u])
self.V[i] += -self.lr * (loss_func * self.U[u] + self.reg * self.V[i])
self.V[j] += -self.lr * (loss_func * (-self.U[u]) + self.reg * self.V[j])
# update biasV
self.biasV[i] += -self.lr * (loss_func + self.reg * self.biasV[i])
self.biasV[j] += -self.lr * (-loss_func + self.reg * self.biasV[j])
複製代碼
上述 r_ui 表示用戶 u 對 item i 的評分,U 表示用戶表示矩陣,V 表示 item 表示矩陣,bias 是偏置。
完整代碼如下:
import random
from collections import defaultdict
import numpy as np
from sklearn.metrics import roc_auc_score
import scores
class BPR:
    """Bayesian Personalized Ranking via matrix factorization (Rendle et al., 2009).

    Learns user/item latent factors plus item biases with SGD over sampled
    (u, i, j) triples, where i is an observed (positive) item for user u and
    j is an unobserved (negative) one. All model state is class-level, as in
    the original implementation.
    """

    # hyper-parameters; user/item ids in the data files are 1-based
    user_count = 10000
    item_count = 5000
    latent_factors = 10       # dimensionality of the latent vectors
    lr = 0.01                 # SGD learning rate
    reg = 0.01                # L2 regularization strength
    train_count = 1000        # number of epochs (each samples user_count triples)
    train_data_path = 'train.txt'
    test_data_path = 'test.txt'
    size_u_i = user_count * item_count
    # small random initialization of factor matrices and item biases
    U = np.random.rand(user_count, latent_factors) * 0.01
    V = np.random.rand(item_count, latent_factors) * 0.01
    biasV = np.random.rand(item_count) * 0.01
    test_data = np.zeros((user_count, item_count))  # 0/1 ground-truth matrix
    test = np.zeros(size_u_i)                       # flattened ground truth
    predict_ = np.zeros(size_u_i)                   # flattened predictions

    def load_data(self, path):
        """Read 'user item' pairs from `path` into {user_id: set(item_ids)}.

        Ids are kept 1-based, matching the data files.
        """
        user_ratings = defaultdict(set)
        with open(path, 'r') as f:
            for line in f:
                u, i = line.split(" ")
                user_ratings[int(u)].add(int(i))
        return user_ratings

    def load_test_data(self, path):
        """Mark each 'user item' pair from `path` in the 0-based test matrix."""
        # use a context manager so the file handle is always closed
        with open(path, 'r') as f:
            for line in f:
                parts = line.split(' ')
                user = int(parts[0])
                item = int(parts[1])
                self.test_data[user - 1][item - 1] = 1

    def train(self, user_ratings_train):
        """Run one epoch of BPR-SGD.

        Samples `user_count` (u, i, j) triples from `user_ratings_train`
        ({user_id: set(item_ids)}, 1-based ids) and updates U, V and biasV
        in place.
        """
        for _ in range(self.user_count):
            # sample a user; skip users with no training ratings
            u = random.randint(1, self.user_count)
            if u not in user_ratings_train:
                continue
            # sample a positive item from the observed items.
            # NOTE: random.sample() on a set raises TypeError since
            # Python 3.11, so convert to a list and use random.choice.
            i = random.choice(list(user_ratings_train[u]))
            # sample a negative item from the unobserved items (rejection sampling)
            j = random.randint(1, self.item_count)
            while j in user_ratings_train[u]:
                j = random.randint(1, self.item_count)
            # switch to 0-based indices for the matrices
            u -= 1
            i -= 1
            j -= 1
            r_ui = np.dot(self.U[u], self.V[i].T) + self.biasV[i]
            r_uj = np.dot(self.U[u], self.V[j].T) + self.biasV[j]
            r_uij = r_ui - r_uj
            # shared gradient coefficient -sigmoid(-r_uij); clip the exponent
            # so np.exp cannot overflow (the limit 0.0 is preserved)
            loss_func = -1.0 / (1 + np.exp(min(r_uij, 500.0)))
            # update U and V
            # (NOTE: self.U[u] is updated first and the *updated* vector is
            # used in the V updates — kept as in the original implementation)
            self.U[u] += -self.lr * (loss_func * (self.V[i] - self.V[j]) + self.reg * self.U[u])
            self.V[i] += -self.lr * (loss_func * self.U[u] + self.reg * self.V[i])
            self.V[j] += -self.lr * (loss_func * (-self.U[u]) + self.reg * self.V[j])
            # update biasV
            self.biasV[i] += -self.lr * (loss_func + self.reg * self.biasV[i])
            self.biasV[j] += -self.lr * (-loss_func + self.reg * self.biasV[j])

    def predict(self, user, item):
        """Score every (user, item) pair as user @ item.T; returns an np.matrix."""
        predict = np.mat(user) * np.mat(item.T)
        return predict

    def main(self):
        """Full pipeline: load data, train, score all pairs, report AUC / top-K."""
        user_ratings_train = self.load_data(self.train_data_path)
        self.load_test_data(self.test_data_path)
        # flatten the ground-truth matrix; vectorized reshape replaces the
        # original per-cell Python loop over user_count * item_count entries
        self.test = self.test_data.reshape(-1)
        # training
        for _ in range(self.train_count):
            self.train(user_ratings_train)
        predict_matrix = self.predict(self.U, self.V)
        # prediction: flatten, then zero out items already seen in training
        self.predict_ = predict_matrix.getA().reshape(-1)
        self.predict_ = pre_handel(user_ratings_train, self.predict_, self.item_count)
        auc_score = roc_auc_score(self.test, self.predict_)
        print('AUC:', auc_score)
        # Top-K evaluation
        scores.topK_scores(self.test, self.predict_, 5, self.user_count, self.item_count)
def pre_handel(train_set, predict, item_count):
    """Zero out predictions for (user, item) pairs seen during training.

    Ensures items a user already rated in the training set cannot be
    recommended (or counted as hits) at evaluation time.

    Args:
        train_set: {user_id: set(item_ids)} with 1-based ids
                   (renamed from `set`, which shadowed the builtin).
        predict: flat score sequence of length user_count * item_count.
        item_count: number of items (row stride of `predict`).

    Returns:
        The same `predict` object, modified in place.
    """
    for u in train_set:
        for j in train_set[u]:
            predict[(u - 1) * item_count + j - 1] = 0
    return predict
# Script entry point: train the BPR model and report evaluation metrics.
if __name__ == '__main__':
    BPR().main()
複製代碼