-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbai24_CollaborativeFiltering.py
More file actions
196 lines (162 loc) · 6.53 KB
/
bai24_CollaborativeFiltering.py
File metadata and controls
196 lines (162 loc) · 6.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 17:31:23 2020
@author: phamk
"""
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
class CF(object):
    """Neighborhood-based collaborative filtering.

    Supports user-user CF (uuCF=1) and item-item CF (uuCF=0).  For
    item-item CF the user and item columns of Y_data are swapped in
    __init__, so a single code path serves both modes.

    Parameters
    ----------
    Y_data : ndarray of shape (n_ratings, 3)
        Rows of [user_id, item_id, rating]; ids start from 0.
    k : int
        Number of nearest neighbors used when predicting a rating.
    dist_func : callable
        Pairwise similarity function; defaults to sklearn's
        cosine_similarity.
    uuCF : int
        1 for user-user CF, 0 for item-item CF.
    """
    def __init__(self, Y_data, k, dist_func = cosine_similarity, uuCF = 1):
        self.uuCF = uuCF  # user-user (1) or item-item (0) CF
        # Item-item CF: swap the first two columns so that "user" below
        # always refers to the axis similarities are computed over.
        self.Y_data = Y_data if uuCF else Y_data[:, [1, 0, 2]]
        self.k = k  # number of neighbor points
        self.dist_func = dist_func
        self.Ybar_data = None  # mean-centered copy of Y_data, built by normalize_Y
        # number of users and items. Remember to add 1 since id starts from 0
        self.n_users = int(np.max(self.Y_data[:, 0])) + 1
        self.n_items = int(np.max(self.Y_data[:, 1])) + 1

    def add(self, new_data):
        """
        Update Y_data matrix when new ratings come.
        For simplicity, suppose that there is no new user or item.
        """
        self.Y_data = np.concatenate((self.Y_data, new_data), axis = 0)

    def normalize_Y(self):
        """
        Mean-center every user's ratings and build self.Ybar, the sparse
        (n_items x n_users) normalized utility matrix.

        Side effects: sets self.mu (per-user mean ratings),
        self.Ybar_data and self.Ybar.
        """
        users = self.Y_data[:, 0]  # all users - first col of the Y_data
        self.Ybar_data = self.Y_data.copy()
        self.mu = np.zeros((self.n_users,))
        for n in range(self.n_users):
            # row indices of ratings done by user n
            # (indices must be integers to be used for indexing)
            ids = np.where(users == n)[0].astype(np.int32)
            # the corresponding ratings
            ratings = self.Y_data[ids, 2]
            # Mean rating of user n; guard the empty case explicitly
            # instead of letting np.mean warn and return NaN.
            m = np.mean(ratings) if ids.size > 0 else 0.0
            # BUG FIX: the original never stored the mean, leaving
            # self.mu at 0 and the data effectively un-normalized.
            self.mu[n] = m
            # normalize: subtract the user's mean rating
            self.Ybar_data[ids, 2] = ratings - m
        ################################################
        # form the rating matrix as a sparse matrix. Sparsity is important
        # for both memory and computing efficiency. For example, if #user = 1M,
        # #item = 100k, then shape of the rating matrix would be (100k, 1M),
        # you may not have enough memory to store this. Then, instead, we store
        # nonzeros only, and, of course, their locations.
        self.Ybar = sparse.coo_matrix((self.Ybar_data[:, 2],
            (self.Ybar_data[:, 1], self.Ybar_data[:, 0])), (self.n_items, self.n_users))
        self.Ybar = self.Ybar.tocsr()

    def similarity(self):
        """Compute the user-user similarity matrix self.S from Ybar."""
        self.S = self.dist_func(self.Ybar.T, self.Ybar.T)

    def refresh(self):
        """
        Normalize data and calculate similarity matrix again (after
        some few ratings added)
        """
        self.normalize_Y()
        self.similarity()

    def fit(self):
        """Train the model: normalize ratings and build similarities."""
        self.refresh()

    def __pred(self, u, i, normalized = 1):
        """
        Predict the rating of user u for item i.

        If normalized is truthy, return the mean-centered prediction;
        otherwise add back user u's mean rating.
        """
        # Step 1: find all rows rating item i
        ids = np.where(self.Y_data[:, 1] == i)[0].astype(np.int32)
        # Step 2: the users who produced those ratings
        users_rated_i = (self.Y_data[ids, 0]).astype(np.int32)
        # Robustness: nobody rated i -> fall back to the neutral
        # prediction (0 normalized, i.e. the user's mean un-normalized).
        if users_rated_i.size == 0:
            return 0 if normalized else self.mu[u]
        # Step 3: similarity between u and the users who rated i
        sim = self.S[u, users_rated_i]
        # Step 4: indices of the k most similar users
        a = np.argsort(sim)[-self.k:]
        # and the corresponding similarity levels
        nearest_s = sim[a]
        # How each of those 'near' users rated item i (normalized)
        r = self.Ybar[i, users_rated_i[a]]
        # Similarity-weighted average; 1e-8 avoids division by zero.
        pred = (r*nearest_s)[0]/(np.abs(nearest_s).sum() + 1e-8)
        if normalized:
            return pred
        return pred + self.mu[u]

    def pred(self, u, i, normalized = 1):
        """
        Predict the rating of user u for item i, dispatching on the CF
        mode: arguments are swapped for item-item CF because the data
        columns were swapped in __init__.
        """
        if self.uuCF: return self.__pred(u, i, normalized)
        return self.__pred(i, u, normalized)

    def recommend(self, u, normalized = 1):
        """
        Determine all items should be recommended for user u. (uuCF =1)
        or all users who might have interest on item u (uuCF = 0)
        The decision is made based on all i such that:
        self.pred(u, i) > 0. Suppose we are considering items which
        have not been rated by u yet.
        """
        ids = np.where(self.Y_data[:, 0] == u)[0]
        # set for O(1) membership tests in the loop below
        items_rated_by_u = set(self.Y_data[ids, 1].tolist())
        recommended_items = []
        for i in range(self.n_items):
            if i not in items_rated_by_u:
                rating = self.__pred(u, i)
                if rating > 0:
                    recommended_items.append(i)
        return recommended_items

    def print_recommendation(self):
        """
        print all items which should be recommended for each user
        """
        print('Recommendation: ')
        for u in range(self.n_users):
            recommended_items = self.recommend(u)
            if self.uuCF:
                print('    Recommend item(s):', recommended_items, 'to user', u)
            else:
                print('    Recommend item', u, 'to user(s) : ', recommended_items)
# Toy data set: space-separated (user, item, rating) triples.
r_cols = ['user_id', 'item_id', 'rating']
ratings = pd.read_csv('ex.dat', sep = ' ', names = r_cols, encoding='latin-1')
Y_data = ratings.values

# Run both CF flavours (user-user, then item-item) on the toy data and
# print the resulting recommendations for each.
for mode in (1, 0):
    rs = CF(Y_data, k = 2, uuCF = mode)
    rs.fit()
    rs.print_recommendation()
#Apply for MovieLens_DB
def _rmse(model, rates):
    """Root-mean-squared error of model.pred over (user, item, rating) rows."""
    n_tests = rates.shape[0]
    SE = 0  # accumulated squared error
    for n in range(n_tests):
        pred = model.pred(rates[n, 0], rates[n, 1], normalized = 0)
        SE += (pred - rates[n, 2])**2
    return np.sqrt(SE/n_tests)

r_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings_base = pd.read_csv('ml-100k/ub.base', sep='\t', names=r_cols, encoding='latin-1')
ratings_test = pd.read_csv('ml-100k/ub.test', sep='\t', names=r_cols, encoding='latin-1')
rate_train = ratings_base.values
rate_test = ratings_test.values
# ids in the MovieLens files start from 1; shift to 0-based indices
rate_train[:, :2] -= 1
rate_test[:, :2] -= 1

#User-user CF
rs = CF(rate_train, k = 30, uuCF = 1)
rs.fit()
RMSE = _rmse(rs, rate_test)
print('User-user CF, RMSE =', RMSE)

#Item-item CF
rs = CF(rate_train, k = 30, uuCF = 0)
rs.fit()
RMSE = _rmse(rs, rate_test)
print('Item-item CF, RMSE =', RMSE)