Example 1:
A simple example. train is a list of dicts; DictVectorizer first "one-hot" encodes it, and the resulting feature vectors are then fed to an FM, which can predict a score for a new feature combination.
from pyfm import pylibfm
from sklearn.feature_extraction import DictVectorizer
import numpy as np
train = [
    {"user": "1", "item": "5", "age": 19},
    {"user": "2", "item": "43", "age": 33},
    {"user": "3", "item": "20", "age": 55},
    {"user": "4", "item": "10", "age": 20},
]
v = DictVectorizer()
X = v.fit_transform(train)
print(X.toarray())
y = np.repeat(1.0, X.shape[0])  # dummy targets: label every training row 1.0
print(X.shape[0])
fm = pylibfm.FM()
fm.fit(X, y)
# Predict a score for an unseen feature combination
fm.predict(v.transform({"user": "1", "item": "10", "age": 40}))
Output:
[[19. 0. 0. 0. 1. 1. 0. 0. 0.]
[33. 0. 0. 1. 0. 0. 1. 0. 0.]
[55. 0. 1. 0. 0. 0. 0. 1. 0.]
[20. 1. 0. 0. 0. 0. 0. 0. 1.]]
4
Creating validation dataset of 0.01 of training for adaptive regularization
-- Epoch 1
Training log loss: 0.37518
array([0.9999684])
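To see which column each one-hot feature occupies in the matrix above, you can ask the vectorizer for its feature names. A minimal sketch (assuming scikit-learn >= 1.0; older versions expose get_feature_names() instead):
# Sketch: list the one-hot columns in order; DictVectorizer sorts feature names
# alphabetically, which matches the matrix printed above.
# Requires scikit-learn >= 1.0; on older versions use v.get_feature_names().
print(v.get_feature_names_out())
# ['age' 'item=10' 'item=20' 'item=43' 'item=5' 'user=1' 'user=2' 'user=3' 'user=4']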
Example 2:
This example trains on real movie-rating data: the MovieLens 100k dataset (ml-100k), which can be downloaded from GroupLens. Each line of ua.base / ua.test is tab-separated: user id, movie id, rating, timestamp.
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from pyfm import pylibfm
# Read in data
def loadData(filename, path="ml-100k/"):
    data = []
    y = []
    users = set()
    items = set()
    with open(path + filename) as f:
        for line in f:
            (user, movieid, rating, ts) = line.split('\t')
            data.append({"user_id": str(user), "movie_id": str(movieid)})
            y.append(float(rating))
            users.add(user)
            items.add(movieid)
    return (data, np.array(y), users, items)
(train_data, y_train, train_users, train_items) = loadData("ua.base")
(test_data, y_test, test_users, test_items) = loadData("ua.test")
v = DictVectorizer()
X_train = v.fit_transform(train_data)
X_test = v.transform(test_data)
# Build and train a Factorization Machine
fm = pylibfm.FM(num_factors=10, num_iter=100, verbose=True, task="regression",
                initial_learning_rate=0.001, learning_rate_schedule="optimal")
fm.fit(X_train, y_train)
# Evaluate
preds = fm.predict(X_test)
from sklearn.metrics import mean_squared_error
print("FM MSE: %.4f" % mean_squared_error(y_test, preds))
Output:
Creating validation dataset of 0.01 of training for adaptive regularization
-- Epoch 1
Training MSE: 0.59525
-- Epoch 2
Training MSE: 0.51804
-- Epoch 3
Training MSE: 0.49046
-- Epoch 4
Training MSE: 0.47458
-- Epoch 5
Training MSE: 0.46416
-- Epoch 6
Training MSE: 0.45662
-- Epoch 7
Training MSE: 0.45099
-- Epoch 8
Training MSE: 0.44639
-- Epoch 9
Training MSE: 0.44264
-- Epoch 10
Training MSE: 0.43949
-- Epoch 11
Training MSE: 0.43675
-- Epoch 12
Training MSE: 0.43430
-- Epoch 13
Training MSE: 0.43223
-- Epoch 14
Training MSE: 0.43020
-- Epoch 15
Training MSE: 0.42851
-- Epoch 16
Training MSE: 0.42691
-- Epoch 17
Training MSE: 0.42531
-- Epoch 18
Training MSE: 0.42389
-- Epoch 19
Training MSE: 0.42255
-- Epoch 20
Training MSE: 0.42128
-- Epoch 21
Training MSE: 0.42003
-- Epoch 22
Training MSE: 0.41873
-- Epoch 23
Training MSE: 0.41756
-- Epoch 24
Training MSE: 0.41634
-- Epoch 25
Training MSE: 0.41509
-- Epoch 26
Training MSE: 0.41391
-- Epoch 27
Training MSE: 0.41274
-- Epoch 28
Training MSE: 0.41149
-- Epoch 29
Training MSE: 0.41032
-- Epoch 30
Training MSE: 0.40891
-- Epoch 31
Training MSE: 0.40774
-- Epoch 32
Training MSE: 0.40635
-- Epoch 33
Training MSE: 0.40495
-- Epoch 34
Training MSE: 0.40354
-- Epoch 35
Training MSE: 0.40203
-- Epoch 36
Training MSE: 0.40047
-- Epoch 37
Training MSE: 0.39889
-- Epoch 38
Training MSE: 0.39728
-- Epoch 39
Training MSE: 0.39562
-- Epoch 40
Training MSE: 0.39387
-- Epoch 41
Training MSE: 0.39216
-- Epoch 42
Training MSE: 0.39030
-- Epoch 43
Training MSE: 0.38847
-- Epoch 44
Training MSE: 0.38655
-- Epoch 45
Training MSE: 0.38461
-- Epoch 46
Training MSE: 0.38269
-- Epoch 47
Training MSE: 0.38068
-- Epoch 48
Training MSE: 0.37864
-- Epoch 49
Training MSE: 0.37657
-- Epoch 50
Training MSE: 0.37459
-- Epoch 51
Training MSE: 0.37253
-- Epoch 52
Training MSE: 0.37045
-- Epoch 53
Training MSE: 0.36845
-- Epoch 54
Training MSE: 0.36647
-- Epoch 55
Training MSE: 0.36448
-- Epoch 56
Training MSE: 0.36254
-- Epoch 57
Training MSE: 0.36067
-- Epoch 58
Training MSE: 0.35874
-- Epoch 59
Training MSE: 0.35690
-- Epoch 60
Training MSE: 0.35511
-- Epoch 61
Training MSE: 0.35333
-- Epoch 62
Training MSE: 0.35155
-- Epoch 63
Training MSE: 0.34992
-- Epoch 64
Training MSE: 0.34829
-- Epoch 65
Training MSE: 0.34675
-- Epoch 66
Training MSE: 0.34538
-- Epoch 67
Training MSE: 0.34393
-- Epoch 68
Training MSE: 0.34258
-- Epoch 69
Training MSE: 0.34129
-- Epoch 70
Training MSE: 0.34006
-- Epoch 71
Training MSE: 0.33885
-- Epoch 72
Training MSE: 0.33773
-- Epoch 73
Training MSE: 0.33671
-- Epoch 74
Training MSE: 0.33564
-- Epoch 75
Training MSE: 0.33468
-- Epoch 76
Training MSE: 0.33375
-- Epoch 77
Training MSE: 0.33292
-- Epoch 78
Training MSE: 0.33211
-- Epoch 79
Training MSE: 0.33131
-- Epoch 80
Training MSE: 0.33065
-- Epoch 81
Training MSE: 0.33002
-- Epoch 82
Training MSE: 0.32930
-- Epoch 83
Training MSE: 0.32882
-- Epoch 84
Training MSE: 0.32813
-- Epoch 85
Training MSE: 0.32764
-- Epoch 86
Training MSE: 0.32722
-- Epoch 87
Training MSE: 0.32677
-- Epoch 88
Training MSE: 0.32635
-- Epoch 89
Training MSE: 0.32591
-- Epoch 90
Training MSE: 0.32550
-- Epoch 91
Training MSE: 0.32513
-- Epoch 92
Training MSE: 0.32481
-- Epoch 93
Training MSE: 0.32451
-- Epoch 94
Training MSE: 0.32421
-- Epoch 95
Training MSE: 0.32397
-- Epoch 96
Training MSE: 0.32363
-- Epoch 97
Training MSE: 0.32341
-- Epoch 98
Training MSE: 0.32319
-- Epoch 99
Training MSE: 0.32293
-- Epoch 100
Training MSE: 0.32268
FM MSE: 0.8873
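Because the ratings live on a 1-5 star scale, RMSE is easier to interpret than MSE. A quick follow-up sketch, reusing the y_test and preds computed above:
import numpy as np
from sklearn.metrics import mean_squared_error
# Sketch: report RMSE alongside MSE; sqrt(0.8873) ≈ 0.9420,
# i.e. predictions are off by a bit under one star on average.
print("FM RMSE: %.4f" % np.sqrt(mean_squared_error(y_test, preds)))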
Example 3: a classification example.
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
from pyfm import pylibfm
from sklearn.datasets import make_classification
X, y = make_classification(n_samples=1000, n_features=100, n_clusters_per_class=1)
# Turn each dense row into a {column_index: feature_value} dict so DictVectorizer
# can consume it (equivalent to {j: val for j, val in enumerate(row)}).
data = [{v: k for k, v in dict(zip(i, range(len(i)))).items()} for i in X]
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.1, random_state=42)
v = DictVectorizer()
X_train = v.fit_transform(X_train)
X_test = v.transform(X_test)
fm = pylibfm.FM(num_factors=50, num_iter=10, verbose=True, task="classification",
                initial_learning_rate=0.0001, learning_rate_schedule="optimal")
fm.fit(X_train, y_train)
from sklearn.metrics import log_loss
print("Validation log loss: %.4f" % log_loss(y_test, fm.predict(X_test)))
Output:
Creating validation dataset of 0.01 of training for adaptive regularization
-- Epoch 1
Training log loss: 2.12467
-- Epoch 2
Training log loss: 1.74185
-- Epoch 3
Training log loss: 1.42232
-- Epoch 4
Training log loss: 1.16085
-- Epoch 5
Training log loss: 0.94964
-- Epoch 6
Training log loss: 0.78052
-- Epoch 7
Training log loss: 0.64547
-- Epoch 8
Training log loss: 0.53758
-- Epoch 9
Training log loss: 0.45132
-- Epoch 10
Training log loss: 0.38187
Validation log loss: 1.3678
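Since pylibfm returns class-1 probabilities for task="classification" (which is why log_loss can consume fm.predict directly above), you can also threshold them at 0.5 to get hard labels. A minimal sketch, assuming the probability interpretation holds:
from sklearn.metrics import accuracy_score
# Sketch: turn predicted probabilities into hard 0/1 labels at a 0.5 threshold.
y_pred = (fm.predict(X_test) >= 0.5).astype(int)
print("Validation accuracy: %.4f" % accuracy_score(y_test, y_pred))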
Code: pyFM/pyfm/pylibfm.py at master · coreylynch/pyFM (github.com)