# video link: https://www.youtube.com/watch?v=Souzjv6WfrY
# Gradient Descent for Linear Regression
# yhat = wx + b
# loss = (y - yhat) ** 2 / N
import sys
import numpy as np
# Initialise the training data.
x = np.random.randn(10, 1)  # inputs: 10 samples from N(0, 1), shape (10, 1)
# Ground-truth relation y = 2x plus a random offset.
# NOTE(review): np.random.rand() returns a single scalar, so every sample is
# shifted by the SAME offset (a constant bias), not per-sample noise —
# confirm whether np.random.randn(10, 1) was intended instead.
y = 2 * x + np.random.rand()
# Parameters (slope and intercept) to be learned, starting from zero.
w = 0.0
b = 0.0
# Hyperparameter: learning rate, optionally overridden from the command line.
learning_rate = float(sys.argv[1]) if len(sys.argv) == 2 else 0.001
# Create gradient descent function
def descend(x, y, w, b, lr):
    """Perform one full-batch gradient-descent step for the model yhat = w*x + b.

    Loss: mean squared error, (1/N) * sum((y - (w*x + b)) ** 2), whose
    gradients are
        dL/dw = -(2/N) * sum(x * (y - (w*x + b)))
        dL/db = -(2/N) * sum(y - (w*x + b))

    Parameters
    ----------
    x, y : np.ndarray, shape (N, 1)
        Training inputs and targets.
    w, b : float (or 1-element array after the first step)
        Current slope and intercept.
    lr : float
        Learning rate.

    Returns
    -------
    tuple
        The updated (w, b).
    """
    dldw = 0.0
    dldb = 0.0
    N = x.shape[0]
    for xi, yi in zip(x, y):
        err = yi - (w * xi + b)
        # BUG FIX: accumulate each sample's contribution. The original used
        # plain `=`, discarding all but the LAST sample's gradient while the
        # update below still divided by N — a gradient N times too small and
        # pointing in the wrong direction for the batch.
        dldw += -2 * xi * err
        dldb += -2 * err
    # Step against the batch-averaged gradient.
    w = w - lr * dldw * (1 / N)
    b = b - lr * dldb * (1 / N)
    return w, b
# Iteratively make updates
# Training loop: 400 full-batch gradient-descent updates, logging the
# mean-squared-error and current parameters after every epoch.
for epoch in range(400):
    # One parameter update over the whole batch.
    w, b = descend(x, y, w, b, learning_rate)
    # Evaluate the current fit.
    predictions = w * x + b
    squared_errors = (y - predictions) ** 2
    loss = np.sum(squared_errors, axis=0) / x.shape[0]
    print(f"{epoch} loss is {loss}, parameter: w is {w}, b is {b}")
# Source: https://www.toymoban.com/news/detail-432246.html