No more preamble; let's go straight to the code.
import torch
from torch import nn
from torch.utils.data import DataLoader,TensorDataset
import matplotlib.pyplot as plt
import numpy as np
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"  # work around duplicate OpenMP runtime errors on some setups
# Synthetic data
n_data = torch.ones(100, 2)  # base shape of the data
x0 = torch.normal(2 * n_data, 1)  # class 0 x data (tensor), shape=(100, 2); a tensor mean fixes both the means and the output size
y0 = torch.zeros(100)  # class 0 y data (tensor), shape=(100,)
x1 = torch.normal(-2 * n_data, 1)  # class 1 x data (tensor), shape=(100, 2)
y1 = torch.ones(100)  # class 1 y data (tensor), shape=(100,)
# Note: x and y must be assembled exactly as below (torch.cat concatenates the data)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # FloatTensor = 32-bit floating point
y = torch.cat((y0, y1), 0).type(torch.FloatTensor)  # BCELoss expects float targets, so y is FloatTensor too
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
plt.show()
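# The two classes are Gaussian clusters centered around (2, 2) and (-2, -2), so
# the data is linearly separable and a single linear decision boundary suffices.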
# An equivalent class-based definition (note: forward must call self.linear,
# the attribute defined in __init__, not self.lr):
# class LogisticRegression(nn.Module):
#     def __init__(self):
#         super(LogisticRegression, self).__init__()
#         self.linear = nn.Linear(2, 1)
#         self.sm = nn.Sigmoid()
#
#     def forward(self, x):
#         x = self.linear(x)
#         x = self.sm(x)
#         return x
logistic_model = nn.Sequential()
logistic_model.add_module('linear', nn.Linear(2, 1))
logistic_model.add_module('sm', nn.Sigmoid())
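# The stacked modules compute y_hat = sigmoid(w0*x0 + w1*x1 + b): a linear
# score squashed into a probability in (0, 1) for class 1.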
# logistic_model = LogisticRegression()
if torch.cuda.is_available():
    logistic_model.cuda()
# Define the loss function and the optimizer
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(logistic_model.parameters(), lr=1e-3, momentum=0.9)
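# A more numerically stable variant (a sketch, not what this post uses): drop
# the Sigmoid layer and let the loss apply it internally.
# logits_model = nn.Sequential()
# logits_model.add_module('linear', nn.Linear(2, 1))
# criterion = nn.BCEWithLogitsLoss()  # fuses sigmoid + BCE in one stable op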
# batch_size = 32
# data_iter = load_data(X,Y,batch_size)
def set_data(X, Y):
    # Shuffle the samples, keeping x and y aligned through a shared index
    index_slice = list(range(X.shape[0]))
    np.random.shuffle(index_slice)
    x = X[index_slice]
    y = Y[index_slice]
    # torch.autograd.Variable is deprecated; plain tensors track gradients directly
    if torch.cuda.is_available():
        x_data = x.cuda()
        y_data = y.cuda()
    else:
        x_data = x
        y_data = y
    return x_data, y_data
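# The load_data referenced in the commented-out lines above is never defined in
# the original post; a minimal sketch using the DataLoader/TensorDataset imports
# might look like this:
# def load_data(X, Y, batch_size):
#     dataset = TensorDataset(X, Y)
#     return DataLoader(dataset, batch_size=batch_size, shuffle=True)
# Iterating it yields shuffled (x_batch, y_batch) mini-batches, instead of the
# full-batch shuffle that set_data performs each epoch.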
Train_Loss_list = []
Train_acc_list = []
# Start training
for epoch in range(10000):
    x_data, y_data = set_data(x, y)
    out = logistic_model(x_data)
    out = out.view(-1, 1)
    y_data = y_data.view(-1, 1)
    loss = criterion(out, y_data)
    print_loss = loss.item()
    mask = out.ge(0.5).float()  # classify with a threshold of 0.5
    correct = (mask == y_data).sum()  # count the correctly predicted samples
    acc = correct.item() / x_data.size(0)  # compute the accuracy
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    Train_Loss_list.append(print_loss)
    Train_acc_list.append(acc)
    # Print the current loss and accuracy every 2000 epochs
    if (epoch + 1) % 2000 == 0:
        print('-' * 20)
        print('epoch {}'.format(epoch + 1))
        print('current loss {:.6f}'.format(print_loss))
        print('current accuracy {:.6f}'.format(acc))
print(logistic_model.state_dict())  # inspect the learned weight and bias
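# To persist the trained parameters (a sketch; the filename is illustrative):
# torch.save(logistic_model.state_dict(), 'logistic_model.pth')
# logistic_model.load_state_dict(torch.load('logistic_model.pth'))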
x11 = range(0, 10000)
y11 = Train_Loss_list
plt.xlabel('Epochs')
plt.ylabel('Train loss')
plt.title('Train loss vs. epochs')
plt.plot(x11, y11, '.', c='b', label="Train_Loss")
plt.legend()
plt.show()
x11 = range(0, 10000)
y11 = Train_acc_list
plt.xlabel('Epochs')
plt.ylabel('Train acc')
plt.title('Train acc vs. epochs')
plt.plot(x11, y11, '.', c='b', label="Train_acc")
plt.legend()
plt.show()
# Visualize the result: the decision boundary is where w0*x + w1*y + b = 0,
# i.e. where the predicted probability crosses 0.5
w0, w1 = logistic_model.linear.weight[0]
w0 = float(w0.item())
w1 = float(w1.item())
b = float(logistic_model.linear.bias.item())
plot_x = np.arange(-7, 7, 0.1)
plot_y = (-w0 * plot_x - b) / w1
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
plt.plot(plot_x, plot_y)
plt.show()
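# Using the trained model on new points (a sketch; the test points are made up
# and assume the model stayed on CPU, so add .cuda() to them otherwise):
# with torch.no_grad():
#     new_points = torch.tensor([[2.0, 2.0], [-2.0, -2.0]])
#     probs = logistic_model(new_points).view(-1)  # probability of class 1
#     labels = (probs >= 0.5).long()  # hard 0/1 predictions
#     print(probs, labels)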
That concludes this walkthrough of implementing logistic regression with torch.nn.