Implementing a Neural Network Model Algorithm in Python
Today, 厾罗 shares a Python implementation of a neural network model algorithm, intended for technical learning and exchange only.
Implementation techniques
1. Import dependencies
This step installs and imports the required libraries. The environment used here is Python 3.7. Note that the network class below also relies on numpy, so it is imported along with the other dependencies.
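pandas and numpy are third-party packages; if they are not installed yet, one way to get them (assuming pip is available on your system) is:
pip install pandas numpy
With the dependencies in place, the imports are: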
from __future__ import division
import math
import random
import pandas as pd
import numpy as np
2. Build the BP neural network class
This step builds a three-layer back-propagation (BP) neural network class.
""" 三层反向传播神经网络 """
class NN:
def __init__(self, ni, nh, no):
self.ni = ni + 1 # 输入层节点
self.nh = nh + 1 # 隐藏层节点
self.no = no # 输出层种类
self.ai = [1.0] * self.ni
self.ah = [1.0] * self.nh
self.ao = [1.0] * self.no
self.wi = self.makeMatrix(self.ni, self.nh) # 输出层到隐藏层的映射矩阵
self.wo = self.makeMatrix(self.nh, self.no) # 隐藏层到输出层的映射矩阵
for i in range(self.ni):
for j in range(self.nh):
self.wi[i][j] = self.rand(-0.2, 0.2)
for j in range(self.nh):
for k in range(self.no):
self.wo[j][k] = self.rand(-2, 2)
    # Forward pass: activate every node in the network
    def update(self, inputs):
        if len(inputs) != self.ni - 1:
            print(len(inputs), self.ni - 1)
            raise ValueError('Input length does not match the number of input nodes!')
        for i in range(self.ni - 1):
            self.ai[i] = inputs[i]
        for j in range(self.nh):  # self.nh is the number of hidden nodes
            total = 0.0  # accumulates z, where a = g(z) and z = Θ^T x; reset for each node
            for i in range(self.ni):  # the loop computes the dot product z^(2) = Θ^(1) a^(1)
                total = total + self.ai[i] * self.wi[i][j]
            self.ah[j] = self.sigmoid(total)  # a^(2) = g(z^(2)), with sigmoid() as the activation function
        for k in range(self.no):
            total = 0.0
            for j in range(self.nh):
                total = total + self.ah[j] * self.wo[j][k]  # z^(3) = Θ^(2) a^(2)
            self.ao[k] = self.sigmoid(total)  # a^(3) = g(z^(3))
        return self.ao[:]
    # Backward pass: compute each node's error term, then update the weights
    def backPropagate(self, targets, lr):  # targets is the sample's one-hot label; lr is the gradient-descent learning rate
        # Output-layer error terms. Note: the raw activation self.ao[k] is used here;
        # rounding it first (as the original code did) zeroes out the gradient and stops learning.
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            error = targets[k] - self.ao[k]
            output_deltas[k] = self.dsigmoid(self.ao[k]) * error
        # Hidden-layer error terms
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            error = 0.0
            for k in range(self.no):
                error = error + output_deltas[k] * self.wo[j][k]
            hidden_deltas[j] = self.dsigmoid(self.ah[j]) * error
        # Update the hidden-to-output weights
        for j in range(self.nh):  # with every node's error term known, propagate the updates backwards
            for k in range(self.no):
                change = output_deltas[k] * self.ah[j]
                self.wo[j][k] = self.wo[j][k] + lr * change
        # Update the input-to-hidden weights
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j] * self.ai[i]
                self.wi[i][j] = self.wi[i][j] + lr * change
        # Compute the squared error for this sample
        error = 0.0
        for k in range(self.no):
            error += 0.5 * (targets[k] - self.ao[k]) ** 2
        return error
    # Report accuracy on the test set
    def test(self, patterns):
        count = 0
        for p in patterns:
            target = p[1]
            result = self.update(p[0])
            print(p[0], ':', target, '->', np.round(result))
            num = 0
            for k in range(self.no):
                if target[k] == np.round(result[k]):
                    num += 1
            print(num)
            if num == self.no:  # correct only if every output node matches
                count += 1
            print("******************", list(np.round(result)) == target, "******************")
        accuracy = count / len(patterns) * 100
        print('accuracy: %.2f%%' % accuracy)
    # Print the network's weight matrices after training
    def weights(self):
        print('Input-to-hidden weights:')
        for i in range(self.ni):
            print(self.wi[i])
        print()
        print('Hidden-to-output weights:')
        for j in range(self.nh):
            print(self.wo[j])
    # Train the network on the training set
    def train(self, patterns, iterations=1000, lr=0.1):
        for i in range(iterations):
            error = 0.0
            for p in patterns:
                inputs = p[0]
                targets = p[1]
                self.update(inputs)
                error = error + self.backPropagate(targets, lr)
            if i % 100 == 0:
                print("percent:", int(i / iterations * 100), "%", ' error: %-.9f' % error)
    # Return a random number in the interval [a, b)
    def rand(self, a, b):
        return (b - a) * random.random() + a
    # Build an I x J matrix, filled with zeros by default
    def makeMatrix(self, I, J, fill=0.0):
        m = []
        for i in range(I):
            m.append([fill] * J)
        return m
    # The sigmoid function, the activation used in the forward pass
    def sigmoid(self, x):
        return 1.0 / (1.0 + math.exp(-x))
    # Derivative of the sigmoid, used in the backward pass. Since g'(z) = g(z)(1 - g(z))
    # and the stored value is already the activation a = g(z), this takes a and returns a * (1 - a).
    def dsigmoid(self, x):
        return x * (1 - x)
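Before moving on to real data, it can help to sanity-check the class on a tiny problem. Below is a minimal smoke test on XOR; the 2-4-1 topology, learning rate, and iteration count are illustrative choices, not tuned values.
# Minimal XOR smoke test for the NN class (a sketch; hyperparameters are illustrative)
xor_patterns = [
    [[0, 0], [0]],
    [[0, 1], [1]],
    [[1, 0], [1]],
    [[1, 1], [0]],
]
xor_nn = NN(2, 4, 1)
xor_nn.train(xor_patterns, iterations=3000, lr=0.5)
xor_nn.test(xor_patterns)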
3. Read and preprocess the data
This step reads the data used to build the classification model and preprocesses it: the four feature columns become the input vector, and the class label is encoded as a three-element target vector.
data = []
raw = pd.read_csv('iris.csv')
raw_data = raw.values
# Assumed layout: column 0 is a row index, columns 1-4 are the four features,
# and column 5 is an integer class label (0, 1 or 2). pd.read_csv has already
# consumed the header row, so no data rows are skipped here (the original
# raw_data[1:, 1:5] left features and labels misaligned by one row).
raw_feature = raw_data[:, 1:5]
for i in range(len(raw_feature)):
    ele = []
    ele.append(list(raw_feature[i]))
    # One-hot encode the class label
    if raw_data[i][5] == 0:
        ele.append([0, 0, 1])
    elif raw_data[i][5] == 1:
        ele.append([0, 1, 0])
    elif raw_data[i][5] == 2:
        ele.append([1, 0, 0])
    else:
        ele.append([0, 0, 0])
    data.append(ele)
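The code above assumes the label in column 5 is already integer-coded. Many copies of iris.csv store the species name instead; if yours does, map the names to integer codes before building data. A sketch, where the column name 'Species' and the species strings are assumptions to adjust for your file:
species_codes = {'setosa': 0, 'versicolor': 1, 'virginica': 2}
raw = pd.read_csv('iris.csv')
raw['Species'] = raw['Species'].map(species_codes)  # hypothetical column name
raw_data = raw.values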
4. Create the neural network model from the BP class
This step uses the BP network class to instantiate the model: 4 input nodes (one per iris feature), 10 hidden nodes, and 3 output nodes (one per class).
nn = NN(4, 10, 3)
5. Train and evaluate the BP classification model
This step splits the data into training and test sets, then trains the BP classification model and evaluates its predictions.
training = data[:100]  # first 100 samples for training (the original slice skipped sample 0)
test = data[100:]      # the rest for testing
nn.train(training, iterations=1000)
nn.test(test)
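One caveat: iris.csv is usually sorted by class, so a plain slice can leave an entire class out of the training set and make the test set unrepresentative. A shuffled split is safer; a minimal sketch using the already-imported random module:
random.seed(0)  # fixed seed so the split is reproducible
random.shuffle(data)
training = data[:100]
test = data[100:]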
Complete source code
from __future__ import division
import math
import random
import pandas as pd
import numpy as np
""" 三层反向传播神经网络 """
class NN:
def __init__(self, ni, nh, no):
self.ni = ni + 1 # 输入层节点
self.nh = nh + 1 # 隐藏层节点
self.no = no # 输出层种类
self.ai = [1.0] * self.ni
self.ah = [1.0] * self.nh
self.ao = [1.0] * self.no
self.wi = self.makeMatrix(self.ni, self.nh) # 输出层到隐藏层的映射矩阵
self.wo = self.makeMatrix(self.nh, self.no) # 隐藏层到输出层的映射矩阵
for i in range(self.ni):
for j in range(self.nh):
self.wi[i][j] = self.rand(-0.2, 0.2)
for j in range(self.nh):
for k in range(self.no):
self.wo[j][k] = self.rand(-2, 2)
    # Forward pass: activate every node in the network
    def update(self, inputs):
        if len(inputs) != self.ni - 1:
            print(len(inputs), self.ni - 1)
            raise ValueError('Input length does not match the number of input nodes!')
        for i in range(self.ni - 1):
            self.ai[i] = inputs[i]
        for j in range(self.nh):  # self.nh is the number of hidden nodes
            total = 0.0  # accumulates z, where a = g(z) and z = Θ^T x; reset for each node
            for i in range(self.ni):  # the loop computes the dot product z^(2) = Θ^(1) a^(1)
                total = total + self.ai[i] * self.wi[i][j]
            self.ah[j] = self.sigmoid(total)  # a^(2) = g(z^(2)), with sigmoid() as the activation function
        for k in range(self.no):
            total = 0.0
            for j in range(self.nh):
                total = total + self.ah[j] * self.wo[j][k]  # z^(3) = Θ^(2) a^(2)
            self.ao[k] = self.sigmoid(total)  # a^(3) = g(z^(3))
        return self.ao[:]
    # Backward pass: compute each node's error term, then update the weights
    def backPropagate(self, targets, lr):  # targets is the sample's one-hot label; lr is the gradient-descent learning rate
        # Output-layer error terms. Note: the raw activation self.ao[k] is used here;
        # rounding it first (as the original code did) zeroes out the gradient and stops learning.
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            error = targets[k] - self.ao[k]
            output_deltas[k] = self.dsigmoid(self.ao[k]) * error
        # Hidden-layer error terms
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            error = 0.0
            for k in range(self.no):
                error = error + output_deltas[k] * self.wo[j][k]
            hidden_deltas[j] = self.dsigmoid(self.ah[j]) * error
        # Update the hidden-to-output weights
        for j in range(self.nh):  # with every node's error term known, propagate the updates backwards
            for k in range(self.no):
                change = output_deltas[k] * self.ah[j]
                self.wo[j][k] = self.wo[j][k] + lr * change
        # Update the input-to-hidden weights
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j] * self.ai[i]
                self.wi[i][j] = self.wi[i][j] + lr * change
        # Compute the squared error for this sample
        error = 0.0
        for k in range(self.no):
            error += 0.5 * (targets[k] - self.ao[k]) ** 2
        return error
    # Report accuracy on the test set
    def test(self, patterns):
        count = 0
        for p in patterns:
            target = p[1]
            result = self.update(p[0])
            print(p[0], ':', target, '->', np.round(result))
            num = 0
            for k in range(self.no):
                if target[k] == np.round(result[k]):
                    num += 1
            print(num)
            if num == self.no:  # correct only if every output node matches
                count += 1
            print("******************", list(np.round(result)) == target, "******************")
        accuracy = count / len(patterns) * 100
        print('accuracy: %.2f%%' % accuracy)
    # Print the network's weight matrices after training
    def weights(self):
        print('Input-to-hidden weights:')
        for i in range(self.ni):
            print(self.wi[i])
        print()
        print('Hidden-to-output weights:')
        for j in range(self.nh):
            print(self.wo[j])
    # Train the network on the training set
    def train(self, patterns, iterations=1000, lr=0.1):
        for i in range(iterations):
            error = 0.0
            for p in patterns:
                inputs = p[0]
                targets = p[1]
                self.update(inputs)
                error = error + self.backPropagate(targets, lr)
            if i % 100 == 0:
                print("percent:", int(i / iterations * 100), "%", ' error: %-.9f' % error)
    # Return a random number in the interval [a, b)
    def rand(self, a, b):
        return (b - a) * random.random() + a
    # Build an I x J matrix, filled with zeros by default
    def makeMatrix(self, I, J, fill=0.0):
        m = []
        for i in range(I):
            m.append([fill] * J)
        return m
    # The sigmoid function, the activation used in the forward pass
    def sigmoid(self, x):
        return 1.0 / (1.0 + math.exp(-x))
    # Derivative of the sigmoid, used in the backward pass. Since g'(z) = g(z)(1 - g(z))
    # and the stored value is already the activation a = g(z), this takes a and returns a * (1 - a).
    def dsigmoid(self, x):
        return x * (1 - x)
if __name__ == '__main__':
    data = []
    raw = pd.read_csv('iris.csv')
    raw_data = raw.values
    # Assumed layout: column 0 is a row index, columns 1-4 are the four features,
    # and column 5 is an integer class label (0, 1 or 2); no data rows are skipped,
    # keeping feature rows aligned with their labels
    raw_feature = raw_data[:, 1:5]
    for i in range(len(raw_feature)):
        ele = []
        ele.append(list(raw_feature[i]))
        # One-hot encode the class label
        if raw_data[i][5] == 0:
            ele.append([0, 0, 1])
        elif raw_data[i][5] == 1:
            ele.append([0, 1, 0])
        elif raw_data[i][5] == 2:
            ele.append([1, 0, 0])
        else:
            ele.append([0, 0, 0])
        data.append(ele)
    nn = NN(4, 10, 3)
    training = data[:100]
    test = data[100:]
    nn.train(training, iterations=1000)
    nn.test(test)