Implementing a BP Neural Network in TensorFlow
Abstract: The basic model of deep learning is the BP (backpropagation) deep neural network, which consists of an input layer, a hidden layer, and an output layer. The number of input-layer neurons is determined by the number of attribute features in the dataset, and the number of output-layer neurons is determined by the number of class labels. A BP neural network is trained by using gradient descent to repeatedly adjust its weight matrices and biases.
As a beginner, I wrote my own TensorFlow implementation of a BP neural network: the input layer has three neurons, the hidden layer has four, and the output layer has two; the weight matrices and biases are all initialized with normally distributed random numbers.
For my own dissection of neural networks and a detailed walkthrough of deep neural networks, see my earlier post: Deep Neural Networks.
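Before the TensorFlow code, here is a minimal NumPy sketch (my own illustration, not part of the original program) of the update rule the abstract describes, using the first layer of the 3-4-2 network above; the gradient dL_dz1 is a made-up placeholder, since the real value comes from backpropagation.

import numpy as np

# Minimal sketch (illustration only) of one gradient-descent step for
# the first layer of a 3-4-2 network: W1 is 4x3, b1 is 4x1.
rng = np.random.default_rng(0)
W1 = rng.normal(scale=0.03, size=(4, 3))   # weights, normal init as in the post
b1 = rng.normal(size=(4, 1))               # bias

x = rng.normal(size=(3, 1))                # one 3-feature sample
z1 = W1 @ x + b1                           # pre-activation
a1 = 1.0 / (1.0 + np.exp(-z1))             # sigmoid activation

dL_dz1 = a1 - 0.5                          # placeholder gradient, for illustration only
lr = 0.05                                  # learning rate, as in the post
W1 -= lr * (dL_dz1 @ x.T)                  # dL/dW1 = dL/dz1 · x^T
b1 -= lr * dL_dz1                          # dL/db1 = dL/dz1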
1. The main function:
#code by WangJianing
#email:851019059@qq.com or lygwjn@126.com
#time:2018.11.24
import tensorflow as tf
import numpy as np
from neural_network import NN
# read the dataset from a text file
def readFile(filename):
    """read file from txt"""
    input_x = []
    input_y = []
    with open(filename, 'r') as f:
        while True:
            line = f.readline()
            if line == '':
                break
            else:
                line = line.replace('\n', '')
                sample = line.split(' ')
                x = sample[0:3]
                x = list(map(np.float32, x))
                # the fourth field is the integer class label (0 or 1)
                y = [np.int32(sample[3])]
                input_x.append(x)
                input_y.append(y)
    return input_x, input_y
# main function
if __name__ == '__main__':
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.2  # need ~700MB GPU memory
    train_x, train_y = readFile('./data.txt')
    test_x, test_y = readFile('./data_test.txt')
    sample_size = [len(train_y), len(test_y)]
    print(sample_size)
    train_x = np.transpose(train_x)
    input_y = np.zeros([2, sample_size[0]])
    test_x = np.transpose(test_x)
    test_y = np.transpose(test_y)
    # one-hot encode the training labels: column ei gets a 1 in the row of its class
    for ei, i in enumerate(train_y):
        input_y[i[0]][ei] = 1
        # print(ei, i)
    # build neural network
    n = NN(train_x, input_y, test_x, test_y, 'GradientDescentOptimizer',
           sample_size, config, learning_rate=0.05)
    # train
    n.train1()
    # test
    n.test()
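For reference, readFile expects each line of data.txt (and data_test.txt) to contain three space-separated feature values followed by an integer class label (0 or 1). The snippet below writes a tiny file in that format; the numbers are hypothetical placeholders, not the author's dataset.

# Write a tiny hypothetical data.txt (placeholder values, not the author's data):
# each line = three float features + one integer class label (0 or 1)
sample_lines = [
    "0.5 1.2 0.3 1",
    "0.9 0.4 2.1 0",
    "1.7 0.8 0.6 1",
]
with open('./data.txt', 'w') as f:
    f.write('\n'.join(sample_lines) + '\n')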
2. The neural network class:
#code by WangJianing
#email:851019059@qq.com or lygwjn@126.com
#time:2018.11.24
import tensorflow as tf
import numpy as np

class NN(object):
    """A fully connected 3-4-2 BP neural network."""
    def __init__(self, train_x, train_y, test_x, test_y, optimize,
                 sample_size, config, learning_rate=0.05):
        super(NN, self).__init__()
        self.train_x = tf.to_float(train_x, name='ToFloat1')
        self.train_y = tf.to_float(train_y, name='ToFloat2')
        self.test_x = tf.to_float(test_x, name='ToFloat3')
        self.test_y = tf.to_float(test_y, name='ToFloat4')
        self.learning_rate = learning_rate
        self.optimize = optimize
        self.sess = tf.Session()
        self.sample_size = sample_size
        self.config = config
        self.para = [[], [], [], [], 0]
        self.bildGraph()
        # self.train()

    # build the computation graph (for training)
    def bildGraph(self):
        self.parameter_op()
        self.towards_op()
        self.loss_op()
        self.backwords_op()
        # self.test_towords()
        self.init_op()

    # build the evaluation/test computation graph
    def testBuildGraph(self):
        self.parameter_op()
        self.towards_op()

    # create the parameter-initialization nodes
    def parameter_op(self):
        self.weight1 = tf.Variable(tf.random_normal([4, 3], stddev=0.03),
                                   dtype=tf.float32, name='weight1')
        self.bias1 = tf.Variable(tf.random_normal([4, 1]), dtype=tf.float32, name='bias1')
        self.weight2 = tf.Variable(tf.random_normal([2, 4], stddev=0.03),
                                   dtype=tf.float32, name='weight2')
        self.bias2 = tf.Variable(tf.random_normal([2, 1]), dtype=tf.float32, name='bias2')
        self.input_xx = tf.Variable(self.train_x, name='xx1')
        self.input_xx_test = tf.Variable(self.test_x, name='xx3')
        self.input_yy = tf.Variable(self.train_y, name='xx2')

    # replicate the column vector v `size` times and stack the copies,
    # so a per-layer bias can be added to every sample of the batch
    def appendVector(self, v, size, kind):
        _v = tf.transpose(v)[0]
        # print('_v=', _v)
        new_v = []
        if kind == 0:
            for i in range(size):
                new_v.append(_v)
            self.bias1_train = tf.Variable(new_v, dtype=tf.float32, name='bias1_train')
            self.bias1_train = tf.transpose(self.bias1_train)
        elif kind == 1:
            for i in range(size):
                new_v.append(_v)
            self.bias2_train = tf.Variable(new_v, dtype=tf.float32, name='bias2_train')
            self.bias2_train = tf.transpose(self.bias2_train)
        elif kind == 2:
            for i in range(size):
                new_v.append(_v)
            self.bias1_test = tf.Variable(new_v, dtype=tf.float32, name='bias1_test')
            self.bias1_test = tf.transpose(self.bias1_test)
        elif kind == 3:
            for i in range(size):
                new_v.append(_v)
            self.bias2_test = tf.Variable(new_v, dtype=tf.float32, name='bias2_test')
            self.bias2_test = tf.transpose(self.bias2_test)

    # forward propagation
    def towards_op(self):
        self.m1 = tf.matmul(self.weight1, self.input_xx, name='matmul1')
        # print('m1=', self.m1)
        self.appendVector(self.bias1, self.sample_size[0], 0)
        # print('self.bias1_train=', self.bias1_train)
        self.z1 = tf.add(self.m1, self.bias1_train, name='z1')
        self.a1 = tf.nn.sigmoid(self.z1, name='a1')
        self.appendVector(self.bias2, self.sample_size[0], 1)
        self.z2 = tf.add(tf.matmul(self.weight2, self.a1, name='matmul2'),
                         self.bias2_train, name='z2')
        self.a2 = tf.transpose(tf.nn.softmax(tf.transpose(self.z2, [1, 0]), name='a2'), [1, 0])

    # forward propagation at test time (uses the trained parameters in self.para)
    def test_towords(self):
        self.t_m1 = tf.matmul(self.para[0], self.input_xx_test, name='matmul3')
        self.appendVector(self.para[2], self.sample_size[1], 2)
        self.t_z1 = tf.add(self.t_m1, self.bias1_test, name='z1')
        self.t_a1 = tf.nn.sigmoid(self.t_z1, name='a1')
        self.appendVector(self.para[3], self.sample_size[1], 3)
        self.t_z2 = tf.add(tf.matmul(self.para[1], self.t_a1, name='matmul4'),
                           self.bias2_test, name='z2')
        self.t_a2 = tf.transpose(tf.nn.softmax(tf.transpose(self.t_z2, [1, 0]), name='a2'), [1, 0])

    # loss function
    def loss_op(self):
        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=self.train_y, logits=self.a2))
        self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)

    # backpropagation: one gradient-descent step on the loss
    def backwords_op(self):
        self.train = self.optimizer.minimize(self.loss)

    # initialize all global variables
    def init_op(self):
        self.init_op = tf.global_variables_initializer()

    # training
    def train1(self):
        with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                per_process_gpu_memory_fraction=0.333))) as sess:
            sess.run(self.init_op)
            for i in range(10):
                sess.run(self.train)
                # snapshot the current parameters and loss
                self.para = [sess.run(self.weight1), sess.run(self.weight2),
                             sess.run(self.bias1), sess.run(self.bias2),
                             sess.run(self.loss)]
                print("==========step", i, "==========")
                print("weight1:\n", self.para[0], "\nb1:\n", self.para[2])
                print("\nweight2:\n", self.para[1], "\nb2:\n", self.para[3])
                print("\nloss=", self.para[4])

    # testing
    def test(self):
        self.test_towords()
        with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                per_process_gpu_memory_fraction=0.333))) as sess:
            sess.run(tf.global_variables_initializer())
            sess.run([self.bias1_test, self.bias2_test])
            # probability of each class label for every sample
            predict_proba = sess.run(self.t_a2)
            predict_proba = np.transpose(predict_proba)
            print('\npredict_proba=', predict_proba)
            # predicted class label (0 or 1) for every sample
            predict_value = np.argmax(predict_proba, axis=1)
            print('\npredict_value=', predict_value)
            # compute the accuracy:
            # accuracy = 0
            # # print(test_y[0][0])
            # for ei, i in enumerate(predict_value):
            #     if i == self.test_y[0][ei]:
            #         accuracy += 1
            # accuracy /= sample_size
            # print('\naccuracy=', accuracy)
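The accuracy computation at the end of test() is commented out, and as written it would fail: sample_size is a list, and self.test_y is a tensor that cannot be indexed like a NumPy array. A working sketch of the same idea, assuming it is placed inside test() where predict_value and sess are defined, could be:

            # sketch of the commented-out accuracy check (my version, assumes
            # predict_value and sess from test(); self.test_y is the 1 x N label tensor)
            labels = np.asarray(sess.run(self.test_y)).reshape(-1)  # flatten to length N
            accuracy = np.mean(predict_value == labels)             # fraction correct
            print('\naccuracy=', accuracy)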
Read through the program carefully and try running it on your own PC. If you use your own dataset, modify the corresponding hyperparameters in the program (the learning rate, the number of neurons in each layer, the number of iterations in train1, the parameter-initialization scheme, the optimization strategy, the loss function, and so on).
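For example, to train with a smaller step size (a hypothetical value, chosen only for illustration), only the constructor call in main needs to change; the iteration count lives in train1's for i in range(10) loop:

# hypothetical tweak: a smaller learning rate when constructing the network
n = NN(train_x, input_y, test_x, test_y, 'GradientDescentOptimizer',
       sample_size, config, learning_rate=0.01)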
This blog records the steps of my learning and shares the latest technology. Thank you very much for reading; the blog will be updated continuously, and I hope it can be of help to you technically.