# Source: forum post dated 2025-07-20 15:27:38 (scrape artifacts removed).
# coding=utf-8
import random

import numpy as np
import matplotlib.pyplot as plt
 class ANN(): # 人工神经网络
 def __init__(self):
 self.lr = 1 # 学习率
 self.reg = 0 # 纠正率
 self.unify = True # 是否归一
 self.layers = None # 层数据
 self.datX = None
 self.datY = None
 self.minX = None
 self.minY = None
 self.maxX = None
 self.maxY = None
 self.numX = 0 # 输入层结点数
 self.numY = 0 # 输出层结点数
 self.act = None
 def normalize(a, amin, amax):
 if amin == None:
 amin, amax = a.min(), a.max() # 求最大最小值
 return (a-amin) / (amax-amin) # (矩阵元素-最小值)/(最大值-最小值)
 def rand_init(a, b):
 return (b - a) * random.random() + a
 def act_func(self, x, act="sigmoid"): # 激活函数
 if act == "sigmoid":
 return 1/(1+np.exp(-x))
 elif act == "tanh":
 return np.tanh(x)
 #elif act == "softmax":
 #return np.exp(x)/np.sum(np.exp(x), axis=1, keepdims=True)
 return "no: "+act
 def act_func_d(self, x, act="sigmoid"): # 激活函数的导数
 if act == "sigmoid":
 return x*(1-x)
 elif act == "tanh":
 return 1 - np.power(x, 2)
 #elif act == "softmax":
 #x[range(len(self.datX)), self.datY] -= 1
 return x
 return "no: "+act
 def _get_in_out(self, a, amin=None, amax=None):
 if type(a[0]) not in [list, np.ndarray]: # 一维变二维
 a = [[v] for v in a]
 if type(a)!= np.ndarray:
 a = np.array(a)
 if not self.unify:
 return a, amin, amax
 if amin is None:
 amin, amax = a.min(), a.max() # 求最大最小值
 return ANN.normalize(a, amin, amax), amin, amax
 def X(self, a, amin=None, amax=None): # get_input
 self.datX, self.minX, self.maxX = self._get_in_out(a, amin, amax)
 self.numX = len(self.datX[0])
 return self.datX
 def Y(self, a, amin=None, amax=None): # get_output
 self.datY, self.minY, self.maxY = self._get_in_out(a, amin, amax)
 self.numY = len(self.datY[0])
 return self.datY
 def setAct(self, *act):
 self.act = act
 # 'act':sigmoid
 # 输入:2  输出:1  隐藏:[3,2]
 #layers = [{"b":[[0,0,0]], "w":[[0, 0, 0], [0, 0, 0]]}, # 输入层:2[行]*(隐藏:3[行])
 #{"b":[[0,0]], "w":[[0, 0], [0, 0], [0, 0]]}, # 隐藏:3*(隐藏:2)
 #{"b":[[0]], "w":[[0, 0]]}] # 隐藏:2*(输出:1)
 #... # 输出层:1
 def init(self, *num_hide):
 if len(num_hide) == 0: # 默认一个中间结点
 num_hide = [1]
 if self.act and self.act[-1] == "softmax":
 self.datY = np.array([v[0] for v in self.datY]) # 展平
 self.numY = self.datY.max() + 1
 self.layers = []
 num_dat = [self.numX] + list(num_hide) + [self.numY]
 np.random.seed(0)
 for i in range(1, len(num_dat)):
 a = {}
 a["b"] = [[0 for j in range(num_dat)]] # 使用0值初始化b
 #a["b"] = [[ANN.rand_init(-1, 1) for j in range(num_dat)]] # 使用均匀分布初始化b
 a["w"] = np.random.randn(num_dat[i-1], num_dat) / np.sqrt(num_dat[i-1]) # 使用高斯分布初始化w
 #a["w"] = np.array([[ANN.rand_init(-1, 1) for k in range(num_dat)] for j in range(num_dat[i-1])]) # 使用均匀分布初始化w
 if self.act and i-1 < len(self.act) and self.act[i-1]:
 a['act'] = self.act[i-1]
 else:
 a['act'] = "sigmoid"
 self.layers.append(a)
 return self.layers
 def _predict(self, y1): # 预测,向前传播
 layers_data = [y1]
 for v in self.layers:
 val = y1.dot(v['w']) + v['b']
 y1 = self.act_func(val, v['act'])
 layers_data.append(y1)
 return layers_data
 def back_prop(self, layers, layers_data): # 向后传播
 L = layers_data[-1]
 nLen = len(layers) - 1
 err = self.datY - L # 计算最后的误差
 #ret = np.sqrt(np.sum(err**2))
 ret = np.mean(np.abs(err))
 for i in range(nLen, -1, -1): # 从后向前传导
 v = layers
 delta = err * self.act_func_d(L, v['act']) # 计算上一轮的增量
 L = layers_data
 err = delta.dot(v['w'].T)
 v['w'] += self.lr * (L.T.dot(delta) + self.reg*v['w'])
 v['b'] += self.lr * np.sum(delta, axis=0, keepdims=True)
 return ret
 def train(self, times=500, lr=1, reg=0):
 self.lr = -lr if self.act and self.act[-1] == "softmax" else lr
 self.reg = reg
 for i in range(times):
 dat = self._predict(self.datX)
 err = self.back_prop(self.layers, dat)
 if str(err) == 'nan':
 break
 if i % 500 == 0:
 print("误差: " + str(err))
 if err < 0.0001:
 break
 def predict(self, X): # 预测,利用训练的数据来拟合
 X, _, _ = self._get_in_out(X, self.minX, self.maxX)
 y1 = self._predict(X)[-1]
 if not self.unify:
 return y1
 return y1 * (self.maxY - self.minY) + self.minY # 反归一化
 def plot(self, X1, Y1): # X1:要测试的数据  Y1:正确答案
 plt.rcParams['figure.dpi'] = 70 # 分辨率
 plt.rcParams['figure.figsize'] = (4.0, 3.0) # 图像尺寸(点)
 plt.rcParams['font.sans-serif'] = ["Microsoft YaHei"] # 指定默认字体
 plt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题
 plt.rcParams['font.size'] = 12
 plt.subplots_adjust(left=0.22, bottom=0.21, right=0.96, top=0.88, wspace=None, hspace=None)
 P = self.predict(X1)
 P = [v[0] for v in P]
 plotY = plt.plot(X1, Y1,  'r', label='正确答案')
 plotP = plt.plot(X1, P, 'b--', label='拟合数据')
 plt.title("神经网络拟合测试")
 plt.xlabel("第n个数")
 plt.ylabel("检验值")
 plt.legend(loc='best', fontsize=11)
 plt.show()
 #----------------------------------------------
 def test(): # 测试拟合:abs(cos(x))
 def test_func(x):
 return np.abs(np.cos(x))
 X, Y = [], []
 for i in range(100):
 x = ANN.rand_init(-2*np.pi, 2*np.pi) # 随机生成-2π~2π之间的100个数字
 y = test_func(x)
 X.append([x])
 Y.append([y])
 ann = ANN()
 ann.X(X)
 ann.Y(Y)
 ann.init(10) # 隐藏层10个节点
 ann.train(300000, lr=0.1) # 训练次数,lr学习率
 # 开始预测
 X1, Y1 = [], []
 for i in range(-60, 60):
 x = i / 10
 y = test_func(x)
 X1.append([x])
 Y1.append([y])
 ann.plot(X1, Y1)
 test()