This article demonstrates a way to flexibly define a neural network structure in Python on top of numpy, shared here for your reference; the details follow.
With numpy you can define the network structure flexibly, and you also get to lean on numpy's powerful matrix operations!
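Two of those operations do essentially all of the heavy lifting in the implementation below: np.dot drives each layer's forward step, and np.outer produces each layer's weight gradient. A minimal sketch of the two (the layer sizes 3 and 4 are arbitrary, and the values are placeholders):

import numpy as np

a = np.zeros(3)                       # activations of a 3-node layer
w = np.random.normal(0, 0.1, (3, 4))  # weights into a 4-node layer
b = np.ones(4)                        # biases of the 4-node layer
z = np.dot(a, w) + b                  # forward step, shape (4,)
delta = np.zeros(4)                   # residual of the 4-node layer
grad_w = np.outer(a, delta)           # weight gradient, shape (3, 4)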
I. Usage
1). Define a three-layer neural network:
'''Example 1'''
nn = NeuralNetworks([3,4,2]) # define the network
nn.fit(X, y)                 # fit
print(nn.predict(X))         # predict
Notes:
  Input layer nodes: 3
  Hidden layer nodes: 4
  Output layer nodes: 2
2). Define a five-layer neural network:
'''Example 2'''
nn = NeuralNetworks([3,5,7,4,2]) # define the network
nn.fit(X, y)                     # fit
print(nn.predict(X))             # predict
Notes:
  Input layer nodes: 3
  Hidden layer 1 nodes: 5
  Hidden layer 2 nodes: 7
  Hidden layer 3 nodes: 4
  Output layer nodes: 2
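Whatever the depth, fit expects one row per sample: X needs as many columns as there are input nodes, and Y as many values per sample as there are output nodes. A quick sketch with made-up random data for the five-layer network above (the 10 samples are a placeholder choice):

import numpy as np

X = np.random.rand(10, 3) # 10 samples x 3 features (input layer size)
Y = np.random.rand(10, 2) # 10 targets x 2 values (output layer size)
nn = NeuralNetworks([3,5,7,4,2])
nn.fit(X, Y)
print(nn.predict(X)) # shape: (10, 2)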
II. Implementation
The implementation below is my own (@hhh5460) original work. The key point: dtype=object
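Why dtype=object? Adjacent layers have different node counts, so the per-layer vectors and weight matrices all have different shapes and cannot be stacked into one ordinary numeric ndarray. An object array sidesteps this: each slot simply holds a reference to an arbitrarily shaped array. A quick demonstration of the trick on its own:

import numpy as np

n = np.array([3, 4, 2])            # node counts of a three-layer network
w = np.empty(n.size, dtype=object) # one slot per layer, each initially None
w[0] = np.zeros((3, 4))            # differently shaped matrices can
w[1] = np.zeros((4, 2))            # coexist inside the same array
print(w[0].shape, w[1].shape)      # (3, 4) (4, 2) -- the last slot stays unused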
import numpy as np

class NeuralNetworks(object):
    ''''''
    def __init__(self, n_layers=None, active_type=None, n_iter=10000, error=0.05, alpha=0.5, lamda=0.4):
        '''Build the network skeleton'''
        # Node count of each layer (vector)
        self.n = np.array(n_layers) # n_layers must be a list, e.g. [3,4,2] or n_layers=[3,4,2]
        self.size = self.n.size # total number of layers
        # Layers (vectors)
        self.z = np.empty(self.size, dtype=object) # placeholders for now; dtype=object! likewise below
        self.a = np.empty(self.size, dtype=object)
        self.data_a = np.empty(self.size, dtype=object)
        # Biases (vectors)
        self.b = np.empty(self.size, dtype=object)
        self.delta_b = np.empty(self.size, dtype=object)
        # Weights (matrices)
        self.w = np.empty(self.size, dtype=object)
        self.delta_w = np.empty(self.size, dtype=object)
        # Fill them in
        for i in range(self.size):
            self.a[i] = np.zeros(self.n[i]) # all zeros
            self.z[i] = np.zeros(self.n[i]) # all zeros
            self.data_a[i] = np.zeros(self.n[i]) # all zeros
            if i < self.size - 1:
                self.b[i] = np.ones(self.n[i+1]) # all ones
                self.delta_b[i] = np.zeros(self.n[i+1]) # all zeros
                mu, sigma = 0, 0.1 # mean, standard deviation
                self.w[i] = np.random.normal(mu, sigma, (self.n[i], self.n[i+1])) # random normal initialization
                self.delta_w[i] = np.zeros((self.n[i], self.n[i+1])) # all zeros
The complete code below I typed out entirely by myself while working through the Stanford machine learning tutorial:
import numpy as np
'''
Reference: http://ufldl.stanford.edu/wiki/index.php/%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C
'''

class NeuralNetworks(object):
    ''''''
    def __init__(self, n_layers=None, active_type=None, n_iter=10000, error=0.05, alpha=0.5, lamda=0.4):
        '''Build the network skeleton'''
        self.n_iter = n_iter # number of iterations
        self.error = error # maximum tolerated error
        self.alpha = alpha # learning rate
        self.lamda = lamda # decay factor (misspelled on purpose: "lambda" is a Python keyword!)
        if n_layers is None:
            raise ValueError('The node count of every layer must be set!')
        elif not isinstance(n_layers, list):
            raise TypeError('n_layers must be a list, e.g. [3,4,2] or n_layers=[3,4,2]')
        # Node counts (vector)
        self.n = np.array(n_layers)
        self.size = self.n.size # total number of layers
        # Layers (vectors)
        self.a = np.empty(self.size, dtype=object) # placeholders for now; dtype=object! likewise below
        self.z = np.empty(self.size, dtype=object)
        # Biases (vectors)
        self.b = np.empty(self.size, dtype=object)
        self.delta_b = np.empty(self.size, dtype=object)
        # Weights (matrices)
        self.w = np.empty(self.size, dtype=object)
        self.delta_w = np.empty(self.size, dtype=object)
        # Residuals (vectors)
        self.data_a = np.empty(self.size, dtype=object)
        # Fill them in
        for i in range(self.size):
            self.a[i] = np.zeros(self.n[i]) # all zeros
            self.z[i] = np.zeros(self.n[i]) # all zeros
            self.data_a[i] = np.zeros(self.n[i]) # all zeros
            if i < self.size - 1:
                self.b[i] = np.ones(self.n[i+1]) # all ones
                self.delta_b[i] = np.zeros(self.n[i+1]) # all zeros
                mu, sigma = 0, 0.1 # mean, standard deviation
                self.w[i] = np.random.normal(mu, sigma, (self.n[i], self.n[i+1])) # random normal initialization
                self.delta_w[i] = np.zeros((self.n[i], self.n[i+1])) # all zeros
        # Activation functions
        self.active_functions = {
            'sigmoid': self.sigmoid,
            'tanh': self.tanh,
            'radb': self.radb,
            'line': self.line,
        }
        # Derivatives of the activation functions
        self.derivative_functions = {
            'sigmoid': self.sigmoid_d,
            'tanh': self.tanh_d,
            'radb': self.radb_d,
            'line': self.line_d,
        }
        if active_type is None:
            self.active_type = ['sigmoid'] * (self.size - 1) # default activation type
        else:
            self.active_type = active_type

    def sigmoid(self, z):
        z = np.clip(z, -600, 600) # guard against overflow in np.exp
        return 1.0 / (1.0 + np.exp(-z))

    def tanh(self, z):
        return (np.exp(z) - np.exp(-z)) / (np.exp(z) + np.exp(-z))

    def radb(self, z):
        return np.exp(-z * z)

    def line(self, z):
        return z

    def sigmoid_d(self, z):
        a = self.sigmoid(z) # the derivative is expressed via the activation value
        return a * (1.0 - a)

    def tanh_d(self, z):
        a = self.tanh(z) # the derivative is expressed via the activation value
        return 1.0 - a * a

    def radb_d(self, z):
        return -2.0 * z * np.exp(-z * z)

    def line_d(self, z):
        return np.ones(z.size) # all ones

    def forward(self, x):
        '''Forward propagation (online)'''
        # Run one sample x through the net, refreshing every z and a
        self.a[0] = x
        for i in range(self.size - 1):
            self.z[i+1] = np.dot(self.a[i], self.w[i]) + self.b[i]
            self.a[i+1] = self.active_functions[self.active_type[i]](self.z[i+1]) # apply the activation function

    def err(self, X, Y):
        '''Error'''
        last = self.size - 1
        err = 0.0
        for x, y in zip(X, Y):
            self.forward(x)
            err += 0.5 * np.sum((self.a[last] - y)**2)
        err /= X.shape[0]
        err += sum(np.sum(w**2) for w in self.w[:last]) # weight penalty term
        return err

    def backward(self, y):
        '''Backpropagation (online)'''
        last = self.size - 1
        # Run one sample y backwards, refreshing every delta_w and delta_b
        self.data_a[last] = -(y - self.a[last]) * self.derivative_functions[self.active_type[last-1]](self.z[last]) # derivative of the activation function
        for i in range(last-1, 0, -1): # down to layer 1, so every weight layer gets a residual
            self.data_a[i] = np.dot(self.w[i], self.data_a[i+1]) * self.derivative_functions[self.active_type[i-1]](self.z[i]) # derivative of the activation function
        # Compute and accumulate the partial derivatives of every layer
        for i in range(last):
            p_w = np.outer(self.a[i], self.data_a[i+1]) # outer product! thanks to numpy's power!
            p_b = self.data_a[i+1]
            # accumulate delta_w, delta_b
            self.delta_w[i] = self.delta_w[i] + p_w
            self.delta_b[i] = self.delta_b[i] + p_b

    def update(self, n_samples):
        '''Update the weight parameters'''
        last = self.size - 1
        for i in range(last):
            self.w[i] -= self.alpha * ((1/n_samples) * self.delta_w[i] + self.lamda * self.w[i])
            self.b[i] -= self.alpha * ((1/n_samples) * self.delta_b[i])
            # reset the accumulators for the next epoch
            self.delta_w[i] = np.zeros((self.n[i], self.n[i+1]))
            self.delta_b[i] = np.zeros(self.n[i+1])

    def fit(self, X, Y):
        '''Fit'''
        for i in range(self.n_iter):
            # pass every sample through, one at a time
            for x, y in zip(X, Y):
                self.forward(x)  # forward pass: refresh a, z
                self.backward(y) # backward pass: refresh delta_w, delta_b
            # then update w, b
            self.update(len(X))
            # compute the error
            err = self.err(X, Y)
            if err < self.error:
                break
            # print the error every 1000 iterations (too boring otherwise!)
            if i % 1000 == 0:
                print('iter: {}, error: {}'.format(i, err))

    def predict(self, X):
        '''Predict'''
        last = self.size - 1
        res = []
        for x in X:
            self.forward(x)
            res.append(self.a[last])
        return np.array(res)

if __name__ == '__main__':
    nn = NeuralNetworks([2,3,4,3,1], n_iter=5000, alpha=0.4, lamda=0.3, error=0.06) # define the network
    X = np.array([[0.,0.], # prepare the data (XOR)
                  [0.,1.],
                  [1.,0.],
                  [1.,1.]])
    y = np.array([0,1,1,0])
    nn.fit(X,y)          # fit
    print(nn.predict(X)) # predict
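The active_type parameter, left at its all-'sigmoid' default above, picks one activation per weight layer: it takes a list of size - 1 names drawn from the dictionary in __init__ ('sigmoid', 'tanh', 'radb', 'line'). A sketch of a mixed configuration for the same XOR network (the particular mix is an arbitrary example):

nn = NeuralNetworks([2,3,4,3,1], active_type=['tanh','tanh','tanh','sigmoid'],
                    n_iter=5000, alpha=0.4, lamda=0.3, error=0.06)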