python實現隱馬爾科夫模型HMM

首頁 > 編程 > Python > 正文

python實現隱馬爾科夫模型HMM

2020-02-22 23:32:49
字體:
來源:轉載
供稿:網友

一份完全按照李航<<統計學習方法>>介紹的HMM代碼,供大家參考,具體內容如下

# coding=utf8
"""
Hidden Markov Model (HMM) implemented step-for-step after Li Hang's
"Statistical Learning Methods": plain and scaled forward/backward
algorithms, Viterbi decoding and Baum-Welch (EM) training.

Created on 2017-8-5. Many places could be condensed, but the code is
kept close to the textbook formulas on purpose.
@author: adzhua
"""

import numpy as np


class HMM(object):
    def __init__(self, A, B, pi):
        """
        A:  state transition probability matrix, shape (N, N)
        B:  observation emission probability matrix, shape (N, M)
        pi: initial state probability vector, length N
        """
        self.A = np.array(A)
        self.B = np.array(B)
        self.pi = np.array(pi)
        self.N = self.A.shape[0]  # number of hidden states
        self.M = self.B.shape[1]  # number of distinct observation symbols

    def printHMM(self):
        """Pretty-print the model parameters."""
        print("==================================================")
        print("HMM content: N =", self.N, ",M =", self.M)
        for i in range(self.N):
            if i == 0:
                print("hmm.A ", self.A[i, :], " hmm.B ", self.B[i, :])
            else:
                print("   ", self.A[i, :], "    ", self.B[i, :])
        print("hmm.pi", self.pi)
        print("==================================================")

    # NOTE(review): method name `forwar` (sic) kept for caller compatibility.
    def forwar(self, T, O, alpha, prob):
        """
        Plain forward algorithm.

        T:     length of the observation sequence
        O:     observation sequence (symbol indices)
        alpha: (T, N) work array, filled with forward probabilities
        prob:  one-element list/array; prob[0] receives P(O | lambda)
        """
        # initialisation: alpha_1(i) = pi_i * b_i(o_1)
        for i in range(self.N):
            alpha[0, i] = self.pi[i] * self.B[i, O[0]]

        # recursion
        for t in range(T - 1):
            for j in range(self.N):
                total = 0.0
                for i in range(self.N):
                    total += alpha[t, i] * self.A[i, j]
                alpha[t + 1, j] = total * self.B[j, O[t + 1]]

        # termination: P(O | lambda) = sum_i alpha_T(i)
        total = 0.0
        for i in range(self.N):
            total += alpha[T - 1, i]
        # BUG FIX: original did `prob[0] *= total`, which always leaves 0.0
        # when prob is initialised to [0.0]; assignment is what is meant.
        prob[0] = total

    def forwardWithScale(self, T, O, alpha, scale, prob):
        """
        Forward algorithm with per-step scaling to avoid underflow.

        prob[0] is *incremented* by log P(O | lambda), so callers must
        reset it first (BaumWelch does).
        """
        scale[0] = 0.0

        # initialisation
        for i in range(self.N):
            alpha[0, i] = self.pi[i] * self.B[i, O[0]]
            scale[0] += alpha[0, i]
        for i in range(self.N):
            alpha[0, i] /= scale[0]

        # recursion
        for t in range(T - 1):
            scale[t + 1] = 0.0
            for j in range(self.N):
                total = 0.0
                for i in range(self.N):
                    total += alpha[t, i] * self.A[i, j]
                alpha[t + 1, j] = total * self.B[j, O[t + 1]]
                scale[t + 1] += alpha[t + 1, j]
            for j in range(self.N):
                alpha[t + 1, j] /= scale[t + 1]

        # termination: log-likelihood is the sum of the log scale factors
        for t in range(T):
            prob[0] += np.log(scale[t])

    # NOTE(review): method name `back` kept for caller compatibility.
    def back(self, T, O, beta, prob):
        """
        Plain backward algorithm.

        T:    length of the observation sequence, len(O)
        O:    observation sequence
        beta: (T, N) work array
        prob: one-element list/array; prob[0] receives P(O | lambda)
        """
        # initialisation: beta_T(i) = 1
        for i in range(self.N):
            beta[T - 1, i] = 1.0

        # recursion, t = T-2, T-3, ..., 0
        for t in range(T - 2, -1, -1):
            for i in range(self.N):
                total = 0.0
                for j in range(self.N):
                    total += self.A[i, j] * self.B[j, O[t + 1]] * beta[t + 1, j]
                beta[t, i] = total

        # termination
        total = 0.0
        for i in range(self.N):
            total += self.pi[i] * self.B[i, O[0]] * beta[0, i]
        prob[0] = total

    def backwardWithScale(self, T, O, beta, scale):
        """Backward algorithm reusing the scale factors from the forward pass."""
        # initialisation
        for i in range(self.N):
            beta[T - 1, i] = 1.0

        # recursion
        for t in range(T - 2, -1, -1):
            for i in range(self.N):
                total = 0.0
                for j in range(self.N):
                    total += self.A[i, j] * self.B[j, O[t + 1]] * beta[t + 1, j]
                beta[t, i] = total / scale[t + 1]

    def viterbi(self, O):
        """
        Viterbi decoding.

        O: observation sequence
        Returns (max path probability, best state path as an int array).
        """
        T = len(O)
        # np.float was removed in NumPy >= 1.24; use plain dtypes.
        delta = np.zeros((T, self.N), float)
        phi = np.zeros((T, self.N), int)  # back-pointer table
        I = np.zeros(T, int)

        # initialisation
        for i in range(self.N):
            delta[0, i] = self.pi[i] * self.B[i, O[0]]
            phi[0, i] = 0

        # recursion
        for t in range(1, T):
            for i in range(self.N):
                scores = np.array([delta[t - 1, j] * self.A[j, i]
                                   for j in range(self.N)])
                delta[t, i] = self.B[i, O[t]] * scores.max()
                # BUG FIX: original assigned the argmax to the bare name
                # `phi`, clobbering the whole back-pointer table.
                phi[t, i] = scores.argmax()

        # termination
        prob = delta[T - 1, :].max()
        I[T - 1] = delta[T - 1, :].argmax()

        # BUG FIX: backtracking must index phi by (time, state); the
        # original used phi[I[t+1]].
        for t in range(T - 2, -1, -1):
            I[t] = phi[t + 1, I[t + 1]]

        return prob, I

    def computeGamma(self, T, alpha, beta, gamma):
        """gamma[t, i] = P(state i at time t | O, lambda) (book eq. for A's denominator)."""
        for t in range(T):
            # the normaliser does not depend on i, so compute it once per t
            denom = 0.0
            for j in range(self.N):
                denom += alpha[t, j] * beta[t, j]
            for i in range(self.N):
                gamma[t, i] = (alpha[t, i] * beta[t, i]) / denom

    def computeXi(self, T, O, alpha, beta, Xi):
        """Xi[t, i, j] = P(state i at t, state j at t+1 | O, lambda) (A's numerator)."""
        for t in range(T - 1):
            denom = 0.0
            for i in range(self.N):
                for j in range(self.N):
                    Xi[t, i, j] = (alpha[t, i] * self.A[i, j]
                                   * self.B[j, O[t + 1]] * beta[t + 1, j])
                    denom += Xi[t, i, j]
            for i in range(self.N):
                for j in range(self.N):
                    Xi[t, i, j] /= denom

    def BaumWelch(self, L, T, O, alpha, beta, gamma):
        """
        Baum-Welch (EM) training.

        L: number of observation sequences
        T: common length of each sequence
        O: list of L observation sequences
        alpha/beta/gamma: (T, N) work arrays supplied by the caller
        Updates self.pi, self.A and self.B in place.
        """
        DELTA = 0.01           # convergence threshold on the likelihood ratio
        iteration = 0          # renamed from `round` (shadowed a builtin)
        first_pass = True
        probf = [0.0]
        probprev = 0.0
        deltaprev = 10e-70     # tiny seed so the first ratio is huge

        xi = np.zeros((T, self.N, self.N))     # numerator accumulator for A
        # BUG FIX: original sized this (T,); only N entries are ever used.
        pi = np.zeros(self.N)                  # accumulator for initial probs

        denominatorA = np.zeros(self.N)
        denominatorB = np.zeros(self.N)
        numeratorA = np.zeros((self.N, self.N))
        numeratorB = np.zeros((self.N, self.M))
        scale = np.zeros(T)

        while True:
            probf[0] = 0.0

            # E-step: accumulate expected counts over all L sequences
            for l in range(L):
                self.forwardWithScale(T, O[l], alpha, scale, probf)
                self.backwardWithScale(T, O[l], beta, scale)
                self.computeGamma(T, alpha, beta, gamma)   # (t, i)
                self.computeXi(T, O[l], alpha, beta, xi)   # (t, i, j)

                for i in range(self.N):
                    pi[i] += gamma[0, i]
                    for t in range(T - 1):
                        denominatorA[i] += gamma[t, i]
                        denominatorB[i] += gamma[t, i]
                    denominatorB[i] += gamma[T - 1, i]

                    for j in range(self.N):
                        for t in range(T - 1):
                            numeratorA[i, j] += xi[t, i, j]

                    for k in range(self.M):  # M = number of observation symbols
                        for t in range(T):
                            if O[l][t] == k:
                                numeratorB[i, k] += gamma[t, i]

            # M-step: re-estimate pi, A, B with a small smoothing term
            for i in range(self.N):
                self.pi[i] = 0.001 / self.N + 0.999 * pi[i] / L

                for j in range(self.N):
                    self.A[i, j] = 0.001 / self.N + 0.999 * numeratorA[i, j] / denominatorA[i]
                    numeratorA[i, j] = 0.0

                for k in range(self.M):
                    self.B[i, k] = 0.001 / self.N + 0.999 * numeratorB[i, k] / denominatorB[i]
                    numeratorB[i, k] = 0.0

                # reset accumulators for the next EM iteration
                pi[i] = denominatorA[i] = denominatorB[i] = 0.0

            if first_pass:
                # no previous likelihood to compare against yet
                first_pass = False
                probprev = probf[0]
                continue

            delta = probf[0] - probprev
            ratio = delta / deltaprev
            probprev = probf[0]
            deltaprev = delta
            iteration += 1

            if ratio <= DELTA:
                print('num iteration: ', iteration)
                break


if __name__ == '__main__':
    print("python my HMM")

    # initial state distribution pi; transition matrix A;
    # emission matrix B; L training observation sequences
    pi = [0.5, 0.5]
    A = [[0.8125, 0.1875], [0.2, 0.8]]
    B = [[0.875, 0.125], [0.25, 0.75]]
    O = [
        [1, 0, 0, 1, 1, 0, 0, 0, 0],
        [1, 1, 0, 1, 0, 0, 1, 1, 0],
        [0, 0, 1, 1, 0, 0, 1, 1, 1],
    ]
    L = len(O)
    T = len(O[0])  # all sequences share the same length here

    hmm = HMM(A, B, pi)
    alpha = np.zeros((T, hmm.N))
    beta = np.zeros((T, hmm.N))
    gamma = np.zeros((T, hmm.N))

    # train, then print the fitted parameters
    hmm.BaumWelch(L, T, O, alpha, beta, gamma)
    hmm.printHMM()
發表評論 共有條評論
用戶名: 密碼:
驗證碼: 匿名發表
主站蜘蛛池模板: 二连浩特市| 玉林市| 思茅市| 五指山市| 文水县| 承德市| 卓尼县| 邹城市| 永丰县| 封丘县| 兴义市| 贡嘎县| 天峻县| 宜兰市| 赤峰市| 双流县| 阳原县| 甘泉县| 龙山县| 漾濞| 耒阳市| 洮南市| 清徐县| 廉江市| 桃园县| 璧山县| 澜沧| 马公市| 马鞍山市| 玛曲县| 宜兴市| 乌拉特中旗| 玛纳斯县| 涞源县| 扎囊县| 大化| 恩平市| 云浮市| 佛坪县| 阳江市| 博白县|