Hidden Markov Models - Viterbi Algorithm
import numpy as np


def viterbi(mood_sequence, priors, transmission_probs, emission_probs):
    n = len(mood_sequence)
    # weather_matrix[i] holds the best (highest) probability of each weather state (S, R) on day i
    weather_matrix = np.zeros((n, 2))
    # history[i] holds the back-pointers: the best previous state for (S, R) on day i
    history = [(None, None)]
    for i, mood in enumerate(mood_sequence):
        if i == 0:
            # Initialization: prior * emission probability of the observed mood
            weather_matrix[i] = priors['s'] * emission_probs['s' + mood], priors['r'] * emission_probs['r' + mood]
        else:
            ss, sr, rs, rr = transmission_probs['ss'], transmission_probs['sr'], transmission_probs['rs'], transmission_probs['rr']
            s, r = weather_matrix[i - 1]
            # Candidate scores for reaching S (resp. R) today from S or R yesterday
            S_probs, R_probs = np.array([s * ss, r * rs]), np.array([s * sr, r * rr])
            prev_S = np.argmax(S_probs).item()
            prev_R = np.argmax(R_probs).item()
            prev = "SR"[prev_S], "SR"[prev_R]
            history.append(prev)
            # Keep the best score for each state, weighted by today's emission probability
            prob_S = S_probs.max() * emission_probs['s' + mood]
            prob_R = R_probs.max() * emission_probs['r' + mood]
            weather_matrix[i] = prob_S, prob_R
    # Backtrace: start from the most probable final state and follow the back-pointers
    final_prob, previous = weather_matrix[-1].max(), "SR"[weather_matrix[-1].argmax()]
    weather_sequence = [previous]
    for i in range(n - 2, -1, -1):
        previous = history[i + 1]["SR".index(previous)]
        weather_sequence.insert(0, previous)
    return "Most Probable Sequence is {} with probability {}".format(''.join(weather_sequence), final_prob)


def main():
    priors = {'s': 2/3, 'r': 1/3}
    transmission_probs = {'ss': 8/10, 'sr': 2/10, 'rs': 2/5, 'rr': 3/5}
    emission_probs = {'sh': 8/10, 'sg': 2/10, 'rh': 2/5, 'rg': 3/5}
    print(viterbi('hhgggh', priors, transmission_probs, emission_probs))


if __name__ == "__main__":
    main()
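As a sanity check (not part of the original gist), the result of viterbi can be compared against a brute-force search over all 2^6 candidate weather sequences; the helper below is a minimal sketch that assumes the same dictionary key conventions used in main().

from itertools import product

def brute_force(mood_sequence, priors, transmission_probs, emission_probs):
    # Score every candidate weather sequence directly:
    # P(weather, moods) = prior * product of transition probs * product of emission probs
    best_seq, best_prob = None, 0.0
    for weather in product('sr', repeat=len(mood_sequence)):
        prob = priors[weather[0]] * emission_probs[weather[0] + mood_sequence[0]]
        for prev, cur, mood in zip(weather, weather[1:], mood_sequence[1:]):
            prob *= transmission_probs[prev + cur] * emission_probs[cur + mood]
        if prob > best_prob:
            best_seq, best_prob = weather, prob
    return ''.join(best_seq).upper(), best_prob

Calling brute_force('hhgggh', priors, transmission_probs, emission_probs) with the parameters from main() should return the same sequence and probability that viterbi reports.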
The HMM logic for this Sunny (S), Rainy (R), Happy (H), Grumpy (G) program is:
Given:
1. Priors: P(S), P(R)
2. Transmission probabilities: P(S|S), P(S|R), P(R|S), P(R|R)
3. Emission probabilities: P(H|S), P(G|S), P(H|R), P(G|R)
Also given: the observed mood sequence (over H, G) is HHGGGH.
Find: the most probable weather sequence over S, R.
We fill a weather matrix that holds, for each day, the best score (highest probability so far) of the weather being S or R; the recurrence is sketched below.
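As a sketch of the scoring step (using V_i(S), V_i(R) for the day-i entries of the weather matrix and o_i for the day-i mood, notation introduced here rather than taken from the code), the initialization and recurrence are:

V_1(S) = P(S) \cdot P(o_1 \mid S), \qquad V_1(R) = P(R) \cdot P(o_1 \mid R)
V_i(S) = \max\{ V_{i-1}(S) \cdot P(S \mid S),\ V_{i-1}(R) \cdot P(S \mid R) \} \cdot P(o_i \mid S)
V_i(R) = \max\{ V_{i-1}(S) \cdot P(R \mid S),\ V_{i-1}(R) \cdot P(R \mid R) \} \cdot P(o_i \mid R)

For the given numbers and the first observed mood H, this gives V_1(S) = (2/3)(8/10) = 8/15 ≈ 0.53 and V_1(R) = (1/3)(2/5) = 2/15 ≈ 0.13.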
After filling this matrix, we backtrace the best_previous_state at each step, starting from the last day, to recover the most likely weather sequence (see the formula sketch below).
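Stated as a formula (with ptr_{i+1} denoting the back-pointer stored in history[i+1], notation introduced here for illustration), the backtrace is:

x_n^* = \arg\max_{k \in \{S, R\}} V_n(k), \qquad x_i^* = \mathrm{ptr}_{i+1}(x_{i+1}^*) \quad \text{for } i = n-1, \dots, 1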