Hidden Markov Model examples in JavaScript and Python
/* Example from page 22
const markovChain = {
  q: {
    property: 0.7,
    transitions: {
      q: 0.7,
      r: 0.1,
      s: 0.2,
    },
    emissions: {
      a: 0.8,
      b: 0.2,
    }
  },
  r: {
    property: 0.2,
    transitions: {
      q: 0.25,
      r: 0.5,
      s: 0.25,
    },
    emissions: {
      a: 0.4,
      b: 0.6,
    }
  },
  s: {
    property: 0.1,
    transitions: {
      q: 0.4,
      r: 0.4,
      s: 0.2,
    },
    emissions: {
      a: 0.1,
      b: 0.9,
    }
  },
};
*/
/* This example should always output 1 with the sequence 'aaa', or with any sequence composed only of 'a'
const markovChain = {
  q: {
    property: 1,
    transitions: {
      q: 1,
      r: 0,
    },
    emissions: {
      a: 1,
      b: 0,
    }
  },
  r: {
    property: 0,
    transitions: {
      q: 1,
      r: 0,
    },
    emissions: {
      a: 1,
      b: 0,
    }
  },
};
*/
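// Quick hand check of the claim above: with that deterministic chain the forward
// recursion never leaves q and every factor is 1:
//   α1(q) = π(q) * b_q(a) = 1 * 1 = 1
//   α(t+1)(q) = αt(q) * a_qq * b_q(a) = 1 * 1 * 1 = 1
// so for any all-'a' sequence, P = α_T(q) + α_T(r) = 1 + 0 = 1.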
const markovChain = {
  q: {
    property: 1, // initial state probability π(q)
    transitions: {
      q: 0.6,
      r: 0.4,
    },
    emissions: {
      a: 0.7,
      b: 0.3,
    }
  },
  r: {
    property: 0, // initial state probability π(r)
    transitions: {
      q: 0.3,
      r: 0.7,
    },
    emissions: {
      a: 0.4,
      b: 0.6,
    }
  },
};
const HMM = (chain, precision=-1) => {
  // Flatten the per-state transition/emission tables into a single "from->to" map, for logging.
  const getAssociativeProbabilities = key => Object.entries(chain).reduce((acc, [from, state]) => ({
    ...acc,
    ...Object.entries(state[key]).reduce((acc, [to, probability]) => ({
      ...acc,
      [`${from}->${to}`]: probability
    }), {})
  }), {});

  const states = Object.keys(chain);
  const symbols = ['a', 'b'];
  const probabilities = {
    initialState: Object.values(chain).map(({ property }) => property),
    transitions: getAssociativeProbabilities('transitions'),
    emissions: getAssociativeProbabilities('emissions')
  };
  const initialStates = Object.entries(chain).reduce((acc, [state, { property }]) => ({ ...acc, [state]: property }), {});

  console.log('S =', states);
  console.log('Σ =', symbols);
  console.log('Π =', probabilities.initialState);
  console.log('A =', probabilities.transitions);
  console.log('B =', probabilities.emissions);

  const getTransition = (left, right) => chain[left].transitions[right];
  const getEmission = (node, symbol) => chain[node].emissions[symbol];
  const withPrecision = (value) => precision > 0
    ? Math.round(value * Math.pow(10, precision)) / Math.pow(10, precision)
    : value;

  let reverse;
  const showDebug = (debug, key, value, i, join) => console.log(`${key}${(reverse ? states.length - i : i) + 1}`, '=', join(debug), '=', value);
  const sumReduce = {
    value: (a, b) => a + b,
    debug: values => values.join(' + '),
  };
  const maxReduce = {
    value: (a, b) => Math.max(a, b),
    debug: values => 'max(' + values.join(', ') + ')',
  };
  // Run the recursion over the whole sequence; sumReduce gives the forward/backward sums,
  // maxReduce gives the Viterbi maxima.
  const compute = (currentStates, sequence, reducer=sumReduce) => {
    const history = [];
    sequence.forEach((symbol, i) => {
      console.log();
      history.push(currentStates);
      let nextStates = { ...currentStates };
      Object.keys(currentStates).forEach(key => {
        const { debug, value } = states.reduce((acc, state) => {
          const source = reverse ? key : state;
          const target = reverse ? state : key;
          const emission = getEmission(target, symbol);
          const property = currentStates[state];
          const transition = getTransition(source, target);
          acc.value = reducer.value(acc.value, property * transition * emission);
          acc.debug.push(`(${property} * ${transition} * ${emission})`);
          return acc;
        }, { debug: [], value: 0 });
        const roundedValue = withPrecision(value);
        nextStates[key] = roundedValue;
        showDebug(debug, key, roundedValue, i + 1, reducer.debug);
      });
      currentStates = nextStates;
      console.log('---');
    });
    return {
      history: [ ...history, currentStates ],
      lastState: currentStates,
    };
  };
  const start = (name, sequence, init, run) => {
    console.log();
    let probability;
    let args;
    if (init) {
      args = init();
      console.log('---');
    }
    if (run) {
      const { value, debug: [ join, ...debug ] } = run(args);
      probability = withPrecision(value);
      console.log(`\n${name} probability of sequence "${sequence}" = ${join(debug)} =`, probability, '\n------');
    }
    return probability;
  };

  // First step of the forward (and Viterbi) recursion: α1(i) = π(i) * b_i(o1).
  const initForward = sequence => () => {
    reverse = false;
    let nextStates = { ...initialStates };
    const [ symbol, ...restSequence ] = sequence;
    Object.entries(initialStates).forEach(([key, property]) => {
      const emission = getEmission(key, symbol);
      const { debug, value } = {
        debug: [ `${property} * ${emission}` ],
        value: property * emission,
      };
      const roundedValue = withPrecision(value);
      nextStates[key] = roundedValue;
      showDebug(debug, key, roundedValue, 0, sumReduce.debug);
    });
    return { nextStates, restSequence };
  };
  return {
    // Forward algorithm: total probability of the sequence, summed over all state paths.
    forward: sequence => start('Forward', sequence, initForward(sequence), ({ nextStates, restSequence }) => {
      const { history, lastState } = compute(nextStates, restSequence);
      return Object.values(lastState).reduce((acc, property) => ({
        debug: [ ...acc.debug, property ],
        value: acc.value + property,
      }), { debug: [ sumReduce.debug ], value: 0 });
    }),
    // Backward algorithm: initialise β_T(i) = 1, recurse over the reversed tail of the
    // sequence, then fold in π(i) * b_i(o1).
    backward: sequence => start('Backward', sequence, () => {
      reverse = true;
      const currentStates = Object.keys(chain).reduce((acc, state) => ({ ...acc, [state]: 1 }), {});
      Object.entries(currentStates).forEach(([key, property]) => {
        console.log(`${key}${states.length + 1}`, '=', property);
      });
      return { currentStates };
    }, ({ currentStates }) => {
      const [ symbol, ...restSequence ] = sequence;
      const { history, lastState } = compute(currentStates, restSequence.reverse());
      return Object.entries(lastState).reduce((acc, [key, property]) => {
        const emission = getEmission(key, symbol);
        const initialState = initialStates[key];
        acc.debug.push(`(${property} * ${initialState} * ${emission})`);
        acc.value += property * initialState * emission;
        return acc;
      }, { debug: [ sumReduce.debug ], value: 0 });
    }),
    // Viterbi: same recursion as forward but with max instead of sum; the reported path
    // is the per-step argmax over the stored history (no backpointer backtracking).
    viterbi: sequence => start('Viterbi', sequence, initForward(sequence), ({ nextStates, restSequence }) => {
      const { history, lastState } = compute(nextStates, restSequence, maxReduce);
      const mostLikelyPath = history.map(state => {
        return Object.entries(state).reduce((acc, [key, property]) => property > acc[1] ? [key, property] : acc, ['', 0]);
      });
      console.log('Most likely path:', mostLikelyPath);
      return Object.values(lastState).reduce((acc, property) => ({
        debug: [ ...acc.debug, property ],
        value: maxReduce.value(acc.value, property),
      }), { debug: [ maxReduce.debug ], value: 0 });
    }),
  }
};
const { forward, backward, viterbi } = HMM(markovChain, 4);
forward('aab');
backward('aab');
viterbi('aab');
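// Hand check of the forward probability for 'aab' with the chain above:
//   α1(q) = 1 * 0.7 = 0.7                      α1(r) = 0 * 0.4 = 0
//   α2(q) = (0.7*0.6 + 0*0.3) * 0.7 = 0.294    α2(r) = (0.7*0.4 + 0*0.7) * 0.4 = 0.112
//   α3(q) = (0.294*0.6 + 0.112*0.3) * 0.3 = 0.063
//   α3(r) = (0.294*0.4 + 0.112*0.7) * 0.6 = 0.1176
// so P('aab') = 0.063 + 0.1176 = 0.1806, which forward('aab') should reproduce
// (up to the rounding applied by `precision`).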
# https://en.wikipedia.org/wiki/Forward%E2%80%93backward_algorithm#Python_example
def fwd_bkw(observations, states, start_prob, trans_prob, emm_prob, end_st):
    # Forward part of the algorithm
    fwd = []
    f_prev = {}
    for i, observation_i in enumerate(observations):
        f_curr = {}
        for st in states:
            if i == 0:
                # base case for the forward part
                prev_f_sum = start_prob[st]
            else:
                prev_f_sum = sum(f_prev[k] * trans_prob[k][st] for k in states)
            f_curr[st] = emm_prob[st][observation_i] * prev_f_sum
        fwd.append(f_curr)
        f_prev = f_curr
    p_fwd = sum(f_curr[k] * trans_prob[k][end_st] for k in states)

    # Backward part of the algorithm
    bkw = []
    b_prev = {}
    for i, observation_i_plus in enumerate(reversed(observations[1:] + (None,))):
        b_curr = {}
        for st in states:
            if i == 0:
                # base case for backward part
                b_curr[st] = trans_prob[st][end_st]
            else:
                b_curr[st] = sum(trans_prob[st][l] * emm_prob[l][observation_i_plus] * b_prev[l] for l in states)
        bkw.insert(0, b_curr)
        b_prev = b_curr
    p_bkw = sum(start_prob[l] * emm_prob[l][observations[0]] * b_curr[l] for l in states)

    # Merging the two parts
    posterior = []
    for i in range(len(observations)):
        posterior.append({st: fwd[i][st] * bkw[i][st] / p_fwd for st in states})

    assert p_fwd == p_bkw
    return fwd, bkw, posterior, p_fwd, p_bkw

states = ('q', 'r')
end_state = 'E'
observations = ('a', 'a', 'b')
start_probability = {'q': 1, 'r': 0}
transition_probability = {
    'q': {'q': 0.6, 'r': 0.4, 'E': 1},
    'r': {'q': 0.3, 'r': 0.7, 'E': 1},
}
emission_probability = {
    'q': {'a': 0.7, 'b': 0.3},
    'r': {'a': 0.4, 'b': 0.6},
}

def example():
    return fwd_bkw(observations, states, start_probability, transition_probability, emission_probability, end_state)

for line in example():
    print(line)
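# Expected result for the parameters above: the forward pass ends with
#   f3(q) = 0.3 * (0.294*0.6 + 0.112*0.3) = 0.063
#   f3(r) = 0.6 * (0.294*0.4 + 0.112*0.7) = 0.1176
# so p_fwd = 0.063 * 1 + 0.1176 * 1 = 0.1806. Since the pseudo end state 'E' is reached
# with probability 1 from both states, this matches the plain forward probability of the
# JavaScript version; p_bkw should agree up to floating-point rounding.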
// https://medium.com/@Ayra_Lux/hidden-markov-models-part-1-the-likelihood-problem-8dd1066a784e
// https://github.com/Mary62442/mary-markov/blob/master/index.js
// https://www.npmjs.com/package/mary-markov
const maryMarkov = require('mary-markov');

// The prob arrays are indexed by hidden state, in the order [q, r].
let hiddenStates = [
  { state: 'q', prob: [0.6, 0.4] },
  { state: 'r', prob: [0.3, 0.7] }
];
let observables = [
  { obs: 'a', prob: [0.7, 0.4] },
  { obs: 'b', prob: [0.3, 0.6] },
];
let hiddenInit = [1, 0];

let LeaHMModel = maryMarkov.HMM(hiddenStates, observables, hiddenInit);
let obSequence = ['a', 'a', 'b'];

let forwardProbability = LeaHMModel.forwardAlgorithm(obSequence);
console.log(forwardProbability);
let backwardProbability = LeaHMModel.backwardAlgorithm(obSequence);
console.log(backwardProbability);
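// If mary-markov implements the standard forward/backward recursions, the likelihood of
// ['a', 'a', 'b'] should come out around 0.1806 here too, matching the two implementations
// above; the exact shape of the objects it returns depends on the library.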