Markov model
inputs = """red pepper pasta
garlic noodles
pizza
biryani
roti curry
flat rice noodles
hakka noodles
bhel puri
sushi
ramen"""
dishes = inputs.split("\n")
import random

# Build a toy sequence of 100 randomly chosen dishes to train on
sequence = []
for i in range(100):
    sequence.append(random.sample(dishes, 1)[0])
def markov(sequence):
    # Initialize an empty dictionary to store the transition probabilities
    transitions = {}
    # Initialize the current state to be the first event in the sequence
    current_state = sequence[0]
    # Loop through the sequence starting from the second event
    for event in sequence[1:]:
        # If the current state is not already in the dictionary, add it with an empty dictionary
        if current_state not in transitions:
            transitions[current_state] = {}
        # If the next event is not already in the dictionary for the current state, add it with count 1
        if event not in transitions[current_state]:
            transitions[current_state][event] = 1
        # If the next event is already in the dictionary for the current state, increment its count by 1
        else:
            transitions[current_state][event] += 1
        # Set the current state to be the next event
        current_state = event
    # Loop through the transition dictionary and normalize the counts to get probabilities
    for state in transitions:
        total_count = sum(transitions[state].values())
        for next_state in transitions[state]:
            transitions[state][next_state] /= total_count
    return transitions
likelihoods = markov(sequence)
next_dish_probs = likelihoods["garlic noodles"]
next_dish = max(next_dish_probs, key=next_dish_probs.get)
print(f"If you had garlic noodles, you will likely have {next_dish} next")