N-level Markov chain to randomly generate text
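An n-level chain keeps one transition table per context length, so generation can prefer the longest recent word sequence it has seen and fall back to shorter ones. As a rough illustration (the sample sentence is made up purely for this sketch), training a level-2 chain on a short string leaves tables shaped like this:
# Illustrative only: approximate contents of @memory after
#   MarkovChain.new(level: 2).train(raw: "the quick brown fox jumps")
# @memory[0] # => {["the"]=>["quick"], ["quick"]=>["brown"], ["brown"]=>["fox"]}
# @memory[1] # => {["the", "quick"]=>["brown"], ["quick", "brown"]=>["fox"], ["brown", "fox"]=>["jumps"]}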
# Rails-style blank?/present? helpers so strings, arrays, and nil
# can all be checked the same way.
class String
  def blank?
    empty?
  end

  def present?
    !blank?
  end
end

class Array
  def blank?
    empty?
  end

  def present?
    !blank?
  end
end

class NilClass
  def blank?
    true
  end

  def present?
    !blank?
  end
end
class MarkovChain
  attr_accessor :memory, :level

  # @memory[n] maps an (n + 1)-word key array to the list of words that
  # have followed that sequence in the training text.
  def initialize(level: 1)
    @level = level
    @memory = Array.new(level) { {} }
  end
  # Build the transition tables from a text file or a raw string.
  def train(path: "", raw: "")
    tokens = nil
    if path.present?
      tokens = File.read(path).split(" ")
    elsif raw.present?
      tokens = raw.split(" ")
    end
    (0...(tokens.length - @level)).each do |i|
      prev_key = nil
      prev_value = nil
      (0...@level).each do |level|
        value = tokens[i + level + 1]
        key = (prev_key.present? && prev_value.present?) ? prev_key + [prev_value] : [tokens[i]]
        # puts "i=#{i}, level=#{level}, key=#{key}, value=#{value}, memory=#{memory}"
        @memory[level][key] = [] if @memory[level][key].blank?
        # Accumulate every observed successor rather than overwriting the list.
        @memory[level][key] << value
        prev_key = key.dup
        prev_value = value.dup
      end
    end
  end
  # Pick the next word for the given state, preferring the longest context
  # seen in training and falling back to shorter ones.
  def next_word(curr_state = [])
    # puts "next word curr_state: #{curr_state}"
    if curr_state.blank?
      result = @memory[0].keys.sample.first
      # puts "level 0: #{result}"
      return result
    end
    level = @level - 1
    while level >= 0
      # Keys in @memory[level] are (level + 1) words long, so match against
      # the most recent (level + 1) words of the current state.
      candidates = @memory[level][curr_state.last(level + 1)]
      if candidates.present?
        # puts "level #{level + 1}: #{candidates}"
        return candidates.sample
      end
      level -= 1
    end
    # No context matched; fall back to a random starting word.
    @memory[0].keys.sample.first
  end
  # Generate a string of roughly `length` words, seeded with a random word.
  def babble(length: 2)
    result = next_word
    (0..length).each do
      # The current state is the last @level words generated so far.
      state = result.split(" ").last(@level)
      word = next_word(state)
      result += " " + word
    end
    result
  end
end
# Usage
o = MarkovChain.new(level: 3)
o.train(path: './training_data/obama.txt')
puts o.babble(length: 1000)
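If no training file is at hand, the same chain can be trained from an in-memory string through the raw: keyword instead of path:. A minimal sketch (the sample text below is made up for illustration):
# Minimal sketch: train from a raw string instead of a file.
chain = MarkovChain.new(level: 2)
chain.train(raw: "the quick brown fox jumps over the lazy dog " \
                 "and the quick brown fox naps under the old oak tree")
puts chain.babble(length: 10)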