Skip to content

Instantly share code, notes, and snippets.

@hans
Created November 8, 2018 18:33
Show Gist options
  • Save hans/9a6ec1a5a6ad0684a39ef52ef1b88191 to your computer and use it in GitHub Desktop.
// WebPPL code for a proof-of-concept model of
// question-guided visual search.

// The candidate images, identified by index.
var images = [0, 1]

// Deterministic p(description | image): image i maps to descriptions[i].
// Each description records which boolean scene feature is active.
var descriptions = [
  {blocked_intent: true, missing_object: false},
  {blocked_intent: false, missing_object: true}
]

// Ground-truth question utterances, tokenized into word arrays.
var utterances = [
  "Why couldn't he reach the goal".split(" "),
  "Where did the ball go".split(" "),
]

// Word inventory for the noisy channel: every word that appears in
// any ground-truth utterance (lodash flatten, bundled with WebPPL).
var vocabulary = _.flatten(utterances)
// p(question | description): deterministically map a scene description
// to the question type raised by its active feature. Checks
// blocked_intent first, mirroring the original branch order; yields
// undefined if neither feature is set (never happens for the two
// descriptions defined above).
var drawQuestion = function(description) {
  return description.blocked_intent ? "blocked_intent"
       : description.missing_object ? "missing_object"
       : undefined
}
// Conditional p(utterance | question): a simple noisy channel over the
// ground-truth utterance for the given question type. Each word
// independently survives with probability 0.9, or is replaced by a
// uniform draw from the vocabulary with probability 0.1.
var drawUtterance = function(question) {
  // Noise up one ground-truth utterance word-by-word.
  var sampleNoisy = function(utterance) {
    // BUG FIX: the original omitted this `return`, so sampleNoisy
    // always evaluated to undefined (WebPPL follows JS semantics:
    // a function body with no `return` yields undefined).
    return map(function(actual_word) {
      if (flip(0.1)) {
        return uniformDraw(vocabulary);
      } else {
        return actual_word;
      }
    }, utterance)
  }
  // BUG FIX: both branches below were also missing `return`, so
  // drawUtterance returned undefined and the caller crashed on
  // `utterance.length`.
  if (question == "blocked_intent") {
    return sampleNoisy(utterances[0])
  } else if (question == "missing_object") {
    return sampleNoisy(utterances[1])
  }
}
// Infer a posterior p(image | w_1, w_2, ...) by enumeration: sample an
// image uniformly, generate its description, question, and noisy
// utterance, then condition on the utterance matching the observed
// word-sequence prefix.
var inferImage = function(wordSeq) {
  // BUG FIX: the original omitted `return` here, so inferImage
  // returned undefined instead of the inferred distribution.
  return Infer({method: "enumerate"}, function () {
    var image = uniformDraw(images)
    var description = descriptions[image]
    var question = drawQuestion(description)
    var utterance = drawUtterance(question)
    // The observed prefix cannot be longer than the sampled utterance.
    condition(utterance.length >= wordSeq.length)
    // Condition word-by-word on the prefix matching the observation.
    map2(function(utt, obs) {
      condition(utt == obs)
    }, utterance.slice(0, wordSeq.length), wordSeq)
    // BUG FIX: the model must `return` the quantity whose posterior
    // we want; a bare trailing expression yields undefined in WebPPL.
    return image
  })
}

// Posterior over images after hearing the single word "Why".
inferImage(["Why"])
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment