# Covid model
cd(@__DIR__)
using Pkg
pkg"activate ."
# pkg"add Turing, NamedTupleTools, MonteCarloMeasurements, StatsPlots"
# pkg"dev Turing2MonteCarloMeasurements"
using Turing, Turing2MonteCarloMeasurements, NamedTupleTools, MonteCarloMeasurements
## Some helper functions stolen from Soss ======================================
particles(v::Vector{<:Real}) = Particles(v)
using IterTools
# Convert a vector of vectors (one per sample) to a vector of Particles
function particles(v::Vector{Vector{T}}) where {T}
    map(eachindex(v[1])) do j
        particles([x[j] for x in v])
    end
end
# Convert a vector of arrays (one per sample) to an array of Particles
function particles(v::Vector{Array{T,N}}) where {T,N}
    map(CartesianIndices(v[1])) do j
        particles([x[j] for x in v])
    end
end
# Convert a vector of named tuples (one per sample) to a named tuple of Particles
function particles(s::Vector{NamedTuple{vs,T}}) where {vs,T}
    nt = NamedTuple()
    for k in keys(s[1])
        nt = merge(nt, [k => particles(getproperty.(s, k))])
    end
    nt
end
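# Illustrative usage (the names below are examples, not from the original gist):
# the helpers above turn a vector of sampled named tuples into a single named
# tuple of Particles.
toy_draws = [(a = randn(), b = randn(2)) for _ in 1:100]
toy_particles = particles(toy_draws) # toy_particles.a is a Particles, toy_particles.b a Vector of Particles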
## ============================================================================
# An earlier, disabled draft of the same model using a non-centered
# parameterization, where zI, zIr, zR are standard-normal innovations that are
# scaled and shifted into the states. Kept for reference. (The loop below has
# been reordered so that R[k] is assigned before it is used.)
# Turing.@model model(Ir, p, ::Type{T}=Float64) where {T} = begin
#
#     N = length(Ir)
#     np = length(p)
#     I, R = ntuple(i -> Vector{T}(undef, N), 2)
#     zI  ~ MvNormal(zeros(N), ones(N)) # Innovations
#     zIr ~ MvNormal(zeros(N), ones(N)) # Innovations
#     zR  ~ MvNormal(zeros(N), ones(N)) # Innovations
#
#     nq = 1 # Assuming one lag for now
#     q = Vector{T}(undef, nq)
#     q[1] ~ Uniform(0.01, 0.99) # This is a bad prior, but something to get us started
#
#     # Distribution of the initial state
#     Ir[1] = zIr[1] # truncated(Normal(1,1), 0, Inf)
#     I[1]  = zI[1]  # Gamma(10)
#     R[1]  = zR[1]  # Normal(3,0.5) # A guess
#
#     σI  ~ Gamma(100) # These are all bad choices and have to be tuned with prior predictive checks
#     σIr ~ Gamma(50)
#     σR  ~ truncated(Normal(0, 0.03), 1e-6, Inf) # Step size of the random walk for R
#
#     if !isfinite(σI) || !isfinite(σIr) || !isfinite(σR)
#         @logpdf() = -Inf
#         return @namedtuple(I, Ir, R, σI, σIr, σR)
#     end
#     for k = 2:N
#         R[k]  = R[k-1] + σR*zR[k]
#         μI  = R[k] * sum(p[i]*I[k-i] for i = 1:min(np, k-1))
#         μIr = sum(q[i]*I[k-i] for i = 1:nq)
#         I[k]  = μI  + σI*zI[k]
#         Ir[k] = μIr + σIr*zIr[k]
#     end
#     @namedtuple(I, Ir, R, σI, σIr, σR) # These parameters will be saved
# end;
Turing.@model model(Ir, p, ::Type{T}=Float64) where {T} = begin
    N  = length(Ir)
    np = length(p)
    I, R = ntuple(i -> Vector{T}(undef, N), 2)
    nq = 1 # Assuming one lag for now
    q  = Vector{T}(undef, nq)
    q[1] ~ Uniform(0.01, 0.99) # This is a bad prior, but something to get us started
    # Distribution of the initial state
    Ir[1] ~ truncated(Normal(1, 1), 0, Inf)
    I[1]  ~ Gamma(10)
    R[1]  ~ Normal(3, 0.5) # A guess
    σI  ~ Gamma(100) # These are all bad choices and have to be tuned with prior predictive checks
    σIr ~ Gamma(50)
    σR  ~ truncated(Normal(0, 0.03), 1e-6, Inf) # Step size of the random walk for R
    if !isfinite(σI) || !isfinite(σIr) || !isfinite(σR)
        @logpdf() = -Inf
        return @namedtuple(I, Ir, R, σI, σIr, σR)
    end
    for k = 2:N
        R[k]  ~ Normal(R[k-1], σR) # R evolves as a random walk
        I[k]  ~ truncated(Normal(R[k] * sum(p[i]*I[k-i] for i = 1:min(np, k-1)), σI), 0, Inf) # New infections driven by recent infections
        Ir[k] ~ Normal(sum(q[i]*I[k-i] for i = 1:nq), σIr) # Reported infections lag true infections
    end
    @namedtuple(I, Ir, R, σI, σIr, σR) # These parameters will be saved
end;
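# The generative structure above, written out for k = 2:N:
#   R[k]  = R[k-1] + σR ε                                              (random walk for the reproduction number)
#   I[k]  = R[k] ⋅ Σ_{i=1}^{min(np,k-1)} p[i] I[k-i] + σI ε, truncated at 0   (new infections)
#   Ir[k] = Σ_{i=1}^{nq} q[i] I[k-i] + σIr ε                           (reported infections)
# where ε denotes independent standard-normal noise, p is the given weight
# vector over past infections, and q the inferred reporting weight.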
fake_Ir_data = round.(exp.(0.2 .* (1:30))) # Fake reported-infection counts growing exponentially over 30 days
p = rand(Dirichlet(4, 1)) # A random weight vector that sums to 1
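# p acts as a lag profile: p[i] weighs infections from i days ago.
# Quick illustrative check that the weights are valid:
@assert sum(p) ≈ 1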
m = model(fake_Ir_data, p)
# Draw some samples from the prior (technically p(all_priors | data) since the data is given;
# to actually draw also Ir from the prior, replace the data with missing values).
prior = [m() for _ in 1:500] |> particles
mcplot(prior.R, title="Draws from prior implication for R") |> display
mcplot(prior.I, title="Draws from prior implication for I") |> display
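# A sketch (untested, not in the original) of also drawing Ir from the prior,
# per the comment above: pass missing data so that Turing samples Ir as well.
# m_prior = model(Vector{Union{Missing,Float64}}(missing, length(fake_Ir_data)), p)
# prior_full = [m_prior() for _ in 1:500] |> particles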
@time chain = sample(m, NUTS(), 800) # Sample using Hamiltonian MC
# @time chain = sample(m, PG(100), 1000) # Sample using particle Gibbs
dc = describe(chain, digits=3, q=[0.1, 0.5, 0.9]) |> display
post = Particles(chain, crop=100); # Named post to avoid shadowing the weight vector p. These particles can be used as if they were real numbers, while internally representing the sampled posterior
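# Since the fields of post are MonteCarloMeasurements.Particles, posterior
# probabilities can be estimated directly from the raw samples. An illustrative
# example (not in the original): the probability that R is below 1 on the last day.
using Statistics
prob_R_below_1 = mean(post.R[end].particles .< 1)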
##
using StatsPlots
plot(post.R, title="Posterior R") |> display
plot(post.I, title="Posterior I") |> display
density(post.σR, title="Posterior σR") |> display
density(post.σI, title="Posterior σI") |> display
density(post.σIr, title="Posterior σIr") |> display