import Pkg
Pkg.activate(temp=true)

@info "Installing Ipopt, Optimization, OptimizationMOI, ADNLPModels, NLPModelsIpopt, JuMP and MathOptInterface. Output is suppressed."
Pkg.add([
    Pkg.PackageSpec(name="Ipopt", version="1.1.0"),
    Pkg.PackageSpec(name="Optimization", version="3.10.0"),
    Pkg.PackageSpec(name="OptimizationMOI", version="0.1.5"),
    Pkg.PackageSpec(name="ADNLPModels", version="0.4.0"),
    Pkg.PackageSpec(name="NLPModelsIpopt", version="0.10.0"),
    Pkg.PackageSpec(name="JuMP", version="1.6.0"),
    Pkg.PackageSpec(name="MathOptInterface", version="1.11.2")
], io=devnull)
Pkg.status()

@info "Loading OptimizationMOI code..." |
|
module CodeOptimizationJL |
|
|
|
import Optimization, OptimizationMOI |
|
import Ipopt |
|
|
|
const AV{T} = AbstractVector{T} |
|
|
|
function model_constraints!(out::AV{<:Real}, u::AV{<:Real}, data) |
|
# Model parameters |
|
dt, a, b = u |
|
out[1] = a - 1/dt # Must be < 0 |
|
end |
|
|
|
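# The recursion below reads as an Euler-type discretization of a GARCH(1,1)-style
# variance process:
#     variance[t+1] = (1 - dt*a) * variance[t] + dt * data[t]^2 + dt * b
# with the initial variance fixed to 1.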
function model_variance(u::AV{T}, data::AV{<:Real}) where T<:Real
    dt, a, b = u # Model parameters
    variance = zeros(T, length(data))
    variance[1] = one(T)
    for t in 1:(length(data) - 1)
        variance[t+1] = (1 - dt * a) * variance[t] + dt * data[t]^2 + dt * b
    end
    variance
end

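# model_loss is the negative average Gaussian log-likelihood of the data given
# the variance path:
#     loss(u) = -(1/N) * sum_t [ -(log(2π) + log(σ²_t) + r_t² / σ²_t) / 2 ]
# so minimizing it is maximum-likelihood estimation.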
function model_loss(u::AV{T}, data::AV{<:Real})::T where T<:Real
    variance = model_variance(u, data)
    N = length(data)
    -sum(
        -(log(2π) + log(var) + r^2 / var) / 2
        for (r, var) in zip(data, variance)
    ) / N
end

function model_fit(u0::AV{T}, data::AV{<:Real}) where T<:Real
    func = Optimization.OptimizationFunction(
        model_loss, Optimization.AutoForwardDiff(),
        cons=model_constraints!
    )
    prob = Optimization.OptimizationProblem(
        func, u0, data,
        # 0 < dt < 1 && 1 < a < Inf && 0 < b < Inf
        lb=T[0.0, 1.0, 0.0], ub=T[1.0, Inf, Inf],
        #    ^dt  ^a   ^b         ^dt  ^a   ^b <= model parameters
        lcons=T[-Inf], ucons=T[0.0] # a - 1/dt <= 0
    )
    sol = Optimization.solve(prob, Ipopt.Optimizer())
    sol.u
end

end # module CodeOptimizationJL

@info "Loading JuMP.jl code..." |
|
module CodeJuMP |
|
|
|
using JuMP |
|
import MathOptInterface as MOI |
|
import Ipopt |
|
|
|
const AV{T} = AbstractVector{T} |
|
|
|
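# Same variance recursion as in CodeOptimizationJL, but built symbolically:
# each step is registered on the JuMP model as a NonlinearExpression, so
# nothing is evaluated until the model is solved or queried.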
function model_variance(u::AV, data::AV{<:Real}, model)
    dt, a, b = u
    variance = Vector{JuMP.NonlinearExpression}(undef, length(data))
    variance[1] = @NLexpression(model, 1.0)
    for t in 1:(length(data) - 1)
        variance[t+1] = @NLexpression(
            model,
            (1 - dt * a) * variance[t] + dt * data[t]^2 + dt * b
        )
    end
    variance
end

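# When `u` holds plain numbers (rather than JuMP variables), the expressions
# above are constant, so `value.` can evaluate them on a throwaway model
# without solving anything.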
model_variance_value(u::AV, data::AV{<:Real}) =
    value.(model_variance(u, data, Model()))

function model_loss!(u::AV, data::AV{<:Real}, model)
    variance = model_variance(u, data, model)

    N = length(data)
    @NLobjective(
        model, Min,
        -sum(
            -(log(2π) + log(var) + r^2 / var) / 2
            for (r, var) in zip(data, variance)
        ) / N
    )
end

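# Getting a plain numeric loss value out of a JuMP model (for the cross-checks
# in the driver section) requires evaluating the objective through an
# NLPEvaluator instead of solving the model; see the docs link below.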
function model_loss_value(u::AV{T}, data::AV{<:Real}) where T<:Real
    model = Model()
    @variable(model, dt)
    @variable(model, a)
    @variable(model, b)

    model_loss!([dt, a, b], data, model)

    # https://jump.dev/JuMP.jl/stable/manual/nlp/#Querying-derivatives-from-a-JuMP-model
    # Order the numeric point to match JuMP's internal variable indices.
    u_jump = zeros(T, 3)
    u_jump[JuMP.index(dt).value] = u[1]
    u_jump[JuMP.index(a).value] = u[2]
    u_jump[JuMP.index(b).value] = u[3]

    # Evaluate the objective at u_jump without solving the model.
    ev = JuMP.NLPEvaluator(model)
    MOI.initialize(ev, [:Grad])
    MOI.eval_objective(ev, u_jump)
end

function model_fit(u0::AV{T}, data::AV{<:Real}) where T<:Real
    model = JuMP.Model(Ipopt.Optimizer)
    @variable(model, 0 <= dt <= 1, start=u0[1])
    @variable(model, 1 <= a, start=u0[2])
    @variable(model, 0 <= b, start=u0[3])
    @NLconstraint(model, a <= 1 / dt)

    model_loss!([dt, a, b], data, model)

    optimize!(model)
    value.([dt, a, b])
end
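
# Note: a more defensive version would check `termination_status(model)` /
# `primal_status(model)` after `optimize!` before trusting `value.(...)`;
# that is omitted here to keep the example minimal.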

end # module CodeJuMP

@info "Loading NLPModelsIpopt.jl code..." |
|
module CodeADNLP |
|
|
|
using Statistics: mean |
|
import ADNLPModels, NLPModelsIpopt |
|
|
|
import ..CodeOptimizationJL: model_variance, model_loss |
|
|
|
const AV{T} = AbstractVector{T} |
|
|
|
function model_constraints(u::AV{<:Real}, data)
    dt, a, b = u # Model parameters
    [a - 1/dt] # Must be <= 0
end

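# ADNLPModels wraps plain Julia callables in an NLPModel and supplies
# derivatives via automatic differentiation; the positional constructor used
# below is ADNLPModel(f, x0, lvar, uvar, c, lcon, ucon).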
function model_fit(u0::AV{T}, data::AV{<:Real}) where T<:Real
    problem = ADNLPModels.ADNLPModel(
        u -> model_loss(u, data), # Objective
        u0, T[0.0, 1.0, 0.0], T[1.0, Inf, Inf], # Initial value & box bounds
        u -> model_constraints(u, data), # Constraints
        T[-Inf], T[0.0] # Constraint bounds: a - 1/dt <= 0
    )
    solver = NLPModelsIpopt.IpoptSolver(problem)
    stats = NLPModelsIpopt.solve!(solver, problem)
    stats.solution
end

end # module CodeADNLP

# ========== DRIVER CODE ==========

@info "Defining data"
data = [
    2.1217711584057386, -0.28350145551002465, 2.3593492969513004, 0.192856733601849, 0.4566485836385113, 1.332717934013979, -1.286716619379847, 0.9868669960185211, 2.2358674776395224, -2.7933975791568098,
    1.2555871497124622, 1.276879759908467, -0.8392016987911409, -1.1580875182201849, 0.33201646080578456, -0.17212553408696898, 1.1275285626369556, 0.23041139849229036, 1.648423577528424, 2.384823597473343,
    -0.4005518932539747, -1.117737311211693, -0.9490152960583265, -1.1454539355078672, 1.4158585811404159, -0.18926972177257692, -0.2867541528181491, -1.2077459688543788, -0.6397173049620141, 0.66147783407023,
    0.049805188778543466, 0.902540117368457, -0.7018417933284938, 0.47342354473843684, 1.2620345361591596, -1.1483844812087018, -0.06487285080802752, 0.39020117013487715, -0.38454491504165356, 1.5125786171885645,
    -0.6751768274451174, 0.490916740658628, 0.012872300530924086, 0.46532447715746716, 0.34734421531357157, 0.3830452463549559, -0.8730874028738718, 0.4333151627834603, -0.40396180775692375, 2.0794821773418497,
    -0.5392735774960918, 0.6519326323752113, -1.4844713145398716, 0.3688828625691108, 1.010912990717231, 0.5018274939956874, 0.36656889279915833, -0.11403975693239479, -0.6460314660359935, -0.41997005020823147,
    0.9652752515820495, -0.37375868692702047, -0.5780729659197872, 2.642742798278919, 0.5076984117208074, -0.4906395089461916, -1.804352047187329, -0.8596663844837792, -0.7510485548262176, -0.07922589350581195,
    1.7201304839487317, 0.9024493222130577, -1.8216089665357902, 1.3929269238775426, -0.08410752079538407, 0.6423068180438288, 0.6615201016351212, 0.18546977816594887, -0.717521690742993, -1.0224309324751113,
    1.7748350222721971, 0.1929546575877559, -0.1581871639724676, 0.20198379311238596, -0.6919373947349301, -0.9253274269423383, 0.549366272989534, -1.9302106783541606, 0.7197247279281573, -1.220334158468621,
    -0.9187468058921053, -2.1452607604834184, -2.1558650694862687, -0.9387913392336701, -0.676637835687265, -0.16621998352492198, 0.5637177022958897, -0.5258315560278541, 0.8413359958184765, -0.9096866525337141
];
# u0 = [0 < dt < 1, 1 < a < 1/dt, 0 < b < Inf]
u0 = [0.3, 2.3333333333333335, 0.33333333333333337]
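# Feasibility at the initial point: with dt = 0.3, 1/dt ≈ 3.33, so
# a ≈ 2.33 < 1/dt and all box bounds hold strictly.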
@info "Initial point" u0 |
|
|
|
@info "Model variances must be equal for OptimizationMOI, NLPModelsIpopt and JuMP. Code will crash if not." |
|
@assert CodeOptimizationJL.model_variance(u0, data) ≈ CodeADNLP.model_variance(u0, data) |
|
@assert CodeADNLP.model_variance(u0, data) ≈ CodeJuMP.model_variance_value(u0, data) |
|
|
|
@info "Model losses must be equal for OptimizationMOI, NLPModelsIpopt and JuMP. Code will crash if not." |
|
@assert CodeOptimizationJL.model_loss(u0, data) ≈ CodeADNLP.model_loss(u0, data) |
|
@assert CodeADNLP.model_loss(u0, data) ≈ CodeJuMP.model_loss_value(u0, data) |
|
|
|
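# Illustrative sanity print: record the shared objective value at the starting
# point before the three fits are attempted.
@info "Loss at u0" CodeOptimizationJL.model_loss(u0, data)
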
@info "Fitting with OptimizationMOI. This is expected to error out." |
|
try |
|
CodeOptimizationJL.model_fit(u0, data) |
|
catch e |
|
@error "OptimizationMOI.jl" e |
|
end |
|
|
|
@info "Fitting with NLPModelsIpopt. This is expected to error out." |
|
try |
|
CodeADNLP.model_fit(u0, data) |
|
catch e |
|
@error "NLPModelsIpopt.jl" e |
|
end |
|
|
|
@info "Fitting with JuMP. This is NOT expected to error out." |
|
u_opt = CodeJuMP.model_fit(u0, data) |
|
@show u_opt |
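
# Optional illustrative post-check: report whether the fitted parameters
# respect the constraints. Ipopt enforces them only up to its tolerance, so
# this is informational rather than an assertion.
dt_opt, a_opt, b_opt = u_opt
@info "Constraint check" dt_opt a_opt b_opt (a_opt <= 1/dt_opt)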