1
- using Setfield
2
- using DynamicPPL : DefaultContext, LikelihoodContext
3
- using DynamicPPL : DynamicPPL
4
- import . Optim
5
- import . Optim : optimize
6
- import .. ForwardDiff
7
- import NamedArrays
8
- import StatsBase
9
- import Printf
10
- import StatsAPI
11
-
1
+ module TuringOptimExt
2
+
3
+ if isdefined (Base, :get_extension )
4
+ import Turing
5
+ import Turing : Distributions, DynamicPPL, ForwardDiff, NamedArrays, Printf, Setfield, Statistics, StatsAPI, StatsBase
6
+ import Optim
7
+ else
8
+ import .. Turing
9
+ import .. Turing : Distributions, DynamicPPL, ForwardDiff, NamedArrays, Printf, Setfield, Statistics, StatsAPI, StatsBase
10
+ import .. Optim
11
+ end
12
12
13
13
"""
14
14
ModeResult{
@@ -23,7 +23,7 @@ A wrapper struct to store various results from a MAP or MLE estimation.
23
23
struct ModeResult{
24
24
V<: NamedArrays.NamedArray ,
25
25
O<: Optim.MultivariateOptimizationResults ,
26
- M<: OptimLogDensity
26
+ M<: Turing. OptimLogDensity
27
27
} <: StatsBase.StatisticalModel
28
28
" A vector with the resulting point estimates."
29
29
values:: V
@@ -57,10 +57,10 @@ function StatsBase.coeftable(m::ModeResult; level::Real=0.95)
57
57
estimates = m. values. array[:, 1 ]
58
58
stderrors = StatsBase. stderror (m)
59
59
zscore = estimates ./ stderrors
60
- p = map (z -> StatsAPI. pvalue (Normal (), z; tail= :both ), zscore)
60
+ p = map (z -> StatsAPI. pvalue (Distributions . Normal (), z; tail= :both ), zscore)
61
61
62
62
# Confidence interval (CI)
63
- q = quantile (Normal (), (1 + level) / 2 )
63
+ q = Statistics . quantile (Distributions . Normal (), (1 + level) / 2 )
64
64
ci_low = estimates .- q .* stderrors
65
65
ci_high = estimates .+ q .* stderrors
66
66
@@ -80,7 +80,7 @@ function StatsBase.informationmatrix(m::ModeResult; hessian_function=ForwardDiff
80
80
# Hessian is computed with respect to the untransformed parameters.
81
81
linked = DynamicPPL. istrans (m. f. varinfo)
82
82
if linked
83
- @set! m. f. varinfo = invlink!! (m. f. varinfo, m. f. model)
83
+ Setfield . @set! m. f. varinfo = DynamicPPL . invlink!! (m. f. varinfo, m. f. model)
84
84
end
85
85
86
86
# Calculate the Hessian.
@@ -90,7 +90,7 @@ function StatsBase.informationmatrix(m::ModeResult; hessian_function=ForwardDiff
90
90
91
91
# Link it back if we invlinked it.
92
92
if linked
93
- @set! m. f. varinfo = link!! (m. f. varinfo, m. f. model)
93
+ Setfield . @set! m. f. varinfo = DynamicPPL . link!! (m. f. varinfo, m. f. model)
94
94
end
95
95
96
96
return NamedArrays. NamedArray (info, (varnames, varnames))
@@ -126,18 +126,18 @@ mle = optimize(model, MLE())
126
126
mle = optimize(model, MLE(), NelderMead())
127
127
```
128
128
"""
129
# MLE with default optimizer: start from a random initial point and forward
# to the shared MLE driver. `options` configures Optim's stopping criteria.
function Optim.optimize(model::DynamicPPL.Model, ::Turing.MLE, options::Optim.Options=Optim.Options(); kwargs...)
    return _mle_optimize(model, options; kwargs...)
end
132
# MLE with user-supplied initial values (given in the constrained space);
# uses the default optimizer chosen by the shared driver.
function Optim.optimize(model::DynamicPPL.Model, ::Turing.MLE, init_vals::AbstractArray, options::Optim.Options=Optim.Options(); kwargs...)
    return _mle_optimize(model, init_vals, options; kwargs...)
end
135
# MLE with a user-supplied Optim optimizer (e.g. `NelderMead()`), starting
# from a random initial point.
function Optim.optimize(model::DynamicPPL.Model, ::Turing.MLE, optimizer::Optim.AbstractOptimizer, options::Optim.Options=Optim.Options(); kwargs...)
    return _mle_optimize(model, optimizer, options; kwargs...)
end
138
138
# MLE with both initial values and an explicit optimizer.
# NOTE(review): the `kwargs...` line and closing paren were hidden by a diff
# hunk cut; they are implied by the forwarded call below — confirm against
# the MAP twin of this method.
function Optim.optimize(
    model::DynamicPPL.Model,
    ::Turing.MLE,
    init_vals::AbstractArray,
    optimizer::Optim.AbstractOptimizer,
    options::Optim.Options=Optim.Options();
    kwargs...
)
    return _mle_optimize(model, init_vals, optimizer, options; kwargs...)
end
148
148
149
# Shared MLE entry point: build an OptimLogDensity that evaluates only the
# likelihood (via `LikelihoodContext`) and hand off to the generic driver.
function _mle_optimize(model::DynamicPPL.Model, args...; kwargs...)
    ctx = Turing.OptimizationContext(DynamicPPL.LikelihoodContext())
    return _optimize(model, Turing.OptimLogDensity(model, ctx), args...; kwargs...)
end
153
153
154
154
"""
@@ -172,18 +172,18 @@ map_est = optimize(model, MAP(), NelderMead())
172
172
```
173
173
"""
174
174
175
# MAP with default optimizer: start from a random initial point and forward
# to the shared MAP driver. `options` configures Optim's stopping criteria.
function Optim.optimize(model::DynamicPPL.Model, ::Turing.MAP, options::Optim.Options=Optim.Options(); kwargs...)
    return _map_optimize(model, options; kwargs...)
end
178
# MAP with user-supplied initial values (given in the constrained space);
# uses the default optimizer chosen by the shared driver.
function Optim.optimize(model::DynamicPPL.Model, ::Turing.MAP, init_vals::AbstractArray, options::Optim.Options=Optim.Options(); kwargs...)
    return _map_optimize(model, init_vals, options; kwargs...)
end
181
# MAP with a user-supplied Optim optimizer (e.g. `NelderMead()`), starting
# from a random initial point.
function Optim.optimize(model::DynamicPPL.Model, ::Turing.MAP, optimizer::Optim.AbstractOptimizer, options::Optim.Options=Optim.Options(); kwargs...)
    return _map_optimize(model, optimizer, options; kwargs...)
end
184
184
# MAP with both initial values and an explicit optimizer.
# NOTE(review): the `kwargs...` line and closing paren were hidden by a diff
# hunk cut; they are implied by the forwarded call below — confirm against
# the MLE twin of this method.
function Optim.optimize(
    model::DynamicPPL.Model,
    ::Turing.MAP,
    init_vals::AbstractArray,
    optimizer::Optim.AbstractOptimizer,
    options::Optim.Options=Optim.Options();
    kwargs...
)
    return _map_optimize(model, init_vals, optimizer, options; kwargs...)
end
194
194
195
# Shared MAP entry point: build an OptimLogDensity that evaluates the joint
# log density (likelihood + prior, via `DefaultContext`) and hand off to the
# generic driver.
function _map_optimize(model::DynamicPPL.Model, args...; kwargs...)
    ctx = Turing.OptimizationContext(DynamicPPL.DefaultContext())
    return _optimize(model, Turing.OptimLogDensity(model, ctx), args...; kwargs...)
end
199
199
200
200
"""
203
203
Estimate a mode, i.e., compute a MLE or MAP estimate.
204
204
"""
205
205
function _optimize (
206
- model:: Model ,
207
- f:: OptimLogDensity ,
206
+ model:: DynamicPPL. Model ,
207
+ f:: Turing. OptimLogDensity ,
208
208
optimizer:: Optim.AbstractOptimizer = Optim. LBFGS (),
209
209
args... ;
210
210
kwargs...
@@ -213,8 +213,8 @@ function _optimize(
213
213
end
214
214
215
215
function _optimize (
216
- model:: Model ,
217
- f:: OptimLogDensity ,
216
+ model:: DynamicPPL. Model ,
217
+ f:: Turing. OptimLogDensity ,
218
218
options:: Optim.Options = Optim. Options (),
219
219
args... ;
220
220
kwargs...
@@ -223,8 +223,8 @@ function _optimize(
223
223
end
224
224
225
225
function _optimize (
226
- model:: Model ,
227
- f:: OptimLogDensity ,
226
+ model:: DynamicPPL. Model ,
227
+ f:: Turing. OptimLogDensity ,
228
228
init_vals:: AbstractArray = DynamicPPL. getparams (f),
229
229
options:: Optim.Options = Optim. Options (),
230
230
args... ;
@@ -234,8 +234,8 @@ function _optimize(
234
234
end
235
235
236
236
function _optimize (
237
- model:: Model ,
238
- f:: OptimLogDensity ,
237
+ model:: DynamicPPL. Model ,
238
+ f:: Turing. OptimLogDensity ,
239
239
init_vals:: AbstractArray = DynamicPPL. getparams (f),
240
240
optimizer:: Optim.AbstractOptimizer = Optim. LBFGS (),
241
241
options:: Optim.Options = Optim. Options (),
@@ -244,8 +244,8 @@ function _optimize(
244
244
)
245
245
# Convert the initial values, since it is assumed that users provide them
246
246
# in the constrained space.
247
- @set! f. varinfo = DynamicPPL. unflatten (f. varinfo, init_vals)
248
- @set! f. varinfo = DynamicPPL. link!! (f. varinfo, model)
247
+ Setfield . @set! f. varinfo = DynamicPPL. unflatten (f. varinfo, init_vals)
248
+ Setfield . @set! f. varinfo = DynamicPPL. link!! (f. varinfo, model)
249
249
init_vals = DynamicPPL. getparams (f)
250
250
251
251
# Optimize!
@@ -258,10 +258,10 @@ function _optimize(
258
258
259
259
# Get the VarInfo at the MLE/MAP point, and run the model to ensure
260
260
# correct dimensionality.
261
- @set! f. varinfo = DynamicPPL. unflatten (f. varinfo, M. minimizer)
262
- @set! f. varinfo = invlink!! (f. varinfo, model)
261
+ Setfield . @set! f. varinfo = DynamicPPL. unflatten (f. varinfo, M. minimizer)
262
+ Setfield . @set! f. varinfo = DynamicPPL . invlink!! (f. varinfo, model)
263
263
vals = DynamicPPL. getparams (f)
264
- @set! f. varinfo = link!! (f. varinfo, model)
264
+ Setfield . @set! f. varinfo = DynamicPPL . link!! (f. varinfo, model)
265
265
266
266
# Make one transition to get the parameter names.
267
267
ts = [Turing. Inference. Transition (
@@ -275,3 +275,5 @@ function _optimize(
275
275
276
276
return ModeResult (vmat, M, - M. minimum, f)
277
277
end
278
+
279
+ end # module