@@ -42,8 +42,8 @@ defined in an optimizer-dependent manner.

The current default AD choice is dependent on the number of parameters.
For <50 parameters both ForwardDiff.jl and Zygote.jl gradients are evaluated
- and the fastest is used. If both methods fail, the finite difference method
- is used as a fallback. For ≥50 parameters Zygote.jl is used.
+ and the fastest is used. If both methods fail, the finite difference method
+ is used as a fallback. For ≥50 parameters Zygote.jl is used.
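To make the selection heuristic concrete, here is a minimal sketch of the logic described above. The helper name `choose_ad`, the timing-based comparison, and the returned backend symbols are illustrative assumptions; the real selection lives inside `sciml_train` and differs in detail.

```julia
using ForwardDiff, Zygote

# `choose_ad` is a hypothetical helper name used only to illustrate the
# heuristic; it is not the code path sciml_train actually takes.
function choose_ad(loss, θ)
    length(θ) >= 50 && return :Zygote        # ≥50 parameters: use Zygote.jl
    # <50 parameters: evaluate both gradients and keep the faster backend
    t_fwd = try
        @elapsed ForwardDiff.gradient(loss, θ)
    catch
        Inf                                   # ForwardDiff.jl failed on this loss
    end
    t_rev = try
        @elapsed Zygote.gradient(loss, θ)
    catch
        Inf                                   # Zygote.jl failed on this loss
    end
    # If both AD backends fail, fall back to finite differencing
    isinf(t_fwd) && isinf(t_rev) && return :FiniteDiff
    return t_fwd <= t_rev ? :ForwardDiff : :Zygote
end

choose_ad(θ -> sum(abs2, θ), rand(10))        # 10 parameters: both are compared
```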

More refinements to the techniques are planned.

## Default Optimizer Choice
@@ -98,7 +98,7 @@ function sciml_train(loss, θ, opt=nothing, adtype=nothing, args...;
        error("Automatic optimizer determination requires deterministic loss functions (and no data) or maxiters must be specified.")
    end

-   if isempty(args) && deterministic
+   if isempty(args) && deterministic && lower_bounds === nothing && upper_bounds === nothing
        # If deterministic then ADAM -> finish with BFGS
        if maxiters === nothing
            res1 = GalacticOptim.solve(optprob, ADAM(0.01), args...; maxiters=300, kwargs...)
@@ -110,6 +110,9 @@ function sciml_train(loss, θ, opt=nothing, adtype=nothing, args...;
            optfunc, res1.u; lb=lower_bounds, ub=upper_bounds, kwargs...)
        res1 = GalacticOptim.solve(
            optprob2, BFGS(initial_stepnorm=0.01), args...; maxiters, kwargs...)
+   elseif isempty(args) && deterministic
+       res1 = GalacticOptim.solve(
+           optprob, BFGS(initial_stepnorm=0.01), args...; maxiters, kwargs...)
    else
        res1 = GalacticOptim.solve(optprob, ADAM(0.1), args...; maxiters, kwargs...)
    end
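For a usage-level view of the dispatch changed above, a hedged sketch follows. The keyword names `lower_bounds` and `upper_bounds` are taken from the diff body; the toy loss, parameter values, and branch annotations are assumptions based on a reading of this diff, not documented behavior.

```julia
using DiffEqFlux

# Toy deterministic loss (no data iterator), so the automatic optimizer path applies.
loss(θ) = sum(abs2, θ .- 1.0)
θ0 = rand(3)

# No optimizer and no bounds: first branch (ADAM warm-up, then BFGS(initial_stepnorm=0.01)).
res_free = DiffEqFlux.sciml_train(loss, θ0)

# No optimizer but with box bounds: after this commit the call skips the ADAM
# warm-up branch and, via the new `elseif`, goes straight to BFGS.
res_boxed = DiffEqFlux.sciml_train(loss, θ0;
                                   lower_bounds = zeros(3),
                                   upper_bounds = fill(2.0, 3))
```

If that reading is right, the likely rationale is that plain ADAM does not enforce box constraints, so a bounded deterministic problem should not be warm-started with it; the commit itself does not state this.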