@@ -22,37 +22,46 @@ _p = [1.0, 100.0]
2222f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
2323l1 = rosenbrock(x0, _p)
2424prob = OptimizationProblem(f, x0, _p)
25+ ```
2526
## Optim.jl Solvers
2728
28- using OptimizationOptimJL
29-
30- # Start with some derivative-free optimizers
29+ ### Start with some derivative-free optimizers
3130
31+ ``` @example rosenbrock
32+ using OptimizationOptimJL
3233sol = solve(prob, SimulatedAnnealing())
3334prob = OptimizationProblem(f, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
3435sol = solve(prob, SAMIN())
3536
3637l1 = rosenbrock(x0, _p)
3738prob = OptimizationProblem(rosenbrock, x0, _p)
3839sol = solve(prob, NelderMead())
40+ ```
3941
40- # Now a gradient-based optimizer with forward-mode automatic differentiation
42+ ### Now a gradient-based optimizer with forward-mode automatic differentiation
4143
44+ ``` @example rosenbrock
4245optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
4346prob = OptimizationProblem(optf, x0, _p)
4447sol = solve(prob, BFGS())
48+ ```
4549
46- # Now a second order optimizer using Hessians generated by forward-mode automatic differentiation
50+ ### Now a second order optimizer using Hessians generated by forward-mode automatic differentiation
4751
52+ ``` @example rosenbrock
4853sol = solve(prob, Newton())
54+ ```
4955
50- # Now a second order Hessian-free optimizer
56+ ### Now a second order Hessian-free optimizer
5157
58+ ``` @example rosenbrock
5259sol = solve(prob, Optim.KrylovTrustRegion())
60+ ```
5361
54- # Now derivative-based optimizers with various constraints
62+ ### Now derivative-based optimizers with various constraints
5563
64+ ``` @example rosenbrock
5665cons = (res, x, p) -> res .= [x[1]^2 + x[2]^2]
5766optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = cons)
5867
@@ -68,24 +77,34 @@ sol = solve(prob, IPNewton())
6877
6978prob = OptimizationProblem(optf, x0, _p, lcons = [0.5], ucons = [0.5],
7079 lb = [-500.0, -500.0], ub = [50.0, 50.0])
71- sol = solve(prob, IPNewton()) # Notice now that x[1]^2 + x[2]^2 ≈ 0.5:
72- # cons(sol.u, _p) = 0.49999999999999994
80+ sol = solve(prob, IPNewton())
81+
82+ # Notice now that x[1]^2 + x[2]^2 ≈ 0.5:
83+ res = zeros(1)
84+ cons(res, sol.u, _p)
85+ println(res)
86+ ```
7387
88+ ``` @example rosenbrock
# In-place constraint for Optimization.jl: writes x[1]^2 + x[2]^2 (the squared
# distance from the origin) into the one-element residual vector `res`.
# The parameter argument `p` is required by the constraint interface but unused.
function con_c(res, x, p)
    res .= x[1]^2 + x[2]^2
    return res
end
7792
7893optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = con_c)
7994prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [0.25^2])
8095sol = solve(prob, IPNewton()) # -Inf < cons_circ(sol.u, _p) = 0.25^2
96+ ```
8197
## Evolutionary.jl Solvers
8399
100+ ``` @example rosenbrock
84101using OptimizationEvolutionary
85102sol = solve(prob, CMAES(μ = 40, λ = 100), abstol = 1e-15) # -Inf < cons_circ(sol.u, _p) = 0.25^2
103+ ```
86104
## IPOPT through OptimizationMOI
88106
107+ ``` @example rosenbrock
89108using OptimizationMOI, Ipopt
90109
91110function con2_c(res, x, p)
95114optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote(); cons = con2_c)
96115prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf, -Inf], ucons = [Inf, Inf])
97116sol = solve(prob, Ipopt.Optimizer())
117+ ```
98118
99- # Now let's switch over to OptimizationOptimisers with reverse-mode AD
119+ ## Now let's switch over to OptimizationOptimisers with reverse-mode AD
100120
121+ ``` @example rosenbrock
101122using OptimizationOptimisers
102123optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
103124prob = OptimizationProblem(optf, x0, _p)
104125sol = solve(prob, Adam(0.05), maxiters = 1000, progress = false)
126+ ```
105127
## Try out CMAEvolutionStrategy.jl's evolutionary methods
107129
130+ ``` @example rosenbrock
108131using OptimizationCMAEvolutionStrategy
109132sol = solve(prob, CMAEvolutionStrategyOpt())
133+ ```
110134
## Now try a few NLopt.jl solvers with symbolic differentiation via ModelingToolkit.jl
112136
137+ ``` @example rosenbrock
113138using OptimizationNLopt, ModelingToolkit
114139optf = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit())
115140prob = OptimizationProblem(optf, x0, _p)
116141
117142sol = solve(prob, Opt(:LN_BOBYQA, 2))
118143sol = solve(prob, Opt(:LD_LBFGS, 2))
144+ ```
119145
120- ## Add some box constraints and solve with a few NLopt.jl methods
146+ ### Add some box constraints and solve with a few NLopt.jl methods
121147
148+ ``` @example rosenbrock
122149prob = OptimizationProblem(optf, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
123150sol = solve(prob, Opt(:LD_LBFGS, 2))
124151sol = solve(prob, Opt(:G_MLSL_LDS, 2), local_method = Opt(:LD_LBFGS, 2), maxiters = 10000) #a global optimizer with random starts of local optimization
152+ ```
125153
## BlackBoxOptim.jl Solvers
127155
156+ ``` @example rosenbrock
128157using OptimizationBBO
129- prob = Optimization.OptimizationProblem(rosenbrock, x0 , _p, lb = [-1.0, 0.2],
158+ prob = Optimization.OptimizationProblem(rosenbrock, [0.0, 0.3] , _p, lb = [-1.0, 0.2],
130159 ub = [0.8, 0.43])
131160sol = solve(prob, BBO_adaptive_de_rand_1_bin()) # -1.0 ≤ x[1] ≤ 0.8, 0.2 ≤ x[2] ≤ 0.43
132161```
0 commit comments