Commit b55c403

Add problems from Optim.jl
1 parent 4e2520e commit b55c403

5 files changed, +418 −3 lines changed

README.md (+3)
@@ -5,3 +5,6 @@
[![Coverage Status](https://coveralls.io/repos/pkofod/OptimTestProblems.jl/badge.svg?branch=master&service=github)](https://coveralls.io/github/pkofod/OptimTestProblems.jl?branch=master)

[![codecov.io](http://codecov.io/github/pkofod/OptimTestProblems.jl/coverage.svg?branch=master)](http://codecov.io/github/pkofod/OptimTestProblems.jl?branch=master)

The purpose of this repository is to provide test problems for the JuliaNLSolvers packages.
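
A minimal usage sketch (illustrative, not from the repository; it assumes the
`UnconstrainedProblems` submodule and its `examples` dictionary introduced by
this commit):

```julia
using OptimTestProblems

# Look up a problem and evaluate its objective at its standard starting point.
prob = OptimTestProblems.UnconstrainedProblems.examples["Rosenbrock"]
prob.f(prob.initial_x)
```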

src/OptimTestProblems.jl (+2 −1)
@@ -1,5 +1,6 @@
module OptimTestProblems

include("optim_tests/multivariate/unconstrained.jl")
include("optim_tests/univariate/bounded.jl")

end # module
src/optim_tests/multivariate/unconstrained.jl (+361)

@@ -0,0 +1,361 @@
module UnconstrainedProblems

### Sources
###
### [1] Ali, Khompatraporn, & Zabinsky: A Numerical Evaluation of Several Stochastic
###     Algorithms on Selected Continuous Global Optimization Test Problems
###     Link: www.researchgate.net/profile/Montaz_Ali/publication/226654862_A_Numerical_Evaluation_of_Several_Stochastic_Algorithms_on_Selected_Continuous_Global_Optimization_Test_Problems/links/00b4952bef133a1a6b000000.pdf
###
### [2] Fletcher & Powell: A rapidly convergent descent method for minimization,
###     The Computer Journal, 1963

immutable OptimizationProblem
    name::AbstractString
    f::Function
    g!::Function
    h!::Function
    initial_x::Vector{Float64}
    solutions::Vector
    isdifferentiable::Bool
    istwicedifferentiable::Bool
end

examples = Dict{AbstractString, OptimizationProblem}()
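
# Illustrative helper (not part of the original commit): evaluate a registered
# problem's objective and gradient at its standard starting point, e.g.
# evaluate_at_start("Rosenbrock") once the registrations below have run.
function evaluate_at_start(name::AbstractString)
    prob = examples[name]
    x = copy(prob.initial_x)
    g = similar(x)
    prob.g!(g, x)   # the g! convention: the gradient is written into preallocated storage
    return prob.f(x), g
end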

##########################################################################
###
### Exponential Function
###
##########################################################################

function exponential(x::Vector)
    return exp((2.0 - x[1])^2) + exp((3.0 - x[2])^2)
end

function exponential_gradient!(storage::Vector, x::Vector)
    storage[1] = -2.0 * (2.0 - x[1]) * exp((2.0 - x[1])^2)
    storage[2] = -2.0 * (3.0 - x[2]) * exp((3.0 - x[2])^2)
end

function exponential_hessian!(storage::Matrix, x::Vector)
    storage[1, 1] = 2.0 * exp((2.0 - x[1])^2) * (2.0 * x[1]^2 - 8.0 * x[1] + 9.0)
    storage[1, 2] = 0.0
    storage[2, 1] = 0.0
    storage[2, 2] = 2.0 * exp((3.0 - x[2])^2) * (2.0 * x[2]^2 - 12.0 * x[2] + 19.0)
end

examples["Exponential"] = OptimizationProblem("Exponential",
                                              exponential,
                                              exponential_gradient!,
                                              exponential_hessian!,
                                              [0.0, 0.0],
                                              [2.0, 3.0],
                                              true,
                                              true)
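
# Quick sanity check (illustrative, not in the original commit): at the listed
# solution [2.0, 3.0] the gradient vanishes and f attains exp(0) + exp(0) = 2.
#
#     g = zeros(2)
#     exponential_gradient!(g, [2.0, 3.0])   # g == [0.0, 0.0]
#     exponential([2.0, 3.0])                # 2.0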

##########################################################################
###
### Fletcher-Powell
###
### From [2]: Fletcher & Powell, A rapidly convergent descent method
### for minimization
##########################################################################

function fletcher_powell(x::Vector)
    function theta(x::Vector)
        if x[1] > 0
            return atan(x[2] / x[1]) / (2.0 * pi)
        else
            return (pi + atan(x[2] / x[1])) / (2.0 * pi)
        end
    end

    return 100.0 * (x[3] - 10.0 * theta(x))^2 +
           (sqrt(x[1]^2 + x[2]^2) - 1.0)^2 + x[3]^2
end

# TODO: Implement
function fletcher_powell_gradient!(storage::Vector, x::Vector)
    return
end

# TODO: Implement
function fletcher_powell_hessian!(storage::Matrix, x::Vector)
    return
end

examples["Fletcher-Powell"] = OptimizationProblem("Fletcher-Powell",
                                                  fletcher_powell,
                                                  fletcher_powell_gradient!,
                                                  fletcher_powell_hessian!,
                                                  [-1.0, 0.0, 0.0], # Same as in source
                                                  [1.0, 0.0, 0.0],
                                                  false,
                                                  false)
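
# The derivatives above are left as TODO and the problem is registered as
# non-differentiable (theta is discontinuous across x[1] == 0 for x[2] < 0).
# Where a gradient is still wanted away from that half-plane, a central finite
# difference can stand in; a sketch, with the step size h chosen here as an
# assumption:
function fletcher_powell_fd_gradient!(storage::Vector, x::Vector, h = 1e-6)
    for i in 1:length(x)
        xp = copy(x); xp[i] += h
        xm = copy(x); xm[i] -= h
        storage[i] = (fletcher_powell(xp) - fletcher_powell(xm)) / (2 * h)
    end
end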

##########################################################################
###
### Himmelblau's Function
###
##########################################################################

function himmelblau(x::Vector)
    return (x[1]^2 + x[2] - 11)^2 + (x[1] + x[2]^2 - 7)^2
end

function himmelblau_gradient!(storage::Vector, x::Vector)
    storage[1] = 4.0 * x[1]^3 + 4.0 * x[1] * x[2] -
                 44.0 * x[1] + 2.0 * x[1] + 2.0 * x[2]^2 - 14.0
    storage[2] = 2.0 * x[1]^2 + 2.0 * x[2] - 22.0 +
                 4.0 * x[1] * x[2] + 4.0 * x[2]^3 - 28.0 * x[2]
end

function himmelblau_hessian!(storage::Matrix, x::Vector)
    storage[1, 1] = 12.0 * x[1]^2 + 4.0 * x[2] - 42.0
    storage[1, 2] = 4.0 * x[1] + 4.0 * x[2]
    storage[2, 1] = 4.0 * x[1] + 4.0 * x[2]
    storage[2, 2] = 12.0 * x[2]^2 + 4.0 * x[1] - 26.0
end

examples["Himmelblau"] = OptimizationProblem("Himmelblau",
                                             himmelblau,
                                             himmelblau_gradient!,
                                             himmelblau_hessian!,
                                             [2.0, 2.0],
                                             [3.0, 2.0],
                                             true,
                                             true)

##########################################################################
###
### Hosaki's Problem
###
### Problem 20 in [1]
##########################################################################

function hosaki(x::Vector)
    a = 1.0 - 8.0 * x[1] + 7.0 * x[1]^2 - (7.0 / 3.0) * x[1]^3 + (1.0 / 4.0) * x[1]^4
    return a * x[2]^2 * exp(-x[2])
end

function hosaki_gradient!(storage::Vector, x::Vector)
    # a(x[1]) and its derivative; da factors as (x[1]-1)(x[1]-2)(x[1]-4)
    a = 1.0 - 8.0 * x[1] + 7.0 * x[1]^2 - (7.0 / 3.0) * x[1]^3 + (1.0 / 4.0) * x[1]^4
    da = x[1]^3 - 7.0 * x[1]^2 + 14.0 * x[1] - 8.0
    storage[1] = da * x[2]^2 * exp(-x[2])
    storage[2] = a * (2.0 - x[2]) * x[2] * exp(-x[2])
end

function hosaki_hessian!(storage::Matrix, x::Vector)
    a = 1.0 - 8.0 * x[1] + 7.0 * x[1]^2 - (7.0 / 3.0) * x[1]^3 + (1.0 / 4.0) * x[1]^4
    da = x[1]^3 - 7.0 * x[1]^2 + 14.0 * x[1] - 8.0
    storage[1, 1] = (3.0 * x[1]^2 - 14.0 * x[1] + 14.0) * x[2]^2 * exp(-x[2])
    storage[1, 2] = da * (2.0 - x[2]) * x[2] * exp(-x[2])
    storage[2, 1] = storage[1, 2]
    storage[2, 2] = a * (2.0 - 4.0 * x[2] + x[2]^2) * exp(-x[2])
end

examples["Hosaki"] = OptimizationProblem("Hosaki",
                                         hosaki,
                                         hosaki_gradient!,
                                         hosaki_hessian!,
                                         [3.6, 1.9],
                                         [4.0, 2.0],
                                         true,
                                         true)
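
# Hosaki is multimodal (illustrative note, not in the original commit): the
# polynomial factor has critical points at x[1] = 1, 2, 4 and the x[2] factor
# peaks at x[2] = 2, giving a local minimum near [1.0, 2.0] alongside the
# listed solution near [4.0, 2.0].
#
#     hosaki([4.0, 2.0])   # ≈ -2.3458
#     hosaki([1.0, 2.0])   # ≈ -1.1279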

##########################################################################
###
### Large-Scale Quadratic
###
##########################################################################

# The dimension is hard-coded at 250 to match the initial point and solution
# registered below.
function large_polynomial(x::Vector)
    res = zero(x[1])
    for i in 1:250
        res += (i - x[i])^2
    end
    return res
end

function large_polynomial_gradient!(storage::Vector, x::Vector)
    for i in 1:250
        storage[i] = -2.0 * (i - x[i])
    end
end

function large_polynomial_hessian!(storage::Matrix, x::Vector)
    for i in 1:250
        for j in i:250
            if i == j
                storage[i, j] = 2.0
            else
                storage[i, j] = 0.0
                storage[j, i] = 0.0
            end
        end
    end
end

examples["Large Polynomial"] = OptimizationProblem("Large Polynomial",
                                                   large_polynomial,
                                                   large_polynomial_gradient!,
                                                   large_polynomial_hessian!,
                                                   zeros(250),
                                                   float([1:250;]),
                                                   true,
                                                   true)

##########################################################################
###
### Parabola
###
##########################################################################

function parabola(x::Vector)
    return (1.0 - x[1])^2 + (2.0 - x[2])^2 + (3.0 - x[3])^2 +
           (5.0 - x[4])^2 + (8.0 - x[5])^2
end

function parabola_gradient!(storage::Vector, x::Vector)
    storage[1] = -2.0 * (1.0 - x[1])
    storage[2] = -2.0 * (2.0 - x[2])
    storage[3] = -2.0 * (3.0 - x[3])
    storage[4] = -2.0 * (5.0 - x[4])
    storage[5] = -2.0 * (8.0 - x[5])
end

function parabola_hessian!(storage::Matrix, x::Vector)
    for i in 1:5
        for j in 1:5
            if i == j
                storage[i, j] = 2.0
            else
                storage[i, j] = 0.0
            end
        end
    end
end

examples["Parabola"] = OptimizationProblem("Parabola",
                                           parabola,
                                           parabola_gradient!,
                                           parabola_hessian!,
                                           [0.0, 0.0, 0.0, 0.0, 0.0],
                                           [1.0, 2.0, 3.0, 5.0, 8.0],
                                           true,
                                           true)

##########################################################################
###
### Simple 4th-Degree Polynomial Example
###
##########################################################################

function polynomial(x::Vector)
    return (10.0 - x[1])^2 + (7.0 - x[2])^4 + (108.0 - x[3])^4
end

function polynomial_gradient!(storage::Vector, x::Vector)
    storage[1] = -2.0 * (10.0 - x[1])
    storage[2] = -4.0 * (7.0 - x[2])^3
    storage[3] = -4.0 * (108.0 - x[3])^3
end

function polynomial_hessian!(storage::Matrix, x::Vector)
    storage[1, 1] = 2.0
    storage[1, 2] = 0.0
    storage[1, 3] = 0.0
    storage[2, 1] = 0.0
    storage[2, 2] = 12.0 * (7.0 - x[2])^2
    storage[2, 3] = 0.0
    storage[3, 1] = 0.0
    storage[3, 2] = 0.0
    storage[3, 3] = 12.0 * (108.0 - x[3])^2
end

examples["Polynomial"] = OptimizationProblem("Polynomial",
                                             polynomial,
                                             polynomial_gradient!,
                                             polynomial_hessian!,
                                             [0.0, 0.0, 0.0],
                                             [10.0, 7.0, 108.0],
                                             true,
                                             true)

##########################################################################
###
### Powell (d=4)
###
### Problem 35 in [1]
### Difficult since the Hessian is singular at the optimum
##########################################################################

function powell(x::Vector)
    return (x[1] + 10.0 * x[2])^2 + 5.0 * (x[3] - x[4])^2 +
           (x[2] - 2.0 * x[3])^4 + 10.0 * (x[1] - x[4])^4
end

function powell_gradient!(storage::Vector, x::Vector)
    storage[1] = 2.0 * (x[1] + 10.0 * x[2]) + 40.0 * (x[1] - x[4])^3
    storage[2] = 20.0 * (x[1] + 10.0 * x[2]) + 4.0 * (x[2] - 2.0 * x[3])^3
    storage[3] = 10.0 * (x[3] - x[4]) - 8.0 * (x[2] - 2.0 * x[3])^3
    storage[4] = -10.0 * (x[3] - x[4]) - 40.0 * (x[1] - x[4])^3
end

function powell_hessian!(storage::Matrix, x::Vector)
    storage[1, 1] = 2.0 + 120.0 * (x[1] - x[4])^2
    storage[1, 2] = 20.0
    storage[1, 3] = 0.0
    storage[1, 4] = -120.0 * (x[1] - x[4])^2
    storage[2, 1] = 20.0
    storage[2, 2] = 200.0 + 12.0 * (x[2] - 2.0 * x[3])^2
    storage[2, 3] = -24.0 * (x[2] - 2.0 * x[3])^2
    storage[2, 4] = 0.0
    storage[3, 1] = 0.0
    storage[3, 2] = -24.0 * (x[2] - 2.0 * x[3])^2
    storage[3, 3] = 10.0 + 48.0 * (x[2] - 2.0 * x[3])^2
    storage[3, 4] = -10.0
    storage[4, 1] = -120.0 * (x[1] - x[4])^2
    storage[4, 2] = 0.0
    storage[4, 3] = -10.0
    storage[4, 4] = 10.0 + 120.0 * (x[1] - x[4])^2
end

examples["Powell"] = OptimizationProblem("Powell",
                                         powell,
                                         powell_gradient!,
                                         powell_hessian!,
                                         [3.0, -1.0, 0.0, 1.0],
                                         [0.0, 0.0, 0.0, 0.0],
                                         true,
                                         true)
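
# The singularity noted in the header can be seen directly (illustrative check,
# not in the original commit): at the minimizer the quartic terms contribute no
# curvature, and the remaining quadratic part is rank-deficient.
#
#     H = zeros(4, 4)
#     powell_hessian!(H, [0.0, 0.0, 0.0, 0.0])
#     det(H)   # 0.0; both diagonal blocks [2 20; 20 200] and [10 -10; -10 10]
#              # are singular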

##########################################################################
###
### Rosenbrock (2D)
###
### Problem 38 in [1]
###
### The long, curved valley makes optimization difficult
##########################################################################

function rosenbrock(x::Vector)
    return (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2
end

function rosenbrock_gradient!(storage::Vector, x::Vector)
    storage[1] = -2.0 * (1.0 - x[1]) - 400.0 * (x[2] - x[1]^2) * x[1]
    storage[2] = 200.0 * (x[2] - x[1]^2)
end

function rosenbrock_hessian!(storage::Matrix, x::Vector)
    storage[1, 1] = 2.0 - 400.0 * x[2] + 1200.0 * x[1]^2
    storage[1, 2] = -400.0 * x[1]
    storage[2, 1] = -400.0 * x[1]
    storage[2, 2] = 200.0
end

examples["Rosenbrock"] = OptimizationProblem("Rosenbrock",
                                             rosenbrock,
                                             rosenbrock_gradient!,
                                             rosenbrock_hessian!,
                                             [0.0, 0.0],
                                             [1.0, 1.0],
                                             true,
                                             true)

end # module
