Rosenbrock.h
Go to the documentation of this file.
//===========================================================================
/*!
 *
 *
 * \brief Generalized Rosenbrock benchmark function
 *
 * This non-convex benchmark function for real-valued optimization is
 * a generalization from two to multiple dimensions of a classic
 * function first proposed in:
 *
 * H. H. Rosenbrock. An automatic method for finding the greatest or
 * least value of a function. The Computer Journal 3: 175-184, 1960
 *
 *
 *
 * \author -
 * \date -
 *
 *
 * \par Copyright 1995-2017 Shark Development Team
 *
 * <BR><HR>
 * This file is part of Shark.
 * <https://shark-ml.github.io/Shark/>
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */
//===========================================================================
41
42#ifndef SHARK_OBJECTIVEFUNCTIONS_BENCHMARK_ROSENBROCK_H
43#define SHARK_OBJECTIVEFUNCTIONS_BENCHMARK_ROSENBROCK_H
44
46#include <shark/Core/Random.h>
47
48namespace shark {namespace benchmarks{
49/*! \brief Generalized Rosenbrock benchmark function
50*
51* This non-convex benchmark function for real-valued optimization is a
52* generalization from two to multiple dimensions of a classic
53* function first proposed in:
54*
55* H. H. Rosenbrock. An automatic method for finding the greatest or
56* least value of a function. The Computer Journal 3: 175-184,
57* 1960
58* \ingroup benchmarks
59*/
	/// \brief Constructs the problem
	///
	/// \param dimensions number of dimensions to optimize
	/// \param initialSpread spread of the initial starting point
	Rosenbrock(std::size_t dimensions=23, double initialSpread = 1.0)
	:m_numberOfVariables(dimensions), m_initialSpread(initialSpread) {
		// NOTE(review): original body lines 68-70 are missing from this view
		// (upstream presumably announces supported features, e.g. derivatives
		// and starting-point proposal, here) -- confirm against upstream.
	}
72
73 /// \brief From INameable: return the class name.
74 std::string name() const
75 { return "Rosenbrock"; }
76
77 std::size_t numberOfVariables()const{
78 return m_numberOfVariables;
79 }
80
		// NOTE(review): the signature line of this function (upstream line 81)
		// is missing from this view -- given the setter below it is presumably
		// `bool hasScalableDimensions()const{`; confirm against upstream.
		return true;
	}
84
		// NOTE(review): the signature line (upstream line 85) is missing from
		// this view -- presumably
		// `void setNumberOfVariables( std::size_t numberOfVariables ){`;
		// confirm against upstream.
		m_numberOfVariables = numberOfVariables;
	}
88
		// NOTE(review): the opening signature (upstream line 89) is missing
		// from this view -- presumably `SearchPointType proposeStartingPoint() const {`;
		// confirm against upstream.
		// Draws a random starting point with each coordinate sampled
		// uniformly from [0, m_initialSpread].
		RealVector x(numberOfVariables());

		for (std::size_t i = 0; i < x.size(); i++) {
			// mep_rng is presumably the RNG pointer provided by the base
			// class -- confirm; not declared in this view.
			x(i) = random::uni(*mep_rng, 0, m_initialSpread );
		}
		return x;
	}
97
	/// \brief Evaluates the objective at search point p.
	///
	/// Computes f(p) = sum_{i=0}^{n-2} [ 100*(p(i+1) - p(i)^2)^2 + (1 - p(i))^2 ],
	/// which is 0 exactly when every p(i) = 1 (sum of squares, all terms vanish).
	/// Assumes p.size() >= 2; for an empty p the bound p.size()-1 would
	/// underflow the unsigned type.
	///
	/// \param p the point to evaluate
	/// \return the function value at p
	double eval( const SearchPointType & p ) const {
		// NOTE(review): one original line (upstream line 99) is missing from
		// this view here -- presumably the evaluation-counter increment;
		// confirm against upstream.

		double sum = 0;

		for( std::size_t i = 0; i < p.size()-1; i++ ) {
			sum += 100*sqr( p(i+1) - sqr( p( i ) ) ) +sqr( 1. - p( i ) );
		}

		return( sum );
	}
109
110 virtual ResultType evalDerivative( const SearchPointType & p, FirstOrderDerivative & derivative )const {
111 double result = eval(p);
112 size_t size = p.size();
113 derivative.resize(size);
114 derivative(0) = 2*( p(0) - 1 ) - 400 * ( p(1) - sqr( p(0) ) ) * p(0);
115 derivative(size-1) = 200 * ( p(size - 1) - sqr( p( size - 2 ) ) ) ;
116 for(size_t i=1; i != size-1; ++i){
117 derivative( i ) = 2 * ( p(i) - 1 ) - 400 * (p(i+1) - sqr( p(i) ) ) * p( i )+200 * ( p( i )- sqr( p(i-1) ) );
118 }
119 return result;
120
121 }
122
	/// \brief Evaluates the objective together with gradient and Hessian at p.
	///
	/// Only the tridiagonal band of the (symmetric) Hessian is non-zero for
	/// this separable-chain objective; all other entries are cleared to 0.
	///
	/// \param p          search point; needs at least two entries
	/// \param derivative output argument, receives gradient and Hessian at p
	/// \return the objective value at p
	virtual ResultType evalDerivative( const SearchPointType & p, SecondOrderDerivative & derivative )const {
		double result = eval(p);
		size_t size = p.size();
		derivative.gradient.resize(size);
		derivative.hessian.resize(size,size);
		// Zero everything; only the tridiagonal band is written below.
		derivative.hessian.clear();

		// Gradient at the two boundary variables (each couples to one neighbour).
		derivative.gradient(0) = 2*( p(0) - 1 ) - 400 * ( p(1) - sqr( p(0) ) ) * p(0);
		derivative.gradient(size-1) = 200 * ( p(size - 1) - sqr( p( size - 2 ) ) ) ;

		// Hessian entries of the first variable: d2f/dx0^2 and d2f/dx0dx1.
		derivative.hessian(0,0) = 2 - 400* (p(1) - 3*sqr(p(0))) ;
		derivative.hessian(0,1) = -400 * p(0) ;

		// Hessian entries of the last variable.
		derivative.hessian(size-1,size-1) = 200;
		derivative.hessian(size-1,size-2) = -400 * p( size - 2 );

		// Interior variables: gradient plus the three band entries per row
		// (the sub-diagonal (i,i-1) mirrors (i-1,i) set one iteration earlier,
		// keeping the matrix symmetric).
		for(size_t i=1; i != size-1; ++i){
			derivative.gradient( i ) = 2 * ( p(i) - 1 ) - 400 * (p(i+1) - sqr( p(i) ) ) * p( i )+200 * ( p( i )- sqr( p(i-1) ) );

			derivative.hessian(i,i) = 202 - 400 * ( p(i+1) - 3 * sqr(p(i)));
			derivative.hessian(i,i+1) = - 400 * ( p(i) );
			derivative.hessian(i,i-1) = - 400 * ( p(i-1) );

		}
		return result;
	}
149
private:
	std::size_t m_numberOfVariables; ///< dimensionality of the search space
	double m_initialSpread;          ///< upper bound of the uniform distribution used for proposed starting points
};
154
155}}
156
157#endif