autogenu-jupyter
An automatic code generator and continuation/GMRES (C/GMRES)-based numerical solvers for nonlinear MPC
continuation_gmres_condensing.hpp
#ifndef CGMRES__CONTINUATION_GMRES_CONDENSING_HPP_
#define CGMRES__CONTINUATION_GMRES_CONDENSING_HPP_

#include <stdexcept>
#include <array>
#include <algorithm>
#include <cmath>

#include "cgmres/types.hpp"
#include "cgmres/macros.hpp"


namespace cgmres {
namespace detail {

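// Implementation detail of the condensed continuation/GMRES (C/GMRES) method.
// Instead of solving the first-order necessary conditions (FONC) F = 0 from
// scratch at every sampling instant, the continuation method traces the
// solution of dF/dt = -zeta * F, so that the residual decays exponentially;
// the resulting linear equation for the solution update is solved by a
// matrix-free GMRES whose matrix-vector products are approximated by forward
// differences with step finite_difference_epsilon. In this condensing
// variant, the state x, the costate lmd, and (if nub > 0) the dummy inputs
// and multipliers mu associated with the control bounds are eliminated from
// the GMRES unknowns and recovered ("retrived", following the API spelling)
// afterwards, so GMRES iterates only on the control-input sequence.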
template <class NLP>
class ContinuationGMRESCondensing {
public:
  static constexpr int nx = NLP::nx;
  static constexpr int nu = NLP::nu;
  static constexpr int nc = NLP::nc;
  static constexpr int nuc = nu + nc;
  static constexpr int nub = NLP::nub;
  static constexpr int dim = NLP::dim;
  static constexpr int N = dim / nuc;

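  // finite_difference_epsilon is the forward-difference step h used in all
  // directional-derivative approximations; zeta is the continuation gain that
  // drives the FONC residual to zero as exp(-zeta * t). Both must be positive.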
  ContinuationGMRESCondensing(const NLP& nlp,
                              const Scalar finite_difference_epsilon,
                              const Scalar zeta)
    : nlp_(nlp),
      finite_difference_epsilon_(finite_difference_epsilon),
      zeta_(zeta),
      updated_solution_(Vector<dim>::Zero()),
      fonc_hu_(Vector<dim>::Zero()),
      fonc_hu_1_(Vector<dim>::Zero()),
      fonc_hu_2_(Vector<dim>::Zero()),
      fonc_hu_3_(Vector<dim>::Zero()),
      x0_1_(Vector<nx>::Zero()),
      dx_(Vector<nx>::Zero()) {
    std::fill(x_1_.begin(), x_1_.end(), Vector<nx>::Zero());
    std::fill(lmd_1_.begin(), lmd_1_.end(), Vector<nx>::Zero());
    std::fill(fonc_f_.begin(), fonc_f_.end(), Vector<nx>::Zero());
    std::fill(fonc_hx_.begin(), fonc_hx_.end(), Vector<nx>::Zero());
    std::fill(fonc_f_1_.begin(), fonc_f_1_.end(), Vector<nx>::Zero());
    std::fill(fonc_hx_1_.begin(), fonc_hx_1_.end(), Vector<nx>::Zero());
    if constexpr (nub > 0) {
      std::fill(dummy_1_.begin(), dummy_1_.end(), Vector<nub>::Zero());
      std::fill(mu_1_.begin(), mu_1_.end(), Vector<nub>::Zero());
      std::fill(fonc_hdummy_.begin(), fonc_hdummy_.end(), Vector<nub>::Zero());
      std::fill(fonc_hmu_.begin(), fonc_hmu_.end(), Vector<nub>::Zero());
      std::fill(fonc_hdummy_1_.begin(), fonc_hdummy_1_.end(), Vector<nub>::Zero());
      std::fill(fonc_hmu_1_.begin(), fonc_hmu_1_.end(), Vector<nub>::Zero());
      std::fill(dummy_update_.begin(), dummy_update_.end(), Vector<nub>::Zero());
      std::fill(mu_update_.begin(), mu_update_.end(), Vector<nub>::Zero());
    }

    if (finite_difference_epsilon <= 0.0) {
      throw std::invalid_argument("[ContinuationGMRESCondensing]: 'finite_difference_epsilon' must be positive!");
    }
    if (zeta <= 0.0) {
      throw std::invalid_argument("[ContinuationGMRESCondensing]: 'zeta' must be positive!");
    }
  }

  ContinuationGMRESCondensing() = default;

  ~ContinuationGMRESCondensing() = default;

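  // l2 norm of the FONC residuals from the most recent evaluation
  // (eval_fonc() or eval_b()); a small value indicates near-optimality.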
  Scalar optError() const {
    Scalar squared_error = fonc_hu_.squaredNorm();
    if constexpr (nub > 0) {
      for (const auto& e : fonc_hdummy_) {
        squared_error += e.squaredNorm();
      }
      for (const auto& e : fonc_hmu_) {
        squared_error += e.squaredNorm();
      }
    }
    for (const auto& e : fonc_f_) {
      squared_error += e.squaredNorm();
    }
    for (const auto& e : fonc_hx_) {
      squared_error += e.squaredNorm();
    }
    return std::sqrt(squared_error);
  }

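  // Evaluates all FONC residuals along the horizon for the given solution:
  // Hu (Hamiltonian gradient w.r.t. the inputs), the state-equation residual
  // f, the costate residual Hx, and, if control bounds are present, the
  // residuals Hdummy and Hmu of the dummy inputs and their multipliers.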
  template <typename VectorType>
  void eval_fonc(const Scalar t, const MatrixBase<VectorType>& x0, const Vector<dim>& solution,
                 const std::array<Vector<nx>, N+1>& x, const std::array<Vector<nx>, N+1>& lmd,
                 const std::array<Vector<nub>, N>& dummy, const std::array<Vector<nub>, N>& mu) {
    assert(x0.size() == nx);
    nlp_.eval_fonc_hu(t, x0, solution, x, lmd, fonc_hu_);
    if constexpr (nub > 0) {
      nlp_.eval_fonc_hu(solution, dummy, mu, fonc_hu_);
      nlp_.eval_fonc_hdummy(solution, dummy, mu, fonc_hdummy_);
      nlp_.eval_fonc_hmu(solution, dummy, mu, fonc_hmu_);
    }
    nlp_.eval_fonc_f(t, x0, solution, x, fonc_f_);
    nlp_.eval_fonc_hx(t, x0, solution, x, lmd, fonc_hx_);
  }

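  // Assembles the GMRES right-hand side. Writing F for the condensed residual
  // stored in fonc_hu_ and h for finite_difference_epsilon, the method
  // evaluates
  //   fonc_hu_   = F(t,   x0,      U)       current residual,
  //   fonc_hu_1_ = F(t+h, x0+h*dx, U)       after the predicted state motion,
  //   fonc_hu_3_ = as fonc_hu_1_, but with x and lmd re-condensed so that the
  //                state and costate residuals decay with rate zeta,
  //   fonc_hu_2_ = F(t+h, x0+h*dx, U+h*dU)  after the candidate update dU,
  // and forms
  //   b = (1/h - zeta)*F - (fonc_hu_3_ + fonc_hu_2_ - fonc_hu_1_)/h,
  // a forward-difference approximation of -zeta*F - dF/dt - A*dU, i.e. the
  // right-hand side of the continuation condition dF/dt = -zeta*F shifted by
  // the current GMRES initial guess dU.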
  template <typename VectorType1, typename VectorType2, typename VectorType3, typename VectorType4>
  void eval_b(const Scalar t, const MatrixBase<VectorType1>& x0,
              const MatrixBase<VectorType2>& solution,
              const std::array<Vector<nx>, N+1>& x,
              const std::array<Vector<nx>, N+1>& lmd,
              const std::array<Vector<nub>, N>& dummy,
              const std::array<Vector<nub>, N>& mu,
              const MatrixBase<VectorType3>& solution_update,
              const MatrixBase<VectorType4>& b_vec) {
    assert(x0.size() == nx);
    assert(solution.size() == dim);
    assert(solution_update.size() == dim);
    assert(b_vec.size() == dim);

    const Scalar t1 = t + finite_difference_epsilon_;
    nlp_.ocp().eval_f(t, x0.derived().data(), solution.derived().data(), dx_.data());
    x0_1_ = x0 + finite_difference_epsilon_ * dx_;
    updated_solution_ = solution + finite_difference_epsilon_ * solution_update;

    nlp_.eval_fonc_hu(t, x0, solution, x, lmd, fonc_hu_);
    nlp_.eval_fonc_hu(t1, x0_1_, solution, x, lmd, fonc_hu_1_);
    if constexpr (nub > 0) {
      nlp_.eval_fonc_hu(solution, dummy, mu, fonc_hu_);
      nlp_.eval_fonc_hu(solution, dummy, mu, fonc_hu_1_);
    }

    // condensing of x and lmd
    nlp_.eval_fonc_f(t, x0, solution, x, fonc_f_);
    nlp_.eval_fonc_hx(t, x0, solution, x, lmd, fonc_hx_);
    for (size_t i=0; i<=N; ++i) {
      fonc_f_1_[i] = (1.0 - finite_difference_epsilon_*zeta_) * fonc_f_[i];
    }
    for (size_t i=0; i<=N; ++i) {
      fonc_hx_1_[i] = (1.0 - finite_difference_epsilon_*zeta_) * fonc_hx_[i];
    }
    nlp_.retrive_x(t1, x0_1_, solution, x_1_, fonc_f_1_);
    nlp_.retrive_lmd(t1, x0_1_, solution, x_1_, lmd_1_, fonc_hx_1_);

    // condensing of dummy and mu
    if constexpr (nub > 0) {
      nlp_.eval_fonc_hdummy(solution, dummy, mu, fonc_hdummy_);
      nlp_.eval_fonc_hmu(solution, dummy, mu, fonc_hmu_);
      for (size_t i=0; i<N; ++i) {
        dummy_1_[i] = - zeta_ * fonc_hdummy_[i];
      }
      for (size_t i=0; i<N; ++i) {
        mu_1_[i] = - zeta_ * fonc_hmu_[i];
      }
      nlp_.multiply_hdummy_inv(dummy, mu, dummy_1_, mu_1_, fonc_hdummy_1_);
      nlp_.multiply_hmu_inv(dummy, mu, dummy_1_, mu_1_, fonc_hdummy_1_, fonc_hmu_1_);
      for (size_t i=0; i<N; ++i) {
        mu_1_[i] = mu[i] + finite_difference_epsilon_ * fonc_hmu_1_[i];
      }
    }

    nlp_.eval_fonc_hu(t1, x0_1_, solution, x_1_, lmd_1_, fonc_hu_3_);
    if constexpr (nub > 0) {
      nlp_.eval_fonc_hu(solution, dummy_1_, mu_1_, fonc_hu_3_);
    }

    nlp_.eval_fonc_f(t1, x0_1_, solution, x, fonc_f_1_);
    nlp_.eval_fonc_hx(t1, x0_1_, solution, x, lmd, fonc_hx_1_);

    nlp_.retrive_x(t1, x0_1_, updated_solution_, x_1_, fonc_f_1_);
    nlp_.retrive_lmd(t1, x0_1_, updated_solution_, x_1_, lmd_1_, fonc_hx_1_);
    if constexpr (nub > 0) {
      nlp_.retrive_mu_update(solution, dummy, mu, solution_update, mu_update_);
      for (size_t i=0; i<N; ++i) {
        mu_1_[i] = mu[i] - finite_difference_epsilon_ * mu_update_[i];
      }
    }

    nlp_.eval_fonc_hu(t1, x0_1_, updated_solution_, x_1_, lmd_1_, fonc_hu_2_);
    if constexpr (nub > 0) {
      nlp_.eval_fonc_hu(updated_solution_, dummy_1_, mu_1_, fonc_hu_2_);
    }
    CGMRES_EIGEN_CONST_CAST(VectorType4, b_vec) = (1.0/finite_difference_epsilon_ - zeta_) * fonc_hu_
        - (fonc_hu_3_ + fonc_hu_2_ - fonc_hu_1_) / finite_difference_epsilon_;
  }

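  // Applies the GMRES system matrix by a forward difference: the condensed
  // x and lmd are re-retrieved for the perturbed solution U + h*dU, and
  //   Ax = (F(t+h, x0+h*dx, U+h*dU) - F(t+h, x0+h*dx, U)) / h
  // approximates the directional derivative of the condensed residual along
  // solution_update. x0_1_, fonc_f_1_, fonc_hx_1_, and fonc_hu_1_ are reused
  // from the preceding eval_b() call, which must therefore come first.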
  template <typename VectorType1, typename VectorType2, typename VectorType3, typename VectorType4>
  void eval_Ax(const Scalar t, const MatrixBase<VectorType1>& x0,
               const MatrixBase<VectorType2>& solution,
               const std::array<Vector<nx>, N+1>& x,
               const std::array<Vector<nx>, N+1>& lmd,
               const std::array<Vector<nub>, N>& dummy,
               const std::array<Vector<nub>, N>& mu,
               const MatrixBase<VectorType3>& solution_update,
               const MatrixBase<VectorType4>& ax_vec) {
    assert(x0.size() == nx);
    assert(solution.size() == dim);
    assert(solution_update.size() == dim);
    assert(ax_vec.size() == dim);

    const Scalar t1 = t + finite_difference_epsilon_;
    updated_solution_ = solution + finite_difference_epsilon_ * solution_update;

    nlp_.retrive_x(t1, x0_1_, updated_solution_, x_1_, fonc_f_1_);
    nlp_.retrive_lmd(t1, x0_1_, updated_solution_, x_1_, lmd_1_, fonc_hx_1_);
    if constexpr (nub > 0) {
      nlp_.retrive_mu_update(solution, dummy, mu, solution_update, mu_update_);
      for (size_t i=0; i<N; ++i) {
        mu_1_[i] = mu[i] - finite_difference_epsilon_ * mu_update_[i];
      }
    }

    nlp_.eval_fonc_hu(t1, x0_1_, updated_solution_, x_1_, lmd_1_, fonc_hu_2_);
    if constexpr (nub > 0) {
      nlp_.eval_fonc_hu(updated_solution_, dummy_1_, mu_1_, fonc_hu_2_);
    }
    CGMRES_EIGEN_CONST_CAST(VectorType4, ax_vec) = (fonc_hu_2_ - fonc_hu_1_) / finite_difference_epsilon_;
  }

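  // After the GMRES solve, advances the condensed variables over the sampling
  // period dt: x and lmd move along their finite-difference increments
  // (x_1_ - x)/h and (lmd_1_ - lmd)/h, while dummy and mu (if control bounds
  // are present) are advanced by their condensed updates; dummy is clipped
  // from below by min_dummy to keep the condensing well defined.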
  template <typename VectorType1, typename VectorType2, typename VectorType3>
  void expansion(const Scalar t, const MatrixBase<VectorType1>& x0,
                 const MatrixBase<VectorType2>& solution,
                 std::array<Vector<nx>, N+1>& x,
                 std::array<Vector<nx>, N+1>& lmd,
                 std::array<Vector<nub>, N>& dummy,
                 std::array<Vector<nub>, N>& mu,
                 const MatrixBase<VectorType3>& solution_update,
                 const Scalar dt, const Scalar min_dummy) {
    assert(x0.size() == nx);
    const Scalar t1 = t + finite_difference_epsilon_;
    updated_solution_ = solution + finite_difference_epsilon_ * solution_update;
    for (size_t i=0; i<N+1; ++i) {
      fonc_f_1_[i] = (1.0 - finite_difference_epsilon_*zeta_) * fonc_f_[i];
    }
    for (size_t i=0; i<N+1; ++i) {
      fonc_hx_1_[i] = (1.0 - finite_difference_epsilon_*zeta_) * fonc_hx_[i];
    }
    nlp_.retrive_x(t1, x0_1_, updated_solution_, x_1_, fonc_f_1_);
    nlp_.retrive_lmd(t1, x0_1_, updated_solution_, x_1_, lmd_1_, fonc_hx_1_);
    for (size_t i=0; i<N+1; ++i) {
      fonc_f_[i] = x_1_[i] - x[i];
      x[i].noalias() += (dt/finite_difference_epsilon_) * fonc_f_[i];
    }
    for (size_t i=0; i<N+1; ++i) {
      fonc_hx_[i] = lmd_1_[i] - lmd[i];
      lmd[i].noalias() += (dt/finite_difference_epsilon_) * fonc_hx_[i];
    }
    if constexpr (nub > 0) {
      nlp_.retrive_dummy_update(solution, dummy, mu, solution_update, dummy_update_);
      nlp_.retrive_mu_update(solution, dummy, mu, solution_update, mu_update_);
      for (size_t i=0; i<N; ++i) {
        dummy[i].noalias() += dt * (fonc_hdummy_1_[i] - dummy_update_[i]);
      }
      for (size_t i=0; i<N; ++i) {
        mu[i].noalias() += dt * (fonc_hmu_1_[i] - mu_update_[i]);
      }
      nlp_.clip_dummy(dummy, min_dummy);
    }
  }

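  // Recovers the state trajectory x from x0 and the current solution by
  // propagating the state equation with zero state-equation residual.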
  template <typename VectorType>
  void retrive_x(const Scalar t, const MatrixBase<VectorType>& x0, const Vector<dim>& solution,
                 std::array<Vector<nx>, N+1>& x) {
    assert(x0.size() == nx);
    std::fill(fonc_f_1_.begin(), fonc_f_1_.end(), Vector<nx>::Zero());
    nlp_.retrive_x(t, x0, solution, x, fonc_f_1_);
  }

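  // Recovers the costate trajectory lmd for the given states by propagating
  // the costate equation with zero costate residual.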
  template <typename VectorType>
  void retrive_lmd(const Scalar t, const MatrixBase<VectorType>& x0, const Vector<dim>& solution,
                   const std::array<Vector<nx>, N+1>& x,
                   std::array<Vector<nx>, N+1>& lmd) {
    assert(x0.size() == nx);
    std::fill(fonc_hx_1_.begin(), fonc_hx_1_.end(), Vector<nx>::Zero());
    nlp_.retrive_lmd(t, x0, solution, x, lmd, fonc_hx_1_);
  }

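  // Initializes the dummy inputs consistently with the control bounds: each
  // dummy is set to the square root of |h_mu| evaluated at dummy = 0, i.e.
  // the slack remaining in the corresponding bound, and is then clipped from
  // below by min_dummy.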
  void retrive_dummy(const Vector<dim>& solution,
                     std::array<Vector<nub>, N>& dummy,
                     const std::array<Vector<nub>, N>& mu,
                     const Scalar min_dummy) {
    if constexpr (nub > 0) {
      std::fill(fonc_hmu_1_.begin(), fonc_hmu_1_.end(), Vector<nub>::Zero());
      std::fill(dummy.begin(), dummy.end(), Vector<nub>::Zero());
      nlp_.eval_fonc_hmu(solution, dummy, mu, fonc_hmu_1_);
      for (size_t i=0; i<N; ++i) {
        dummy[i].array() = fonc_hmu_1_[i].array().abs().sqrt();
      }
      nlp_.clip_dummy(dummy, min_dummy);
    }
  }

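  // Initializes the bound multipliers consistently with stationarity w.r.t.
  // the dummy inputs: h_dummy is affine in mu with slope 2*dummy, so
  // mu = -h_dummy(mu = 0) / (2*dummy) solves h_dummy = 0 componentwise
  // (hence the clipping of dummy away from zero in retrive_dummy).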
  void retrive_mu(const Vector<dim>& solution,
                  const std::array<Vector<nub>, N>& dummy,
                  std::array<Vector<nub>, N>& mu) {
    if constexpr (nub > 0) {
      std::fill(fonc_hdummy_1_.begin(), fonc_hdummy_1_.end(), Vector<nub>::Zero());
      std::fill(mu.begin(), mu.end(), Vector<nub>::Zero());
      nlp_.eval_fonc_hdummy(solution, dummy, mu, fonc_hdummy_1_);
      for (size_t i=0; i<N; ++i) {
        mu[i].array() = - fonc_hdummy_1_[i].array() / (2.0 * dummy[i].array());
      }
    }
  }

  const NLP& get_nlp() const { return nlp_; }

  void synchronize_ocp() { nlp_.synchronize_ocp(); }

  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

private:
  NLP nlp_;
  Scalar finite_difference_epsilon_, zeta_;
  Vector<dim> updated_solution_, fonc_hu_, fonc_hu_1_, fonc_hu_2_, fonc_hu_3_;
  std::array<Vector<nx>, N+1> x_1_, lmd_1_, fonc_f_, fonc_hx_, fonc_f_1_, fonc_hx_1_;
  std::array<Vector<nub>, N> dummy_1_, mu_1_, fonc_hdummy_, fonc_hmu_,
                             fonc_hdummy_1_, fonc_hmu_1_, dummy_update_, mu_update_;
  Vector<nx> x0_1_, dx_;
};

} // namespace detail
} // namespace cgmres

#endif // CGMRES__CONTINUATION_GMRES_CONDENSING_HPP_