Rheolef 7.1
an efficient C++ finite element environment
solver_mumps.cc
// direct solver MUMPS, mpi implementation
//
// Note: why no seq implementation based on mumps?
//
// 1. mumps has a pure seq library implementation.
//    The seq and mpi libraries cannot be linked together, because they both
//    define the same dmumps() function.
// 2. but, when using mpi, the mpi implementation is unable to solve efficiently,
//    in parallel, some independent sparse linear systems (e.g. a sparse mass
//    matrix local to an element), because the right-hand side may be merged
//    on proc 0 for an obscure reason.
//
// Thus rheolef uses mumps for the mpi-only implementation of the direct solver
// and uses another library (e.g. umfpack) for the seq direct solver.
//
// The present 'seq' implementation has been tested and found to be invalid:
// the code is maintained (but no longer instantiated) for two reasons:
// 1. it can be used when mpi is not present, as a seq solver with the pure seq impl of mumps
// 2. mumps will propose in the future to provide a distributed RHS:
//    https://listes.ens-lyon.fr/sympa/arc/mumps-users
//    From: Alfredo Buttari <alfredo.buttari@enseeiht.fr>
//    Date: Mon, 31 Jan 2011 19:07:03 +0100
//    Subject: Re: [mumps-users] Scalability of MUMPS solution phase
//      Jack,
//      yes, adding support for distributed RHS is on our TODO list but I
//      can't say at this moment when it will be available.
//
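// This backend is not used directly: it is reached through the public 'solver'
// wrapper class. A minimal usage sketch, assuming the usual rheolef solver API
// (see the solver class documentation):
//
//   csr<Float> a = ...;           // assembled sparse matrix
//   vec<Float> b = ...;           // right-hand side
//   solver sa (a);                // factorization (mumps in distributed mode)
//   vec<Float> x = sa.solve (b);  // solve; the factorization can be reused
//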
#include "rheolef/config.h"
#ifdef _RHEOLEF_HAVE_MUMPS
#include "solver_mumps.h"

#undef DEBUG_MUMPS_SCOTCH
#ifdef DEBUG_MUMPS_SCOTCH
#undef _RHEOLEF_HAVE_MUMPS_WITH_PTSCOTCH // has failed on some linear systems: prefer seq-scotch when available
#endif // DEBUG_MUMPS_SCOTCH

namespace rheolef {
using namespace std;

// =========================================================================
// 1. local functions
// =========================================================================
// count non-zero entries of the upper part of A
// 1) diagonal block case
template<class T, class M>
static
typename csr<T,M>::size_type
nnz_dia_upper (const csr<T,M>& a)
{
  typedef typename csr<T,M>::size_type size_type;
  size_type dia_nnz = 0;
  typename csr<T,M>::const_iterator dia_ia = a.begin();
  for (size_type i = 0, n = a.nrow(); i < n; i++) {
    for (typename csr<T,M>::const_data_iterator p = dia_ia[i]; p < dia_ia[i+1]; p++) {
      size_type j = (*p).first;
      if (j >= i) dia_nnz++;
    }
  }
  return dia_nnz;
}
// 2) extra-diagonal block case, for distributed csr
// (the generic, sequential case has no extra-diagonal block)
template<class T, class M>
static
typename csr<T,M>::size_type
nnz_ext_upper (const csr<T,M>& a)
{
  return 0;
}
#ifdef _RHEOLEF_HAVE_MPI
template<class T>
static
typename csr<T,distributed>::size_type
nnz_ext_upper (const csr<T,distributed>& a)
{
  typedef typename csr<T,distributed>::size_type size_type;
  size_type first_i = a.row_ownership().first_index();
  size_type ext_nnz = 0;
  typename csr<T,distributed>::const_iterator ext_ia = a.ext_begin();
  for (size_type i = 0, n = a.nrow(); i < n; i++) {
    for (typename csr<T,distributed>::const_data_iterator p = ext_ia[i]; p < ext_ia[i+1]; p++) {
      size_type dis_i = first_i + i;
      size_type dis_j = a.jext2dis_j ((*p).first);
      if (dis_j >= dis_i) ext_nnz++;
    }
  }
  return ext_nnz;
}
#endif // _RHEOLEF_HAVE_MPI
// convert the csr pattern to the 1-based (fortran) coordinate format expected by mumps
template<class T, class M>
static
void
csr2mumps_struct_seq (const csr<T,M>& a, vector<int>& row, vector<int>& col, bool use_symmetry)
{
  typedef typename csr<T,M>::size_type size_type;
  typename csr<T,M>::const_iterator dia_ia = a.begin();
  for (size_type i = 0, n = a.nrow(), q = 0; i < n; i++) {
    for (typename csr<T,M>::const_data_iterator p = dia_ia[i]; p < dia_ia[i+1]; p++) {
      size_type j = (*p).first;
      if (!use_symmetry || j >= i) {
        row[q] = i + 1;
        col[q] = j + 1;
        q++;
      }
    }
  }
}
template<class T, class M>
static
void
csr2mumps_values_seq (const csr<T,M>& a, vector<T>& val, bool use_symmetry)
{
  typedef typename csr<T,M>::size_type size_type;
  typename csr<T,M>::const_iterator dia_ia = a.begin();
  for (size_type i = 0, n = a.nrow(), q = 0; i < n; i++) {
    for (typename csr<T,M>::const_data_iterator p = dia_ia[i]; p < dia_ia[i+1]; p++) {
      size_type j = (*p).first;
      if (!use_symmetry || j >= i) {
        val[q] = (*p).second;
        q++;
      }
    }
  }
}
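// note: the pattern (csr2mumps_*_struct) and the values (csr2mumps_*_values) are
// extracted separately, and must enumerate the entries in the same order: the
// pattern is given to mumps once at analysis (job=1), while the values array can
// be refreshed before each numeric factorization (job=2)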
#ifdef _RHEOLEF_HAVE_MPI
template<class T>
static
void
csr2mumps_struct_dis (const csr<T,sequential>& a, vector<int>& row, vector<int>& col, bool use_symmetry, bool)
{
  // dummy case: for the compiler to be happy
}
template<class T>
static
void
csr2mumps_struct_dis (const csr<T,distributed>& a, vector<int>& row, vector<int>& col, bool use_symmetry, bool drop_ext_nnz)
{
  typedef typename csr<T,distributed>::size_type size_type;
  typename csr<T,distributed>::const_iterator dia_ia = a.begin();
  typename csr<T,distributed>::const_iterator ext_ia = a.ext_begin();
  size_type dis_nr  = a.dis_nrow();
  size_type dis_nc  = a.dis_ncol();
  size_type nc      = a.ncol();
  size_type first_i = a.row_ownership().first_index();
  for (size_type i = 0, n = a.nrow(), q = 0; i < n; i++) {
    for (typename csr<T,distributed>::const_data_iterator p = dia_ia[i]; p < dia_ia[i+1]; p++) {
      size_type j = (*p).first;
      size_type dis_i = first_i + i;
      size_type dis_j = first_i + j;
      assert_macro (j < nc, "invalid matrix");
      assert_macro (dis_i < dis_nr && dis_j < dis_nc, "invalid matrix");
      if (!use_symmetry || dis_j >= dis_i) {
        row[q] = dis_i + 1;
        col[q] = dis_j + 1;
        q++;
      }
    }
    if (drop_ext_nnz) continue;
    for (typename csr<T,distributed>::const_data_iterator p = ext_ia[i]; p < ext_ia[i+1]; p++) {
      size_type dis_i = first_i + i;
      size_type dis_j = a.jext2dis_j ((*p).first);
      assert_macro (dis_i < dis_nr && dis_j < dis_nc, "invalid matrix");
      if (!use_symmetry || dis_j >= dis_i) {
        row[q] = dis_i + 1;
        col[q] = dis_j + 1;
        q++;
      }
    }
  }
}
template<class T>
static
void
csr2mumps_values_dis (const csr<T,sequential>& a, vector<T>& val, bool use_symmetry, bool)
{
  // dummy case: for the compiler to be happy
}
template<class T>
static
void
csr2mumps_values_dis (const csr<T,distributed>& a, vector<T>& val, bool use_symmetry, bool drop_ext_nnz)
{
#ifdef TO_CLEAN
  std::fill (val.begin(), val.end(), std::numeric_limits<T>::max());
#endif // TO_CLEAN

  typedef typename csr<T,distributed>::size_type size_type;
  size_type first_i = a.row_ownership().first_index();
  typename csr<T,distributed>::const_iterator dia_ia = a.begin();
  typename csr<T,distributed>::const_iterator ext_ia = a.ext_begin();
  for (size_type i = 0, n = a.nrow(), q = 0; i < n; i++) {
    for (typename csr<T,distributed>::const_data_iterator p = dia_ia[i]; p < dia_ia[i+1]; p++) {
      size_type dis_i = first_i + i;
      size_type dis_j = first_i + (*p).first;
      if (!use_symmetry || dis_j >= dis_i) {
#ifdef TO_CLEAN
        check_macro (q < val.size(), "invalid index");
#endif // TO_CLEAN
        val[q] = (*p).second;
        q++;
      }
    }
    if (drop_ext_nnz) continue;
    for (typename csr<T,distributed>::const_data_iterator p = ext_ia[i]; p < ext_ia[i+1]; p++) {
      size_type dis_i = first_i + i;
      size_type dis_j = a.jext2dis_j ((*p).first);
      if (!use_symmetry || dis_j >= dis_i) {
#ifdef TO_CLEAN
        check_macro (q < val.size(), "invalid index");
#endif // TO_CLEAN
        val[q] = (*p).second;
        q++;
      }
    }
  }
#ifdef TO_CLEAN
  T val_min = std::numeric_limits<T>::max(), val_max = 0;
  for (size_t q = 0; q < val.size(); q++) {
    val_min = std::min (val_min, abs(val[q]));
    val_max = std::max (val_max, abs(val[q]));
  }
  warning_macro ("val_min="<<val_min<<", val_max="<<val_max);
#endif // TO_CLEAN
}
#endif // _RHEOLEF_HAVE_MPI
// =========================================================================
// 2. the class interface
// =========================================================================
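// the mumps instance follows the object lifetime: the constructor runs the
// analysis and the numeric factorization (via update_values), solve() then
// reuses the factorization for each right-hand side, and the destructor
// releases the instance (job=-2)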
template<class T, class M>
solver_mumps_rep<T,M>::solver_mumps_rep (const csr<T,M>& a, const solver_option& opt)
  : solver_abstract_rep<T,M>(opt),
    _has_mumps_instance(false),
    _drop_ext_nnz(false),
    _mumps_par(),
    _row(),
    _col(),
    _val(),
    _a00(0),
    _det()
{
  update_values (a);
}
template<class T, class M>
void
solver_mumps_rep<T,M>::update_values (const csr<T,M>& a)
{
  size_type nproc = communicator().size();
  if (a.dis_nrow() <= 1) {
    // skip 0 or 1 dis_nrow, where mumps core dumps
    if (a.nrow() == 1) {
      _a00 = (*(a.begin()[0])).second;
    }
    _has_mumps_instance = false;
    return;
  }
  _has_mumps_instance = true;
  // ----------------------------------
  // step 0 : init a mumps instance
  // ----------------------------------
  bool use_symmetry = a.is_symmetric();
  bool use_definite_positive = a.is_definite_positive();
  _mumps_par.par = 1; // the host processor also participates in the computation
#ifdef _RHEOLEF_HAVE_MPI
  _mumps_par.comm_fortran = MPI_Comm_c2f (a.row_ownership().comm());
#endif // _RHEOLEF_HAVE_MPI
  if (use_symmetry && !use_definite_positive) {
    // symmetric invertible, but possibly indefinite: could have both < 0 and > 0 eigenvalues
    // => leads to a cpu time similar to the s.d.p. case, but without the assumption of positive or negative:
    _mumps_par.sym = 2;
  } else if (use_symmetry && use_definite_positive) {
#if 0
    // sym def pos
    // a) standard approach
    //    Note: could fail in scalapack when sym def negative
    _mumps_par.sym = 1;
    // b) second approach: from AmeButGue-2011-mumps, page 15:
    //    "Another approach to suppress numerical pivoting which works with ScaLAPACK
    //    for both positive definite and negative definite matrices consists in setting SYM=2 and
    //    CNTL(1)=0.0D0 (recommended strategy)."
    _mumps_par.sym = 2;
    _mumps_par.cntl[1-1] = 0.0;
#endif // 0
    // c) third approach: do not use the knowledge of positive or negative, since
    //    this knowledge is contained in the bilinear forms defined by the user;
    //    use only the general symmetry:
    //    tests show that the cpu time is very similar to the two previous approaches
    //    and the user does not need to know about positive_definite or negative_definite
    _mumps_par.sym = 2;
  } else {
    // unsymmetric invertible
    _mumps_par.sym = 0;
  }
  _mumps_par.job = -1; // initialize the instance
  dmumps_c(&_mumps_par);
  check_macro (_mumps_par.infog[1-1] == 0, "mumps: an error has occurred");
  // ----------------------------------
  // step 1 : symbolic factorization
  // ----------------------------------
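  // note: the mumps C interface exposes the fortran arrays ICNTL, CNTL, INFOG, ...
  // as 0-based C arrays; writing icntl[k-1] keeps the 1-based numbering of the
  // mumps documentation visible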
  if (base::option().verbose_level != 0) {
    _mumps_par.icntl[1-1] = 1; // error output
    _mumps_par.icntl[2-1] = 1; // verbose output
    _mumps_par.icntl[3-1] = 1; // global infos
    _mumps_par.icntl[4-1] = base::option().verbose_level; // verbosity level
    // strcpy (_mumps_par.write_problem, "dump_mumps"); // dump sparse structure
  } else {
    _mumps_par.icntl[1-1] = -1;
    _mumps_par.icntl[2-1] = -1;
    _mumps_par.icntl[3-1] = -1;
    _mumps_par.icntl[4-1] = 0;
  }
  if (base::option().compute_determinant) {
    _mumps_par.icntl[33-1] = 1;
  }
  _mumps_par.icntl[5-1] = 0; // matrix is in assembled format
#if defined(_RHEOLEF_HAVE_MUMPS_WITH_PTSCOTCH) || defined(_RHEOLEF_HAVE_MUMPS_WITH_SCOTCH)
  _mumps_par.icntl[7-1] = 3; // sequential ordering: 3==scotch
#elif defined(_RHEOLEF_HAVE_MUMPS_WITH_PARMETIS) || defined(_RHEOLEF_HAVE_MUMPS_WITH_METIS)
  _mumps_par.icntl[7-1] = 5; // sequential ordering: 5==metis
#else
  _mumps_par.icntl[7-1] = 7; // ordering: 7==automatic choice
#endif
  _mumps_par.icntl[14-1] = 40; // the default 20% seems to be insufficient in some cases
  _mumps_par.icntl[29-1] = 0; // 0: auto; 1: ptscotch; 2: parmetis
  _mumps_par.icntl[22-1] = 0; // 0: in-core; 1: out-of-core TODO
  if (is_distributed<M>::value) { // distributed case
    _mumps_par.icntl[18-1] = 3; // the matrix is given distributed

#if defined(_RHEOLEF_HAVE_MUMPS_WITH_SCOTCH) || defined(_RHEOLEF_HAVE_MUMPS_WITH_METIS)
    _mumps_par.icntl[28-1] = 1; // sequential ordering (preferred, more robust)
#elif defined(_RHEOLEF_HAVE_MUMPS_WITH_PTSCOTCH) || defined(_RHEOLEF_HAVE_MUMPS_WITH_PARMETIS)
    _mumps_par.icntl[28-1] = 2; // parallel ordering (less robust, but faster)
#endif // _RHEOLEF_HAVE_MUMPS_WITH_SCOTCH || _RHEOLEF_HAVE_MUMPS_WITH_METIS

#ifdef _RHEOLEF_HAVE_MUMPS_WITH_PTSCOTCH
    _mumps_par.icntl[29-1] = 1; // ptscotch=1
#elif defined(_RHEOLEF_HAVE_MUMPS_WITH_PARMETIS)
    _mumps_par.icntl[29-1] = 2; // parmetis=2
#else
    _mumps_par.icntl[29-1] = 0; // automatic choice
#endif // _RHEOLEF_HAVE_MUMPS_WITH_PTSCOTCH
  } else {
    _mumps_par.icntl[18-1] = 0; // the matrix is centralized on the host
  }
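  // with icntl(18)=3, each process passes its own matrix entries to mumps via the
  // irn_loc/jcn_loc/a_loc arrays filled below; with icntl(18)=0, the whole matrix
  // is given on the host via irn/jcn/a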
  // load the matrix in mumps
  _mumps_par.n = a.dis_nrow();
  size_type dia_nnz = (use_symmetry ? nnz_dia_upper(a) : a.nnz());
  size_type ext_nnz = (use_symmetry ? nnz_ext_upper(a) : a.ext_nnz());
  size_type dis_nnz = dia_nnz + (_drop_ext_nnz ? 0 : ext_nnz);
  if (is_distributed<M>::value) { // distributed case
#ifdef _RHEOLEF_HAVE_MPI
    _row.resize (dis_nnz);
    _col.resize (dis_nnz);
    csr2mumps_struct_dis (a, _row, _col, use_symmetry, _drop_ext_nnz);
    _mumps_par.nz_loc  = dis_nnz;
    _mumps_par.irn_loc = _row.begin().operator->();
    _mumps_par.jcn_loc = _col.begin().operator->();
#endif // _RHEOLEF_HAVE_MPI
  } else { // sequential
    _row.resize (dia_nnz);
    _col.resize (dia_nnz);
    csr2mumps_struct_seq (a, _row, _col, use_symmetry);
    _mumps_par.nnz = dia_nnz;
    _mumps_par.irn = _row.begin().operator->();
    _mumps_par.jcn = _col.begin().operator->();
  }
  _mumps_par.job = 1; // analysis (symbolic factorization)
  dmumps_c(&_mumps_par);
  trace_macro ("symbolic: ordering effectively used = " << _mumps_par.infog[7-1]); // 3=scotch, etc.
  check_macro (_mumps_par.infog[1-1] == 0, "mumps: error infog(1)="<<_mumps_par.infog[1-1]
    <<" has occurred (see mumps/infog(1) documentation; infog(2)="<<_mumps_par.infog[2-1]<<")");
  if (a.dis_nrow() <= 1) {
    // skip 0 or 1 dis_nrow, where mumps core dumps
    if (a.nrow() == 1) {
      _a00 = (*(a.begin()[0])).second;
    }
    return;
  }
  // ----------------------------------
  // step 2 : numeric factorization
  // ----------------------------------
  if (is_distributed<M>::value) { // distributed case
#ifdef _RHEOLEF_HAVE_MPI
    _val.resize (dis_nnz);
    csr2mumps_values_dis (a, _val, use_symmetry, _drop_ext_nnz);
    _mumps_par.a_loc = _val.begin().operator->();
#endif // _RHEOLEF_HAVE_MPI
  } else { // sequential
    _val.resize (dia_nnz);
    csr2mumps_values_seq (a, _val, use_symmetry);
    _mumps_par.a = _val.begin().operator->();
  }
  _mumps_par.job = 2; // numeric factorization
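  // if the workspace estimated at analysis proves too small, mumps returns
  // infog(1) = -8 (integer workspace) or -9 (real workspace); icntl(14) is the
  // workspace relaxation percentage: increase it and retry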
  bool finished = false;
  while (!finished && _mumps_par.icntl[14-1] <= 2000) {
    dmumps_c(&_mumps_par);
    if ((_mumps_par.infog[1-1] == -8 || _mumps_par.infog[1-1] == -9) &&
         _mumps_par.infog[2-1] >= 0) {
      // not enough working space:
      // the default 20% seems to be insufficient in some cases (periodic, surfacic, p-laplacian)
      _mumps_par.icntl[14-1] += 10;
      if (_mumps_par.icntl[14-1] > 100) {
        dis_warning_macro ("numeric factorization: increasing working space to "<<_mumps_par.icntl[14-1]<<"% and retrying...");
      }
    } else {
      finished = true;
    }
  }
  check_macro (_mumps_par.infog[1-1] == 0,
    "solver failed: mumps error infog(1)=" <<_mumps_par.infog[1-1]
    << ", infog(2)=" <<_mumps_par.infog[2-1]
    << ", icntl(14)="<<_mumps_par.icntl[14-1]
    << ", icntl(23)="<<_mumps_par.icntl[23-1]);
  if (_mumps_par.icntl[33-1] != 0) {
    // get the determinant from the infos: det(a) = mantissa * base^exponent,
    // with the mantissa in rinfog(12) and the base-2 exponent in infog(34)
    _det.mantissa = _mumps_par.rinfog[12-1];
    // the imaginary part _mumps_par.rinfog[13-1] is zero, as the matrix is real here
    _det.exponant = _mumps_par.infog[34-1];
    _det.base = 2;
  }
}
template<class T, class M>
vec<T,M>
solver_mumps_rep<T,M>::solve (const vec<T,M>& b) const
{
  if (b.dis_size() <= 1) {
    // skip 0 or 1 dis_size, where mumps core dumps
    if (b.size() == 1) {
      return vec<T,M>(b.ownership(), b[0]/_a00);
    } else {
      return vec<T,M>(b.ownership());
    }
  }
  // ----------------------------------
  // step 3 : define rhs & solve
  // ----------------------------------
  vec<T,M> x;
  vector<T> sol;
  vector<int> perm;
  size_type sol_size = 0;
  vec<T,M> b_host;
  T   dummy;  // mumps fails with zero sizes and null pointers...
  int idummy;
  if (is_distributed<M>::value) { // distributed case
    // 3.1a. merge the rhs b on proc 0, as required by distributed mumps (why?)
    _mumps_par.icntl[21-1] = 1; // 1: the solution is kept distributed
    _mumps_par.icntl[10-1] = 0; // nstep of iterative refinement (not in the distributed version?)
    communicator comm = b.ownership().comm();
    distributor host_ownership (b.dis_size(), comm, comm.rank() == 0 ? b.dis_size() : 0);
    b_host.resize (host_ownership);
    size_type first_i = b.ownership().first_index();
    for (size_type i = 0, n = b.size(); i < n; i++) {
      size_type dis_i = first_i + i;
      b_host.dis_entry(dis_i) = b[i];
    }
    b_host.dis_entry_assembly();
    if (comm.rank() == 0) {
      _mumps_par.nrhs = 1;
      _mumps_par.rhs  = b_host.begin().operator->();
    }
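    // after factorization, mumps returns in info(23) the number of solution
    // entries that will be assigned to this process, i.e. the required lsol_loc size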
    sol_size = _mumps_par.info[23-1];
    sol.resize (sol_size);
    perm.resize (sol_size);
    _mumps_par.lsol_loc = sol_size;
    _mumps_par.sol_loc  = (sol_size > 0) ? sol.begin().operator->()  : &dummy;
    _mumps_par.isol_loc = (sol_size > 0) ? perm.begin().operator->() : &idummy;
  } else { // sequential
    // 3.1b. in-place resolution: copy b into x and pass x as the rhs
    _mumps_par.icntl[21-1] = 0; // 0: the solution goes on proc 0, in-place in the rhs
    _mumps_par.icntl[10-1] = 0; // nstep of iterative refinement (not in the distributed version?)
    x = b;
    _mumps_par.nrhs = 1;
    _mumps_par.rhs  = x.begin().operator->();
  }
  // 3.2. run the mumps solver
  _mumps_par.icntl[9-1] = 1; // icntl(9)==1: solve A*x=b; otherwise solve A^T*x=b TODO
  _mumps_par.job = 3;
  dmumps_c(&_mumps_par);
  check_macro (_mumps_par.infog[1-1] >= 0,
    "solver failed: mumps error infog(1)="<<_mumps_par.infog[1-1]
    << ", infog(2)="<<_mumps_par.infog[2-1]);
  if (_mumps_par.infog[1-1] > 0) {
    warning_macro ("mumps warning infog(1)="<<_mumps_par.infog[1-1]
      << ", infog(2)="<<_mumps_par.infog[2-1]);
  }
  if (is_distributed<M>::value) { // distributed case
    // 3.3. redistribute the solution as b.ownership;
    // isol_loc (perm) holds the 1-based global row index of each local entry of sol_loc
    x.resize (b.ownership());
    for (size_t i = 0; i < sol_size; i++) {
      size_type dis_i = perm[i] - 1;
      assert_macro (dis_i < x.dis_size(), "invalid index");
      x.dis_entry(dis_i) = sol[i];
    }
    x.dis_entry_assembly();
  }
  return x;
}
template<class T, class M>
vec<T,M>
solver_mumps_rep<T,M>::trans_solve (const vec<T,M>& b) const
{
  error_macro ("not yet");
  return vec<T,M>();
}
template<class T, class M>
solver_mumps_rep<T,M>::~solver_mumps_rep ()
{
  if (!_has_mumps_instance) return;
  _has_mumps_instance = false;
  // ----------------------------------
  // step F : close the mumps instance
  // ----------------------------------
  _mumps_par.job = -2;
  dmumps_c(&_mumps_par);
  check_macro (_mumps_par.infog[1-1] == 0, "mumps: an error has occurred");
}
// ----------------------------------------------------------------------------
// instantiation in library
// ----------------------------------------------------------------------------
// TODO: code is only valid here for T=double
template class solver_mumps_rep<double,sequential>;

#ifdef _RHEOLEF_HAVE_MPI
template class solver_mumps_rep<double,distributed>;
#endif // _RHEOLEF_HAVE_MPI

} // namespace rheolef
#endif // _RHEOLEF_HAVE_MUMPS