deal.II version 9.7.0
\(\newcommand{\dealvcentcolon}{\mathrel{\mathop{:}}}\) \(\newcommand{\dealcoloneq}{\dealvcentcolon\mathrel{\mkern-1.2mu}=}\) \(\newcommand{\jump}[1]{\left[\!\left[ #1 \right]\!\right]}\) \(\newcommand{\average}[1]{\left\{\!\left\{ #1 \right\}\!\right\}}\)
Loading...
Searching...
No Matches
trilinos_sparsity_pattern.cc
Go to the documentation of this file.
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2008 - 2025 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
17
18#ifdef DEAL_II_WITH_TRILINOS
19
20# include <deal.II/base/mpi.h>
22
25
27# include <Epetra_Export.h>
29
30# include <limits>
31
33
34namespace TrilinosWrappers
35{
37 {
38 void
40 {
41 // if we are asked to visit the past-the-end line, then simply
42 // release all our caches and go on with life
43 if (this->a_row == sparsity_pattern->n_rows() ||
44 (sparsity_pattern->in_local_range(this->a_row) == false))
45 {
46 colnum_cache.reset();
47 return;
48 }
49
50 // otherwise first flush Trilinos caches if necessary
51 if (!sparsity_pattern->is_compressed())
52 sparsity_pattern->compress();
53
54 colnum_cache = std::make_shared<std::vector<size_type>>(
55 sparsity_pattern->row_length(this->a_row));
56
57 if (colnum_cache->size() > 0)
58 {
59 // get a representation of the present row
60 int ncols;
61 const int ierr = sparsity_pattern->graph->ExtractGlobalRowCopy(
62 this->a_row,
63 colnum_cache->size(),
64 ncols,
65 reinterpret_cast<TrilinosWrappers::types::int_type *>(
66 const_cast<size_type *>(colnum_cache->data())));
67 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
68 AssertThrow(static_cast<std::vector<size_type>::size_type>(ncols) ==
69 colnum_cache->size(),
71 }
72 }
73 } // namespace SparsityPatternIterators
74
75
76 // The constructor is actually the
77 // only point where we have to check
78 // whether we build a serial or a
79 // parallel Trilinos matrix.
80 // Actually, it does not even matter
81 // how many threads there are, but
82 // only if we use an MPI compiler or
83 // a standard compiler. So, even one
84 // thread on a configuration with
85 // MPI will still get a parallel
86 // interface.
88 {
90 std::make_unique<Epetra_Map>(TrilinosWrappers::types::int_type(0),
93 graph = std::make_unique<Epetra_FECrsGraph>(View,
96 0);
97 graph->FillComplete();
98 }
99
100
101
103 const size_type n,
104 const size_type n_entries_per_row)
105 {
106 reinit(m, n, n_entries_per_row);
107 }
108
109
110
112 const size_type m,
113 const size_type n,
114 const std::vector<size_type> &n_entries_per_row)
115 {
116 reinit(m, n, n_entries_per_row);
117 }
118
119
120
122 : SparsityPatternBase(std::move(other))
123 , column_space_map(std::move(other.column_space_map))
124 , graph(std::move(other.graph))
125 , nonlocal_graph(std::move(other.nonlocal_graph))
126 {}
127
128
129
130 // Copy function only works if the
131 // sparsity pattern is empty.
133 : SparsityPatternBase(input_sparsity)
134 , column_space_map(new Epetra_Map(TrilinosWrappers::types::int_type(0),
135 TrilinosWrappers::types::int_type(0),
136 Utilities::Trilinos::comm_self()))
137 , graph(
138 new Epetra_FECrsGraph(View, *column_space_map, *column_space_map, 0))
139 {
140 Assert(input_sparsity.n_rows() == 0,
142 "Copy constructor only works for empty sparsity patterns."));
143 }
144
145
146
  // Construct a square sparsity pattern: the same IndexSet describes both
  // the row and the column partitioning. All work is delegated to the
  // corresponding reinit() function.
  SparsityPattern::SparsityPattern(const IndexSet &parallel_partitioning,
                                   const MPI_Comm communicator,
                                   const size_type n_entries_per_row)
  {
    reinit(parallel_partitioning,
           parallel_partitioning,
           communicator,
           n_entries_per_row);
  }
156
157
158
160 const IndexSet &parallel_partitioning,
161 const MPI_Comm communicator,
162 const std::vector<size_type> &n_entries_per_row)
163 {
164 reinit(parallel_partitioning,
165 parallel_partitioning,
166 communicator,
167 n_entries_per_row);
168 }
169
170
171
  // Construct a (possibly rectangular) sparsity pattern with separate row
  // and column partitionings and a uniform per-row entry estimate;
  // delegates to the corresponding reinit() function.
  SparsityPattern::SparsityPattern(const IndexSet &row_parallel_partitioning,
                                   const IndexSet &col_parallel_partitioning,
                                   const MPI_Comm communicator,
                                   const size_type n_entries_per_row)
  {
    reinit(row_parallel_partitioning,
           col_parallel_partitioning,
           communicator,
           n_entries_per_row);
  }
182
183
184
186 const IndexSet &row_parallel_partitioning,
187 const IndexSet &col_parallel_partitioning,
188 const MPI_Comm communicator,
189 const std::vector<size_type> &n_entries_per_row)
190 {
191 reinit(row_parallel_partitioning,
192 col_parallel_partitioning,
193 communicator,
194 n_entries_per_row);
195 }
196
197
198
  // Construct a sparsity pattern with separate row and column
  // partitionings plus an extra set of rows this process may write into
  // (beyond its locally owned ones). Delegates to the corresponding
  // reinit() function.
  SparsityPattern::SparsityPattern(const IndexSet &row_parallel_partitioning,
                                   const IndexSet &col_parallel_partitioning,
                                   const IndexSet &writable_rows,
                                   const MPI_Comm communicator,
                                   const size_type n_max_entries_per_row)
  {
    reinit(row_parallel_partitioning,
           col_parallel_partitioning,
           writable_rows,
           communicator,
           n_max_entries_per_row);
  }
211
212
213
214 void
216 const size_type n,
217 const size_type n_entries_per_row)
218 {
221 MPI_COMM_SELF,
222 n_entries_per_row);
223 }
224
225
226
227 void
229 const size_type n,
230 const std::vector<size_type> &n_entries_per_row)
231 {
234 MPI_COMM_SELF,
235 n_entries_per_row);
236 }
237
238
239
240 namespace
241 {
242 using size_type = SparsityPattern::size_type;
243
244 void
245 reinit_sp(const Epetra_Map &row_map,
246 const Epetra_Map &col_map,
247 const size_type n_entries_per_row,
248 std::unique_ptr<Epetra_Map> &column_space_map,
249 std::unique_ptr<Epetra_FECrsGraph> &graph,
250 std::unique_ptr<Epetra_CrsGraph> &nonlocal_graph)
251 {
252 Assert(row_map.IsOneToOne(),
253 ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
254 "the maps of different processors."));
255 Assert(col_map.IsOneToOne(),
256 ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
257 "the maps of different processors."));
258
259 nonlocal_graph.reset();
260 graph.reset();
261 column_space_map = std::make_unique<Epetra_Map>(col_map);
262
265 std::uint64_t(n_entries_per_row) <
266 static_cast<std::uint64_t>(std::numeric_limits<int>::max()),
267 ExcMessage("The TrilinosWrappers use Epetra internally which "
268 "uses 'signed int' to represent local indices. "
269 "Therefore, only 2,147,483,647 nonzero matrix "
270 "entries can be stored on a single process, "
271 "but you are requesting more than that. "
272 "If possible, use more MPI processes."));
273
274
275 // for more than one processor, need to specify only row map first and
276 // let the matrix entries decide about the column map (which says which
277 // columns are present in the matrix, not to be confused with the
278 // col_map that tells how the domain dofs of the matrix will be
279 // distributed). for only one processor, we can directly assign the
280 // columns as well. If we use a recent Trilinos version, we can also
281 // require building a non-local graph which gives us thread-safe
282 // initialization.
283 if (row_map.Comm().NumProc() > 1)
284 graph = std::make_unique<Epetra_FECrsGraph>(
285 Copy, row_map, n_entries_per_row, false
286 // TODO: Check which new Trilinos version supports this...
287 // Remember to change tests/trilinos/assemble_matrix_parallel_07, too.
288 // #if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
289 // , true
290 // #endif
291 );
292 else
293 graph = std::make_unique<Epetra_FECrsGraph>(
294 Copy, row_map, col_map, n_entries_per_row, false);
295 }
296
297
298
299 void
300 reinit_sp(const Epetra_Map &row_map,
301 const Epetra_Map &col_map,
302 const std::vector<size_type> &n_entries_per_row,
303 std::unique_ptr<Epetra_Map> &column_space_map,
304 std::unique_ptr<Epetra_FECrsGraph> &graph,
305 std::unique_ptr<Epetra_CrsGraph> &nonlocal_graph)
306 {
307 Assert(row_map.IsOneToOne(),
308 ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
309 "the maps of different processors."));
310 Assert(col_map.IsOneToOne(),
311 ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
312 "the maps of different processors."));
313
314 // release memory before reallocation
315 nonlocal_graph.reset();
316 graph.reset();
317 AssertDimension(n_entries_per_row.size(),
319
320 column_space_map = std::make_unique<Epetra_Map>(col_map);
321
322 // Translate the vector of row lengths into one that only stores
323 // those entries that related to the locally stored rows of the matrix:
324 std::vector<int> local_entries_per_row(
327 for (unsigned int i = 0; i < local_entries_per_row.size(); ++i)
328 local_entries_per_row[i] =
329 n_entries_per_row[TrilinosWrappers::min_my_gid(row_map) + i];
330
331 AssertThrow(std::accumulate(local_entries_per_row.begin(),
332 local_entries_per_row.end(),
333 std::uint64_t(0)) <
334 static_cast<std::uint64_t>(std::numeric_limits<int>::max()),
335 ExcMessage("The TrilinosWrappers use Epetra internally which "
336 "uses 'signed int' to represent local indices. "
337 "Therefore, only 2,147,483,647 nonzero matrix "
338 "entries can be stored on a single process, "
339 "but you are requesting more than that. "
340 "If possible, use more MPI processes."));
341
342 if (row_map.Comm().NumProc() > 1)
343 graph = std::make_unique<Epetra_FECrsGraph>(
344 Copy, row_map, local_entries_per_row.data(), false
345 // TODO: Check which new Trilinos version supports this...
346 // Remember to change tests/trilinos/assemble_matrix_parallel_07, too.
347 // #if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
348 // , true
349 // #endif
350 );
351 else
352 graph = std::make_unique<Epetra_FECrsGraph>(
353 Copy, row_map, col_map, local_entries_per_row.data(), false);
354 }
355
356
357
358 template <typename SparsityPatternType>
359 void
360 reinit_sp(const Epetra_Map &row_map,
361 const Epetra_Map &col_map,
362 const SparsityPatternType &sp,
363 const bool exchange_data,
364 std::unique_ptr<Epetra_Map> &column_space_map,
365 std::unique_ptr<Epetra_FECrsGraph> &graph,
366 std::unique_ptr<Epetra_CrsGraph> &nonlocal_graph)
367 {
368 nonlocal_graph.reset();
369 graph.reset();
370
371 AssertDimension(sp.n_rows(),
373 AssertDimension(sp.n_cols(),
375
376 column_space_map = std::make_unique<Epetra_Map>(col_map);
377
378 Assert(row_map.LinearMap() == true,
380 "This function only works if the row map is contiguous."));
381
382 const size_type first_row = TrilinosWrappers::min_my_gid(row_map),
383 last_row = TrilinosWrappers::max_my_gid(row_map) + 1;
384 std::vector<int> n_entries_per_row(last_row - first_row);
385
386 // Trilinos wants the row length as an int this is hopefully never going
387 // to be a problem.
388 for (size_type row = first_row; row < last_row; ++row)
389 n_entries_per_row[row - first_row] =
390 static_cast<int>(sp.row_length(row));
391
392 AssertThrow(std::accumulate(n_entries_per_row.begin(),
393 n_entries_per_row.end(),
394 std::uint64_t(0)) <
395 static_cast<std::uint64_t>(std::numeric_limits<int>::max()),
396 ExcMessage("The TrilinosWrappers use Epetra internally which "
397 "uses 'signed int' to represent local indices. "
398 "Therefore, only 2,147,483,647 nonzero matrix "
399 "entries can be stored on a single process, "
400 "but you are requesting more than that. "
401 "If possible, use more MPI processes."));
402
403 if (row_map.Comm().NumProc() > 1)
404 graph = std::make_unique<Epetra_FECrsGraph>(Copy,
405 row_map,
406 n_entries_per_row.data(),
407 false);
408 else
409 graph = std::make_unique<Epetra_FECrsGraph>(
410 Copy, row_map, col_map, n_entries_per_row.data(), false);
411
412 AssertDimension(sp.n_rows(), n_global_rows(*graph));
413
414 std::vector<TrilinosWrappers::types::int_type> row_indices;
415
416 for (size_type row = first_row; row < last_row; ++row)
417 {
418 const TrilinosWrappers::types::int_type row_length =
419 sp.row_length(row);
420 if (row_length == 0)
421 continue;
422
423 row_indices.resize(row_length, -1);
424 {
425 typename SparsityPatternType::iterator p = sp.begin(row);
426 // avoid incrementing p over the end of the current row because
427 // it is slow for DynamicSparsityPattern in parallel
428 for (int col = 0; col < row_length;)
429 {
430 row_indices[col++] = p->column();
431 if (col < row_length)
432 ++p;
433 }
434 }
435 if (exchange_data == false)
436 graph->Epetra_CrsGraph::InsertGlobalIndices(row,
437 row_length,
438 row_indices.data());
439 else
440 {
441 // Include possibility to exchange data since
442 // DynamicSparsityPattern is able to do so
443 const TrilinosWrappers::types::int_type trilinos_row = row;
444 graph->InsertGlobalIndices(1,
445 &trilinos_row,
446 row_length,
447 row_indices.data());
448 }
449 }
450
451 // TODO A dynamic_cast fails here, this is suspicious.
452 const auto &range_map =
453 static_cast<const Epetra_Map &>(graph->RangeMap()); // NOLINT
454 int ierr = graph->GlobalAssemble(*column_space_map, range_map, true);
455 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
456
457 ierr = graph->OptimizeStorage();
458 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
459 }
460 } // namespace
461
462
463
464 void
465 SparsityPattern::reinit(const IndexSet &parallel_partitioning,
466 const MPI_Comm communicator,
467 const size_type n_entries_per_row)
468 {
469 SparsityPatternBase::resize(parallel_partitioning.size(),
470 parallel_partitioning.size());
471 Epetra_Map map =
472 parallel_partitioning.make_trilinos_map(communicator, false);
473 reinit_sp(
474 map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
475 }
476
477
478
479 void
480 SparsityPattern::reinit(const IndexSet &parallel_partitioning,
481 const MPI_Comm communicator,
482 const std::vector<size_type> &n_entries_per_row)
483 {
484 SparsityPatternBase::resize(parallel_partitioning.size(),
485 parallel_partitioning.size());
486 Epetra_Map map =
487 parallel_partitioning.make_trilinos_map(communicator, false);
488 reinit_sp(
489 map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
490 }
491
492
493
494 void
495 SparsityPattern::reinit(const IndexSet &row_parallel_partitioning,
496 const IndexSet &col_parallel_partitioning,
497 const MPI_Comm communicator,
498 const size_type n_entries_per_row)
499 {
500 SparsityPatternBase::resize(row_parallel_partitioning.size(),
501 col_parallel_partitioning.size());
502 Epetra_Map row_map =
503 row_parallel_partitioning.make_trilinos_map(communicator, false);
504 Epetra_Map col_map =
505 col_parallel_partitioning.make_trilinos_map(communicator, false);
506 reinit_sp(row_map,
507 col_map,
508 n_entries_per_row,
510 graph,
512 }
513
514
515
516 void
517 SparsityPattern::reinit(const IndexSet &row_parallel_partitioning,
518 const IndexSet &col_parallel_partitioning,
519 const MPI_Comm communicator,
520 const std::vector<size_type> &n_entries_per_row)
521 {
522 SparsityPatternBase::resize(row_parallel_partitioning.size(),
523 col_parallel_partitioning.size());
524 Epetra_Map row_map =
525 row_parallel_partitioning.make_trilinos_map(communicator, false);
526 Epetra_Map col_map =
527 col_parallel_partitioning.make_trilinos_map(communicator, false);
528 reinit_sp(row_map,
529 col_map,
530 n_entries_per_row,
532 graph,
534 }
535
536
537
538 void
539 SparsityPattern::reinit(const IndexSet &row_parallel_partitioning,
540 const IndexSet &col_parallel_partitioning,
541 const IndexSet &writable_rows,
542 const MPI_Comm communicator,
543 const size_type n_entries_per_row)
544 {
545 SparsityPatternBase::resize(row_parallel_partitioning.size(),
546 col_parallel_partitioning.size());
547 Epetra_Map row_map =
548 row_parallel_partitioning.make_trilinos_map(communicator, false);
549 Epetra_Map col_map =
550 col_parallel_partitioning.make_trilinos_map(communicator, false);
551 reinit_sp(row_map,
552 col_map,
553 n_entries_per_row,
555 graph,
557
558 IndexSet nonlocal_partitioner = writable_rows;
559 AssertDimension(nonlocal_partitioner.size(),
560 row_parallel_partitioning.size());
561 if constexpr (running_in_debug_mode())
562 {
563 {
564 IndexSet tmp = writable_rows & row_parallel_partitioning;
565 Assert(tmp == row_parallel_partitioning,
567 "The set of writable rows passed to this method does not "
568 "contain the locally owned rows, which is not allowed."));
569 }
570 }
571 nonlocal_partitioner.subtract_set(row_parallel_partitioning);
572 if (Utilities::MPI::n_mpi_processes(communicator) > 1)
573 {
574 Epetra_Map nonlocal_map =
575 nonlocal_partitioner.make_trilinos_map(communicator, true);
577 std::make_unique<Epetra_CrsGraph>(Copy, nonlocal_map, 0);
578 }
579 else
580 Assert(nonlocal_partitioner.n_elements() == 0, ExcInternalError());
581 }
582
583
584
585 template <typename SparsityPatternType>
586 void
588 const IndexSet &row_parallel_partitioning,
589 const IndexSet &col_parallel_partitioning,
590 const SparsityPatternType &nontrilinos_sparsity_pattern,
591 const MPI_Comm communicator,
592 const bool exchange_data)
593 {
594 SparsityPatternBase::resize(row_parallel_partitioning.size(),
595 col_parallel_partitioning.size());
596 Epetra_Map row_map =
597 row_parallel_partitioning.make_trilinos_map(communicator, false);
598 Epetra_Map col_map =
599 col_parallel_partitioning.make_trilinos_map(communicator, false);
600 reinit_sp(row_map,
601 col_map,
602 nontrilinos_sparsity_pattern,
603 exchange_data,
605 graph,
607 }
608
609
610
611 template <typename SparsityPatternType>
612 void
614 const IndexSet &parallel_partitioning,
615 const SparsityPatternType &nontrilinos_sparsity_pattern,
616 const MPI_Comm communicator,
617 const bool exchange_data)
618 {
619 AssertDimension(nontrilinos_sparsity_pattern.n_rows(),
620 parallel_partitioning.size());
621 AssertDimension(nontrilinos_sparsity_pattern.n_cols(),
622 parallel_partitioning.size());
623 SparsityPatternBase::resize(parallel_partitioning.size(),
624 parallel_partitioning.size());
625 Epetra_Map map =
626 parallel_partitioning.make_trilinos_map(communicator, false);
627 reinit_sp(map,
628 map,
629 nontrilinos_sparsity_pattern,
630 exchange_data,
632 graph,
634 }
635
636
637
640 {
642 return *this;
643 }
644
645
646
647 void
649 {
651 column_space_map = std::make_unique<Epetra_Map>(*sp.column_space_map);
652 graph = std::make_unique<Epetra_FECrsGraph>(*sp.graph);
653
654 if (sp.nonlocal_graph.get() != nullptr)
655 nonlocal_graph = std::make_unique<Epetra_CrsGraph>(*sp.nonlocal_graph);
656 else
657 nonlocal_graph.reset();
658 }
659
660
661
662 template <typename SparsityPatternType>
663 void
664 SparsityPattern::copy_from(const SparsityPatternType &sp)
665 {
666 SparsityPatternBase::resize(sp.n_rows(), sp.n_cols());
667 const Epetra_Map rows(TrilinosWrappers::types::int_type(sp.n_rows()),
668 0,
670 const Epetra_Map columns(TrilinosWrappers::types::int_type(sp.n_cols()),
671 0,
673
674 reinit_sp(
675 rows, columns, sp, false, column_space_map, graph, nonlocal_graph);
676 }
677
678
679
680 void
682 {
684 // When we clear the matrix, reset
685 // the pointer and generate an
686 // empty sparsity pattern.
688 std::make_unique<Epetra_Map>(TrilinosWrappers::types::int_type(0),
691 graph = std::make_unique<Epetra_FECrsGraph>(View,
694 0);
695 graph->FillComplete();
696
697 nonlocal_graph.reset();
698 }
699
700
701
702 void
704 {
705 int ierr;
707 if (nonlocal_graph.get() != nullptr)
708 {
709 if (nonlocal_graph->IndicesAreGlobal() == false &&
710 nonlocal_graph->RowMap().NumMyElements() > 0 &&
712 {
713 // Insert dummy element at (row, column) that corresponds to row 0
714 // in local index counting.
718
719 // in case we have a square sparsity pattern, add the entry on the
720 // diagonal
723 column = row;
724 // if not, take a column index that we have ourselves since we
725 // know for sure it is there (and it will not create spurious
726 // messages to many ranks like putting index 0 on many processors)
727 else if (column_space_map->NumMyElements() > 0)
729 ierr = nonlocal_graph->InsertGlobalIndices(row, 1, &column);
730 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
731 }
732 Assert(nonlocal_graph->RowMap().NumMyElements() == 0 ||
734 nonlocal_graph->IndicesAreGlobal() == true,
736
737 ierr =
738 nonlocal_graph->FillComplete(*column_space_map, graph->RangeMap());
739 AssertThrow(ierr >= 0, ExcTrilinosError(ierr));
740 ierr = nonlocal_graph->OptimizeStorage();
741 AssertThrow(ierr >= 0, ExcTrilinosError(ierr));
742 Epetra_Export exporter(nonlocal_graph->RowMap(), graph->RowMap());
743 ierr = graph->Export(*nonlocal_graph, exporter, Add);
744 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
745 ierr = graph->FillComplete(*column_space_map, graph->RangeMap());
746 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
747 }
748 else
749 {
750 // TODO A dynamic_cast fails here, this is suspicious.
751 const auto &range_map =
752 static_cast<const Epetra_Map &>(graph->RangeMap()); // NOLINT
753 ierr = graph->GlobalAssemble(*column_space_map, range_map, true);
754 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
755 }
756
757 try
758 {
759 ierr = graph->OptimizeStorage();
760 }
761 catch (const int error_code)
762 {
764 false,
766 "The Epetra_CrsGraph::OptimizeStorage() function "
767 "has thrown an error with code " +
768 std::to_string(error_code) +
769 ". You will have to look up the exact meaning of this error "
770 "in the Trilinos source code, but oftentimes, this function "
771 "throwing an error indicates that you are trying to allocate "
772 "more than 2,147,483,647 nonzero entries in the sparsity "
773 "pattern on the local process; this will not work because "
774 "Epetra indexes entries with a simple 'signed int'."));
775 }
776 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
777
778 // Check consistency between the sizes set at the beginning and what
779 // Trilinos stores:
780 using namespace deal_II_exceptions::internals;
785 }
786
787
788
789 bool
791 {
792 return graph->RowMap().LID(
793 static_cast<TrilinosWrappers::types::int_type>(i)) != -1;
794 }
795
796
797
798 bool
800 {
801 if (!row_is_stored_locally(i))
802 {
803 return false;
804 }
805 else
806 {
807 // Extract local indices in
808 // the matrix.
809 int trilinos_i =
810 graph->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
811 trilinos_j =
812 graph->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
813
814 // Check whether the matrix
815 // already is transformed to
816 // local indices.
817 if (graph->Filled() == false)
818 {
819 int nnz_present = graph->NumGlobalIndices(i);
820 int nnz_extracted;
822
823 // Generate the view and make
824 // sure that we have not generated
825 // an error.
826 // TODO: trilinos_i is the local row index -> it is an int but
827 // ExtractGlobalRowView requires trilinos_i to be the global row
828 // index and thus it should be a long long int
829 int ierr = graph->ExtractGlobalRowView(trilinos_i,
830 nnz_extracted,
831 col_indices);
832 Assert(ierr == 0, ExcTrilinosError(ierr));
833 Assert(nnz_present == nnz_extracted,
834 ExcDimensionMismatch(nnz_present, nnz_extracted));
835
836 // Search the index
837 const std::ptrdiff_t local_col_index =
838 std::find(col_indices, col_indices + nnz_present, trilinos_j) -
839 col_indices;
840
841 if (local_col_index == nnz_present)
842 return false;
843 }
844 else
845 {
846 // Prepare pointers for extraction
847 // of a view of the row.
848 int nnz_present = graph->NumGlobalIndices(i);
849 int nnz_extracted;
850 int *col_indices;
851
852 // Generate the view and make
853 // sure that we have not generated
854 // an error.
855 int ierr =
856 graph->ExtractMyRowView(trilinos_i, nnz_extracted, col_indices);
857 Assert(ierr == 0, ExcTrilinosError(ierr));
858
859 Assert(nnz_present == nnz_extracted,
860 ExcDimensionMismatch(nnz_present, nnz_extracted));
861
862 // Search the index
863 const std::ptrdiff_t local_col_index =
864 std::find(col_indices, col_indices + nnz_present, trilinos_j) -
865 col_indices;
866
867 if (local_col_index == nnz_present)
868 return false;
869 }
870 }
871
872 return true;
873 }
874
875
876
879 {
880 size_type local_b = 0;
881 for (int i = 0; i < static_cast<int>(local_size()); ++i)
882 {
883 int *indices;
884 int num_entries;
885 graph->ExtractMyRowView(i, num_entries, indices);
886 for (unsigned int j = 0; j < static_cast<unsigned int>(num_entries);
887 ++j)
888 {
889 if (static_cast<size_type>(std::abs(i - indices[j])) > local_b)
890 local_b = std::abs(i - indices[j]);
891 }
892 }
893
895 graph->Comm().MaxAll(reinterpret_cast<TrilinosWrappers::types::int_type *>(
896 &local_b),
897 &global_b,
898 1);
899 return static_cast<size_type>(global_b);
900 }
901
902
903
904 unsigned int
906 {
907 return graph->NumMyRows();
908 }
909
910
911
912 std::pair<SparsityPattern::size_type, SparsityPattern::size_type>
914 {
916 const size_type end = TrilinosWrappers::max_my_gid(graph->RowMap()) + 1;
917
918 return {begin, end};
919 }
920
921
922
923 std::uint64_t
928
929
930
931 unsigned int
933 {
934 return graph->MaxNumIndices();
935 }
936
937
938
941 {
942 Assert(row < n_rows(), ExcInternalError());
943
944 // Get a representation of the where the present row is located on
945 // the current processor
947 graph->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
948
949 // On the processor who owns this row, we'll have a non-negative
950 // value for `local_row` and can ask for the length of the row.
951 if (local_row >= 0)
952 return graph->NumMyIndices(local_row);
953 else
954 return static_cast<size_type>(-1);
955 }
956
957
958
959 void
961 const ArrayView<const size_type> &columns,
962 const bool indices_are_sorted)
963 {
964 add_entries(row, columns.begin(), columns.end(), indices_are_sorted);
965 }
966
967
968
969 const Epetra_Map &
971 {
972 // TODO A dynamic_cast fails here, this is suspicious.
973 const auto &domain_map =
974 static_cast<const Epetra_Map &>(graph->DomainMap()); // NOLINT
975 return domain_map;
976 }
977
978
979
980 const Epetra_Map &
982 {
983 // TODO A dynamic_cast fails here, this is suspicious.
984 const auto &range_map =
985 static_cast<const Epetra_Map &>(graph->RangeMap()); // NOLINT
986 return range_map;
987 }
988
989
990
993 {
994 const Epetra_MpiComm *mpi_comm =
995 dynamic_cast<const Epetra_MpiComm *>(&graph->RangeMap().Comm());
996 Assert(mpi_comm != nullptr, ExcInternalError());
997 return mpi_comm->Comm();
998 }
999
1000
1001
1002 void
1007
1008
1009
1010 // As of now, no particularly neat
1011 // output is generated in case of
1012 // multiple processors.
1013 void
1014 SparsityPattern::print(std::ostream &out,
1015 const bool write_extended_trilinos_info) const
1016 {
1017 if (write_extended_trilinos_info)
1018 out << *graph;
1019 else
1020 {
1021 int *indices;
1022 int num_entries;
1023
1024 for (int i = 0; i < graph->NumMyRows(); ++i)
1025 {
1026 graph->ExtractMyRowView(i, num_entries, indices);
1027 for (int j = 0; j < num_entries; ++j)
1028 out << "(" << TrilinosWrappers::global_index(graph->RowMap(), i)
1029 << ","
1030 << TrilinosWrappers::global_index(graph->ColMap(), indices[j])
1031 << ") " << std::endl;
1032 }
1033 }
1034
1035 AssertThrow(out.fail() == false, ExcIO());
1036 }
1037
1038
1039
1040 void
1041 SparsityPattern::print_gnuplot(std::ostream &out) const
1042 {
1043 Assert(graph->Filled() == true, ExcInternalError());
1044 for (::types::global_dof_index row = 0; row < local_size(); ++row)
1045 {
1046 int *indices;
1047 int num_entries;
1048 graph->ExtractMyRowView(row, num_entries, indices);
1049
1050 Assert(num_entries >= 0, ExcInternalError());
1051 // avoid sign comparison warning
1052 const ::types::global_dof_index num_entries_ = num_entries;
1053 for (::types::global_dof_index j = 0; j < num_entries_; ++j)
1054 // while matrix entries are usually
1055 // written (i,j), with i vertical and
1056 // j horizontal, gnuplot output is
1057 // x-y, that is we have to exchange
1058 // the order of output
1059 out << static_cast<int>(
1060 TrilinosWrappers::global_index(graph->ColMap(), indices[j]))
1061 << " "
1062 << -static_cast<int>(
1063 TrilinosWrappers::global_index(graph->RowMap(), row))
1064 << std::endl;
1065 }
1066
1067 AssertThrow(out.fail() == false, ExcIO());
1068 }
1069
1070 // TODO: Implement!
1071 std::size_t
1073 {
1075 return 0;
1076 }
1077
1078
1079# ifndef DOXYGEN
1080 // explicit instantiations
1081 //
1082 template void
1083 SparsityPattern::copy_from(const ::SparsityPattern &);
1084 template void
1085 SparsityPattern::copy_from(const ::DynamicSparsityPattern &);
1086
1087 template void
1089 const ::SparsityPattern &,
1090 const MPI_Comm,
1091 bool);
1092 template void
1094 const ::DynamicSparsityPattern &,
1095 const MPI_Comm,
1096 bool);
1097
1098
1099 template void
1101 const IndexSet &,
1102 const ::SparsityPattern &,
1103 const MPI_Comm,
1104 bool);
1105 template void
1107 const IndexSet &,
1108 const ::DynamicSparsityPattern &,
1109 const MPI_Comm,
1110 bool);
1111# endif
1112
1113} // namespace TrilinosWrappers
1114
1116
1117#endif // DEAL_II_WITH_TRILINOS
iterator begin() const
Definition array_view.h:707
iterator end() const
Definition array_view.h:716
size_type size() const
Definition index_set.h:1791
size_type n_elements() const
Definition index_set.h:1949
Epetra_Map make_trilinos_map(const MPI_Comm communicator=MPI_COMM_WORLD, const bool overlapping=false) const
void subtract_set(const IndexSet &other)
Definition index_set.cc:480
virtual void resize(const size_type rows, const size_type cols)
size_type n_rows() const
size_type n_cols() const
std::shared_ptr< const std::vector< size_type > > colnum_cache
size_type row_length(const size_type row) const
std::unique_ptr< Epetra_FECrsGraph > graph
void print(std::ostream &out, const bool write_extended_trilinos_info=false) const
void print_gnuplot(std::ostream &out) const
const_iterator end() const
virtual void add_row_entries(const size_type &row, const ArrayView< const size_type > &columns, const bool indices_are_sorted=false) override
std::unique_ptr< Epetra_CrsGraph > nonlocal_graph
bool exists(const size_type i, const size_type j) const
std::pair< size_type, size_type > local_range() const
void add_entries(const size_type row, ForwardIterator begin, ForwardIterator end, const bool indices_are_sorted=false)
void copy_from(const SparsityPattern &input_sparsity_pattern)
std::unique_ptr< Epetra_Map > column_space_map
const_iterator begin() const
SparsityPattern & operator=(const SparsityPattern &input_sparsity_pattern)
bool row_is_stored_locally(const size_type i) const
void reinit(const size_type m, const size_type n, const size_type n_entries_per_row=0)
#define DEAL_II_NAMESPACE_OPEN
Definition config.h:40
#define DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
Definition config.h:603
constexpr bool running_in_debug_mode()
Definition config.h:78
#define DEAL_II_NAMESPACE_CLOSE
Definition config.h:41
#define DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
Definition config.h:647
#define DEAL_II_NOT_IMPLEMENTED()
static ::ExceptionBase & ExcIO()
#define Assert(cond, exc)
#define AssertDimension(dim1, dim2)
static ::ExceptionBase & ExcInternalError()
static ::ExceptionBase & ExcDimensionMismatch(std::size_t arg1, std::size_t arg2)
static ::ExceptionBase & ExcTrilinosError(int arg1)
static ::ExceptionBase & ExcTrilinosError(int arg1)
static ::ExceptionBase & ExcMessage(std::string arg1)
#define AssertThrow(cond, exc)
IndexSet complete_index_set(const IndexSet::size_type N)
Definition index_set.h:1219
void reinit_sp(const Teuchos::RCP< TpetraTypes::MapType< MemorySpace > > &row_map, const Teuchos::RCP< TpetraTypes::MapType< MemorySpace > > &col_map, const size_type< MemorySpace > n_entries_per_row, Teuchos::RCP< TpetraTypes::MapType< MemorySpace > > &column_space_map, Teuchos::RCP< TpetraTypes::GraphType< MemorySpace > > &graph, Teuchos::RCP< TpetraTypes::GraphType< MemorySpace > > &nonlocal_graph)
TrilinosWrappers::types::int_type global_index(const Epetra_BlockMap &map, const ::types::global_dof_index i)
TrilinosWrappers::types::int_type n_global_rows(const Epetra_CrsGraph &graph)
TrilinosWrappers::types::int_type min_my_gid(const Epetra_BlockMap &map)
TrilinosWrappers::types::int64_type n_global_entries(const Epetra_CrsGraph &graph)
TrilinosWrappers::types::int64_type n_global_elements(const Epetra_BlockMap &map)
TrilinosWrappers::types::int_type max_my_gid(const Epetra_BlockMap &map)
TrilinosWrappers::types::int_type n_global_cols(const Epetra_CrsGraph &graph)
unsigned int n_mpi_processes(const MPI_Comm mpi_communicator)
Definition mpi.cc:105
const Epetra_Comm & comm_self()
constexpr bool compare_for_equality(const T &t, const U &u)
::VectorizedArray< Number, width > abs(const ::VectorizedArray< Number, width > &)
Definition types.h:32
unsigned int global_dof_index
Definition types.h:94