1544 * These three get-
functions are used to channel the mesh and the solution
1545 * to the next solver.
1550 *
return triangulation;
1554 *
return dof_handler;
1564 * The following data members are typical
for all deal.II simulations:
1565 * triangulation, finite elements, dof handlers, etc. The constraints
1566 * are used to enforce the Dirichlet boundary conditions. The names of the
1567 * data members are self-explanatory.
1587 *
const unsigned int refinement_parameter;
1588 *
const unsigned int mapping_degree;
1589 *
const double eta_squared;
1590 *
const std::string fname;
1595 * The program utilizes the
WorkStream technology. The @ref step_9
"step-9" tutorial
1596 * does a much better job of explaining the workings of
WorkStream.
1597 * Reading the
"WorkStream paper", see the glossary, is recommended.
1601 *
struct AssemblyScratchData
1604 *
const double eta_squared,
1605 *
const unsigned int mapping_degree);
1607 * AssemblyScratchData(
const AssemblyScratchData &scratch_data);
1609 *
const FreeCurrentDensity Jf;
1614 *
const unsigned int dofs_per_cell;
1615 *
const unsigned int n_q_points;
1617 * std::vector<Tensor<1, 3>> Jf_list;
1619 *
const double eta_squared;
1623 *
struct AssemblyCopyData
1627 * std::vector<types::global_dof_index> local_dof_indices;
1630 *
void system_matrix_local(
1632 * AssemblyScratchData &scratch_data,
1633 * AssemblyCopyData &copy_data);
1635 *
void copy_local_to_global(
const AssemblyCopyData &copy_data);
1638 * Solver::Solver(
const unsigned int p,
1639 *
const unsigned int r,
1640 *
const unsigned int mapping_degree,
1641 *
const double eta_squared,
1642 *
const std::string &fname)
1644 * , refinement_parameter(r)
1645 * , mapping_degree(mapping_degree)
1646 * , eta_squared(eta_squared)
1648 * , timer(std::cout,
1656 * The following function loads the mesh, assigns material IDs to all cells,
1657 * and attaches the spherical manifold to the mesh. The material IDs are
1658 * assigned on the basis of the distance from the center of a cell to the
1659 * origin. The spherical manifold is attached to a face
if all vertices of
1660 * the face are at the same distance from the origin provided the cell is
1661 *
outside the cube in the center of the mesh, see mesh description in the
1665 *
void Solver::make_mesh()
1670 * std::ifstream ifs(
"sphere_r" + std::to_string(refinement_parameter) +
1672 * gridin.read_msh(ifs);
1674 * triangulation.reset_all_manifolds();
1676 *
for (
auto cell : triangulation.active_cell_iterators())
1678 * cell->set_material_id(
1679 * Settings::material_id_free_space);
1681 *
if ((cell->center().norm() > Settings::a1) &&
1682 * (cell->center().norm() < Settings::b1))
1683 * cell->set_material_id(
1684 * Settings::material_id_core);
1686 *
if ((cell->center().norm() > Settings::a2) &&
1687 * (cell->center().norm() < Settings::b2))
1688 * cell->set_material_id(
1689 * Settings::material_id_free_current);
1692 *
for (
unsigned int f = 0; f < cell->n_faces(); f++)
1694 *
double dif_norm = 0.0;
1695 *
for (
unsigned int v = 1; v < cell->face(f)->n_vertices(); v++)
1696 * dif_norm +=
std::abs(cell->face(f)->vertex(0).norm() -
1697 * cell->face(f)->vertex(v).norm());
1699 *
if ((dif_norm < Settings::eps) &&
1700 * (cell->center().norm() > Settings::d1))
1701 * cell->face(f)->set_all_manifold_ids(1);
1705 * triangulation.set_manifold(1, sphere);
1710 * This function initializes the dofs, applies the Dirichlet boundary
1711 * condition, and initializes the vectors and matrices.
1714 *
void Solver::setup()
1716 * dof_handler.reinit(triangulation);
1717 * dof_handler.distribute_dofs(fe);
1721 * The following segment of the code applies the homogeneous Dirichlet
1722 * boundary condition. As discussed in the introduction, the Dirichlet
1723 * boundary condition is an essential condition and must be enforced
1724 * by constraining the system
matrix. This segment of the code does the
1728 * constraints.clear();
1736 * Settings::boundary_id_infinity,
1740 * constraints.close();
1744 * The rest of the function arranges the dofs in a sparsity pattern and
1745 * initializes the system matrices and vectors.
1750 * sparsity_pattern.copy_from(dsp);
1752 * system_matrix.reinit(sparsity_pattern);
1753 * solution.reinit(dof_handler.n_dofs());
1754 * system_rhs.reinit(dof_handler.n_dofs());
1759 * Formally, the following function assembles the system of linear equations.
1760 * In reality, however, it just spells all the magic words to get the
1761 *
WorkStream going. The interesting part, i.e., the actual assembling of the
1762 * system
matrix and the right-hand side, happens below in the function
1763 * Solver::system_matrix_local().
1766 *
void Solver::assemble()
1769 * dof_handler.end(),
1771 * &Solver::system_matrix_local,
1772 * &Solver::copy_local_to_global,
1773 * AssemblyScratchData(fe, eta_squared, mapping_degree),
1774 * AssemblyCopyData());
1779 * The following two constructors initialize scratch data from the input
1780 * parameters and from another
object of the same type, i.e., a
copy
1784 * Solver::AssemblyScratchData::AssemblyScratchData(
1786 *
const double eta_squared,
1787 *
const unsigned int mapping_degree)
1795 * , dofs_per_cell(fe_values.dofs_per_cell)
1796 * , n_q_points(fe_values.get_quadrature().size())
1798 * , eta_squared(eta_squared)
1802 * Solver::AssemblyScratchData::AssemblyScratchData(
1803 *
const AssemblyScratchData &scratch_data)
1805 * ,
mapping(scratch_data.mapping.get_degree())
1807 * scratch_data.fe_values.get_fe(),
1808 * scratch_data.fe_values.get_quadrature(),
1811 * , dofs_per_cell(fe_values.dofs_per_cell)
1812 * , n_q_points(fe_values.get_quadrature().size())
1814 * , eta_squared(scratch_data.eta_squared)
1820 * This function assembles a fraction of the system
matrix and the system
1821 * right-hand side related to a single cell. These fractions are
1822 * `copy_data.cell_matrix` and `copy_data.cell_rhs`. They are copied into
1823 * the system
matrix, @f$A_{ij}@f$, and the right-hand side, @f$b_i@f$, by the
1824 * function `Solver::copy_local_to_global()`.
1827 *
void Solver::system_matrix_local(
1829 * AssemblyScratchData &scratch_data,
1830 * AssemblyCopyData &copy_data)
1834 * First, we reinitialize the matrices and vectors related to the current
1835 * cell and compute the FE values.
1838 * copy_data.cell_matrix.reinit(scratch_data.dofs_per_cell,
1839 * scratch_data.dofs_per_cell);
1841 * copy_data.cell_rhs.reinit(scratch_data.dofs_per_cell);
1843 * copy_data.local_dof_indices.resize(scratch_data.dofs_per_cell);
1845 * scratch_data.fe_values.reinit(cell);
1849 * Second, we compute the free-current density, @f$\vec{J}_f@f$, at the
1850 * quadrature points.
1853 * scratch_data.Jf.value_list(scratch_data.fe_values.get_quadrature_points(),
1854 * cell->material_id(),
1855 * scratch_data.Jf_list);
1859 * Third, we compute the components of the cell
matrix and cell right-hand
1860 * side. The labels of the integrals are the same as in the introduction to
1864 *
for (
unsigned int q_index = 0; q_index < scratch_data.n_q_points; ++q_index)
1866 *
for (
unsigned int i = 0; i < scratch_data.dofs_per_cell; ++i)
1868 *
for (
unsigned int j = 0; j < scratch_data.dofs_per_cell; ++j)
1870 * copy_data.cell_matrix(i, j) +=
1871 * (scratch_data.fe_values[scratch_data.ve].curl(
1873 * scratch_data.fe_values[scratch_data.ve].curl(
1875 * + scratch_data.eta_squared *
1876 * scratch_data.fe_values[scratch_data.ve].value(
1878 * scratch_data.fe_values[scratch_data.ve].value(
1881 * scratch_data.fe_values.JxW(q_index);
1883 * copy_data.cell_rhs(i) +=
1884 * (scratch_data.Jf_list[q_index] *
1885 * scratch_data.fe_values[scratch_data.ve].curl(i, q_index)) *
1886 * scratch_data.fe_values.JxW(q_index);
1892 * Finally, we query the dof indices on the current cell and store them
1893 * in the
copy data structure, so we know to which locations of the system
1894 *
matrix and right-hand side the components of the cell
matrix and
1895 * cell right-hand side must be copied.
1898 * cell->get_dof_indices(copy_data.local_dof_indices);
1903 * This function copies the components of a cell
matrix and a cell right-hand
1904 * side into the system
matrix, @f$A_{i,j}@f$, and the system right-hand side,
1908 *
void Solver::copy_local_to_global(
const AssemblyCopyData &copy_data)
1910 * constraints.distribute_local_to_global(copy_data.cell_matrix,
1911 * copy_data.cell_rhs,
1912 * copy_data.local_dof_indices,
1919 * This function solves the system of linear equations.
1920 * If `Settings::log_cg_convergence ==
true`, the convergence data is saved
1921 * into a file. In theory, a CG solver can solve an @f$m \times m@f$ system of
1922 * linear equations in at most @f$m@f$ steps. In practice, it can take more steps
1923 * to converge. The convergence of the algorithm depends on the spectral
1925 * compact cluster away from
zero. In our
case, however, the
eigenvalues are
1926 * spread in between
zero and the maximal eigenvalue. Consequently, we expect
1927 * a poor convergence and increase the maximal number of iteration steps by a
1928 * factor of 10, i.e., `10*system_rhs.size()`. The stopping condition is \f[
1929 * |\boldsymbol{
b} - \boldsymbol{
A}\boldsymbol{c}|
1930 * < 10^{-6} |\boldsymbol{
b}|.
1932 * As soon as we use constraints, we must not forget to distribute them.
1935 *
void Solver::solve()
1938 * 1.0e-6 * system_rhs.l2_norm(),
1942 *
if (Settings::log_cg_convergence)
1943 * control.enable_history_data();
1949 * preconditioner.
initialize(system_matrix, 1.2);
1951 * cg.solve(system_matrix, solution, system_rhs, preconditioner);
1953 * constraints.distribute(solution);
1955 *
if (Settings::log_cg_convergence)
1957 *
const std::vector<double> history_data = control.get_history_data();
1959 * std::ofstream ofs(fname +
"_cg_convergence.csv");
1961 *
for (
unsigned int i = 1; i < history_data.size(); i++)
1962 * ofs << i <<
", " << history_data[i] <<
"\n";
1968 * This function saves the computed current vector potential into a
vtu file.
1971 *
void Solver::save() const
1973 *
const std::vector<std::string> solution_names(3,
"VectorField");
1974 *
const std::vector<DataComponentInterpretation::DataComponentInterpretation>
1987 * data_out.set_flags(flags);
1991 * data_out.build_patches(
mapping,
1995 * std::ofstream ofs(fname +
".vtu");
1996 * data_out.write_vtu(ofs);
1999 *
void Solver::run()
2031 * <a name=
"step_97-SolverA"></a>
2032 * <h3>Solver -
A</h3>
2036 * This name space contains all the code related to the computation of the
2037 * magnetic vector potential, @f$\vec{
A}@f$. The main difference between
this
2038 * solver and the solver
for the current vector potential, @f$\vec{
T}@f$, is in how
2039 * the information on the source is fed to respective solvers. The solver
for
2040 * @f$\vec{
T}@f$ is fed data sampled from the analytical closed-form expression
for
2041 * @f$\vec{J}_f@f$. The solver
for @f$\vec{
A}@f$ is fed a field function, i.e., a
2042 * numerically computed current vector potential, @f$\vec{
T}@f$.
2049 * This
class describes the permeability in the entire problem domain. The
2050 * permeability is given by the definition of the problem, see the
2054 *
class Permeability
2058 * std::vector<double> &values)
const
2060 *
if ((mid == Settings::material_id_free_space) ||
2061 * (mid == Settings::material_id_free_current))
2062 * std::fill(values.begin(), values.end(), Settings::mu_0);
2064 *
if (mid == Settings::material_id_core)
2065 * std::fill(values.begin(), values.end(), Settings::mu_1);
2071 * This
class describes the parameter @f$\
gamma@f$ in the Robin boundary
2072 * condition. As soon as it is evaluated on the boundary, the permeability
2073 * equals to that of free space. Therefore, we evaluate the parameter
gamma as
2075 * \
gamma = \dfrac{1}{\mu_0 r}.
2082 *
void value_list(
const std::vector<
Point<3>> &r,
2083 * std::vector<double> &values)
const
2085 *
Assert(r.size() == values.size(),
2086 * ExcDimensionMismatch(r.size(), values.size()));
2088 *
for (
unsigned int i = 0; i < values.size(); i++)
2089 * values[i] = 1.0 / (Settings::mu_0 * r[i].
norm());
2095 * This
class implements the solver that minimizes the functional
2096 * @f$F(\vec{
A})@f$. The numerically computed current vector potential, @f$\vec{T}@f$,
2097 * is fed to
this solver by means of the input parameters `dof_handler_T` and
2098 * `solution_T`. Moreover,
this solver reuses the mesh on which @f$\vec{
T}@f$ has
2099 * been computed. The reference to the mesh is passed via the input parameter
2100 * `triangulation_T`.
2106 * Solver() =
delete;
2107 * Solver(
const unsigned int p,
2108 *
const unsigned int mapping_degree,
2112 *
const double eta_squared = 0.0,
2113 *
const std::string &fname =
"data");
2118 *
void save()
const;
2121 * system_matrix.clear();
2122 * system_rhs.reinit(0);
2129 *
return dof_handler;
2143 * The following data members are typical
for all deal.II simulations:
2144 * triangulation, finite elements, dof handlers, etc. The constraints
2145 * are used to enforce the Dirichlet boundary conditions. The names of the
2146 * data members are self-explanatory.
2159 *
const unsigned int mapping_degree;
2160 *
const double eta_squared;
2161 *
const std::string fname;
2166 * This time we have two dof handlers, `dof_handler_T`
for @f$\vec{
T}@f$ and
2167 * `dof_handler`
for @f$\vec{
A}@f$. The
WorkStream needs to walk through
2168 * the two dof handlers synchronously. For
this purpose we will pair two
2169 * active cell iterators (one from `dof_handler_T`, another from
2170 * `dof_handler`). For that we need the `IteratorPair` type.
2173 *
using IteratorTuple =
2174 * std::tuple<typename DoFHandler<3>::active_cell_iterator,
2181 * The program utilizes the
WorkStream technology. The @ref step_9
"step-9" tutorial
2182 * does a much better job of explaining the workings of
WorkStream.
2183 * Reading the
"WorkStream paper", see the glossary, is recommended.
2187 *
struct AssemblyScratchData
2192 *
const unsigned int mapping_degree,
2193 *
const double eta_squared,
2194 *
const BoundaryConditionType boundary_condition_type);
2196 * AssemblyScratchData(
const AssemblyScratchData &scratch_data);
2198 *
const Permeability permeability;
2199 *
const Gamma
gamma;
2207 *
const unsigned int dofs_per_cell;
2208 *
const unsigned int n_q_points;
2209 *
const unsigned int n_q_points_face;
2211 * std::vector<double> permeability_list;
2212 * std::vector<double> gamma_list;
2213 * std::vector<Tensor<1, 3>> T_values;
2220 *
const double eta_squared;
2221 *
const BoundaryConditionType boundary_condition_type;
2224 *
struct AssemblyCopyData
2228 * std::vector<types::global_dof_index> local_dof_indices;
2231 *
void system_matrix_local(
const IteratorPair &IP,
2232 * AssemblyScratchData &scratch_data,
2233 * AssemblyCopyData &copy_data);
2235 *
void copy_local_to_global(
const AssemblyCopyData &copy_data);
2238 * Solver::Solver(
const unsigned int p,
2239 *
const unsigned int mapping_degree,
2243 *
const double eta_squared,
2244 *
const std::string &fname)
2245 * : triangulation_T(triangulation_T)
2246 * , dof_handler_T(dof_handler_T)
2247 * , solution_T(solution_T)
2249 * , mapping_degree(mapping_degree)
2250 * , eta_squared(eta_squared)
2252 * , timer(std::cout,
2260 * This function initializes the dofs, applies the Dirichlet boundary
2261 * condition, and initializes the vectors and matrices.
2264 *
void Solver::setup()
2266 * dof_handler.reinit(triangulation_T);
2267 * dof_handler.distribute_dofs(fe);
2271 * The following segment of the code applies the homogeneous Dirichlet
2272 * boundary condition. As discussed in the introduction, the Dirichlet
2273 * boundary condition is an essential condition and must be enforced
2274 * by constraining the system
matrix. This segment of code does the
2278 * constraints.clear();
2282 *
if (Settings::boundary_condition_type_A == Dirichlet)
2287 * Settings::boundary_id_infinity,
2291 * constraints.close();
2295 * The rest of the function arranges the dofs in a sparsity pattern and
2296 * initializes the system
matrix and the system vectors.
2302 * sparsity_pattern.copy_from(dsp);
2303 * system_matrix.reinit(sparsity_pattern);
2304 * solution.reinit(dof_handler.n_dofs());
2305 * system_rhs.reinit(dof_handler.n_dofs());
2310 * Formally,
this function assembles the system of linear equations. In
2311 * reality, however, it just spells all the magic words to get the
WorkStream
2312 * going. The interesting part, i.e., the actual assembling of the system
2313 *
matrix and the right-hand side happens below in the
2314 * Solver::system_matrix_local function. Note that
this time the first two
2316 * iterators themselves as per usual. Note also the order in which we
package
2317 * the iterators: first the iterator of `dof_handler`, then the iterator of
2318 * the `dof_handler_T`. We will extract them in the same order.
2321 * void Solver::assemble()
2323 * WorkStream::run(IteratorPair(IteratorTuple(dof_handler.begin_active(),
2324 * dof_handler_T.begin_active())),
2326 * IteratorTuple(dof_handler.end(), dof_handler_T.end())),
2328 * &Solver::system_matrix_local,
2329 * &Solver::copy_local_to_global,
2330 * AssemblyScratchData(fe,
2335 * Settings::boundary_condition_type_A),
2336 * AssemblyCopyData());
2341 * The following two constructors initialize scratch data from the input
2342 * parameters and from another
object of the same type, i.e., a
copy
2346 * Solver::AssemblyScratchData::AssemblyScratchData(
2350 *
const unsigned int mapping_degree,
2351 *
const double eta_squared,
2352 *
const BoundaryConditionType boundary_condition_type)
2366 * dof_hand_T.get_fe(),
2369 * , dofs_per_cell(fe_values.dofs_per_cell)
2370 * , n_q_points(fe_values.get_quadrature().size())
2371 * , n_q_points_face(fe_face_values.get_quadrature().size())
2372 * , permeability_list(n_q_points)
2373 * , gamma_list(n_q_points_face)
2374 * , T_values(n_q_points)
2376 * , dof_hand_T(dof_hand_T)
2378 * , eta_squared(eta_squared)
2379 * , boundary_condition_type(boundary_condition_type)
2382 * Solver::AssemblyScratchData::AssemblyScratchData(
2383 *
const AssemblyScratchData &scratch_data)
2386 * ,
mapping(scratch_data.mapping.get_degree())
2388 * scratch_data.fe_values.get_fe(),
2389 * scratch_data.fe_values.get_quadrature(),
2392 * scratch_data.fe_face_values.get_fe(),
2393 * scratch_data.fe_face_values.get_quadrature(),
2397 * scratch_data.fe_values_T.get_fe(),
2398 * scratch_data.fe_values_T.get_quadrature(),
2400 * , dofs_per_cell(fe_values.dofs_per_cell)
2401 * , n_q_points(fe_values.get_quadrature().size())
2402 * , n_q_points_face(fe_face_values.get_quadrature().size())
2403 * , permeability_list(n_q_points)
2404 * , gamma_list(n_q_points_face)
2405 * , T_values(n_q_points)
2407 * , dof_hand_T(scratch_data.dof_hand_T)
2408 * , dofs_T(scratch_data.dofs_T)
2409 * , eta_squared(scratch_data.eta_squared)
2410 * , boundary_condition_type(scratch_data.boundary_condition_type)
2415 * This function assembles a fraction of the system
matrix and the system
2416 * right-hand side related to a single cell. These fractions are
2417 * `copy_data.cell_matrix` and `copy_data.cell_rhs`. They are copied into
2418 * the system
matrix, @f$A_{ij}@f$, and the right-hand side, @f$b_i@f$, by the
2419 * function `Solver::copy_local_to_global()`.
2422 *
void Solver::system_matrix_local(
const IteratorPair &IP,
2423 * AssemblyScratchData &scratch_data,
2424 * AssemblyCopyData &copy_data)
2428 * First we reinitialize the matrices and vectors related to the current
2432 * copy_data.cell_matrix.reinit(scratch_data.dofs_per_cell,
2433 * scratch_data.dofs_per_cell);
2435 * copy_data.cell_rhs.reinit(scratch_data.dofs_per_cell);
2437 * copy_data.local_dof_indices.resize(scratch_data.dofs_per_cell);
2441 * Second, we
extract the cells from the pair. We
extract them in the
2442 * correct order, see above.
2445 *
auto cell = std::get<0>(*IP);
2446 *
auto cell_T = std::get<1>(*IP);
2450 * Third, we compute the ordered FE values, the permeability, and the values
2451 * of the current vector potential, @f$\vec{
T}@f$, on the cell.
2454 * scratch_data.fe_values.reinit(cell);
2455 * scratch_data.fe_values_T.reinit(cell_T);
2457 * scratch_data.permeability.value_list(cell->material_id(),
2458 * scratch_data.permeability_list);
2460 * scratch_data.fe_values_T[scratch_data.ve].get_function_values(
2461 * scratch_data.dofs_T, scratch_data.T_values);
2465 * Fourth, we compute the components of the cell
matrix and cell right-hand
2466 * side. The labels of the integrals are the same as in the introduction to
2470 *
for (
unsigned int q_index = 0; q_index < scratch_data.n_q_points; ++q_index)
2472 *
for (
unsigned int i = 0; i < scratch_data.dofs_per_cell; ++i)
2474 *
for (
unsigned int j = 0; j < scratch_data.dofs_per_cell; ++j)
2476 * copy_data.cell_matrix(i, j) +=
2477 * (1.0 / scratch_data.permeability_list[q_index]) *
2478 * (scratch_data.fe_values[scratch_data.ve].curl(
2480 * scratch_data.fe_values[scratch_data.ve].curl(
2482 * + scratch_data.eta_squared *
2483 * scratch_data.fe_values[scratch_data.ve].value(
2485 * scratch_data.fe_values[scratch_data.ve].value(
2488 * scratch_data.fe_values.JxW(q_index);
2490 * copy_data.cell_rhs(i) +=
2491 * (scratch_data.T_values[q_index] *
2492 * scratch_data.fe_values[scratch_data.ve].curl(i, q_index)) *
2493 * scratch_data.fe_values.JxW(q_index);
2499 * If the Robin boundary condition (first-order ABC) is ordered,
2500 * we compute an extra integral over the boundary.
2503 *
if (scratch_data.boundary_condition_type == BoundaryConditionType::Robin)
2505 *
for (
unsigned int f = 0; f < cell->n_faces(); ++f)
2507 *
if (cell->face(f)->at_boundary())
2509 * scratch_data.fe_face_values.reinit(cell, f);
2511 *
for (
unsigned int q_index_face = 0;
2512 * q_index_face < scratch_data.n_q_points_face;
2515 *
for (
unsigned int i = 0; i < scratch_data.dofs_per_cell;
2518 * scratch_data.gamma.value_list(
2519 * scratch_data.fe_face_values.get_quadrature_points(),
2520 * scratch_data.gamma_list);
2522 *
for (
unsigned int j = 0; j < scratch_data.dofs_per_cell;
2525 * copy_data.cell_matrix(i, j) +=
2526 * scratch_data.gamma_list[q_index_face] *
2528 * scratch_data.fe_face_values.normal_vector(
2530 * scratch_data.fe_face_values[scratch_data.ve]
2531 * .value(i, q_index_face)) *
2533 * scratch_data.fe_face_values.normal_vector(
2535 * scratch_data.fe_face_values[scratch_data.ve]
2538 * * scratch_data.fe_face_values.JxW(
2550 * Finally, we query the dof indices on the current cell and store them
2551 * in the
copy data structure, so we know to which locations of the system
2552 *
matrix and right-hand side the components of the cell
matrix and
2553 * cell right-hand side must be copied.
2556 * cell->get_dof_indices(copy_data.local_dof_indices);
2561 * This function copies the components of a cell
matrix and a cell right-hand
2562 * side into the system
matrix, @f$A_{i,j}@f$, and the system right-hand side,
2566 *
void Solver::copy_local_to_global(
const AssemblyCopyData &copy_data)
2568 * constraints.distribute_local_to_global(copy_data.cell_matrix,
2569 * copy_data.cell_rhs,
2570 * copy_data.local_dof_indices,
2577 * This function solves the system of linear equations.
2578 * If `Settings::log_cg_convergence ==
true`, the convergence data is saved
2579 * into a file. In theory, a CG solver can solve an @f$m \times m@f$ system of
2580 * linear equations in at most @f$m@f$ steps. In practice, it can take more steps
2581 * to converge. The convergence of the algorithm depends on the spectral
2583 * compact cluster away from
zero. In our
case, however, the
eigenvalues are
2584 * spread in between
zero and the maximal eigenvalue. Consequently, we expect
2585 * a poor convergence and increase the maximal number of iteration steps by a
2586 * factor of 10, i.e., `10*system_rhs.size()`. The stopping condition is \f[
2587 * |\boldsymbol{
b} - \boldsymbol{
A}\boldsymbol{c}|
2588 * < 10^{-6} |\boldsymbol{
b}|.
2590 * As soon as we use constraints, we must not forget to distribute them.
2593 *
void Solver::solve()
2596 * 1.0e-6 * system_rhs.l2_norm(),
2600 *
if (Settings::log_cg_convergence)
2601 * control.enable_history_data();
2607 * preconditioner.
initialize(system_matrix, 1.2);
2609 * cg.solve(system_matrix, solution, system_rhs, preconditioner);
2611 * constraints.distribute(solution);
2613 *
if (Settings::log_cg_convergence)
2615 *
const std::vector<double> history_data = control.get_history_data();
2617 * std::ofstream ofs(fname +
"_cg_convergence.csv");
2619 *
for (
unsigned int i = 1; i < history_data.size(); i++)
2620 * ofs << i <<
", " << history_data[i] <<
"\n";
2626 * This function saves the computed magnetic vector potential into a
vtu file.
2629 *
void Solver::save() const
2631 * std::vector<std::string> solution_names(3,
"VectorField");
2632 * std::vector<DataComponentInterpretation::DataComponentInterpretation>
2645 * data_out.set_flags(flags);
2649 * data_out.build_patches(
mapping,
2653 * std::ofstream ofs(fname +
".vtu");
2654 * data_out.write_vtu(ofs);
2657 *
void Solver::run()
2686 * <a name=
"step_97-ProjectorfromHcurltoHdiv"></a>
2687 * <h3>Projector from H(curl) to H(div)</h3>
2688 * This name space contains all the code related to the conversion of the
2689 * magnetic vector potential, @f$\vec{
A}@f$, into magnetic field, @f$\vec{B}@f$.
2690 * The magnetic vector potential is modeled by the
FE_Nedelec finite elements,
2691 *
while the magnetic field is modeled by the
FE_RaviartThomas finite elements.
2692 * This code is also used
for converting the current vector potential,
2693 * @f$\vec{
T}@f$ into the free-current density, @f$\vec{J}_f@f$.
2696 *
namespace ProjectorHcurlToHdiv
2701 * This
class implements the solver that minimizes the functional @f$F(\vec{B})@f$
2702 * or @f$F(\vec{J}_f)@f$, see the introduction. The input vector field,
2703 * @f$\vec{A}@f$ or @f$\vec{
T}@f$, is fed to the solver by means of the input
2704 * parameters `dof_handler_Hcurl` and `solution_Hcurl`. Moreover,
this solver
2705 * reuses the mesh on which the input vector field has been computed. The
2706 * reference to the mesh is passed via the input parameter
2707 * `triangulation_Hcurl`. There are no constraints
this time around as we are
2708 * not going to
apply the Dirichlet boundary condition.
2714 * Solver() =
delete;
2715 * Solver(
const unsigned int p,
2716 *
const unsigned int mapping_degree,
2720 *
const std::string &fname =
"data",
2723 *
double get_L2_norm()
2728 *
unsigned int get_n_cells() const
2730 *
return triangulation_Hcurl.n_active_cells();
2735 *
return dof_handler_Hdiv.n_dofs();
2741 *
void save()
const;
2742 *
void compute_error_norms();
2743 *
void project_exact_solution_fcn();
2746 * system_matrix.clear();
2747 * system_rhs.reinit(0);
2759 * The following data members are typical
for all deal.II simulations:
2760 * triangulation, finite elements, dof handlers, etc. The constraints
2761 * are used to enforce the Dirichlet boundary conditions. The names of the
2762 * data members are self-explanatory.
2780 *
const unsigned int mapping_degree;
2785 *
const std::string fname;
2790 * This time we have two dof handlers, `dof_handler_Hcurl`
for the input
2791 * vector field and `dof_handler_Hdiv`
for the output vector field. The
2792 *
WorkStream needs to walk through the two dof handlers synchronously.
2793 * For
this purpose we will pair two active cell iterators (one from
2794 * `dof_handler_Hcurl`, another from `dof_handler_Hdiv`) to be walked
2795 * through synchronously. For that we need the `IteratorPair` type.
2798 *
using IteratorTuple =
2799 * std::tuple<typename DoFHandler<3>::active_cell_iterator,
2806 * The program utilizes the
WorkStream technology. The @ref step_9
"step-9" tutorial
2807 * does a much better job of explaining the workings of WorkStream.
2808 * Reading the
"WorkStream paper", see the glossary, is recommended.
2812 *
struct AssemblyScratchData
2817 *
const unsigned int mapping_degree);
2819 * AssemblyScratchData(
const AssemblyScratchData &scratch_data);
2825 *
const unsigned int dofs_per_cell;
2826 *
const unsigned int n_q_points;
2828 * std::vector<Tensor<1, 3>> curl_vec_in_Hcurl;
2836 *
struct AssemblyCopyData
2840 * std::vector<types::global_dof_index> local_dof_indices;
2843 *
void system_matrix_local(
const IteratorPair &IP,
2844 * AssemblyScratchData &scratch_data,
2845 * AssemblyCopyData &copy_data);
2847 *
void copy_local_to_global(
const AssemblyCopyData &copy_data);
2850 * Solver::Solver(
const unsigned int p,
2851 *
const unsigned int mapping_degree,
2855 *
const std::string &fname,
2857 * : triangulation_Hcurl(triangulation_Hcurl)
2858 * , dof_handler_Hcurl(dof_handler_Hcurl)
2859 * , solution_Hcurl(solution_Hcurl)
2861 * , exact_solution(exact_solution)
2862 * , mapping_degree(mapping_degree)
2864 * , timer(std::cout,
2869 *
Assert(exact_solution !=
nullptr,
2870 * ExcMessage(
"The exact solution is missing."));
2875 * This function initializes the dofs, vectors and matrices. This time there
2876 * are no constraints as we
do not
apply Dirichlet boundary condition.
2879 *
void Solver::setup()
2881 * constraints.close();
2883 * dof_handler_Hdiv.reinit(triangulation_Hcurl);
2884 * dof_handler_Hdiv.distribute_dofs(fe_Hdiv);
2887 * dof_handler_Hdiv.n_dofs());
2890 * sparsity_pattern.copy_from(dsp);
2891 * system_matrix.reinit(sparsity_pattern);
2892 * solution_Hdiv.reinit(dof_handler_Hdiv.n_dofs());
2893 * system_rhs.reinit(dof_handler_Hdiv.n_dofs());
2895 *
if (Settings::project_exact_solution && exact_solution)
2896 * projected_exact_solution.reinit(dof_handler_Hdiv.n_dofs());
2898 *
if (exact_solution)
2899 * L2_per_cell.reinit(triangulation_Hcurl.n_active_cells());
2904 * Formally,
this function assembles the system of linear equations. In
2905 * reality, however, it just spells all the magic words to get the
WorkStream
2906 * going. The interesting part, i.e., the actual assembling of the system
2907 *
matrix and the right-hand side happens below in the
2908 * Solver::system_matrix_local function.
2911 *
void Solver::assemble()
2914 * IteratorTuple(dof_handler_Hdiv.begin_active(),
2915 * dof_handler_Hcurl.begin_active())),
2916 * IteratorPair(IteratorTuple(dof_handler_Hdiv.end(),
2917 * dof_handler_Hcurl.end())),
2919 * &Solver::system_matrix_local,
2920 * &Solver::copy_local_to_global,
2921 * AssemblyScratchData(fe_Hdiv,
2922 * dof_handler_Hcurl,
2925 * AssemblyCopyData());
2930 * The following two constructors initialize scratch data from the input
2931 * parameters and from another
object of the same type, i.e., a
copy
2935 * Solver::AssemblyScratchData::AssemblyScratchData(
2939 *
const unsigned int mapping_degree)
2946 * dof_hand_Hcurl.get_fe(),
2949 * , dofs_per_cell(fe_values_Hdiv.dofs_per_cell)
2950 * , n_q_points(fe_values_Hdiv.get_quadrature().size())
2951 * , curl_vec_in_Hcurl(n_q_points)
2953 * , dof_hand_Hcurl(dof_hand_Hcurl)
2954 * , dofs_Hcurl(dofs_Hcurl)
2957 * Solver::AssemblyScratchData::AssemblyScratchData(
2958 *
const AssemblyScratchData &scratch_data)
2959 * :
mapping(scratch_data.mapping.get_degree())
2961 * scratch_data.fe_values_Hdiv.get_fe(),
2962 * scratch_data.fe_values_Hdiv.get_quadrature(),
2965 * scratch_data.fe_values_Hcurl.get_fe(),
2966 * scratch_data.fe_values_Hcurl.get_quadrature(),
2968 * , dofs_per_cell(fe_values_Hdiv.dofs_per_cell)
2969 * , n_q_points(fe_values_Hdiv.get_quadrature().size())
2970 * , curl_vec_in_Hcurl(scratch_data.n_q_points)
2972 * , dof_hand_Hcurl(scratch_data.dof_hand_Hcurl)
2973 * , dofs_Hcurl(scratch_data.dofs_Hcurl)
2978 * This function assembles a fraction of the system
matrix and the system
2979 * right-hand side related to a single cell. These fractions are
2980 * `copy_data.cell_matrix` and `copy_data.cell_rhs`. They are copied into
2981 * the system
matrix, @f$A_{ij}@f$, and the right-hand side, @f$b_i@f$, by the
2982 * function `Solver::copy_local_to_global()`.
2985 *
void Solver::system_matrix_local(
const IteratorPair &IP,
2986 * AssemblyScratchData &scratch_data,
2987 * AssemblyCopyData ©_data)
2991 * First we reinitialize the matrices and vectors related to the current
2992 * cell, update the FE values, and compute the curl of the input vector
2996 * copy_data.cell_matrix.reinit(scratch_data.dofs_per_cell,
2997 * scratch_data.dofs_per_cell);
2999 * copy_data.cell_rhs.reinit(scratch_data.dofs_per_cell);
3001 * copy_data.local_dof_indices.resize(scratch_data.dofs_per_cell);
3003 * scratch_data.fe_values_Hdiv.reinit(std::get<0>(*IP));
3004 * scratch_data.fe_values_Hcurl.reinit(std::get<1>(*IP));
3008 * The variable `curl_vec_in_Hcurl` denotes the curl of the input vector
3009 * field, @f$\vec{\nabla} \times \vec{
T}@f$ or @f$\vec{\nabla} \times \vec{
A}@f$,
3010 * depending on the context.
3013 * scratch_data.fe_values_Hcurl[scratch_data.ve].get_function_curls(
3014 * scratch_data.dofs_Hcurl, scratch_data.curl_vec_in_Hcurl);
3018 * Second, we compute the components of the cell
matrix and cell right-hand
3019 * side. The labels of the integrals are the same as in the introduction to
3023 *
for (
unsigned int q_index = 0; q_index < scratch_data.n_q_points; ++q_index)
3025 *
for (
unsigned int i = 0; i < scratch_data.dofs_per_cell; ++i)
3027 *
for (
unsigned int j = 0; j < scratch_data.dofs_per_cell; ++j)
3029 * copy_data.cell_matrix(i, j) +=
3030 * scratch_data.fe_values_Hdiv[scratch_data.ve].value(i,
3032 * scratch_data.fe_values_Hdiv[scratch_data.ve].value(j,
3034 * scratch_data.fe_values_Hdiv.JxW(q_index);
3037 * copy_data.cell_rhs(i) +=
3038 * scratch_data.curl_vec_in_Hcurl[q_index] *
3039 * scratch_data.fe_values_Hdiv[scratch_data.ve].value(i, q_index) *
3040 * scratch_data.fe_values_Hdiv.JxW(q_index);
3046 * Finally, we query the dof indices on the current cell and store them
3047 * in the
copy data structure, so we know to which locations of the system
3048 *
matrix and right-hand side the components of the cell
matrix and
3049 * cell right-hand side must be copied.
3052 * std::get<0>(*IP)->get_dof_indices(copy_data.local_dof_indices);
3057 * This function copies the components of a cell
matrix and a cell right-hand
3058 * side into the system
matrix, @f$A_{i,j}@f$, and the system right-hand side,
3062 *
void Solver::copy_local_to_global(
const AssemblyCopyData ©_data)
3064 * constraints.distribute_local_to_global(copy_data.cell_matrix,
3065 * copy_data.cell_rhs,
3066 * copy_data.local_dof_indices,
3073 * The following two
functions compute the error norms and
project the exact
3077 *
void Solver::compute_error_norms()
3079 *
const Weight weight;
3096 *
void Solver::project_exact_solution_fcn()
3099 * constraints_empty.
close();
3103 * constraints_empty,
3106 * projected_exact_solution);
3111 * This function solves the system of linear equations. This time we are
3112 * dealing with a mass
matrix. It has good spectral properties. Consequently,
3113 * we
do not use the factor of 10 as in the preceding two solvers.
3114 * The stopping condition is
3116 * |\boldsymbol{
b} - \boldsymbol{
A}\boldsymbol{c}|
3117 * < 10^{-6} |\boldsymbol{
b}|.
3121 *
void Solver::solve()
3124 * 1.0e-6 * system_rhs.l2_norm(),
3128 *
if (Settings::log_cg_convergence)
3129 * control.enable_history_data();
3135 * preconditioner.
initialize(system_matrix, 1.2);
3137 * cg.solve(system_matrix, solution_Hdiv, system_rhs, preconditioner);
3139 *
if (Settings::log_cg_convergence)
3141 *
const std::vector<double> history_data = control.get_history_data();
3143 * std::ofstream ofs(fname +
"_cg_convergence.csv");
3145 *
for (
unsigned int i = 1; i < history_data.size(); i++)
3146 * ofs << i <<
", " << history_data[i] <<
"\n";
3152 * This function saves the computed fields into a
vtu file. This time we also
3153 * save the projected exact solution and the @f$L^2@f$ error
norm. The exact
3154 * solution is only saved
if `Settings::project_exact_solution =
true`
3157 *
void Solver::save() const
3159 * std::vector<std::string> solution_names(3,
"VectorField");
3160 * std::vector<DataComponentInterpretation::DataComponentInterpretation>
3171 *
if (Settings::project_exact_solution)
3173 * std::vector<std::string> solution_names_ex(3,
"VectorFieldExact");
3175 * data_out.add_data_vector(dof_handler_Hdiv,
3176 * projected_exact_solution,
3177 * solution_names_ex,
3181 *
if (exact_solution)
3183 * data_out.add_data_vector(L2_per_cell,
"L2norm");
3188 * data_out.set_flags(flags);
3192 * data_out.build_patches(
mapping,
3193 * fe_Hdiv.degree + 2,
3196 * std::ofstream ofs(fname +
".vtu");
3197 * data_out.write_vtu(ofs);
3200 *
void Solver::run()
3215 *
if (exact_solution)
3219 * compute_error_norms();
3222 *
if (Settings::project_exact_solution)
3226 * project_exact_solution_fcn();
3245 * <a name=
"step_97-Themainloop"></a>
3246 * <h3>The main
loop</h3>
3250 * This
class contains the main
loop of the program.
3253 *
class MagneticProblem
3258 *
if (Settings::n_threads_max)
3261 * MainOutputTable table_Jf(3);
3262 * MainOutputTable table_B(3);
3267 * std::cout <<
"Solving for (p = " << Settings::fe_degree
3268 * <<
"): " << std::flush;
3270 *
for (
unsigned int r = 6; r < 10; r++)
3272 * table_Jf.add_value(
"r", r);
3273 * table_Jf.add_value(
"p", Settings::fe_degree);
3275 * table_B.add_value(
"r", r);
3276 * table_B.add_value(
"p", Settings::fe_degree);
3280 * Stage 1. Computing @f$\vec{
T}@f$.
3286 * std::cout <<
"T " << std::flush;
3288 * SolverT::Solver
T(Settings::fe_degree,
3290 * Settings::mapping_degree,
3291 * Settings::eta_squared_T,
3292 *
"T_p" + std::to_string(Settings::fe_degree) +
"_r" +
3293 * std::to_string(r));
3299 * Stage 2. Computing @f$\vec{J}_f@f$.
3305 * std::cout <<
"Jf " << std::flush;
3307 * ExactSolutions::FreeCurrentDensity Jf_exact;
3309 * ProjectorHcurlToHdiv::Solver Jf(Settings::fe_degree,
3310 * Settings::mapping_degree,
3312 *
T.get_dof_handler(),
3315 * std::to_string(Settings::fe_degree) +
3316 *
"_r" + std::to_string(r),
3321 * table_Jf.add_value(
"ndofs", Jf.get_n_dofs());
3322 * table_Jf.add_value(
"ncells", Jf.get_n_cells());
3323 * table_Jf.add_value(
"L2", Jf.get_L2_norm());
3327 * Stage 3. Computing @f$\vec{
A}@f$.
3333 * std::cout <<
"A " << std::flush;
3335 * SolverA::Solver
A(Settings::fe_degree,
3336 * Settings::mapping_degree,
3338 *
T.get_dof_handler(),
3340 * Settings::eta_squared_A,
3341 *
"A_p" + std::to_string(Settings::fe_degree) +
"_r" +
3342 * std::to_string(r));
3348 * Stage 4. Computing @f$\vec{B}@f$.
3354 * std::cout <<
"B " << std::flush;
3356 * ExactSolutions::MagneticField B_exact;
3358 * ProjectorHcurlToHdiv::Solver B(Settings::fe_degree,
3359 * Settings::mapping_degree,
3361 *
A.get_dof_handler(),
3364 * std::to_string(Settings::fe_degree) +
3365 *
"_r" + std::to_string(r),
3369 * table_B.add_value(
"ndofs", B.get_n_dofs());
3370 * table_B.add_value(
"ncells", B.get_n_cells());
3371 * table_B.add_value(
"L2", B.get_L2_norm());
3380 * table_Jf.save(
"table_Jf_p" + std::to_string(Settings::fe_degree));
3381 * table_B.save(
"table_B_p" + std::to_string(Settings::fe_degree));
3383 * std::cout << std::endl;
3391 * MagneticProblem problem;
3394 *
catch (std::exception &exc)
3396 * std::cerr << std::endl
3398 * <<
"----------------------------------------------------"
3400 * std::cerr <<
"Exception on processing: " << std::endl
3401 * << exc.what() << std::endl
3402 * <<
"Aborting!" << std::endl
3403 * <<
"----------------------------------------------------"
3410 * std::cerr << std::endl
3412 * <<
"----------------------------------------------------"
3414 * std::cerr <<
"Unknown exception!" << std::endl
3415 * <<
"Aborting!" << std::endl
3416 * <<
"----------------------------------------------------"
3424<a name=
"step_97-Results"></a><h1>Results</h1>
3427The program generates the following output in the command line
interface by
3431Solving for (p = 0):
T Jf
A B
T Jf
A B
T Jf
A B
T Jf
A B
3434The program assumes the finite elements of the lowermost degree, @f$p = 0@f$.
3435To change the degree of the finite elements, say @f$p = 2@f$,
one needs to change
3436the setting `Settings::fe_degree = 2` and rebuild the program.
3438The program also dumps a number of files in the current directory. In the default
3439configuration these files are:
3440-
vtu files. They contain the computed vector fields. Recall that the spherical
3441 manifold is attached to many cell faces. Consequently, these cell faces are
3442 curved. They look more like patches of a sphere. Furthermore, the shape
3443 functions are mapped from the reference cell to the real mesh cells by the
3444 second-order
mapping to accommodate the cells with curved faces. For these
3445 reasons,
one needs to use visualization software that can deal with curved
3446 faces and the higher-order
mapping.
A fresh version of ParaView is recommended.
 3447 VisIt will not do. The <a href=
"https://github.com/dealii/dealii/wiki/Notes-on-visualizing-high-order-output">
3448 Notes on visualizing high order output</a> provide more information on this topic.
3449- tex files. These files contain the convergence tables.
3451The following provides examples of the convergence tables simulated with the
3452default settings for three different degrees of the finite elements,
3455| p | r | cells | dofs |@f$\|
e\|_{
L^2}@f$|@f$\alpha_{
L^2}@f$|
3456|-- |---|-------|---------|----------|------|
3457| 0 | 6 | 4625 | 13950 | 1.66e-01 | - |
3458| 0 | 7 | 7992 | 24084 | 1.38e-01 | 0.99 |
3459| 0 | 8 | 12691 | 38220 | 1.19e-01 | 0.99 |
3460| 0 | 9 | 18944 | 57024 | 1.04e-01 | 0.99 |
3461| 1 | 6 | 4625 | 111300 | 8.12e-04 | - |
3462| 1 | 7 | 7992 | 192240 | 4.97e-04 | 2.69 |
3463| 1 | 8 | 12691 | 305172 | 3.32e-04 | 2.61 |
3464| 1 | 9 | 18944 | 455424 | 2.37e-04 | 2.54 |
3465| 2 | 6 | 4625 | 375300 | 6.78e-04 | - |
3466| 2 | 7 | 7992 | 648324 | 3.94e-04 | 2.97 |
3467| 2 | 8 | 12691 | 1029294 | 2.49e-04 | 2.98 |
3468| 2 | 9 | 18944 | 1536192 | 1.67e-04 | 2.99 |
3470**
Table 1. Convergence table. Free-current density, @f$\vec{J}_f@f$.
3472| p | r | cells | dofs |@f$\|
e\|_{
L^2}@f$|@f$\alpha_{
L^2}@f$|
3473|-- |---|-------|---------|----------|------|
3474| 0 | 6 | 4625 | 13950 | 8.84e-08 | - |
3475| 0 | 7 | 7992 | 24084 | 7.36e-08 | 1.00 |
3476| 0 | 8 | 12691 | 38220 | 6.30e-08 | 1.01 |
3477| 0 | 9 | 18944 | 57024 | 5.51e-08 | 1.00 |
3478| 1 | 6 | 4625 | 111300 | 4.41e-09 | - |
3479| 1 | 7 | 7992 | 192240 | 3.11e-09 | 1.91 |
3480| 1 | 8 | 12691 | 305172 | 2.23e-09 | 2.18 |
3481| 1 | 9 | 18944 | 455424 | 1.71e-09 | 1.96 |
3482| 2 | 6 | 4625 | 375300 | 1.84e-10 | - |
3483| 2 | 7 | 7992 | 648324 | 1.03e-10 | 3.21 |
3484| 2 | 8 | 12691 | 1029294 | 6.08e-11 | 3.40 |
3485| 2 | 9 | 18944 | 1536192 | 4.04e-11 | 3.07 |
3487**
Table 2. Convergence table. Magnetic field, @f$\vec{B}@f$.
3489The following notations were used in the headers of the tables:
3491- p - the degree of the finite elements.
3493- r - the mesh refinement parameter, i.
e., the number of nodes on the transfinite
3496- cells - the total amount of active cells.
3498- dofs - the amount of degrees of freedom.
3500-@f$\|
e\|_{
L^2}@f$ - the @f$L^2@f$ error
norm.
3502-@f$\alpha_{
L^2}@f$ - the order of convergence of the @f$L^2@f$ error
norm.
3504If `Settings::log_cg_convergence = true`, the program saves the convergence data
3505of the CG solver into csv files.
3507The vector representations of the calculated vector fields, @f$\vec{J}_f@f$ and
3508@f$\vec{B}@f$, are illustrated above by the first figure on this page. The figures
3509below illustrate slices of the magnitudes of these fields. The figures below
3510were simulated with @f$p = 2@f$ and @f$r = 9@f$. Visual inspection of the vector
3511potentials is not very informative as their conservative portions are unknown.
3515 <img src=
"https://www.dealii.org/images/steps/developer/step-97-Jf.svg" alt=
"The result - free-current
3516 density" height=
"531">
3522 <img src=
"https://www.dealii.org/images/steps/developer/step-97-B.svg" alt=
"The result - magnetic field
3523 density" height=
"531">
3527<a name=
"step_97-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
3530Repeat the simulations for the three
types of boundary conditions,
3531Dirichlet, Neumann, and Robin. The Robin boundary condition is supposed to be
3532superior to the other two. Look at the simulated data to see that this is indeed
3533the case. You can save the projected exact solution next to the simulated
3534solutions into the
vtu files, just set `Settings::project_exact_solution = true`.
3535ParaView has a
"Plot Over Line" filter. You can use this filter to visualize the
3536difference between the exact solution and a solution simulated with a particular
3537boundary condition. You can also draw conclusions by observing the convergence
3538tables. Keep in mind the @f$\eta^2@f$ parameter. Increase it if the CG solver chokes
3539while you are experimenting. Note that the benefits offered by the Robin
3540boundary condition are observed the best when higher-order finite elements are
3541used, i.
e., @f$p = 1@f$ and @f$p = 2@f$.
3543The Robin boundary condition as described above is also called the first-order
3544asymptotic boundary condition (ABC). There exist ABCs of higher orders
3545@cite gratkowski2010p. Implement and test the second-order ABC to see if it
3546performs any better. There exist improvised asymptotic boundary conditions, IABCs,
3547@cite meeker2013a. Try to implement the first order IABC.
3550<a name=
"step_97-PlainProg"></a>
3551<h1> The plain program</h1>
3552@include
"step-97.cc"
void add_data_vector(const VectorType &data, const std::vector< std::string > &names, const DataVectorType type=type_automatic, const std::vector< DataComponentInterpretation::DataComponentInterpretation > &data_component_interpretation={})
void attach_triangulation(Triangulation< dim, spacedim > &tria)
static void set_thread_limit(const unsigned int max_threads=numbers::invalid_unsigned_int)
void initialize(const MatrixType &A, const AdditionalData ¶meters=AdditionalData())
@ cpu_and_wall_times_grouped
#define Assert(cond, exc)
typename ActiveSelector::active_cell_iterator active_cell_iterator
void loop(IteratorType begin, std_cxx20::type_identity_t< IteratorType > end, DOFINFO &dinfo, INFOBOX &info, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &)> &cell_worker, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &)> &boundary_worker, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &, typename INFOBOX::CellInfo &)> &face_worker, AssemblerType &assembler, const LoopControl &lctrl=LoopControl())
void project_boundary_values_curl_conforming_l2(const DoFHandler< dim, dim > &dof_handler, const unsigned int first_vector_component, const Function< dim, number > &boundary_function, const types::boundary_id boundary_component, AffineConstraints< number > &constraints, const Mapping< dim > &mapping)
void make_hanging_node_constraints(const DoFHandler< dim, spacedim > &dof_handler, AffineConstraints< number > &constraints)
void make_sparsity_pattern(const DoFHandler< dim, spacedim > &dof_handler, SparsityPatternBase &sparsity_pattern, const AffineConstraints< number > &constraints={}, const bool keep_constrained_dofs=true, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id)
@ update_values
Shape function values.
@ update_normal_vectors
Normal vectors.
@ update_JxW_values
Transformed quadrature weights.
@ update_gradients
Shape function gradients.
@ update_quadrature_points
Transformed quadrature points.
MappingQ< dim, spacedim > StaticMappingQ1< dim, spacedim >::mapping
@ component_is_part_of_vector
@ matrix
Contents is actually a matrix.
constexpr types::blas_int zero
constexpr types::blas_int one
void cell_matrix(FullMatrix< double > &M, const FEValuesBase< dim > &fe, const FEValuesBase< dim > &fetest, const ArrayView< const std::vector< double > > &velocity, const double factor=1.)
double norm(const FEValuesBase< dim > &fe, const ArrayView< const std::vector< Tensor< 1, dim > > > &Du)
SymmetricTensor< 2, dim, Number > e(const Tensor< 2, dim, Number > &F)
SymmetricTensor< 2, dim, Number > b(const Tensor< 2, dim, Number > &F)
void apply(const Kokkos::TeamPolicy< MemorySpace::Default::kokkos_space::execution_space >::member_type &team_member, const Kokkos::View< Number *, MemorySpace::Default::kokkos_space > shape_data, const ViewTypeIn in, ViewTypeOut out)
constexpr ReturnType< rank, T >::value_type & extract(T &t, const ArrayType &indices)
void run(const Iterator &begin, const std_cxx20::type_identity_t< Iterator > &end, Worker worker, Copier copier, const ScratchData &sample_scratch_data, const CopyData &sample_copy_data, const unsigned int queue_length, const unsigned int chunk_size)
void run(const std::vector< std::vector< Iterator > > &colored_iterators, Worker worker, Copier copier, const ScratchData &sample_scratch_data, const CopyData &sample_copy_data, const unsigned int queue_length=2 *MultithreadInfo::n_threads(), const unsigned int chunk_size=8)
long double gamma(const unsigned int n)
void copy(const T *begin, const T *end, U *dest)
int(& functions)(const void *v1, const void *v2)
void assemble(const MeshWorker::DoFInfoBox< dim, DOFINFO > &dinfo, A *assembler)
::VectorizedArray< Number, width > abs(const ::VectorizedArray< Number, width > &)
unsigned int global_dof_index
bool write_higher_order_cells
std::array< Number, 1 > eigenvalues(const SymmetricTensor< 2, 1, Number > &T)
constexpr Tensor< 1, dim, typename ProductType< Number1, Number2 >::type > cross_product_3d(const Tensor< 1, dim, Number1 > &src1, const Tensor< 1, dim, Number2 > &src2)