diff --git a/docs/src/config/solver.md b/docs/src/config/solver.md index bff4de059..6a6cb7159 100644 --- a/docs/src/config/solver.md +++ b/docs/src/config/solver.md @@ -170,7 +170,6 @@ solver. This option is relevant only for `"Type": "FEAST"`. "MaxFreq": , "FreqStep": , "SaveStep": , - "SaveOnlyPorts": , "Restart": , "AdaptiveTol": , "AdaptiveMaxSamples": , @@ -191,11 +190,6 @@ fields to disk for visualization with [ParaView](https://www.paraview.org/). Fil saved in the `paraview/` directory under the directory specified by [`config["Problem"]["Output"]`](problem.md#config%5B%22Problem%22%5D). -`"SaveOnlyPorts" [false]` : If set to `true`, postprocessing is only performed for port -boundaries and skipped for quantities depending on, for example, field integrals over all -or part of the interior of the computational domain. This can be useful in speeding up -simulations if only port boundary quantities are required. - `"Restart" [1]` : Iteration (1-based) from which to restart for a partial frequency sweep simulation. That is, the initial frequency will be computed as `"MinFreq" + ("Restart" - 1) * "FreqStep"`. @@ -226,8 +220,7 @@ error tolerance. "ExcitationWidth": , "MaxTime": , "TimeStep": , - "SaveStep": , - "SaveOnlyPorts": + "SaveStep": } ``` @@ -279,11 +272,6 @@ disk for visualization with [ParaView](https://www.paraview.org/). Files are sav `paraview/` directory under the directory specified by [`config["Problem"]["Output"]`](problem.md#config%5B%22Problem%22%5D). -`"SaveOnlyPorts" [false]` : If set to `true`, postprocessing is only performed for port -boundaries and skipped for quantities depending on, for example, field integrals over all -or part of the interior of the computational domain. This can be useful in speeding up -simulations if only port boundary quantities are required. - ## `solver["Electrostatic"]` ```json diff --git a/palace/drivers/basesolver.cpp b/palace/drivers/basesolver.cpp index 6b937c483..312628bbc 100644 --- a/palace/drivers/basesolver.cpp +++ b/palace/drivers/basesolver.cpp @@ -736,8 +736,7 @@ void BaseSolver::PostprocessProbes(const PostOperator &postop, const std::string #endif } -void BaseSolver::PostprocessFields(const PostOperator &postop, int step, double time, - const ErrorIndicator *indicator) const +void BaseSolver::PostprocessFields(const PostOperator &postop, int step, double time) const { // Save the computed fields in parallel in format for viewing with ParaView. BlockTimer bt(Timer::IO); @@ -748,12 +747,13 @@ void BaseSolver::PostprocessFields(const PostOperator &postop, int step, double "fields to disk!\n"); return; } - postop.WriteFields(step, time, indicator); + postop.WriteFields(step, time); Mpi::Barrier(postop.GetComm()); } void BaseSolver::PostprocessErrorIndicator(const PostOperator &postop, - const ErrorIndicator &indicator) const + const ErrorIndicator &indicator, + bool fields) const { // Write the indicator statistics. 
if (post_dir.length() == 0) @@ -780,6 +780,12 @@ void BaseSolver::PostprocessErrorIndicator(const PostOperator &postop, data[3], table.w, table.p); // clang-format on } + if (fields) + { + BlockTimer bt(Timer::IO); + postop.WriteFieldsFinal(&indicator); + Mpi::Barrier(postop.GetComm()); + } } template void BaseSolver::SaveMetadata(const KspSolver &) const; diff --git a/palace/drivers/basesolver.hpp b/palace/drivers/basesolver.hpp index bfab97850..c504f05ff 100644 --- a/palace/drivers/basesolver.hpp +++ b/palace/drivers/basesolver.hpp @@ -67,12 +67,11 @@ class BaseSolver double time) const; // Common field visualization postprocessing for all simulation types. - void PostprocessFields(const PostOperator &postop, int step, double time, - const ErrorIndicator *indicator = nullptr) const; + void PostprocessFields(const PostOperator &postop, int step, double time) const; // Common error indicator postprocessing for all simulation types. void PostprocessErrorIndicator(const PostOperator &postop, - const ErrorIndicator &indicator) const; + const ErrorIndicator &indicator, bool fields) const; // Performs a solve using the mesh sequence, then reports error indicators and the number // of global true dofs. diff --git a/palace/drivers/drivensolver.cpp b/palace/drivers/drivensolver.cpp index b6a9f8b3e..dc55967eb 100644 --- a/palace/drivers/drivensolver.cpp +++ b/palace/drivers/drivensolver.cpp @@ -175,21 +175,19 @@ ErrorIndicator DrivenSolver::SweepUniform(SpaceOperator &spaceop, PostOperator & // Compute B = -1/(iω) ∇ x E on the true dofs, and set the internal GridFunctions in // PostOperator for all postprocessing operations. BlockTimer bt0(Timer::POSTPRO); - double E_elec = 0.0, E_mag = 0.0; Curl.Mult(E.Real(), B.Real()); Curl.Mult(E.Imag(), B.Imag()); B *= -1.0 / (1i * omega); postop.SetEGridFunction(E); postop.SetBGridFunction(B); postop.UpdatePorts(spaceop.GetLumpedPortOp(), spaceop.GetWavePortOp(), omega); + double E_elec = postop.GetEFieldEnergy(); + double E_mag = postop.GetHFieldEnergy(); Mpi::Print(" Sol. ||E|| = {:.6e} (||RHS|| = {:.6e})\n", linalg::Norml2(spaceop.GetComm(), E), linalg::Norml2(spaceop.GetComm(), RHS)); - if (!iodata.solver.driven.only_port_post) { const double J = iodata.DimensionalizeValue(IoData::ValueType::ENERGY, 1.0); - E_elec = postop.GetEFieldEnergy(); - E_mag = postop.GetHFieldEnergy(); Mpi::Print(" Field energy E ({:.3e} J) + H ({:.3e} J) = {:.3e} J\n", E_elec * J, E_mag * J, (E_elec + E_mag) * J); } @@ -201,7 +199,6 @@ ErrorIndicator DrivenSolver::SweepUniform(SpaceOperator &spaceop, PostOperator & // Postprocess S-parameters and optionally write solution to disk. Postprocess(postop, spaceop.GetLumpedPortOp(), spaceop.GetWavePortOp(), spaceop.GetSurfaceCurrentOp(), step, omega, E_elec, E_mag, - !iodata.solver.driven.only_port_post, (step == nstep - 1) ? &indicator : nullptr); // Increment frequency. @@ -261,12 +258,16 @@ ErrorIndicator DrivenSolver::SweepAdaptive(SpaceOperator &spaceop, PostOperator // range of interest. Each call for an HDM solution adds the frequency sample to P_S and // removes it from P \ P_S. Timing for the HDM construction and solve is handled inside // of the RomOperator. + auto UpdatePROM = [&](double omega) + { + // Add the HDM solution to the PROM reduced basis. 
+ promop.UpdatePROM(omega, E); + estimator.AddErrorIndicator(E, indicator); + }; promop.SolveHDM(omega0, E); - promop.UpdatePROM(omega0, E); - estimator.AddErrorIndicator(E, indicator); + UpdatePROM(omega0); promop.SolveHDM(omega0 + (nstep - step0 - 1) * delta_omega, E); - promop.UpdatePROM(omega0 + (nstep - step0 - 1) * delta_omega, E); - estimator.AddErrorIndicator(E, indicator); + UpdatePROM(omega0 + (nstep - step0 - 1) * delta_omega); // Greedy procedure for basis construction (offline phase). Basis is initialized with // solutions at frequency sweep endpoints. @@ -308,8 +309,7 @@ ErrorIndicator DrivenSolver::SweepAdaptive(SpaceOperator &spaceop, PostOperator (memory == 0) ? "" : fmt::format(", memory = {:d}/{:d}", memory, convergence_memory)); - promop.UpdatePROM(omega_star, E); - estimator.AddErrorIndicator(E, indicator); + UpdatePROM(omega_star); it++; } Mpi::Print("\nAdaptive sampling{} {:d} frequency samples:\n" @@ -343,19 +343,17 @@ ErrorIndicator DrivenSolver::SweepAdaptive(SpaceOperator &spaceop, PostOperator // Compute B = -1/(iω) ∇ x E on the true dofs, and set the internal GridFunctions in // PostOperator for all postprocessing operations. BlockTimer bt0(Timer::POSTPRO); - double E_elec = 0.0, E_mag = 0.0; Curl.Mult(E.Real(), B.Real()); Curl.Mult(E.Imag(), B.Imag()); B *= -1.0 / (1i * omega); postop.SetEGridFunction(E); postop.SetBGridFunction(B); postop.UpdatePorts(spaceop.GetLumpedPortOp(), spaceop.GetWavePortOp(), omega); + double E_elec = postop.GetEFieldEnergy(); + double E_mag = postop.GetHFieldEnergy(); Mpi::Print(" Sol. ||E|| = {:.6e}\n", linalg::Norml2(spaceop.GetComm(), E)); - if (!iodata.solver.driven.only_port_post) { const double J = iodata.DimensionalizeValue(IoData::ValueType::ENERGY, 1.0); - E_elec = postop.GetEFieldEnergy(); - E_mag = postop.GetHFieldEnergy(); Mpi::Print(" Field energy E ({:.3e} J) + H ({:.3e} J) = {:.3e} J\n", E_elec * J, E_mag * J, (E_elec + E_mag) * J); } @@ -363,7 +361,6 @@ ErrorIndicator DrivenSolver::SweepAdaptive(SpaceOperator &spaceop, PostOperator // Postprocess S-parameters and optionally write solution to disk. Postprocess(postop, spaceop.GetLumpedPortOp(), spaceop.GetWavePortOp(), spaceop.GetSurfaceCurrentOp(), step, omega, E_elec, E_mag, - !iodata.solver.driven.only_port_post, (step == nstep - 1) ? &indicator : nullptr); // Increment frequency. @@ -394,36 +391,33 @@ void DrivenSolver::Postprocess(const PostOperator &postop, const LumpedPortOperator &lumped_port_op, const WavePortOperator &wave_port_op, const SurfaceCurrentOperator &surf_j_op, int step, - double omega, double E_elec, double E_mag, bool full, + double omega, double E_elec, double E_mag, const ErrorIndicator *indicator) const { // The internal GridFunctions for PostOperator have already been set from the E and B // solutions in the main frequency sweep loop. 
- double freq = iodata.DimensionalizeValue(IoData::ValueType::FREQUENCY, omega); + const double freq = iodata.DimensionalizeValue(IoData::ValueType::FREQUENCY, omega); + double E_cap = postop.GetLumpedCapacitorEnergy(lumped_port_op); + double E_ind = postop.GetLumpedInductorEnergy(lumped_port_op); PostprocessCurrents(postop, surf_j_op, step, omega); PostprocessPorts(postop, lumped_port_op, step, omega); if (surf_j_op.Size() == 0) { PostprocessSParameters(postop, lumped_port_op, wave_port_op, step, omega); } - if (full) - { - double E_cap = postop.GetLumpedCapacitorEnergy(lumped_port_op); - double E_ind = postop.GetLumpedInductorEnergy(lumped_port_op); - PostprocessDomains(postop, "f (GHz)", step, freq, E_elec, E_mag, E_cap, E_ind); - PostprocessSurfaces(postop, "f (GHz)", step, freq, E_elec + E_cap, E_mag + E_ind, 1.0, - 1.0); - PostprocessProbes(postop, "f (GHz)", step, freq); - } + PostprocessDomains(postop, "f (GHz)", step, freq, E_elec, E_mag, E_cap, E_ind); + PostprocessSurfaces(postop, "f (GHz)", step, freq, E_elec + E_cap, E_mag + E_ind, 1.0, + 1.0); + PostprocessProbes(postop, "f (GHz)", step, freq); if (iodata.solver.driven.delta_post > 0 && step % iodata.solver.driven.delta_post == 0) { Mpi::Print("\n"); - PostprocessFields(postop, step / iodata.solver.driven.delta_post, freq, indicator); + PostprocessFields(postop, step / iodata.solver.driven.delta_post, freq); Mpi::Print(" Wrote fields to disk at step {:d}\n", step + 1); } if (indicator) { - PostprocessErrorIndicator(postop, *indicator); + PostprocessErrorIndicator(postop, *indicator, iodata.solver.driven.delta_post > 0); } } diff --git a/palace/drivers/drivensolver.hpp b/palace/drivers/drivensolver.hpp index fbebbf44f..6178f1b2d 100644 --- a/palace/drivers/drivensolver.hpp +++ b/palace/drivers/drivensolver.hpp @@ -12,13 +12,11 @@ namespace palace { class ErrorIndicator; -class IoData; class LumpedPortOperator; class Mesh; class PostOperator; class SpaceOperator; class SurfaceCurrentOperator; -class Timer; class WavePortOperator; // @@ -38,8 +36,7 @@ class DrivenSolver : public BaseSolver void Postprocess(const PostOperator &postop, const LumpedPortOperator &lumped_port_op, const WavePortOperator &wave_port_op, const SurfaceCurrentOperator &surf_j_op, int step, double omega, - double E_elec, double E_mag, bool full, - const ErrorIndicator *indicator) const; + double E_elec, double E_mag, const ErrorIndicator *indicator) const; void PostprocessCurrents(const PostOperator &postop, const SurfaceCurrentOperator &surf_j_op, int step, diff --git a/palace/drivers/eigensolver.cpp b/palace/drivers/eigensolver.cpp index 484de94b1..aa3f10119 100644 --- a/palace/drivers/eigensolver.cpp +++ b/palace/drivers/eigensolver.cpp @@ -206,8 +206,11 @@ EigenSolver::Solve(const std::vector> &mesh) const // Configure the shift-and-invert strategy is employed to solve for the eigenvalues // closest to the specified target, σ. const double target = iodata.solver.eigenmode.target; - const double f_target = iodata.DimensionalizeValue(IoData::ValueType::FREQUENCY, target); - Mpi::Print(" Shift-and-invert σ = {:.3e} GHz ({:.3e})\n", f_target, target); + { + const double f_target = + iodata.DimensionalizeValue(IoData::ValueType::FREQUENCY, target); + Mpi::Print(" Shift-and-invert σ = {:.3e} GHz ({:.3e})\n", f_target, target); + } if (C) { // Search for eigenvalues closest to λ = iσ. @@ -270,8 +273,8 @@ EigenSolver::Solve(const std::vector> &mesh) const BlockTimer bt2(Timer::POSTPRO); SaveMetadata(*ksp); - // Calculate and record the error indicators. 
- Mpi::Print("\nComputing solution error estimates\n"); + // Calculate and record the error indicators, and postprocess the results. + Mpi::Print("\nComputing solution error estimates and performing postprocessing\n"); CurlFluxErrorEstimator estimator( spaceop.GetMaterialOp(), spaceop.GetNDSpaces(), iodata.solver.linear.estimator_tol, iodata.solver.linear.estimator_max_it, 0, iodata.solver.linear.estimator_mg); @@ -284,13 +287,6 @@ EigenSolver::Solve(const std::vector> &mesh) const eigen->SetBMat(*KM); eigen->RescaleEigenvectors(num_conv); } - for (int i = 0; i < iodata.solver.eigenmode.n; i++) - { - eigen->GetEigenvector(i, E); - estimator.AddErrorIndicator(E, indicator); - } - - // Postprocess the results. Mpi::Print("\n"); for (int i = 0; i < num_conv; i++) { @@ -318,10 +314,19 @@ EigenSolver::Solve(const std::vector> &mesh) const postop.SetEGridFunction(E); postop.SetBGridFunction(B); postop.UpdatePorts(spaceop.GetLumpedPortOp(), omega.real()); + double E_elec = postop.GetEFieldEnergy(); + double E_mag = postop.GetHFieldEnergy(); + + // Calculate and record the error indicators. + if (i < iodata.solver.eigenmode.n) + { + estimator.AddErrorIndicator(E, indicator); + } // Postprocess the mode. Postprocess(postop, spaceop.GetLumpedPortOp(), i, omega, error_bkwd, error_abs, - num_conv, (i == 0) ? &indicator : nullptr); + num_conv, E_elec, E_mag, + (i == iodata.solver.eigenmode.n - 1) ? &indicator : nullptr); } return {indicator, spaceop.GlobalTrueVSize()}; } @@ -329,16 +334,11 @@ EigenSolver::Solve(const std::vector> &mesh) const void EigenSolver::Postprocess(const PostOperator &postop, const LumpedPortOperator &lumped_port_op, int i, std::complex omega, double error_bkwd, - double error_abs, int num_conv, + double error_abs, int num_conv, double E_elec, double E_mag, const ErrorIndicator *indicator) const { // The internal GridFunctions for PostOperator have already been set from the E and B - // solutions in the main loop over converged eigenvalues. Note: The energies output are - // nondimensional (they can be dimensionalized using the scaling μ₀ * H₀² * L₀³, which - // are the free space permeability, characteristic magnetic field strength, and - // characteristic length scale, respectively). - double E_elec = postop.GetEFieldEnergy(); - double E_mag = postop.GetHFieldEnergy(); + // solutions in the main loop over converged eigenvalues. double E_cap = postop.GetLumpedCapacitorEnergy(lumped_port_op); double E_ind = postop.GetLumpedInductorEnergy(lumped_port_op); PostprocessEigen(i, omega, error_bkwd, error_abs, num_conv); @@ -349,12 +349,12 @@ void EigenSolver::Postprocess(const PostOperator &postop, PostprocessProbes(postop, "m", i, i + 1); if (i < iodata.solver.eigenmode.n_post) { - PostprocessFields(postop, i, i + 1, indicator); + PostprocessFields(postop, i, i + 1); Mpi::Print(" Wrote mode {:d} to disk\n", i + 1); } if (indicator) { - PostprocessErrorIndicator(postop, *indicator); + PostprocessErrorIndicator(postop, *indicator, iodata.solver.eigenmode.n_post > 0); } } diff --git a/palace/drivers/eigensolver.hpp b/palace/drivers/eigensolver.hpp index 291199e16..63ee3d2eb 100644 --- a/palace/drivers/eigensolver.hpp +++ b/palace/drivers/eigensolver.hpp @@ -13,11 +13,9 @@ namespace palace { class ErrorIndicator; -class IoData; class LumpedPortOperator; class Mesh; class PostOperator; -class Timer; // // Driver class for eigenmode simulations. 
@@ -27,7 +25,8 @@ class EigenSolver : public BaseSolver { private: void Postprocess(const PostOperator &postop, const LumpedPortOperator &lumped_port_op, int i, std::complex<double> omega, double error_bkwd, double error_abs, - int num_conv, const ErrorIndicator *indicator) const; + int num_conv, double E_elec, double E_mag, + const ErrorIndicator *indicator) const; void PostprocessEigen(int i, std::complex<double> omega, double error_bkwd, double error_abs, int num_conv) const; diff --git a/palace/drivers/electrostaticsolver.cpp b/palace/drivers/electrostaticsolver.cpp index f69584d7f..935aad093 100644 --- a/palace/drivers/electrostaticsolver.cpp +++ b/palace/drivers/electrostaticsolver.cpp @@ -28,6 +28,7 @@ ElectrostaticSolver::Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const BlockTimer bt0(Timer::CONSTRUCT); LaplaceOperator laplaceop(iodata, mesh); auto K = laplaceop.GetStiffnessMatrix(); + const auto &Grad = laplaceop.GetGradMatrix(); SaveMetadata(laplaceop.GetH1Spaces()); // Set up the linear solver. @@ -41,8 +42,9 @@ ElectrostaticSolver::Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const MFEM_VERIFY(nstep > 0, "No terminal boundaries specified for electrostatic simulation!"); // Right-hand side term and solution vector storage. - Vector RHS(K->Height()); + Vector RHS(Grad.Width()), E(Grad.Height()); std::vector<Vector> V(nstep); + std::vector<double> E_elec(nstep); // Initialize structures for storing and reducing the results of error estimation. GradFluxErrorEstimator estimator( @@ -67,15 +69,30 @@ ElectrostaticSolver::Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const laplaceop.GetExcitationVector(idx, *K, V[step], RHS); ksp.Mult(RHS, V[step]); + // Compute E = -∇V on the true dofs, and set the internal GridFunctions in PostOperator + // for all postprocessing operations. BlockTimer bt2(Timer::POSTPRO); + E = 0.0; + Grad.AddMult(V[step], E, -1.0); + postop.SetVGridFunction(V[step]); + postop.SetEGridFunction(E); + E_elec[step] = postop.GetEFieldEnergy(); Mpi::Print(" Sol. ||V|| = {:.6e} (||RHS|| = {:.6e})\n", linalg::Norml2(laplaceop.GetComm(), V[step]), linalg::Norml2(laplaceop.GetComm(), RHS)); + { + const double J = iodata.DimensionalizeValue(IoData::ValueType::ENERGY, 1.0); + Mpi::Print(" Field energy E = {:.3e} J\n", E_elec[step] * J); + } // Calculate and record the error indicators. Mpi::Print(" Updating solution error estimates\n"); estimator.AddErrorIndicator(V[step], indicator); + // Postprocess field solutions and optionally write solution to disk. + Postprocess(postop, step, idx, E_elec[step], + (step == nstep - 1) ? &indicator : nullptr); + // Next terminal. step++; } @@ -83,13 +100,32 @@ ElectrostaticSolver::Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const // Postprocess the capacitance matrix from the computed field solutions. BlockTimer bt1(Timer::POSTPRO); SaveMetadata(ksp); - Postprocess(laplaceop, postop, V, indicator); + PostprocessTerminals(postop, laplaceop.GetSources(), V, E_elec); return {indicator, laplaceop.GlobalTrueVSize()}; } -void ElectrostaticSolver::Postprocess(LaplaceOperator &laplaceop, PostOperator &postop, - const std::vector<Vector> &V, - const ErrorIndicator &indicator) const +void ElectrostaticSolver::Postprocess(const PostOperator &postop, int step, int idx, double E_elec, const ErrorIndicator *indicator) const +{ + // The internal GridFunctions for PostOperator have already been set from the V solution + // in the main loop.
+ PostprocessDomains(postop, "i", step, idx, E_elec, 0.0, 0.0, 0.0); + PostprocessSurfaces(postop, "i", step, idx, E_elec, 0.0, 1.0, 0.0); + PostprocessProbes(postop, "i", step, idx); + if (step < iodata.solver.electrostatic.n_post) + { + PostprocessFields(postop, step, idx); + Mpi::Print(" Wrote fields to disk for terminal {:d}\n", idx); + } + if (indicator) + { + PostprocessErrorIndicator(postop, *indicator, iodata.solver.electrostatic.n_post > 0); + } +} + +void ElectrostaticSolver::PostprocessTerminals( + PostOperator &postop, const std::map<int, mfem::Array<int>> &terminal_sources, + const std::vector<Vector> &V, const std::vector<double> &E_elec) const { // Postprocess the Maxwell capacitance matrix. See p. 97 of the COMSOL AC/DC Module manual // for the associated formulas based on the electric field energy based on a unit voltage // charges from the prescribed voltage to get C directly as: // Q_i = ∫ ρ dV = ∫ ∇ ⋅ (ε E) dV = ∫ (ε E) ⋅ n dS // and C_ij = Q_i/V_j. The energy formulation avoids having to locally integrate E = -∇V. - const auto &Grad = laplaceop.GetGradMatrix(); - const std::map<int, mfem::Array<int>> &terminal_sources = laplaceop.GetSources(); - int nstep = static_cast<int>(terminal_sources.size()); - mfem::DenseMatrix C(nstep), Cm(nstep); - Vector E(Grad.Height()), Vij(Grad.Width()); - int i = 0; - for (const auto &[idx, data] : terminal_sources) + mfem::DenseMatrix C(V.size()), Cm(V.size()); + for (int i = 0; i < C.Height(); i++) { - // Compute E = -∇V on the true dofs, and set the internal GridFunctions in PostOperator - // for all postprocessing operations. - E = 0.0; - Grad.AddMult(V[i], E, -1.0); - postop.SetVGridFunction(V[i]); - postop.SetEGridFunction(E); - double Ue = postop.GetEFieldEnergy(); - PostprocessDomains(postop, "i", i, idx, Ue, 0.0, 0.0, 0.0); - PostprocessSurfaces(postop, "i", i, idx, Ue, 0.0, 1.0, 0.0); - PostprocessProbes(postop, "i", i, idx); - if (i < iodata.solver.electrostatic.n_post) - { - PostprocessFields(postop, i, idx, (i == 0) ? &indicator : nullptr); - Mpi::Print("{}Wrote fields to disk for terminal {:d}\n", (i == 0) ? "\n" : "", idx); - } - if (i == 0) - { - PostprocessErrorIndicator(postop, indicator); - } - // Diagonal: C_ii = 2 U_e(V_i) / V_i². - C(i, i) = Cm(i, i) = 2.0 * Ue; - i++; + C(i, i) = Cm(i, i) = 2.0 * E_elec[i]; } // Off-diagonals: C_ij = U_e(V_i + V_j) / (V_i V_j) - 1/2 (V_i/V_j C_ii + V_j/V_i C_jj). - for (i = 0; i < C.Height(); i++) + Vector Vij(V[0].Size()); + Vij.UseDevice(true); + for (int i = 0; i < C.Height(); i++) { for (int j = 0; j < C.Width(); j++) { @@ -155,13 +167,7 @@ void ElectrostaticSolver::Postprocess(LaplaceOperator &laplaceop, PostOperator & } mfem::DenseMatrix Cinv(C); Cinv.Invert(); // In-place, uses LAPACK (when available) and should be cheap - PostprocessTerminals(terminal_sources, C, Cinv, Cm); -} -void ElectrostaticSolver::PostprocessTerminals( - const std::map<int, mfem::Array<int>> &terminal_sources, const mfem::DenseMatrix &C, - const mfem::DenseMatrix &Cinv, const mfem::DenseMatrix &Cm) const -{ // Only root writes to disk (every process has full matrices).
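For reference, the off-diagonal formula in the comment above follows from the quadratic form of the electrostatic energy, assuming a symmetric capacitance matrix and the unit voltage excitations mentioned in the comment (a sketch of the algebra, not additional source):

```math
U_e(V_i + V_j) = \tfrac{1}{2} C_{ii} V_i^2 + \tfrac{1}{2} C_{jj} V_j^2 + C_{ij} V_i V_j
\;\;\Longrightarrow\;\;
C_{ij} = \frac{U_e(V_i + V_j)}{V_i V_j} - \frac{1}{2} \left( \frac{V_i}{V_j} C_{ii} + \frac{V_j}{V_i} C_{jj} \right) ,
```

with the diagonal entries C_ii = 2 U_e(V_i) / V_i² computed in the loop over terminals.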
if (!root || post_dir.length() == 0) { diff --git a/palace/drivers/electrostaticsolver.hpp b/palace/drivers/electrostaticsolver.hpp index 313976138..ea7a869c2 100644 --- a/palace/drivers/electrostaticsolver.hpp +++ b/palace/drivers/electrostaticsolver.hpp @@ -15,7 +15,6 @@ namespace mfem template <typename T> class Array; -class DenseMatrix; } // namespace mfem @@ -23,11 +22,8 @@ namespace palace { class ErrorIndicator; -class IoData; -class LaplaceOperator; class Mesh; class PostOperator; -class Timer; // // Driver class for electrostatic simulations. @@ -35,12 +31,13 @@ class Timer; class ElectrostaticSolver : public BaseSolver { private: - void Postprocess(LaplaceOperator &laplaceop, PostOperator &postop, - const std::vector<Vector> &V, const ErrorIndicator &indicator) const; + void Postprocess(const PostOperator &postop, int step, int idx, double E_elec, + const ErrorIndicator *indicator) const; - void PostprocessTerminals(const std::map<int, mfem::Array<int>> &terminal_sources, - const mfem::DenseMatrix &C, const mfem::DenseMatrix &Cinv, - const mfem::DenseMatrix &Cm) const; + void PostprocessTerminals(PostOperator &postop, + const std::map<int, mfem::Array<int>> &terminal_sources, + const std::vector<Vector> &V, + const std::vector<double> &E_elec) const; std::pair<ErrorIndicator, long long int> Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const override; diff --git a/palace/drivers/magnetostaticsolver.cpp b/palace/drivers/magnetostaticsolver.cpp index bdca356f9..8dbd4f712 100644 --- a/palace/drivers/magnetostaticsolver.cpp +++ b/palace/drivers/magnetostaticsolver.cpp @@ -28,6 +28,7 @@ MagnetostaticSolver::Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const BlockTimer bt0(Timer::CONSTRUCT); CurlCurlOperator curlcurlop(iodata, mesh); auto K = curlcurlop.GetStiffnessMatrix(); + const auto &Curl = curlcurlop.GetCurlMatrix(); SaveMetadata(curlcurlop.GetNDSpaces()); // Set up the linear solver. @@ -41,8 +42,9 @@ MagnetostaticSolver::Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const "No surface current boundaries specified for magnetostatic simulation!"); // Source term and solution vector storage. - Vector RHS(K->Height()); + Vector RHS(Curl.Width()), B(Curl.Height()); std::vector<Vector> A(nstep); + std::vector<double> I_inc(nstep), E_mag(nstep); // Initialize structures for storing and reducing the results of error estimation. CurlFluxErrorEstimator estimator( @@ -69,29 +71,66 @@ MagnetostaticSolver::Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const curlcurlop.GetExcitationVector(idx, RHS); ksp.Mult(RHS, A[step]); + // Compute B = ∇ x A on the true dofs, and set the internal GridFunctions in + // PostOperator for all postprocessing operations. BlockTimer bt2(Timer::POSTPRO); + Curl.Mult(A[step], B); + postop.SetAGridFunction(A[step]); + postop.SetBGridFunction(B); + E_mag[step] = postop.GetHFieldEnergy(); Mpi::Print(" Sol. ||A|| = {:.6e} (||RHS|| = {:.6e})\n", linalg::Norml2(curlcurlop.GetComm(), A[step]), linalg::Norml2(curlcurlop.GetComm(), RHS)); + { + const double J = iodata.DimensionalizeValue(IoData::ValueType::ENERGY, 1.0); + Mpi::Print(" Field energy H = {:.3e} J\n", E_mag[step] * J); + } + I_inc[step] = data.GetExcitationCurrent(); // Calculate and record the error indicators. Mpi::Print(" Updating solution error estimates\n"); estimator.AddErrorIndicator(A[step], indicator); + // Postprocess field solutions and optionally write solution to disk. + Postprocess(postop, step, idx, I_inc[step], E_mag[step], + (step == nstep - 1) ? &indicator : nullptr); + // Next source. step++; } - // Postprocess the capacitance matrix from the computed field solutions. + // Postprocess the inductance matrix from the computed field solutions.
BlockTimer bt1(Timer::POSTPRO); SaveMetadata(ksp); - Postprocess(curlcurlop, postop, A, indicator); + PostprocessTerminals(postop, curlcurlop.GetSurfaceCurrentOp(), A, I_inc, E_mag); return {indicator, curlcurlop.GlobalTrueVSize()}; } -void MagnetostaticSolver::Postprocess(CurlCurlOperator &curlcurlop, PostOperator &postop, - const std::vector<Vector> &A, - const ErrorIndicator &indicator) const +void MagnetostaticSolver::Postprocess(const PostOperator &postop, int step, int idx, + double I_inc, double E_mag, + const ErrorIndicator *indicator) const +{ + // The internal GridFunctions for PostOperator have already been set from the A solution + // in the main loop. + PostprocessDomains(postop, "i", step, idx, 0.0, E_mag, 0.0, 0.0); + PostprocessSurfaces(postop, "i", step, idx, 0.0, E_mag, 0.0, I_inc); + PostprocessProbes(postop, "i", step, idx); + if (step < iodata.solver.magnetostatic.n_post) + { + PostprocessFields(postop, step, idx); + Mpi::Print(" Wrote fields to disk for source {:d}\n", idx); + } + if (indicator) + { + PostprocessErrorIndicator(postop, *indicator, iodata.solver.magnetostatic.n_post > 0); + } +} + +void MagnetostaticSolver::PostprocessTerminals(PostOperator &postop, + const SurfaceCurrentOperator &surf_j_op, + const std::vector<Vector> &A, + const std::vector<double> &I_inc, + const std::vector<double> &E_mag) const { // Postprocess the Maxwell inductance matrix. See p. 97 of the COMSOL AC/DC Module manual // for the associated formulas based on the magnetic field energy based on a current // excitation for each port. Alternatively, we could compute the resulting // Φ_i = ∫ B ⋅ n_j dS // and M_ij = Φ_i/I_j. The energy formulation avoids having to locally integrate B = // ∇ x A. - const auto &Curl = curlcurlop.GetCurlMatrix(); - const SurfaceCurrentOperator &surf_j_op = curlcurlop.GetSurfaceCurrentOp(); - int nstep = static_cast<int>(surf_j_op.Size()); - mfem::DenseMatrix M(nstep), Mm(nstep); - Vector B(Curl.Height()), Aij(Curl.Width()); - Vector Iinc(nstep); - int i = 0; - for (const auto &[idx, data] : surf_j_op) + mfem::DenseMatrix M(A.size()), Mm(A.size()); + for (int i = 0; i < M.Height(); i++) { - // Get the magnitude of the current excitations (unit J_s,inc, but circuit current I is - // the integral of J_s,inc over port). - Iinc(i) = data.GetExcitationCurrent(); - MFEM_VERIFY(Iinc(i) > 0.0, "Zero current excitation for magnetostatic solver!"); - - // Compute B = ∇ x A on the true dofs, and set the internal GridFunctions in - // PostOperator for all postprocessing operations. - Curl.Mult(A[i], B); - postop.SetAGridFunction(A[i]); - postop.SetBGridFunction(B); - double Um = postop.GetHFieldEnergy(); - PostprocessDomains(postop, "i", i, idx, 0.0, Um, 0.0, 0.0); - PostprocessSurfaces(postop, "i", i, idx, 0.0, Um, 0.0, Iinc(i)); - PostprocessProbes(postop, "i", i, idx); - if (i < iodata.solver.magnetostatic.n_post) - { - PostprocessFields(postop, i, idx, (i == 0) ? &indicator : nullptr); - Mpi::Print("{}Wrote fields to disk for source {:d}\n", (i == 0) ? "\n" : "", idx); - } - if (i == 0) - { - PostprocessErrorIndicator(postop, indicator); - } - // Diagonal: M_ii = 2 U_m(A_i) / I_i². - M(i, i) = Mm(i, i) = 2.0 * Um / (Iinc(i) * Iinc(i)); - i++; + M(i, i) = Mm(i, i) = 2.0 * E_mag[i] / (I_inc[i] * I_inc[i]); } // Off-diagonals: M_ij = U_m(A_i + A_j) / (I_i I_j) - 1/2 (I_i/I_j M_ii + I_j/I_i M_jj).
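The same energy argument, with the magnetic energy U_m and terminal currents I in place of U_e and V, gives the off-diagonal inductance formula in the comment above (again only a sketch of the identity assumed here):

```math
M_{ij} = \frac{U_m(A_i + A_j)}{I_i I_j} - \frac{1}{2} \left( \frac{I_i}{I_j} M_{ii} + \frac{I_j}{I_i} M_{jj} \right) , \qquad M_{ii} = \frac{2 \, U_m(A_i)}{I_i^2} .
```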
- for (i = 0; i < M.Height(); i++) + Vector Aij(A[0].Size()); + Aij.UseDevice(true); + for (int i = 0; i < M.Height(); i++) { for (int j = 0; j < M.Width(); j++) { @@ -155,8 +165,8 @@ void MagnetostaticSolver::Postprocess(CurlCurlOperator &curlcurlop, PostOperator linalg::AXPBYPCZ(1.0, A[i], 1.0, A[j], 0.0, Aij); postop.SetAGridFunction(Aij, false); double Um = postop.GetHFieldEnergy(); - M(i, j) = Um / (Iinc(i) * Iinc(j)) - - 0.5 * (M(i, i) * Iinc(i) / Iinc(j) + M(j, j) * Iinc(j) / Iinc(i)); + M(i, j) = Um / (I_inc[i] * I_inc[j]) - + 0.5 * (M(i, i) * I_inc[i] / I_inc[j] + M(j, j) * I_inc[j] / I_inc[i]); Mm(i, j) = -M(i, j); Mm(i, i) -= Mm(i, j); } @@ -164,14 +174,7 @@ void MagnetostaticSolver::Postprocess(CurlCurlOperator &curlcurlop, PostOperator } mfem::DenseMatrix Minv(M); Minv.Invert(); // In-place, uses LAPACK (when available) and should be cheap - PostprocessTerminals(surf_j_op, M, Minv, Mm); -} -void MagnetostaticSolver::PostprocessTerminals(const SurfaceCurrentOperator &surf_j_op, - const mfem::DenseMatrix &M, - const mfem::DenseMatrix &Minv, - const mfem::DenseMatrix &Mm) const -{ // Only root writes to disk (every process has full matrices). if (!root || post_dir.length() == 0) { diff --git a/palace/drivers/magnetostaticsolver.hpp b/palace/drivers/magnetostaticsolver.hpp index 343da6fba..7cfd96f2a 100644 --- a/palace/drivers/magnetostaticsolver.hpp +++ b/palace/drivers/magnetostaticsolver.hpp @@ -9,23 +9,13 @@ #include "drivers/basesolver.hpp" #include "linalg/vector.hpp" -namespace mfem -{ - -class DenseMatrix; - -} // namespace mfem - namespace palace { -class CurlCurlOperator; class ErrorIndicator; -class IoData; class Mesh; class PostOperator; class SurfaceCurrentOperator; -class Timer; // // Driver class for magnetostatic simulations. @@ -33,12 +23,12 @@ class Timer; class MagnetostaticSolver : public BaseSolver { private: - void Postprocess(CurlCurlOperator &curlcurlop, PostOperator &postop, - const std::vector<Vector> &A, const ErrorIndicator &indicator) const; + void Postprocess(const PostOperator &postop, int step, int idx, double I_inc, + double E_mag, const ErrorIndicator *indicator) const; - void PostprocessTerminals(const SurfaceCurrentOperator &surf_j_op, - const mfem::DenseMatrix &M, const mfem::DenseMatrix &Minv, - const mfem::DenseMatrix &Mm) const; + void PostprocessTerminals(PostOperator &postop, const SurfaceCurrentOperator &surf_j_op, + const std::vector<Vector> &A, const std::vector<double> &I_inc, + const std::vector<double> &E_mag) const; std::pair<ErrorIndicator, long long int> Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const override; diff --git a/palace/drivers/transientsolver.cpp b/palace/drivers/transientsolver.cpp index 870fa82af..ee1065652 100644 --- a/palace/drivers/transientsolver.cpp +++ b/palace/drivers/transientsolver.cpp @@ -109,19 +109,17 @@ TransientSolver::Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const // Postprocess for the time step. BlockTimer bt2(Timer::POSTPRO); - double E_elec = 0.0, E_mag = 0.0; const Vector &E = timeop.GetE(); const Vector &B = timeop.GetB(); postop.SetEGridFunction(E); postop.SetBGridFunction(B); postop.UpdatePorts(spaceop.GetLumpedPortOp()); + double E_elec = postop.GetEFieldEnergy(); + double E_mag = postop.GetHFieldEnergy(); Mpi::Print(" Sol.
||E|| = {:.6e}, ||B|| = {:.6e}\n", linalg::Norml2(spaceop.GetComm(), E), linalg::Norml2(spaceop.GetComm(), B)); - if (!iodata.solver.transient.only_port_post) { const double J = iodata.DimensionalizeValue(IoData::ValueType::ENERGY, 1.0); - E_elec = postop.GetEFieldEnergy(); - E_mag = postop.GetHFieldEnergy(); Mpi::Print(" Field energy E ({:.3e} J) + H ({:.3e} J) = {:.3e} J\n", E_elec * J, E_mag * J, (E_elec + E_mag) * J); } @@ -132,8 +130,7 @@ TransientSolver::Solve(const std::vector> &mesh) const // Postprocess port voltages/currents and optionally write solution to disk. Postprocess(postop, spaceop.GetLumpedPortOp(), spaceop.GetSurfaceCurrentOp(), step, t, - J_coef(t), E_elec, E_mag, !iodata.solver.transient.only_port_post, - (step == nstep - 1) ? &indicator : nullptr); + J_coef(t), E_elec, E_mag, (step == nstep - 1) ? &indicator : nullptr); // Increment time step. step++; @@ -256,32 +253,28 @@ void TransientSolver::Postprocess(const PostOperator &postop, const LumpedPortOperator &lumped_port_op, const SurfaceCurrentOperator &surf_j_op, int step, double t, double J_coef, double E_elec, double E_mag, - bool full, const ErrorIndicator *indicator) const + const ErrorIndicator *indicator) const { // The internal GridFunctions for PostOperator have already been set from the E and B // solutions in the main time integration loop. const double ts = iodata.DimensionalizeValue(IoData::ValueType::TIME, t); + double E_cap = postop.GetLumpedCapacitorEnergy(lumped_port_op); + double E_ind = postop.GetLumpedInductorEnergy(lumped_port_op); PostprocessCurrents(postop, surf_j_op, step, t, J_coef); PostprocessPorts(postop, lumped_port_op, step, t, J_coef); - if (full) - { - double E_cap = postop.GetLumpedCapacitorEnergy(lumped_port_op); - double E_ind = postop.GetLumpedInductorEnergy(lumped_port_op); - PostprocessDomains(postop, "t (ns)", step, ts, E_elec, E_mag, E_cap, E_ind); - PostprocessSurfaces(postop, "t (ns)", step, ts, E_elec + E_cap, E_mag + E_ind, 1.0, - 1.0); - PostprocessProbes(postop, "t (ns)", step, ts); - } + PostprocessDomains(postop, "t (ns)", step, ts, E_elec, E_mag, E_cap, E_ind); + PostprocessSurfaces(postop, "t (ns)", step, ts, E_elec + E_cap, E_mag + E_ind, 1.0, 1.0); + PostprocessProbes(postop, "t (ns)", step, ts); if (iodata.solver.transient.delta_post > 0 && step % iodata.solver.transient.delta_post == 0) { Mpi::Print("\n"); - PostprocessFields(postop, step / iodata.solver.transient.delta_post, ts, indicator); + PostprocessFields(postop, step / iodata.solver.transient.delta_post, ts); Mpi::Print(" Wrote fields to disk at step {:d}\n", step); } if (indicator) { - PostprocessErrorIndicator(postop, *indicator); + PostprocessErrorIndicator(postop, *indicator, iodata.solver.transient.delta_post > 0); } } diff --git a/palace/drivers/transientsolver.hpp b/palace/drivers/transientsolver.hpp index 5ee860194..250d373af 100644 --- a/palace/drivers/transientsolver.hpp +++ b/palace/drivers/transientsolver.hpp @@ -13,12 +13,10 @@ namespace palace { class ErrorIndicator; -class IoData; class LumpedPortOperator; class Mesh; class PostOperator; class SurfaceCurrentOperator; -class Timer; // // Driver class for time-dependent driven terminal simulations. 
@@ -32,7 +30,7 @@ class TransientSolver : public BaseSolver void Postprocess(const PostOperator &postop, const LumpedPortOperator &lumped_port_op, const SurfaceCurrentOperator &surf_j_op, int step, double t, - double J_coef, double E_elec, double E_mag, bool full, + double J_coef, double E_elec, double E_mag, const ErrorIndicator *indicator) const; void PostprocessCurrents(const PostOperator &postop, diff --git a/palace/models/postoperator.cpp b/palace/models/postoperator.cpp index 3b8f80174..7781a27ad 100644 --- a/palace/models/postoperator.cpp +++ b/palace/models/postoperator.cpp @@ -672,100 +672,144 @@ double PostOperator::GetSurfaceFlux(int idx) const return Phi; } -void PostOperator::WriteFields(int step, double time, const ErrorIndicator *indicator) const +namespace { - // Given the electric field and magnetic flux density, write the fields to disk for - // visualization. Write the mesh coordinates in the same units as originally input. - bool first_save = (paraview.GetCycle() < 0); - mfem::ParMesh &mesh = - HasE() ? *E->ParFESpace()->GetParMesh() : *B->ParFESpace()->GetParMesh(); - auto ScaleGridFunctions = [&mesh, this](double L) - { - // For fields on H(curl) and H(div) spaces, we "undo" the effect of redimensionalizing - // the mesh which would carry into the fields during the mapping from reference to - // physical space through the element Jacobians. No transformation for V is needed (H1 - // interpolation). Because the coefficients are always evaluating E, B in neighboring - // elements, the Jacobian scaling is the same for the domain and boundary data - // collections (instead of being different for B due to the dim - 1 evaluation). Wave - // port fields also do not require rescaling since their submesh object where they are - // evaluated remains nondimensionalized. - if (E) - { - // Piola transform: J^-T - E->Real() *= L; - E->Real().FaceNbrData() *= L; - if (HasImag()) - { - E->Imag() *= L; - E->Imag().FaceNbrData() *= L; - } - } - if (B) + +template <typename T> +void ScaleGridFunctions(double L, int dim, bool imag, T &E, T &B, T &A, T &V) +{ + // For fields on H(curl) and H(div) spaces, we "undo" the effect of redimensionalizing + // the mesh which would carry into the fields during the mapping from reference to + // physical space through the element Jacobians. No transformation for V is needed (H1 + // interpolation). Because the coefficients are always evaluating E, B in neighboring + // elements, the Jacobian scaling is the same for the domain and boundary data + // collections (instead of being different for B due to the dim - 1 evaluation). Wave + // port fields also do not require rescaling since their submesh object where they are + // evaluated remains nondimensionalized.
+ if (E) + { + // Piola transform: J^-T + E->Real() *= L; + E->Real().FaceNbrData() *= L; + if (imag) { - // Piola transform: J / |J| - const auto Ld = std::pow(L, mesh.Dimension() - 1); - B->Real() *= Ld; - B->Real().FaceNbrData() *= Ld; - if (HasImag()) - { - B->Imag() *= Ld; - B->Imag().FaceNbrData() *= Ld; - } + E->Imag() *= L; + E->Imag().FaceNbrData() *= L; } - if (A) + } + if (B) + { + // Piola transform: J / |J| + const auto Ld = std::pow(L, dim - 1); + B->Real() *= Ld; + B->Real().FaceNbrData() *= Ld; + if (imag) { - // Piola transform: J^-T - A->Real() *= L; - A->Real().FaceNbrData() *= L; + B->Imag() *= Ld; + B->Imag().FaceNbrData() *= Ld; } - }; - mesh::DimensionalizeMesh(mesh, mesh_Lc0); - ScaleGridFunctions(mesh_Lc0); + } + if (A) + { + // Piola transform: J^-T + A->Real() *= L; + A->Real().FaceNbrData() *= L; + } +} + +} // namespace +void PostOperator::WriteFields(int step, double time) const +{ + // Given the electric field and magnetic flux density, write the fields to disk for + // visualization. Write the mesh coordinates in the same units as originally input. + mfem::ParMesh &mesh = + HasE() ? *E->ParFESpace()->GetParMesh() : *B->ParFESpace()->GetParMesh(); + mesh::DimensionalizeMesh(mesh, mesh_Lc0); + ScaleGridFunctions(mesh_Lc0, mesh.Dimension(), HasImag(), E, B, A, V); paraview.SetCycle(step); paraview.SetTime(time); paraview_bdr.SetCycle(step); paraview_bdr.SetTime(time); - if (first_save || indicator) - { - // No need for these to be parallel objects, since the data is local to each process and - // there isn't a need to ever access the element neighbors. - mfem::L2_FECollection pwconst_fec(0, mesh.Dimension()); - mfem::FiniteElementSpace pwconst_fespace(&mesh, &pwconst_fec); - std::unique_ptr rank, eta; - if (first_save) - { - rank = std::make_unique(&pwconst_fespace); - *rank = mesh.GetMyRank() + 1; - paraview.RegisterField("Rank", rank.get()); - } - if (indicator) - { - eta = std::make_unique(&pwconst_fespace); - MFEM_VERIFY(eta->Size() == indicator->Local().Size(), - "Size mismatch for provided ErrorIndicator for postprocessing!"); - *eta = indicator->Local(); - paraview.RegisterField("Indicator", eta.get()); - } - paraview.Save(); - if (rank) - { - paraview.DeregisterField("Rank"); - } - if (eta) - { - paraview.DeregisterField("Indicator"); - } + paraview.Save(); + paraview_bdr.Save(); + mesh::NondimensionalizeMesh(mesh, mesh_Lc0); + ScaleGridFunctions(1.0 / mesh_Lc0, mesh.Dimension(), HasImag(), E, B, A, V); +} + +void PostOperator::WriteFieldsFinal(const ErrorIndicator *indicator) const +{ + // Write the mesh partitioning and (optionally) error indicators at the final step. No + // need for these to be parallel objects, since the data is local to each process and + // there isn't a need to ever access the element neighbors. We set the time to some + // non-used value to make the step identifiable within the data collection. + mfem::ParMesh &mesh = + HasE() ? *E->ParFESpace()->GetParMesh() : *B->ParFESpace()->GetParMesh(); + mesh::DimensionalizeMesh(mesh, mesh_Lc0); + paraview.SetCycle(paraview.GetCycle() + 1); + if (paraview.GetTime() < 1.0) + { + paraview.SetTime(99.0); } else { - paraview.Save(); + // 1 -> 99, 10 -> 999, etc. 
+ paraview.SetTime( + std::pow(10.0, 2.0 + static_cast<int>(std::log10(paraview.GetTime()))) - 1.0); + } + mfem::DataCollection::FieldMapType field_map(paraview.GetFieldMap()); // Copy + for (const auto &[name, gf] : field_map) + { + paraview.DeregisterField(name); + } + mfem::DataCollection::CoeffFieldMapType coeff_field_map(paraview.GetCoeffFieldMap()); + for (const auto &[name, gf] : coeff_field_map) + { + paraview.DeregisterCoeffField(name); + } + mfem::DataCollection::VCoeffFieldMapType vcoeff_field_map(paraview.GetVCoeffFieldMap()); + for (const auto &[name, gf] : vcoeff_field_map) + { + paraview.DeregisterVCoeffField(name); + } + mfem::L2_FECollection pwconst_fec(0, mesh.Dimension()); + mfem::FiniteElementSpace pwconst_fespace(&mesh, &pwconst_fec); + std::unique_ptr<mfem::GridFunction> rank, eta; + { + rank = std::make_unique<mfem::GridFunction>(&pwconst_fespace); + *rank = mesh.GetMyRank() + 1; + paraview.RegisterField("Rank", rank.get()); + } + if (indicator) + { + eta = std::make_unique<mfem::GridFunction>(&pwconst_fespace); + MFEM_VERIFY(eta->Size() == indicator->Local().Size(), + "Size mismatch for provided ErrorIndicator for postprocessing!"); + *eta = indicator->Local(); + paraview.RegisterField("Indicator", eta.get()); + } + paraview.Save(); + if (rank) + { + paraview.DeregisterField("Rank"); + } + if (eta) + { + paraview.DeregisterField("Indicator"); + } + for (const auto &[name, gf] : field_map) + { + paraview.RegisterField(name, gf); + } + for (const auto &[name, gf] : coeff_field_map) + { + paraview.RegisterCoeffField(name, gf); + } + for (const auto &[name, gf] : vcoeff_field_map) + { + paraview.RegisterVCoeffField(name, gf); } mesh::NondimensionalizeMesh(mesh, mesh_Lc0); - ScaleGridFunctions(1.0 / mesh_Lc0); } std::vector<std::complex<double>> PostOperator::ProbeEField() const diff --git a/palace/models/postoperator.hpp b/palace/models/postoperator.hpp index 4cdfc4763..52263c23b 100644 --- a/palace/models/postoperator.hpp +++ b/palace/models/postoperator.hpp @@ -162,7 +162,8 @@ class PostOperator // Write to disk the E- and B-fields extracted from the solution vectors. Note that fields // are not redimensionalized, to do so one needs to compute: B <= B * (μ₀ H₀), E <= E * // (Z₀ H₀), V <= V * (Z₀ H₀ L₀), etc. - void WriteFields(int step, double time, const ErrorIndicator *indicator = nullptr) const; + void WriteFields(int step, double time) const; + void WriteFieldsFinal(const ErrorIndicator *indicator = nullptr) const; // Probe the E- and B-fields for their vector-values at speceified locations in space. // Locations of probes are set up in constructor from configuration file data.
If diff --git a/palace/utils/configfile.cpp b/palace/utils/configfile.cpp index 69d834ab4..717f0e89c 100644 --- a/palace/utils/configfile.cpp +++ b/palace/utils/configfile.cpp @@ -1429,7 +1429,6 @@ void DrivenSolverData::SetUp(json &solver) max_f = driven->at("MaxFreq"); // Required delta_f = driven->at("FreqStep"); // Required delta_post = driven->value("SaveStep", delta_post); - only_port_post = driven->value("SaveOnlyPorts", only_port_post); rst = driven->value("Restart", rst); adaptive_tol = driven->value("AdaptiveTol", adaptive_tol); adaptive_max_size = driven->value("AdaptiveMaxSamples", adaptive_max_size); @@ -1440,7 +1439,6 @@ void DrivenSolverData::SetUp(json &solver) driven->erase("MaxFreq"); driven->erase("FreqStep"); driven->erase("SaveStep"); - driven->erase("SaveOnlyPorts"); driven->erase("Restart"); driven->erase("AdaptiveTol"); driven->erase("AdaptiveMaxSamples"); @@ -1454,7 +1452,6 @@ void DrivenSolverData::SetUp(json &solver) // std::cout << "MaxFreq: " << max_f << '\n'; // std::cout << "FreqStep: " << delta_f << '\n'; // std::cout << "SaveStep: " << delta_post << '\n'; - // std::cout << "SaveOnlyPorts: " << only_port_post << '\n'; // std::cout << "Restart: " << rst << '\n'; // std::cout << "AdaptiveTol: " << adaptive_tol << '\n'; // std::cout << "AdaptiveMaxSamples: " << adaptive_max_size << '\n'; @@ -1616,7 +1613,6 @@ void TransientSolverData::SetUp(json &solver) max_t = transient->at("MaxTime"); // Required delta_t = transient->at("TimeStep"); // Required delta_post = transient->value("SaveStep", delta_post); - only_port_post = transient->value("SaveOnlyPorts", only_port_post); // Cleanup transient->erase("Type"); @@ -1626,7 +1622,6 @@ void TransientSolverData::SetUp(json &solver) transient->erase("MaxTime"); transient->erase("TimeStep"); transient->erase("SaveStep"); - transient->erase("SaveOnlyPorts"); MFEM_VERIFY(transient->empty(), "Found an unsupported configuration file keyword under \"Transient\"!\n" << transient->dump(2)); @@ -1639,7 +1634,6 @@ void TransientSolverData::SetUp(json &solver) // std::cout << "MaxTime: " << max_t << '\n'; // std::cout << "TimeStep: " << delta_t << '\n'; // std::cout << "SaveStep: " << delta_post << '\n'; - // std::cout << "SaveOnlyPorts: " << only_port_post << '\n'; } // Helpers for converting string keys to enum for LinearSolverData::Type, diff --git a/palace/utils/configfile.hpp b/palace/utils/configfile.hpp index d0dd0ee7f..ec0622318 100644 --- a/palace/utils/configfile.hpp +++ b/palace/utils/configfile.hpp @@ -578,9 +578,6 @@ struct DrivenSolverData // Step increment for saving fields to disk. int delta_post = 0; - // Only perform postprocessing on port boundaries, skipping domain interior. - bool only_port_post = false; - // Restart iteration for a partial sweep. int rst = 1; @@ -711,9 +708,6 @@ struct TransientSolverData // Step increment for saving fields to disk. int delta_post = 0; - // Only perform postprocessing on port boundaries, skipping domain interior. 
- bool only_port_post = false; - void SetUp(json &solver); }; diff --git a/palace/utils/error.hpp b/palace/utils/error.hpp new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/schema/config/solver.json b/scripts/schema/config/solver.json index 02737b2cf..1db36129b 100644 --- a/scripts/schema/config/solver.json +++ b/scripts/schema/config/solver.json @@ -48,7 +48,6 @@ "MaxFreq": { "type": "number" }, "FreqStep": { "type": "number" }, "SaveStep": { "type": "integer" }, - "SaveOnlyPorts": { "type": "boolean" }, "Restart": { "type": "integer", "exclusiveMinimum": 0 }, "AdaptiveTol": { "type": "number", "minimum": 0.0 }, "AdaptiveMaxSamples": { "type": "number", "exclusiveMinimum": 0 }, @@ -68,8 +67,7 @@ "ExcitationWidth": { "type": "number" }, "MaxTime": { "type": "number" }, "TimeStep": { "type": "number" }, - "SaveStep": { "type": "integer" }, - "SaveOnlyPorts": { "type": "boolean" } + "SaveStep": { "type": "integer" } } }, "Electrostatic":