From 08da23e44503898b893861f409439d7de8defa1b Mon Sep 17 00:00:00 2001
From: Remi Lehe
Date: Fri, 30 Aug 2024 16:34:35 -0700
Subject: [PATCH 01/91] Express differential luminosity diagnostics in eV
 (#5197)

---
 Docs/source/usage/parameters.rst             |  8 ++++----
 Examples/Tests/diff_lumi_diag/analysis.py    |  3 +--
 Examples/Tests/diff_lumi_diag/inputs         | 11 +++++------
 .../ReducedDiags/DifferentialLuminosity.cpp  | 17 +++++++++--------
 4 files changed, 19 insertions(+), 20 deletions(-)

diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index 817ee29c763..980fb1ef2e0 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -3462,8 +3462,8 @@ Reduced Diagnostics
 :math:`\int d\mathcal{E}^* \frac{d\mathcal{L}}{d\mathcal{E}^*} (\mathcal{E}^*, t)\sigma^*(\mathcal{E}^*)`
 gives the total number of collisions of that process (from the beginning of the simulation up until time :math:`t`).

-    The differential luminosity is given in units of :math:`\text{m}^{-2}.\text{J}^{-1}`. For collider-relevant WarpX simulations
-    involving two crossing, high-energy beams of particles, the differential luminosity in :math:`\text{s}^{-1}.\text{m}^{-2}.\text{J}^{-1}`
+    The differential luminosity is given in units of :math:`\text{m}^{-2}.\text{eV}^{-1}`. For collider-relevant WarpX simulations
+    involving two crossing, high-energy beams of particles, the differential luminosity in :math:`\text{s}^{-1}.\text{m}^{-2}.\text{eV}^{-1}`
 can be obtained by multiplying the above differential luminosity by the expected repetition rate of the beams.
 In practice, the above expression of the differential luminosity is evaluated over discrete bins in energy :math:`\mathcal{E}^*`,
@@ -3475,10 +3475,10 @@ Reduced Diagnostics
 * ``.bin_number`` (`int` > 0)
     The number of bins in energy :math:`\mathcal{E}^*`

-    * ``.bin_max`` (`float`, in Joules)
+    * ``.bin_max`` (`float`, in eV)
     The maximum value of :math:`\mathcal{E}^*` for which the differential luminosity is computed.

-    * ``.bin_min`` (`float`, in Joules)
+    * ``.bin_min`` (`float`, in eV)
     The minimum value of :math:`\mathcal{E}^*` for which the differential luminosity is computed.
* ``.intervals`` (`string`) diff --git a/Examples/Tests/diff_lumi_diag/analysis.py b/Examples/Tests/diff_lumi_diag/analysis.py index 59378950fa5..ef573fc4863 100755 --- a/Examples/Tests/diff_lumi_diag/analysis.py +++ b/Examples/Tests/diff_lumi_diag/analysis.py @@ -9,7 +9,6 @@ import numpy as np from read_raw_data import read_reduced_diags_histogram -from scipy.constants import eV sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import checksumAPI @@ -22,7 +21,7 @@ # Beam parameters N = 1.2e10 -E_beam = 125e9 * eV +E_beam = 125e9 # in eV sigma_x = 500e-9 sigma_y = 10e-9 diff --git a/Examples/Tests/diff_lumi_diag/inputs b/Examples/Tests/diff_lumi_diag/inputs index 1424cfd7672..e8854937b6e 100644 --- a/Examples/Tests/diff_lumi_diag/inputs +++ b/Examples/Tests/diff_lumi_diag/inputs @@ -1,12 +1,11 @@ ################################# ########## MY CONSTANTS ######### ################################# -my_constants.mc2 = m_e*clight*clight -my_constants.GeV = q_e*1.e9 +my_constants.mc2_eV = m_e*clight*clight/q_e # BEAMS -my_constants.beam_energy = 125.*GeV -my_constants.beam_gamma = beam_energy/(mc2) +my_constants.beam_energy_eV = 125.e9 +my_constants.beam_gamma = beam_energy_eV/(mc2_eV) my_constants.beam_charge = 1.2e10*q_e my_constants.sigmax = 500e-9 my_constants.sigmay = 10e-9 @@ -123,5 +122,5 @@ DifferentialLuminosity_beam1_beam2.type = DifferentialLuminosity DifferentialLuminosity_beam1_beam2.intervals = 5 DifferentialLuminosity_beam1_beam2.species = beam1 beam2 DifferentialLuminosity_beam1_beam2.bin_number = 128 -DifferentialLuminosity_beam1_beam2.bin_max = 2.1*beam_energy -DifferentialLuminosity_beam1_beam2.bin_min = 1.9*beam_energy +DifferentialLuminosity_beam1_beam2.bin_max = 2.1*beam_energy_eV +DifferentialLuminosity_beam1_beam2.bin_min = 1.9*beam_energy_eV diff --git a/Source/Diagnostics/ReducedDiags/DifferentialLuminosity.cpp b/Source/Diagnostics/ReducedDiags/DifferentialLuminosity.cpp index bace6cf73ce..111eca0f14c 100644 --- a/Source/Diagnostics/ReducedDiags/DifferentialLuminosity.cpp +++ b/Source/Diagnostics/ReducedDiags/DifferentialLuminosity.cpp @@ -113,7 +113,7 @@ DifferentialLuminosity::DifferentialLuminosity (const std::string& rd_name) ofs << m_sep; ofs << "[" << off++ << "]"; const Real b = m_bin_min + m_bin_size*(Real(i)+0.5_rt); - ofs << "bin" << 1+i << "=" << b << "(J)"; + ofs << "bin" << 1+i << "=" << b << "(eV)"; } ofs << std::endl; // close file @@ -133,7 +133,8 @@ void DifferentialLuminosity::ComputeDiags (int step) // array d_data, we add contributions at *each timestep*, but // we only write the data to file at intervals specified by the user. 
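 // (Worked check of the eV-based binning computed below, using the setup of
 // Examples/Tests/diff_lumi_diag/inputs: for two 125 GeV beams colliding
 // head-on, u1_dot_u2 < 0, so gamma1*gamma2 - u1_dot_u2/c^2 ~ 2*gamma1*gamma2
 // and E_com ~ 2*sqrt(E1*E2) = 250 GeV = 2.5e11 eV; this is why that test
 // bins E_com between 1.9 and 2.1 times the beam energy.)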
- const Real c2 = PhysConst::c*PhysConst::c;
+ const Real c2_over_qe = PhysConst::c*PhysConst::c/PhysConst::q_e;
+ const Real inv_c2 = 1._rt/(PhysConst::c*PhysConst::c);

 // get a reference to WarpX instance
 auto& warpx = WarpX::GetInstance();
@@ -218,13 +219,13 @@ void DifferentialLuminosity::ComputeDiags (int step)
 index_type const j_2 = indices_2[i_2];

 Real const u1_square = u1x[j_1]*u1x[j_1] + u1y[j_1]*u1y[j_1] + u1z[j_1]*u1z[j_1];
- Real const gamma1 = std::sqrt(1._rt + u1_square/c2);
+ Real const gamma1 = std::sqrt(1._rt + u1_square*inv_c2);
 Real const u2_square = u2x[j_2]*u2x[j_2] + u2y[j_2]*u2y[j_2] + u2z[j_2]*u2z[j_2];
- Real const gamma2 = std::sqrt(1._rt + u2_square/c2);
+ Real const gamma2 = std::sqrt(1._rt + u2_square*inv_c2);
 Real const u1_dot_u2 = u1x[j_1]*u2x[j_2] + u1y[j_1]*u2y[j_2] + u1z[j_1]*u2z[j_2];

- // center of mass energy
- Real const E_com = c2 * std::sqrt(m1*m1 + m2*m2 + 2*m1*m2* (gamma1*gamma2 - u1_dot_u2/c2));
+ // center of mass energy in eV
+ Real const E_com = c2_over_qe * std::sqrt(m1*m1 + m2*m2 + 2*m1*m2* (gamma1*gamma2 - u1_dot_u2*inv_c2));

 // determine particle bin
 int const bin = int(Math::floor((E_com-bin_min)/bin_size));
@@ -242,9 +243,9 @@ void DifferentialLuminosity::ComputeDiags (int step)
 Real const v1_cross_v2_square = (u1_cross_u2_x*u1_cross_u2_x + u1_cross_u2_y*u1_cross_u2_y + u1_cross_u2_z*u1_cross_u2_z) / (gamma1*gamma1*gamma2*gamma2);

- Real const radicand = v1_minus_v2_square - v1_cross_v2_square / c2;
+ Real const radicand = v1_minus_v2_square - v1_cross_v2_square * inv_c2;

- Real const dL_dEcom = std::sqrt( radicand ) * w1[j_1] * w2[j_2] / dV / bin_size * dt; // m^-2 J^-1
+ Real const dL_dEcom = std::sqrt( radicand ) * w1[j_1] * w2[j_2] / dV / bin_size * dt; // m^-2 eV^-1

 amrex::HostDevice::Atomic::Add(&dptr_data[bin], dL_dEcom);

From cc6e7ea1074c0d9e97cdf60363d772c119cc1dea Mon Sep 17 00:00:00 2001
From: Justin Ray Angus
Date: Fri, 30 Aug 2024 17:20:49 -0700
Subject: [PATCH 02/91] Generalization of WarpXSolverVec class used by implicit
 solvers (#5171)

* added warpx::fields::FieldType member to WarpXSolverVec.
* AMREX_ALWAYS ==> WARPX_ALWAYS
* Changed how WarpXSolverVec is defined. Explicit call to SetDotMask() is no longer required.
* refactoring.
* refactoring comments and how to Copy WarpXSolverVec.
* small comment fix.
* dotMask now owned by WarpX. Defined only when needed.
* clang tidy.
* adding Afield_dotMask.
* adding None as enum FieldType
* WarpXSolverVec can now be used with scalar field quantities.
* putting check for dotMask define inside SetDotMask().
* don't use namespace in header file.
* name change: field ==> array
* updating comments.
* updating comment.
* removed function.
* braces
* added isFieldArray() function to the warpx::fields namespace.
* simplify logic for boolean return
* adding assert.
* adding assertSameType() function.
* adding assertIsDefined() function.
* both array_vec and scalar_vec types can't be FieldType::None
* restoring comment change in implicit solvers. moved to separate PR.
* additional revert to comments.
* one more revert to comments.
* fixing merge issue.
* reposition header file.
* adding ArrayFieldTypes[] to Fields.H
* using std::any_of()
* attempt to please clang tidy.
* Doxygen: Location of Fwd Declaration

  Fix location of `WarpX` forward declaration.
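A minimal usage sketch of the generalized interface (illustrative only; the
calls below are the ones introduced by this patch):

    // define a solver vector holding the E-field unknowns (size-3 array type)
    WarpXSolverVec E, Eold;
    E.Define( &warpx, warpx::fields::FieldType::Efield_fp );
    // Eold picks up the same field types and shape as E
    Eold.Define( E );
    // copy the WarpX-owned Efield_fp data into Eold
    Eold.Copy( warpx::fields::FieldType::Efield_fp );
    // owner-masked dot product / norm (the dotMask is now provided by WarpX)
    amrex::Real const norm = Eold.norm2();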
--------- Co-authored-by: Axel Huebl --- Source/FieldSolver/Fields.H | 18 ++ .../ImplicitSolvers/SemiImplicitEM.cpp | 11 +- .../ImplicitSolvers/ThetaImplicitEM.cpp | 11 +- .../ImplicitSolvers/WarpXImplicitOps.cpp | 21 +- .../ImplicitSolvers/WarpXSolverVec.H | 228 ++++++++++++------ .../ImplicitSolvers/WarpXSolverVec.cpp | 137 ++++++++--- Source/WarpX.H | 21 ++ Source/WarpX.cpp | 50 ++++ 8 files changed, 367 insertions(+), 130 deletions(-) diff --git a/Source/FieldSolver/Fields.H b/Source/FieldSolver/Fields.H index 7c9cfe5285e..9e4ce5a71a7 100644 --- a/Source/FieldSolver/Fields.H +++ b/Source/FieldSolver/Fields.H @@ -7,10 +7,14 @@ #ifndef WARPX_FIELDS_H_ #define WARPX_FIELDS_H_ +#include +#include + namespace warpx::fields { enum struct FieldType : int { + None, Efield_aux, Bfield_aux, Efield_fp, @@ -37,6 +41,20 @@ namespace warpx::fields Efield_avg_cp, Bfield_avg_cp }; + + constexpr FieldType ArrayFieldTypes[] = { + FieldType::Efield_aux, FieldType::Bfield_aux, FieldType::Efield_fp, FieldType::Bfield_fp, + FieldType::current_fp, FieldType::current_fp_nodal, FieldType::vector_potential_fp, + FieldType::Efield_cp, FieldType::Bfield_cp, FieldType::current_cp, + FieldType::Efield_avg_fp, FieldType::Bfield_avg_fp, FieldType::Efield_avg_cp, FieldType::Bfield_avg_cp}; + + inline bool + isFieldArray (const FieldType field_type) + { + return std::any_of( std::begin(ArrayFieldTypes), std::end(ArrayFieldTypes), + [field_type](const FieldType& f) { return f == field_type; }); + } + } #endif //WARPX_FIELDS_H_ diff --git a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp index da15ac02143..cfd18354878 100644 --- a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp @@ -20,13 +20,8 @@ void SemiImplicitEM::Define ( WarpX* a_WarpX ) m_WarpX = a_WarpX; // Define E and Eold vectors - m_E.Define( m_WarpX->getMultiLevelField(FieldType::Efield_fp) ); - m_Eold.Define( m_WarpX->getMultiLevelField(FieldType::Efield_fp) ); - - // Need to define the WarpXSolverVec owned dot_mask to do dot - // product correctly for linear and nonlinear solvers - const amrex::Vector& Geom = m_WarpX->Geom(); - m_E.SetDotMask(Geom); + m_E.Define( m_WarpX, FieldType::Efield_fp ); + m_Eold.Define( m_E ); // Parse implicit solver parameters const amrex::ParmParse pp("implicit_evolve"); @@ -71,7 +66,7 @@ void SemiImplicitEM::OneStep ( amrex::Real a_time, m_WarpX->SaveParticlesAtImplicitStepStart ( ); // Save Eg at the start of the time step - m_Eold.Copy( m_WarpX->getMultiLevelField(FieldType::Efield_fp) ); + m_Eold.Copy( FieldType::Efield_fp ); // Advance WarpX owned Bfield_fp to t_{n+1/2} m_WarpX->EvolveB(a_dt, DtType::Full); diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp index 88b0a92a747..4c86389797f 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp @@ -21,13 +21,8 @@ void ThetaImplicitEM::Define ( WarpX* const a_WarpX ) m_WarpX = a_WarpX; // Define E and Eold vectors - m_E.Define( m_WarpX->getMultiLevelField(FieldType::Efield_fp) ); - m_Eold.Define( m_WarpX->getMultiLevelField(FieldType::Efield_fp) ); - - // Need to define the WarpXSolverVec owned dot_mask to do dot - // product correctly for linear and nonlinear solvers - const amrex::Vector& Geom = m_WarpX->Geom(); - m_E.SetDotMask(Geom); + m_E.Define( m_WarpX, FieldType::Efield_fp ); + m_Eold.Define( m_E ); 
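+ // m_Eold mirrors the field types and shape of m_E; it holds the E-field
+ // saved at the start of each time step (filled via m_Eold.Copy() in OneStep)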
// Define Bold MultiFab const int num_levels = 1; @@ -92,7 +87,7 @@ void ThetaImplicitEM::OneStep ( const amrex::Real a_time, m_WarpX->SaveParticlesAtImplicitStepStart ( ); // Save Eg at the start of the time step - m_Eold.Copy( m_WarpX->getMultiLevelField(FieldType::Efield_fp) ); + m_Eold.Copy( FieldType::Efield_fp ); const int num_levels = static_cast(m_Bold.size()); for (int lev = 0; lev < num_levels; ++lev) { diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp index f3ba9e7f86b..8dd97ed5525 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp @@ -11,6 +11,7 @@ #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "Evolve/WarpXDtType.H" #include "Evolve/WarpXPushType.H" +#include "FieldSolver/Fields.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" #include "Parallelization/GuardCellManager.H" #include "Particles/MultiParticleContainer.H" @@ -68,7 +69,11 @@ WarpX::ImplicitPreRHSOp ( amrex::Real a_cur_time, void WarpX::SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E ) { - const amrex::Vector, 3 > >& Evec = a_E.getVec(); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + a_E.getArrayVecType()==warpx::fields::FieldType::Efield_fp, + "WarpX::SetElectricFieldAndApplyBCs() must be called with Efield_fp type"); + + const amrex::Vector, 3 > >& Evec = a_E.getArrayVec(); amrex::MultiFab::Copy(*Efield_fp[0][0], *Evec[0][0], 0, 0, ncomps, Evec[0][0]->nGrowVect()); amrex::MultiFab::Copy(*Efield_fp[0][1], *Evec[0][1], 0, 0, ncomps, Evec[0][1]->nGrowVect()); amrex::MultiFab::Copy(*Efield_fp[0][2], *Evec[0][2], 0, 0, ncomps, Evec[0][2]->nGrowVect()); @@ -316,22 +321,26 @@ WarpX::ImplicitComputeRHSE (int lev, amrex::Real a_dt, WarpXSolverVec& a_Erhs_v void WarpX::ImplicitComputeRHSE (int lev, PatchType patch_type, amrex::Real a_dt, WarpXSolverVec& a_Erhs_vec) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + a_Erhs_vec.getArrayVecType()==warpx::fields::FieldType::Efield_fp, + "WarpX::ImplicitComputeRHSE() must be called with Efield_fp type"); + // set RHS to zero value - a_Erhs_vec.getVec()[lev][0]->setVal(0.0); - a_Erhs_vec.getVec()[lev][1]->setVal(0.0); - a_Erhs_vec.getVec()[lev][2]->setVal(0.0); + a_Erhs_vec.getArrayVec()[lev][0]->setVal(0.0); + a_Erhs_vec.getArrayVec()[lev][1]->setVal(0.0); + a_Erhs_vec.getArrayVec()[lev][2]->setVal(0.0); // Compute Efield_rhs in regular cells by calling EvolveE. Because // a_Erhs_vec is set to zero above, calling EvolveE below results in // a_Erhs_vec storing only the RHS of the update equation. 
I.e.,
 // c^2*dt*(curl(B^{n+theta}) - mu0*J^{n+1/2})
 if (patch_type == PatchType::fine) {
- m_fdtd_solver_fp[lev]->EvolveE( a_Erhs_vec.getVec()[lev], Bfield_fp[lev],
+ m_fdtd_solver_fp[lev]->EvolveE( a_Erhs_vec.getArrayVec()[lev], Bfield_fp[lev],
 current_fp[lev], m_edge_lengths[lev],
 m_face_areas[lev], ECTRhofield[lev],
 F_fp[lev], lev, a_dt );
 } else {
- m_fdtd_solver_cp[lev]->EvolveE( a_Erhs_vec.getVec()[lev], Bfield_cp[lev],
+ m_fdtd_solver_cp[lev]->EvolveE( a_Erhs_vec.getArrayVec()[lev], Bfield_cp[lev],
 current_cp[lev], m_edge_lengths[lev],
 m_face_areas[lev], ECTRhofield[lev],
 F_cp[lev], lev, a_dt );

diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H
index 89a0b82b700..f884f5fa623 100644
--- a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H
+++ b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H
@@ -8,6 +8,7 @@
 #define WarpXSolverVec_H_

 #include "Utils/TextMsg.H"
+#include "FieldSolver/Fields.H"

 #include
 #include
@@ -15,7 +16,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -32,18 +32,25 @@
 #include
 #include

+
+// forward declaration
+class WarpX;
+
 /**
- * \brief This is a wrapper class around a Vector of array of pointers to MultiFabs that
+ * \brief
+ * This is a wrapper class around a Vector of pointers to MultiFabs that
  * contains basic math operators and functionality needed to interact with nonlinear
- * solvers in WarpX and linear solvers in AMReX, such as GMRES.
+ * solvers in WarpX and linear solvers in AMReX, such as GMRES. The size of the
+ * Vector is the number of amr levels. Hardcoded for 1 right now.
  *
- * The size of the Vector is the number of amr levels. Hardcoded for 1 right now. The
- * size of the array is the number of MultiFabs. It is hardcoded for 3 right now as it
- * is only used for the electric field in the implicit electromagnetic time solvers. In
- * the future, the array size can be made a template parameter so that this class can
- * be used for other solver vectors, such as electrostatic (array size 1) or Darwin (array size 4).
+ * A WarpXSolverVec can consist of a size-3 array of MultiFabs (for vector fields
+ * such as E, B, and A) or of a single MultiFab for scalar fields. Both the array
+ * and the scalar field are identified by a warpx::fields::FieldType.
+ * Additionally, a WarpXSolverVec can in general contain both a size-3 array field and a
+ * scalar field. For example, the array field can be used for the vector potential A
+ * and the scalar field can be used for the scalar potential phi, which is the full state of
+ * unknowns for a Darwin electromagnetic model.
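+ *
+ * A hypothetical combined definition in that spirit (an illustrative sketch,
+ * not code from this patch; it only uses the Define() signature declared below
+ * and FieldType values that exist in Fields.H):
+ *   WarpXSolverVec darwin_state;
+ *   darwin_state.Define( &warpx, FieldType::vector_potential_fp, FieldType::phi_fp );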
*/ - class WarpXSolverVec { public: @@ -59,88 +66,94 @@ public: [[nodiscard]] inline bool IsDefined () const { return m_is_defined; } - inline - void Define (const WarpXSolverVec& a_vec) - { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - a_vec.IsDefined(), - "WarpXSolverVec::Define(a_vec) called with undefined a_vec"); - Define( a_vec.getVec() ); - } + void Define ( WarpX* a_WarpX, + warpx::fields::FieldType a_array_type, + warpx::fields::FieldType a_scalar_type = warpx::fields::FieldType::None ); inline - void Define ( const amrex::Vector, 3 > >& a_solver_vec ) + void Define ( const WarpXSolverVec& a_solver_vec ) { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - !IsDefined(), - "WarpXSolverVec::Define() called on undefined WarpXSolverVec"); - m_field_vec.resize(m_num_amr_levels); - const int lev = 0; - for (int n=0; n<3; n++) { - const amrex::MultiFab& mf_model = *a_solver_vec[lev][n]; - m_field_vec[lev][n] = std::make_unique( mf_model.boxArray(), mf_model.DistributionMap(), - mf_model.nComp(), amrex::IntVect::TheZeroVector() ); - } - m_is_defined = true; + assertIsDefined( a_solver_vec ); + Define( WarpXSolverVec::m_WarpX, + a_solver_vec.getArrayVecType(), + a_solver_vec.getScalarVecType() ); } - void SetDotMask( const amrex::Vector& a_Geom ); [[nodiscard]] RT dotProduct( const WarpXSolverVec& a_X ) const; + void Copy ( warpx::fields::FieldType a_array_type, + warpx::fields::FieldType a_scalar_type = warpx::fields::FieldType::None ); + inline - void Copy ( const amrex::Vector, 3 > >& a_solver_vec ) + void Copy ( const WarpXSolverVec& a_solver_vec ) { - AMREX_ASSERT_WITH_MESSAGE( - IsDefined(), - "WarpXSolverVec::Copy() called on undefined WarpXSolverVec"); + assertIsDefined( a_solver_vec ); + if (IsDefined()) { assertSameType( a_solver_vec ); } + else { Define(a_solver_vec); } + for (int lev = 0; lev < m_num_amr_levels; ++lev) { - for (int n = 0; n < 3; ++n) { - amrex::MultiFab::Copy(*m_field_vec[lev][n], *a_solver_vec[lev][n], 0, 0, m_ncomp, amrex::IntVect::TheZeroVector() ); + if (m_array_type != warpx::fields::FieldType::None) { + for (int n = 0; n < 3; ++n) { + const std::unique_ptr& this_field = a_solver_vec.getArrayVec()[lev][n]; + amrex::MultiFab::Copy( *m_array_vec[lev][n], *this_field, 0, 0, m_ncomp, + amrex::IntVect::TheZeroVector() ); + } + } + if (m_scalar_type != warpx::fields::FieldType::None) { + const std::unique_ptr& this_scalar = a_solver_vec.getScalarVec()[lev]; + amrex::MultiFab::Copy( *m_scalar_vec[lev], *this_scalar, 0, 0, m_ncomp, + amrex::IntVect::TheZeroVector() ); } } } - inline - void Copy ( const WarpXSolverVec& a_vec ) - { - AMREX_ASSERT_WITH_MESSAGE( - a_vec.IsDefined(), - "WarpXSolverVec::Copy(a_vec) called with undefined a_vec"); - if (!IsDefined()) { Define(a_vec); } - const amrex::Vector, 3 > >& field_vec = a_vec.getVec(); - Copy(field_vec); - } - // Prohibit Copy assignment operator - WarpXSolverVec& operator= ( const WarpXSolverVec& a_vec ) = delete; + WarpXSolverVec& operator= ( const WarpXSolverVec& a_solver_vec ) = delete; // Move assignment operator WarpXSolverVec(WarpXSolverVec&&) noexcept = default; - WarpXSolverVec& operator= ( WarpXSolverVec&& a_vec ) noexcept + WarpXSolverVec& operator= ( WarpXSolverVec&& a_solver_vec ) noexcept { - if (this != &a_vec) { - m_field_vec = std::move(a_vec.m_field_vec); + if (this != &a_solver_vec) { + m_array_vec = std::move(a_solver_vec.m_array_vec); + m_scalar_vec = std::move(a_solver_vec.m_scalar_vec); + m_array_type = a_solver_vec.m_array_type; + m_scalar_type = a_solver_vec.m_scalar_type; m_is_defined = true; } return *this; } 
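 // In-place arithmetic operators: both operands must be defined and hold the
 // same array/scalar field types (enforced below via assertIsDefined() and
 // assertSameType()).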
inline - void operator+= ( const WarpXSolverVec& a_vec ) + void operator+= ( const WarpXSolverVec& a_solver_vec ) { + assertIsDefined( a_solver_vec ); + assertSameType( a_solver_vec ); for (int lev = 0; lev < m_num_amr_levels; ++lev) { - for (int n=0; n<3; n++) { - m_field_vec[lev][n]->plus(*(a_vec.getVec()[lev][n]), 0, 1, 0); + if (m_array_type != warpx::fields::FieldType::None) { + m_array_vec[lev][0]->plus(*(a_solver_vec.getArrayVec()[lev][0]), 0, 1, 0); + m_array_vec[lev][1]->plus(*(a_solver_vec.getArrayVec()[lev][1]), 0, 1, 0); + m_array_vec[lev][2]->plus(*(a_solver_vec.getArrayVec()[lev][2]), 0, 1, 0); + } + if (m_scalar_type != warpx::fields::FieldType::None) { + m_scalar_vec[lev]->plus(*(a_solver_vec.getScalarVec()[lev]), 0, 1, 0); } } } inline - void operator-= (const WarpXSolverVec& a_vec) + void operator-= (const WarpXSolverVec& a_solver_vec) { + assertIsDefined( a_solver_vec ); + assertSameType( a_solver_vec ); for (int lev = 0; lev < m_num_amr_levels; ++lev) { - for (int n=0; n<3; n++) { - m_field_vec[lev][n]->minus(*(a_vec.getVec()[lev][n]), 0, 1, 0); + if (m_array_type != warpx::fields::FieldType::None) { + m_array_vec[lev][0]->minus(*(a_solver_vec.getArrayVec()[lev][0]), 0, 1, 0); + m_array_vec[lev][1]->minus(*(a_solver_vec.getArrayVec()[lev][1]), 0, 1, 0); + m_array_vec[lev][2]->minus(*(a_solver_vec.getArrayVec()[lev][2]), 0, 1, 0); + } + if (m_scalar_type != warpx::fields::FieldType::None) { + m_scalar_vec[lev]->minus(*(a_solver_vec.getScalarVec()[lev]), 0, 1, 0); } } } @@ -151,11 +164,22 @@ public: inline void linComb (const RT a, const WarpXSolverVec& X, const RT b, const WarpXSolverVec& Y) { + assertIsDefined( X ); + assertIsDefined( Y ); + assertSameType( X ); + assertSameType( Y ); for (int lev = 0; lev < m_num_amr_levels; ++lev) { - for (int n=0; n<3; n++) { - amrex::MultiFab::LinComb(*m_field_vec[lev][n], a, *X.getVec()[lev][n], 0, - b, *Y.getVec()[lev][n], 0, - 0, 1, 0); + if (m_array_type != warpx::fields::FieldType::None) { + for (int n = 0; n < 3; n++) { + amrex::MultiFab::LinComb(*m_array_vec[lev][n], a, *X.getArrayVec()[lev][n], 0, + b, *Y.getArrayVec()[lev][n], 0, + 0, 1, 0); + } + } + if (m_scalar_type != warpx::fields::FieldType::None) { + amrex::MultiFab::LinComb(*m_scalar_vec[lev], a, *X.getScalarVec()[lev], 0, + b, *Y.getScalarVec()[lev], 0, + 0, 1, 0); } } } @@ -165,9 +189,17 @@ public: */ void increment (const WarpXSolverVec& X, const RT a) { + assertIsDefined( X ); + assertSameType( X ); for (int lev = 0; lev < m_num_amr_levels; ++lev) { - for (int n=0; n<3; n++) { - amrex::MultiFab::Saxpy( *m_field_vec[lev][n], a, *X.getVec()[lev][n], + if (m_array_type != warpx::fields::FieldType::None) { + for (int n = 0; n < 3; n++) { + amrex::MultiFab::Saxpy( *m_array_vec[lev][n], a, *X.getArrayVec()[lev][n], + 0, 0, 1, amrex::IntVect::TheZeroVector() ); + } + } + if (m_scalar_type != warpx::fields::FieldType::None) { + amrex::MultiFab::Saxpy( *m_scalar_vec[lev], a, *X.getScalarVec()[lev], 0, 0, 1, amrex::IntVect::TheZeroVector() ); } } @@ -179,9 +211,17 @@ public: inline void scale (RT a_a) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + IsDefined(), + "WarpXSolverVec::scale() called on undefined WarpXSolverVec"); for (int lev = 0; lev < m_num_amr_levels; ++lev) { - for (int n=0; n<3; n++) { - m_field_vec[lev][n]->mult(a_a, 0, 1); + if (m_array_type != warpx::fields::FieldType::None) { + m_array_vec[lev][0]->mult(a_a, 0, 1); + m_array_vec[lev][1]->mult(a_a, 0, 1); + m_array_vec[lev][2]->mult(a_a, 0, 1); + } + if (m_scalar_type != warpx::fields::FieldType::None) { + 
m_scalar_vec[lev]->mult(a_a, 0, 1); } } } @@ -192,41 +232,69 @@ public: inline void setVal ( const RT a_val ) { - AMREX_ASSERT_WITH_MESSAGE( + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( IsDefined(), - "WarpXSolverVec::ones() called on undefined WarpXSolverVec"); + "WarpXSolverVec::setVal() called on undefined WarpXSolverVec"); for (int lev = 0; lev < m_num_amr_levels; ++lev) { - for (int n=0; n<3; n++) { - m_field_vec[lev][n]->setVal(a_val); + if (m_array_type != warpx::fields::FieldType::None) { + m_array_vec[lev][0]->setVal(a_val); + m_array_vec[lev][1]->setVal(a_val); + m_array_vec[lev][2]->setVal(a_val); + } + if (m_scalar_type != warpx::fields::FieldType::None) { + m_scalar_vec[lev]->setVal(a_val); } } } + inline + void assertIsDefined( const WarpXSolverVec& a_solver_vec ) const + { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + a_solver_vec.IsDefined(), + "WarpXSolverVec::function(X) called with undefined WarpXSolverVec X"); + } + + inline + void assertSameType( const WarpXSolverVec& a_solver_vec ) const + { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + a_solver_vec.getArrayVecType()==m_array_type && + a_solver_vec.getScalarVecType()==m_scalar_type, + "WarpXSolverVec::function(X) called with WarpXSolverVec X of different type"); + } + [[nodiscard]] inline RT norm2 () const { auto const norm = dotProduct(*this); return std::sqrt(norm); } - [[nodiscard]] const amrex::Vector, 3 > >& getVec() const {return m_field_vec;} - amrex::Vector, 3 > >& getVec() {return m_field_vec;} + [[nodiscard]] const amrex::Vector,3>>& getArrayVec() const {return m_array_vec;} + amrex::Vector,3>>& getArrayVec() {return m_array_vec;} + + [[nodiscard]] const amrex::Vector>& getScalarVec() const {return m_scalar_vec;} + amrex::Vector>& getScalarVec() {return m_scalar_vec;} - // clearDotMask() must be called by the highest class that owns WarpXSolverVec() - // after it is done being used ( typically in the destructor ) to avoid the - // following error message after the simulation finishes: - // malloc_consolidate(): unaligned fastbin chunk detected - static void clearDotMask() { m_dotMask.clear(); } + // solver vector types are type warpx::fields::FieldType + [[nodiscard]] warpx::fields::FieldType getArrayVecType () const { return m_array_type; } + [[nodiscard]] warpx::fields::FieldType getScalarVecType () const { return m_scalar_type; } private: - bool m_is_defined = false; - amrex::Vector, 3 > > m_field_vec; + bool m_is_defined = false; + + amrex::Vector,3>> m_array_vec; + amrex::Vector> m_scalar_vec; + + warpx::fields::FieldType m_array_type = warpx::fields::FieldType::None; + warpx::fields::FieldType m_scalar_type = warpx::fields::FieldType::None; static constexpr int m_ncomp = 1; static constexpr int m_num_amr_levels = 1; - inline static bool m_dot_mask_defined = false; - inline static amrex::Vector,3>> m_dotMask; + inline static bool m_warpx_ptr_defined = false; + inline static WarpX* m_WarpX = nullptr; }; diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp index b181f038fb5..f2a88d82d42 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp @@ -5,46 +5,127 @@ * License: BSD-3-Clause-LBNL */ #include "FieldSolver/ImplicitSolvers/WarpXSolverVec.H" +#include "WarpX.H" -void WarpXSolverVec::SetDotMask( const amrex::Vector& a_Geom ) +using namespace warpx::fields; + +void WarpXSolverVec::Define ( WarpX* a_WarpX, + FieldType a_array_type, + FieldType a_scalar_type ) { - if (m_dot_mask_defined) { 
return; } + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !IsDefined(), + "WarpXSolverVec::Define() called on already defined WarpXSolverVec"); + + // Define static member pointer to WarpX + if (!m_warpx_ptr_defined) { + m_WarpX = a_WarpX; + m_warpx_ptr_defined = true; + } + + m_array_type = a_array_type; + m_scalar_type = a_scalar_type; + + m_array_vec.resize(m_num_amr_levels); + m_scalar_vec.resize(m_num_amr_levels); + + // Define the 3D vector field data container + if (m_array_type != FieldType::None) { + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + isFieldArray(m_array_type), + "WarpXSolverVec::Define() called with array_type not an array field"); + + for (int lev = 0; lev < m_num_amr_levels; ++lev) { + using arr_mf_type = std::array; + const arr_mf_type this_array = m_WarpX->getFieldPointerArray(m_array_type, lev); + for (int n = 0; n < 3; n++) { + m_array_vec[lev][n] = std::make_unique( this_array[n]->boxArray(), + this_array[n]->DistributionMap(), + this_array[n]->nComp(), + amrex::IntVect::TheZeroVector() ); + } + } + + } + + // Define the scalar data container + if (m_scalar_type != FieldType::None) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - IsDefined(), - "WarpXSolverVec::SetDotMask() called from undefined instance "); - - m_dotMask.resize(m_num_amr_levels); - for ( int n = 0; n < 3; n++) { - const amrex::BoxArray& grids = m_field_vec[0][n]->boxArray(); - const amrex::MultiFab tmp( grids, m_field_vec[0][n]->DistributionMap(), - 1, 0, amrex::MFInfo().SetAlloc(false) ); - const amrex::Periodicity& period = a_Geom[0].periodicity(); - m_dotMask[0][n] = tmp.OwnerMask(period); + !isFieldArray(m_scalar_type), + "WarpXSolverVec::Define() called with scalar_type not a scalar field "); + + for (int lev = 0; lev < m_num_amr_levels; ++lev) { + const amrex::MultiFab* this_mf = m_WarpX->getFieldPointer(m_scalar_type,lev,0); + m_scalar_vec[lev] = std::make_unique( this_mf->boxArray(), + this_mf->DistributionMap(), + this_mf->nComp(), + amrex::IntVect::TheZeroVector() ); + } + } - m_dot_mask_defined = true; - // If the function below is not called, then the following - // error message occurs after the simulation finishes: - // malloc_consolidate(): unaligned fastbin chunk detected - amrex::ExecOnFinalize(WarpXSolverVec::clearDotMask); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + m_array_type != FieldType::None || + m_scalar_type != FieldType::None, + "WarpXSolverVec cannot be defined with both array and scalar vecs FieldType::None"); + + m_is_defined = true; } -[[nodiscard]] amrex::Real WarpXSolverVec::dotProduct ( const WarpXSolverVec& a_X ) const +void WarpXSolverVec::Copy ( FieldType a_array_type, + FieldType a_scalar_type ) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - m_dot_mask_defined, - "WarpXSolverVec::dotProduct called with m_dotMask not yet defined"); + IsDefined(), + "WarpXSolverVec::Copy() called on undefined WarpXSolverVec"); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - a_X.IsDefined(), - "WarpXSolverVec::dotProduct(a_X) called with undefined a_X"); + a_array_type==m_array_type && + a_scalar_type==m_scalar_type, + "WarpXSolverVec::Copy() called with vecs of different types"); + + for (int lev = 0; lev < m_num_amr_levels; ++lev) { + if (m_array_type != FieldType::None) { + using arr_mf_type = std::array; + const arr_mf_type this_array = m_WarpX->getFieldPointerArray(m_array_type, lev); + for (int n = 0; n < 3; ++n) { + amrex::MultiFab::Copy( *m_array_vec[lev][n], *this_array[n], 0, 0, m_ncomp, + amrex::IntVect::TheZeroVector() ); + } + } + if (m_scalar_type != FieldType::None) { + const amrex::MultiFab* this_scalar = 
m_WarpX->getFieldPointer(m_scalar_type,lev,0); + amrex::MultiFab::Copy( *m_scalar_vec[lev], *this_scalar, 0, 0, m_ncomp, + amrex::IntVect::TheZeroVector() ); + } + } +} + +[[nodiscard]] amrex::Real WarpXSolverVec::dotProduct ( const WarpXSolverVec& a_X ) const +{ + assertIsDefined( a_X ); + assertSameType( a_X ); + amrex::Real result = 0.0; - const int lev = 0; const bool local = true; - for (int n = 0; n < 3; ++n) { - auto rtmp = amrex::MultiFab::Dot( *m_dotMask[lev][n], - *m_field_vec[lev][n], 0, - *a_X.getVec()[lev][n], 0, 1, 0, local); - result += rtmp; + for (int lev = 0; lev < m_num_amr_levels; ++lev) { + if (m_array_type != FieldType::None) { + for (int n = 0; n < 3; ++n) { + const amrex::iMultiFab* dotMask = m_WarpX->getFieldDotMaskPointer(m_array_type,lev,n); + auto rtmp = amrex::MultiFab::Dot( *dotMask, + *m_array_vec[lev][n], 0, + *a_X.getArrayVec()[lev][n], 0, 1, 0, local); + result += rtmp; + } + } + if (m_scalar_type != FieldType::None) { + const amrex::iMultiFab* dotMask = m_WarpX->getFieldDotMaskPointer(m_scalar_type,lev,0); + auto rtmp = amrex::MultiFab::Dot( *dotMask, + *m_scalar_vec[lev], 0, + *a_X.getScalarVec()[lev], 0, 1, 0, local); + result += rtmp; + } } amrex::ParallelAllReduce::Sum(result, amrex::ParallelContext::CommunicatorSub()); return result; diff --git a/Source/WarpX.H b/Source/WarpX.H index 4ca85aa1db3..4573808461e 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -539,6 +539,20 @@ public: [[nodiscard]] const amrex::Vector,3>>& getMultiLevelField(warpx::fields::FieldType field_type) const; + /** + * \brief + * Get pointer to the amrex::MultiFab containing the dotMask for the specified field + */ + [[nodiscard]] const amrex::iMultiFab* + getFieldDotMaskPointer (warpx::fields::FieldType field_type, int lev, int dir) const; + + /** + * \brief + * Set the dotMask container + */ + void SetDotMask( std::unique_ptr& field_dotMask, + warpx::fields::FieldType field_type, int lev, int dir ) const; + [[nodiscard]] bool DoPML () const {return do_pml;} [[nodiscard]] bool DoFluidSpecies () const {return do_fluid_species;} @@ -1512,6 +1526,13 @@ private: amrex::Vector, 3 > > Efield_avg_fp; amrex::Vector, 3 > > Bfield_avg_fp; + // Masks for computing dot product and global moments of fields when using grids that + // have shared locations across different ranks (e.g., a Yee grid) + mutable amrex::Vector,3 > > Efield_dotMask; + mutable amrex::Vector,3 > > Bfield_dotMask; + mutable amrex::Vector,3 > > Afield_dotMask; + mutable amrex::Vector< std::unique_ptr > phi_dotMask; + // Memory buffers for computing magnetostatic fields // Vector Potential A and previous step. 
Time buffer needed for computing dA/dt to first order amrex::Vector, 3 > > vector_potential_fp_nodal; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 220ee667e37..02f85422d12 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -327,6 +327,11 @@ WarpX::WarpX () Efield_fp.resize(nlevs_max); Bfield_fp.resize(nlevs_max); + Efield_dotMask.resize(nlevs_max); + Bfield_dotMask.resize(nlevs_max); + Afield_dotMask.resize(nlevs_max); + phi_dotMask.resize(nlevs_max); + // Only allocate vector potential arrays when using the Magnetostatic Solver if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) { @@ -2095,6 +2100,10 @@ WarpX::ClearLevel (int lev) Efield_fp [lev][i].reset(); Bfield_fp [lev][i].reset(); + Efield_dotMask [lev][i].reset(); + Bfield_dotMask [lev][i].reset(); + Afield_dotMask [lev][i].reset(); + current_store[lev][i].reset(); if (do_current_centering) @@ -2141,6 +2150,8 @@ WarpX::ClearLevel (int lev) G_cp [lev].reset(); rho_cp[lev].reset(); + phi_dotMask[lev].reset(); + #ifdef WARPX_USE_FFT if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { spectral_solver_fp[lev].reset(); @@ -3670,3 +3681,42 @@ WarpX::getMultiLevelField(warpx::fields::FieldType field_type) const return Efield_fp; } } + +const amrex::iMultiFab* +WarpX::getFieldDotMaskPointer ( FieldType field_type, int lev, int dir ) const +{ + switch(field_type) + { + case FieldType::Efield_fp : + SetDotMask( Efield_dotMask[lev][dir], field_type, lev, dir ); + return Efield_dotMask[lev][dir].get(); + case FieldType::Bfield_fp : + SetDotMask( Bfield_dotMask[lev][dir], field_type, lev, dir ); + return Bfield_dotMask[lev][dir].get(); + case FieldType::vector_potential_fp : + SetDotMask( Afield_dotMask[lev][dir], field_type, lev, dir ); + return Afield_dotMask[lev][dir].get(); + case FieldType::phi_fp : + SetDotMask( phi_dotMask[lev], field_type, lev, 0 ); + return phi_dotMask[lev].get(); + default: + WARPX_ABORT_WITH_MESSAGE("Invalid field type for dotMask"); + return Efield_dotMask[lev][dir].get(); + } +} + +void WarpX::SetDotMask( std::unique_ptr& field_dotMask, + FieldType field_type, int lev, int dir ) const +{ + // Define the dot mask for this field_type needed to properly compute dotProduct() + // for field values that have shared locations on different MPI ranks + if (field_dotMask != nullptr) { return; } + + const amrex::MultiFab* this_field = getFieldPointer(field_type,lev,dir); + const amrex::BoxArray& this_ba = this_field->boxArray(); + const amrex::MultiFab tmp( this_ba, this_field->DistributionMap(), + 1, 0, amrex::MFInfo().SetAlloc(false) ); + const amrex::Periodicity& period = Geom(lev).periodicity(); + field_dotMask = tmp.OwnerMask(period); + +} From 452ae55a698a3b422018b6d81167e866cc850d9e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 02:09:44 +0200 Subject: [PATCH 03/91] [pre-commit.ci] pre-commit autoupdate (#5202) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.2 → v0.6.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.2...v0.6.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cd329b699f1..d35edbedc07 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 
+69,7 @@ repos:
 # Python: Ruff linter & formatter
 # https://docs.astral.sh/ruff/
 - repo: https://github.com/astral-sh/ruff-pre-commit
-  rev: v0.6.2
+  rev: v0.6.3
   hooks:
     # Run the linter
     - id: ruff

From 2124a2377332c82f77b658673ddd2003a5e48413 Mon Sep 17 00:00:00 2001
From: David Grote
Date: Tue, 3 Sep 2024 10:35:06 -0700
Subject: [PATCH 04/91] Set default for m_current_injection_position (#5195)

---
 Source/Particles/WarpXParticleContainer.H | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Source/Particles/WarpXParticleContainer.H b/Source/Particles/WarpXParticleContainer.H
index b44cb33d66b..7e882c151e8 100644
--- a/Source/Particles/WarpXParticleContainer.H
+++ b/Source/Particles/WarpXParticleContainer.H
@@ -354,7 +354,7 @@ public:
 amrex::Vector m_E_external_particle;

 //! Current injection position
- amrex::Real m_current_injection_position;
+ amrex::Real m_current_injection_position = 0.;

 // split along diagonals (0) or axes (1)
 int split_type = 0;

From 8c4f1d47c5322adf1fa31657297a0c3f2aaed3ea Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Tue, 3 Sep 2024 11:42:55 -0700
Subject: [PATCH 05/91] Doc: Tioga (LLNL) (#5003)

---
 Docs/source/install/hpc.rst                        |   1 +
 Docs/source/install/hpc/tioga.rst                  | 210 ++++++++++++++++
 .../tioga-llnl/install_mi300a_dependencies.sh      | 234 ++++++++++++++++++
 .../machines/tioga-llnl/install_mi300a_ml.sh       |  42 ++++
 Tools/machines/tioga-llnl/tioga_mi300a.sbatch      |  44 ++++
 .../tioga_mi300a_warpx.profile.example             |  69 ++++++
 6 files changed, 600 insertions(+)
 create mode 100644 Docs/source/install/hpc/tioga.rst
 create mode 100644 Tools/machines/tioga-llnl/install_mi300a_dependencies.sh
 create mode 100644 Tools/machines/tioga-llnl/install_mi300a_ml.sh
 create mode 100644 Tools/machines/tioga-llnl/tioga_mi300a.sbatch
 create mode 100644 Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example

diff --git a/Docs/source/install/hpc.rst b/Docs/source/install/hpc.rst
index f2ab3947094..af4c0fe3e61 100644
--- a/Docs/source/install/hpc.rst
+++ b/Docs/source/install/hpc.rst
@@ -52,6 +52,7 @@ This section documents quick-start guides for a selection of supercomputers that
    hpc/quartz
    hpc/summit
    hpc/taurus
+   hpc/tioga

 .. tip::

diff --git a/Docs/source/install/hpc/tioga.rst b/Docs/source/install/hpc/tioga.rst
new file mode 100644
index 00000000000..2599d6d8ac4
--- /dev/null
+++ b/Docs/source/install/hpc/tioga.rst
@@ -0,0 +1,210 @@
+.. _building-tioga:
+
+Tioga (LLNL)
+============
+
+The `Tioga AMD GPU cluster `__ is located at LLNL.
+It is equipped with two nodes, each with four AMD MI300A APUs.
+Tioga is an LLNL El Capitan Early Access System.
+
+There are also "conventional" MI250X GPUs on Tioga nodes, which we have not yet documented.
+El Capitan will use MI300A GPUs.
+
+
+Introduction
+------------
+
+If you are new to this system, **please see the following resources**:
+
+* `LLNL user account `__ (login required)
+* `Tioga user guide `__
+* Batch system: `Flux with Slurm Wrappers `__
+* `Jupyter service `__ (`documentation `__, login required)
+* `Production directories `__:
+
+  * ``/p/lustre1/${USER}``: personal directory on the parallel filesystem (also: ``lustre2``)
+  * Note that the ``$HOME`` directory and the ``/usr/workspace/${USER}`` space are NFS mounted and *not* suitable for production quality data generation.
+
+
+Login
+-----
+
+.. code-block:: bash
+
+   ssh tioga.llnl.gov
+
+To use the available MI300A nodes (currently two), request one via
+
+.. code-block:: bash
+
+   salloc -N 1 -p mi300a -t 30:0
+
+
+..
_building-tioga-preparation: + +Preparation +----------- + +Use the following commands to download the WarpX source code: + +.. code-block:: bash + + git clone https://github.com/ECP-WarpX/WarpX.git /p/lustre1/${USER}/tioga/src/warpx + +We use system software modules, add environment hints and further dependencies via the file ``$HOME/tioga_mi300a_warpx.profile``. +Create it now: + +.. code-block:: bash + + cp /p/lustre1/${USER}/tioga/src/warpx/Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example $HOME/tioga_mi300a_warpx.profile + +.. dropdown:: Script Details + :color: light + :icon: info + :animate: fade-in-slide-down + + .. literalinclude:: ../../../../Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example + :language: bash + +Edit the 2nd line of this script, which sets the ``export proj=""`` variable. +**Currently, this is unused and can be kept empty.** +Once project allocation becomes required, e.g., if you are member of the project ``abcde``, then run ``vi $HOME/tioga_mi300a_warpx.profile``. +Enter the edit mode by typing ``i`` and edit line 2 to read: + +.. code-block:: bash + + export proj="abcde" + +Exit the ``vi`` editor with ``Esc`` and then type ``:wq`` (write & quit). + +.. important:: + + Now, and as the first step on future logins to Tioga, activate these environment settings: + + .. code-block:: bash + + source $HOME/tioga_mi300a_warpx.profile + +Finally, since Tioga does not yet provide software modules for some of our dependencies, install them once: + + + .. code-block:: bash + + bash /p/lustre1/${USER}/tioga/src/warpx/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh + source /p/lustre1/${USER}/tioga/warpx/mi300a/gpu/venvs/warpx-trioga-mi300a/bin/activate + + .. dropdown:: Script Details + :color: light + :icon: info + :animate: fade-in-slide-down + + .. literalinclude:: ../../../../Tools/machines/tioga-llnl/install_mi300a_dependencies.sh + :language: bash + + .. dropdown:: AI/ML Dependencies (Optional) + :animate: fade-in-slide-down + + If you plan to run AI/ML workflows depending on PyTorch et al., run the next step as well. + This will take a while and should be skipped if not needed. + + .. code-block:: bash + + bash /p/lustre1/${USER}/tioga/src/warpx/Tools/machines/tioga-llnl/install_mi300a_ml.sh + + .. dropdown:: Script Details + :color: light + :icon: info + :animate: fade-in-slide-down + + .. literalinclude:: ../../../../Tools/machines/tioga-llnl/install_mi300a_ml.sh + :language: bash + + +.. _building-tioga-compilation: + +Compilation +----------- + +Use the following :ref:`cmake commands ` to compile the application executable: + +.. code-block:: bash + + cd /p/lustre1/${USER}/tioga/src/warpx + + cmake --fresh -S . -B build_tioga -DWarpX_COMPUTE=HIP -DWarpX_FFT=ON -DWarpX_DIMS="1;2;RZ;3" + cmake --build build_tioga -j 24 + +The WarpX application executables are now in ``/p/lustre1/${USER}/tioga/src/warpx/build_tioga/bin/``. +Additionally, the following commands will install WarpX as a Python module: + +.. code-block:: bash + + cmake --fresh -S . -B build_tioga_py -DWarpX_COMPUTE=HIP -DWarpX_FFT=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" + cmake --build build_tioga_py -j 24 --target pip_install + +Now, you can :ref:`submit tioga compute jobs ` for WarpX :ref:`Python (PICMI) scripts ` (:ref:`example scripts `). +Or, you can use the WarpX executables to submit tioga jobs (:ref:`example inputs `). 
+For executables, you can reference their location in your :ref:`job script ` or copy them to a location in ``$PROJWORK/$proj/``. + + +.. _building-tioga-update: + +Update WarpX & Dependencies +--------------------------- + +If you already installed WarpX in the past and want to update it, start by getting the latest source code: + +.. code-block:: bash + + cd /p/lustre1/${USER}/tioga/src/warpx + + # read the output of this command - does it look ok? + git status + + # get the latest WarpX source code + git fetch + git pull + + # read the output of these commands - do they look ok? + git status + git log # press q to exit + +And, if needed, + +- :ref:`update the tioga_mi300a_warpx.profile file `, +- log out and into the system, activate the now updated environment profile as usual, +- :ref:`execute the dependency install scripts `. + +As a last step :ref:`rebuild WarpX `. + + +.. _running-cpp-tioga: + +Running +------- + +.. _running-cpp-tioga-MI300A-APUs: + +MI300A APUs (128GB) +^^^^^^^^^^^^^^^^^^^ + +The batch script below can be used to run a WarpX simulation on 1 node with 4 APUs on the supercomputer Tioga at LLNL. +Replace descriptions between chevrons ``<>`` by relevant values, for instance ```` could be ``plasma_mirror_inputs``. +WarpX runs with one MPI rank per GPU. + +Note that we append these non-default runtime options: + +* ``amrex.use_gpu_aware_mpi=1``: make use of fast APU to APU MPI communications +* ``amrex.the_arena_init_size=1``: avoid overallocating memory that is *shared* on APUs between CPU & GPU + +.. literalinclude:: ../../../../Tools/machines/tioga-llnl/tioga_mi300a.sbatch + :language: bash + :caption: You can copy this file from ``Tools/machines/tioga-llnl/tioga_mi300a.sbatch``. + +To run a simulation, copy the lines above to a file ``tioga_mi300a.sbatch`` and run + +.. code-block:: bash + + sbatch tioga_mi300a.sbatch + +to submit the job. diff --git a/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh b/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh new file mode 100644 index 00000000000..7e002838e4a --- /dev/null +++ b/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh @@ -0,0 +1,234 @@ +#!/bin/bash +# +# Copyright 2024 The WarpX Community +# +# This file is part of WarpX. +# +# Author: Axel Huebl +# License: BSD-3-Clause-LBNL + +# Exit on first error encountered ############################################# +# +set -eu -o pipefail + + +# Check: ###################################################################### +# +# Was tioga_mi300a_warpx.profile sourced and configured correctly? +# early access: not yet used! +#if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your tioga_mi300a_warpx.profile file! 
Please edit its line 2 to continue!"; exit 1; fi + + +# Remove old dependencies ##################################################### +# +SRC_DIR="/p/lustre1/${USER}/tioga/src" +SW_DIR="/p/lustre1/${USER}/tioga/warpx/mi300a" +rm -rf ${SW_DIR} +mkdir -p ${SW_DIR} + +# remove common user mistakes in python, located in .local instead of a venv +python3 -m pip uninstall -qq -y pywarpx +python3 -m pip uninstall -qq -y warpx +python3 -m pip uninstall -qqq -y mpi4py 2>/dev/null || true + + +# General extra dependencies ################################################## +# + +# tmpfs build directory: avoids issues often seen with $HOME and is faster +build_dir=$(mktemp -d) +build_procs=24 + +# C-Blosc2 (I/O compression) +if [ -d ${SRC_DIR}/c-blosc2 ] +then + cd ${SRC_DIR}/c-blosc2 + git fetch --prune + git checkout v2.15.1 + cd - +else + git clone -b v2.15.1 https://github.com/Blosc/c-blosc2.git ${SRC_DIR}/c-blosc2 +fi +cmake \ + --fresh \ + -S ${SRC_DIR}/c-blosc2 \ + -B ${build_dir}/c-blosc2-build \ + -DBUILD_TESTS=OFF \ + -DBUILD_BENCHMARKS=OFF \ + -DBUILD_EXAMPLES=OFF \ + -DBUILD_FUZZERS=OFF \ + -DBUILD_STATIC=OFF \ + -DDEACTIVATE_AVX2=OFF \ + -DDEACTIVATE_AVX512=OFF \ + -DWITH_SANITIZER=OFF \ + -DCMAKE_INSTALL_PREFIX=${SW_DIR}/c-blosc-2.15.1 +cmake \ + --build ${build_dir}/c-blosc2-build \ + --target install \ + --parallel ${build_procs} +rm -rf ${build_dir}/c-blosc2-build + +# ADIOS2 +if [ -d ${SRC_DIR}/adios2 ] +then + cd ${SRC_DIR}/adios2 + git fetch --prune + git checkout v2.10.1 + cd - +else + git clone -b v2.10.1 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 +fi +cmake \ + --fresh \ + -S ${SRC_DIR}/adios2 \ + -B ${build_dir}/adios2-build \ + -DADIOS2_USE_Blosc2=ON \ + -DADIOS2_USE_Campaign=OFF \ + -DADIOS2_USE_Fortran=OFF \ + -DADIOS2_USE_Python=OFF \ + -DADIOS2_USE_ZeroMQ=OFF \ + -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.1 +cmake \ + --build ${build_dir}/adios2-build \ + --target install \ + --parallel ${build_procs} +rm -rf ${build_dir}/adios2-build + +# BLAS++ (for PSATD+RZ) +if [ -d ${SRC_DIR}/blaspp ] +then + cd ${SRC_DIR}/blaspp + git fetch --prune + git checkout v2024.05.31 + cd - +else + git clone -b v2024.05.31 https://github.com/icl-utk-edu/blaspp.git ${SRC_DIR}/blaspp +fi +cmake \ + --fresh \ + -S ${SRC_DIR}/blaspp \ + -B ${build_dir}/blaspp-tioga-mi300a-build \ + -Duse_openmp=OFF \ + -Dgpu_backend=hip \ + -DCMAKE_CXX_STANDARD=17 \ + -DCMAKE_INSTALL_PREFIX=${SW_DIR}/blaspp-2024.05.31 +cmake \ + --build ${build_dir}/blaspp-tioga-mi300a-build \ + --target install \ + --parallel ${build_procs} +rm -rf ${build_dir}/blaspp-tioga-mi300a-build + +# LAPACK++ (for PSATD+RZ) +if [ -d ${SRC_DIR}/lapackpp ] +then + cd ${SRC_DIR}/lapackpp + git fetch --prune + git checkout v2024.05.31 + cd - +else + git clone -b v2024.05.31 https://github.com/icl-utk-edu/lapackpp.git ${SRC_DIR}/lapackpp +fi +cmake \ + --fresh \ + -S ${SRC_DIR}/lapackpp \ + -B ${build_dir}/lapackpp-tioga-mi300a-build \ + -DCMAKE_CXX_STANDARD=17 \ + -Dgpu_backend=hip \ + -Dbuild_tests=OFF \ + -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON \ + -DCMAKE_INSTALL_PREFIX=${SW_DIR}/lapackpp-2024.05.31 +cmake \ + --build ${build_dir}/lapackpp-tioga-mi300a-build \ + --target install \ + --parallel ${build_procs} +rm -rf ${build_dir}/lapackpp-tioga-mi300a-build + +# heFFTe +if [ -d ${SRC_DIR}/heffte ] +then + cd ${SRC_DIR}/heffte + git fetch --prune + git checkout v2.4.0 + cd - +else + git clone -b v2.4.0 https://github.com/icl-utk-edu/heffte.git ${SRC_DIR}/heffte +fi +cmake \ + --fresh \ + -S ${SRC_DIR}/heffte \ + -B 
${build_dir}/heffte-build \ + -DBUILD_SHARED_LIBS=ON \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_STANDARD=17 \ + -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON \ + -DCMAKE_INSTALL_PREFIX=${SW_DIR}/heffte-2.4.0 \ + -DHeffte_DISABLE_GPU_AWARE_MPI=OFF \ + -DHeffte_ENABLE_AVX=OFF \ + -DHeffte_ENABLE_AVX512=OFF \ + -DHeffte_ENABLE_FFTW=OFF \ + -DHeffte_ENABLE_CUDA=OFF \ + -DHeffte_ENABLE_ROCM=ON \ + -DHeffte_ENABLE_ONEAPI=OFF \ + -DHeffte_ENABLE_MKL=OFF \ + -DHeffte_ENABLE_DOXYGEN=OFF \ + -DHeffte_SEQUENTIAL_TESTING=OFF \ + -DHeffte_ENABLE_TESTING=OFF \ + -DHeffte_ENABLE_TRACING=OFF \ + -DHeffte_ENABLE_PYTHON=OFF \ + -DHeffte_ENABLE_FORTRAN=OFF \ + -DHeffte_ENABLE_SWIG=OFF \ + -DHeffte_ENABLE_MAGMA=OFF +cmake \ + --build ${build_dir}/heffte-build \ + --target install \ + --parallel ${build_procs} +rm -rf ${build_dir}/heffte-build + + +# Python ###################################################################### +# +# sometimes, the Lassen PIP Index is down +export PIP_EXTRA_INDEX_URL="https://pypi.org/simple" + +python3 -m pip install --upgrade pip +python3 -m pip install --upgrade virtualenv +# python3 -m pip cache purge || true # Cache disabled on system +rm -rf ${SW_DIR}/venvs/warpx-trioga-mi300a +python3 -m venv ${SW_DIR}/venvs/warpx-trioga-mi300a +source ${SW_DIR}/venvs/warpx-trioga-mi300a/bin/activate +python3 -m pip install --upgrade pip +python3 -m pip install --upgrade build +python3 -m pip install --upgrade packaging +python3 -m pip install --upgrade wheel +python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade cython +python3 -m pip install --upgrade numpy +python3 -m pip install --upgrade pandas +python3 -m pip install --upgrade scipy +python3 -m pip install --upgrade mpi4py --no-cache-dir --no-build-isolation --no-binary mpi4py +python3 -m pip install --upgrade openpmd-api +python3 -m pip install --upgrade openpmd-viewer +python3 -m pip install --upgrade matplotlib +python3 -m pip install --upgrade yt +# install or update WarpX dependencies such as picmistandard +python3 -m pip install --upgrade -r ${SRC_DIR}/warpx/requirements.txt +# cupy for ROCm +# https://docs.cupy.dev/en/stable/install.html#building-cupy-for-rocm-from-source +# https://docs.cupy.dev/en/stable/install.html#using-cupy-on-amd-gpu-experimental +# https://github.com/cupy/cupy/issues/7830 +# https://github.com/cupy/cupy/pull/8457 +# https://github.com/cupy/cupy/pull/8319 +#python3 -m pip install --upgrade "cython<3" +#HIPCC=${CXX} \ +#CXXFLAGS="-I${ROCM_PATH}/include/hipblas -I${ROCM_PATH}/include/hipsparse -I${ROCM_PATH}/include/hipfft -I${ROCM_PATH}/include/rocsolver -I${ROCM_PATH}/include/rccl" \ +#CUPY_INSTALL_USE_HIP=1 \ +#ROCM_HOME=${ROCM_PATH} \ +#HCC_AMDGPU_TARGET=${AMREX_AMD_ARCH} \ +# python3 -m pip install -v cupy +#python3 -m pip install --upgrade "cython>=3" + + +# for ML dependencies, see install_mi300a_ml.sh + +# remove build temporary directory +rm -rf ${build_dir} diff --git a/Tools/machines/tioga-llnl/install_mi300a_ml.sh b/Tools/machines/tioga-llnl/install_mi300a_ml.sh new file mode 100644 index 00000000000..178deed9975 --- /dev/null +++ b/Tools/machines/tioga-llnl/install_mi300a_ml.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# +# Copyright 2024 The WarpX Community +# +# This file is part of WarpX. 
+#
+# Author: Axel Huebl
+# License: BSD-3-Clause-LBNL

+# Exit on first error encountered #############################################
+#
+set -eu -o pipefail
+
+
+# Check: ######################################################################
+#
+#   Was tioga_mi300a_warpx.profile sourced and configured correctly?
+# early access: not yet used!
+#if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your tioga_mi300a_warpx.profile file! Please edit its line 2 to continue!"; exit 1; fi
+
+
+# Remove old dependencies #####################################################
+#
+SRC_DIR="/p/lustre1/${USER}/tioga/src"
+SW_DIR="/p/lustre1/${USER}/tioga/warpx/mi300a"
+
+# remove common user mistakes in python, located in .local instead of a venv
+python3 -m pip uninstall -qqq -y torch 2>/dev/null || true
+
+
+# Python ML ###################################################################
+#
+# for basic python dependencies, see install_mi300a_dependencies.sh
+
+# sometimes, the system's default PIP index is down
+export PIP_EXTRA_INDEX_URL="https://pypi.org/simple"
+
+source ${SW_DIR}/venvs/warpx-tioga-mi300a/bin/activate
+
+python3 -m pip install --upgrade torch torchvision --index-url https://download.pytorch.org/whl/rocm6.1
+python3 -m pip install --upgrade scikit-learn
+python3 -m pip install --upgrade "optimas[all]"
diff --git a/Tools/machines/tioga-llnl/tioga_mi300a.sbatch b/Tools/machines/tioga-llnl/tioga_mi300a.sbatch
new file mode 100644
index 00000000000..0e29e24adcb
--- /dev/null
+++ b/Tools/machines/tioga-llnl/tioga_mi300a.sbatch
@@ -0,0 +1,44 @@
+#!/bin/bash -l
+
+# Copyright 2024 The WarpX Community
+#
+# This file is part of WarpX.
+#
+# Authors: Axel Huebl, Joshua David Ludwig
+# License: BSD-3-Clause-LBNL
+
+#SBATCH -t 00:30:00
+#SBATCH -N 1
+#SBATCH -J WarpX
+#S BATCH -A # project name not needed yet
+#SBATCH -p mi300a
+#SBATCH --gpu-bind=none
+#SBATCH --ntasks-per-node=4
+#SBATCH --gpus-per-node=4
+#SBATCH -o WarpX.o%j
+#SBATCH -e WarpX.e%j
+
+# executable & inputs file or python interpreter & PICMI script here
+EXE=./warpx
+INPUTS=inputs
+
+# pin to closest NIC to GPU
+export MPICH_OFI_NIC_POLICY=GPU
+
+# threads for OpenMP and threaded compressors per MPI rank
+# note: 16 avoids hyperthreading (32 virtual cores, 16 physical)
+export SRUN_CPUS_PER_TASK=16
+export OMP_NUM_THREADS=${SRUN_CPUS_PER_TASK}
+
+# GPU-aware MPI optimizations
+GPU_AWARE_MPI="amrex.use_gpu_aware_mpi=1"
+
+# APUs share memory with the host:
+# Do NOT pre-allocate a large heap in AMReX
+APU_SHARED_MEMORY="amrex.the_arena_init_size=1"
+
+# MPI parallel processes
+srun \
+    ${EXE} ${INPUTS} \
+    ${GPU_AWARE_MPI} ${APU_SHARED_MEMORY} \
+    > output.txt
diff --git a/Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example b/Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example
new file mode 100644
index 00000000000..e3da37c5522
--- /dev/null
+++ b/Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example
@@ -0,0 +1,69 @@
+# please set your project account
+export proj=""  # change me!
+
+# remembers the location of this script
+export MY_PROFILE=$(cd $(dirname $BASH_SOURCE) && pwd)"/"$(basename $BASH_SOURCE)
+# early access: not yet used
+# if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your $MY_PROFILE file! Please edit its line 2 to continue!"; return; fi
+
+# required dependencies
+module purge
+module load PrgEnv-cray-amd/8.5.0
+# module load rocmcc/6.1.2-cce-18.0.0-magic
+module load craype-x86-genoa   # CPU
+module load craype-accel-amd-gfx942   # GPU
+module load cmake/3.29.2
+module load cray-mpich
+module load cray-libsci
+module load rocm/6.1.2
+
+# optional: faster builds
+# ccache is system provided
+module load ninja/1.10.2
+
+# optional: for QED support with detailed tables
+# TODO: no Boost module found
+
+# optional: for openPMD and PSATD+RZ support
+SW_DIR="/p/lustre1/${USER}/tioga/warpx/mi300a"
+module load cray-hdf5-parallel/1.12.2.11
+export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-2.15.1:$CMAKE_PREFIX_PATH
+export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.1:$CMAKE_PREFIX_PATH
+export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:$CMAKE_PREFIX_PATH
+export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH
+export CMAKE_PREFIX_PATH=${SW_DIR}/heffte-2.4.0:$CMAKE_PREFIX_PATH
+
+export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-2.15.1/lib64:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.1/lib64:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH=${SW_DIR}/heffte-2.4.0/lib64:$LD_LIBRARY_PATH
+
+export PATH=${SW_DIR}/adios2-2.10.1/bin:${PATH}
+
+# python
+module load cray-python/3.11.7
+
+if [ -d "${SW_DIR}/venvs/warpx-tioga-mi300a" ]
+then
+    source ${SW_DIR}/venvs/warpx-tioga-mi300a/bin/activate
+fi
+
+# an alias to request an interactive batch node for one hour
+#   for parallel execution, start on the batch node: srun <command>
+alias getNode="salloc -N 1 -p mi300a -t 1:00:00"
+# an alias to run a command on a batch node for up to 30min
+#   usage: runNode <command>
+alias runNode="srun -N 1 --ntasks-per-node=4 -t 0:30:00 -p mi300a"
+
+# GPU-aware MPI
+export MPICH_GPU_SUPPORT_ENABLED=1
+
+# optimize ROCm/HIP compilation for MI300A
+export AMREX_AMD_ARCH=gfx942
+
+# compiler environment hints
+export CC=$(which cc)
+export CXX=$(which CC)
+export FC=$(which ftn)
+export HIPCXX=${CXX}

From 80bef35bf142098c960f8e8bcd6248f4d78492ee Mon Sep 17 00:00:00 2001
From: David Grote
Date: Tue, 3 Sep 2024 13:33:21 -0700
Subject: [PATCH 06/91] Set t_old (#5196)

---
 Source/Evolve/WarpXEvolve.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp
index 32f7a493916..5a2dbdf2f30 100644
--- a/Source/Evolve/WarpXEvolve.cpp
+++ b/Source/Evolve/WarpXEvolve.cpp
@@ -206,6 +206,7 @@ WarpX::Evolve (int numsteps)
 
         // sync up time
         for (int i = 0; i <= max_level; ++i) {
+            t_old[i] = t_new[i];
             t_new[i] = cur_time;
         }
         multi_diags->FilterComputePackFlush( step, false, true );

From ce7f5cc28cdfa0710f4199912caef4c4d9d2f161 Mon Sep 17 00:00:00 2001
From: Remi Lehe
Date: Tue, 3 Sep 2024 19:36:36 -0700
Subject: [PATCH 07/91] Add recent papers in documentation (#5207)

* Add recent papers in documentation

* Add latest numerical paper

* Update Docs/source/acknowledge_us.rst

Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>

---------

Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
---
 Docs/source/acknowledge_us.rst |  5 +++++
 Docs/source/highlights.rst     | 31 +++++++++++++++++++++++++++----
 2 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/Docs/source/acknowledge_us.rst b/Docs/source/acknowledge_us.rst
index b01ebf00a39..8c9b8dcf15c 100644
--- a/Docs/source/acknowledge_us.rst
+++ b/Docs/source/acknowledge_us.rst
@@ -53,6 +53,11 @@ Prior WarpX references
 If your project uses a specific algorithm or component, please consider citing the respective publications in addition.
 
+- Shapoval O, Zoni E, Lehe R, Thévenet M, and Vay J-L.
+  **Pseudospectral particle-in-cell formulation with arbitrary charge and current-density time dependencies for the modeling of relativistic plasmas**.
+  Physical Review E **110**, 025206, 2024.
+  `DOI:10.1103/PhysRevE.110.025206 <https://doi.org/10.1103/PhysRevE.110.025206>`__
+
 - Sandberg R T, Lehe R, Mitchell C E, Garten M, Myers A, Qiang J, Vay J-L and Huebl A.
   **Synthesizing Particle-in-Cell Simulations Through Learning and GPU Computing for Hybrid Particle Accelerator Beamlines**.
   Proc. of Platform for Advanced Scientific Computing (PASC'24), *PASC24 Best Paper Award*, 2024.
diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst
index 108f685a551..66570644bdc 100644
--- a/Docs/source/highlights.rst
+++ b/Docs/source/highlights.rst
@@ -85,6 +85,11 @@ Laser-Plasma Interaction
 
 Scientific works in laser-ion acceleration and laser-matter interaction.
 
+#. Garten M, Bulanov S S, Hakimi S, Obst-Huebl L, Mitchell C E, Schroeder C B, Esarey E, Geddes C G R, Vay J-L, Huebl A.
+   **Laser-plasma ion beam booster based on hollow-channel magnetic vortex acceleration**.
+   Physical Review Research **6**, 033148, 2024.
+   `DOI:10.1103/PhysRevResearch.6.033148 <https://doi.org/10.1103/PhysRevResearch.6.033148>`__
+
 #. Knight B, Gautam C, Stoner C, Egner B, Smith J, Orban C, Manfredi J, Frische K, Dexter M, Chowdhury E, Patnaik A (2023).
    **Detailed Characterization of a kHz-rate Laser-Driven Fusion at a Thin Liquid Sheet with a Neutron Detection Suite**.
    High Power Laser Science and Engineering, 1-13, 2023.
@@ -141,8 +146,8 @@ Scientific works in astrophysical plasma modeling.
 
 #. Klion H, Jambunathan R, Rowan ME, Yang E, Willcox D, Vay J-L, Lehe R, Myers A, Huebl A, Zhang W.
    **Particle-in-Cell simulations of relativistic magnetic reconnection with advanced Maxwell solver algorithms**.
-   arXiv pre-print, 2023.
-   `DOI:10.48550/arXiv.2304.10566 <https://doi.org/10.48550/arXiv.2304.10566>`__
+   The Astrophysical Journal **952** 8, 2023.
+   `DOI:10.3847/1538-4357/acd75b <https://doi.org/10.3847/1538-4357/acd75b>`__
 
 
 Microelectronics
@@ -173,8 +178,13 @@ Scientific works in High-Performance Computing, applied mathematics and numerics
 
 Please see :ref:`this section `.
 
-Nuclear Fusion - Magnetically Confined Plasmas
-**********************************************
+Nuclear Fusion and Plasma Confinement
+*************************************
+
+#. Affolter M., Thompson R., Hepner S., Hayes E. C., Podolsky V., Borghei M., Carlsson J., Gargone A., Merthe D., McKee E., Langtry R.,
+   **The Orbitron: A crossed-field device for co-confinement of high energy ions and electrons**.
+   AIP Advances **14**, 085025, 2024.
+   `DOI:10.1063/5.0201470 <https://doi.org/10.1063/5.0201470>`__
 
 #. Nicks B. S., Putvinski S. and Tajima T.
    **Stabilization of the Alfvén-ion cyclotron instability through short plasmas: Fully kinetic simulations in a high-beta regime**.
@@ -185,3 +195,16 @@ Nuclear Fusion - Magnetically Confined Plasmas
    **Accelerated kinetic model for global macro stability studies of high-beta fusion reactors**.
    Physics of Plasmas **30**, 122508, 2023.
    `DOI:10.1063/5.0178288 <https://doi.org/10.1063/5.0178288>`__
+
+Plasma Thrusters and Electric Propulsion
+****************************************
+
+#. Xie L., Luo X., Zhou Z. and Zhao Y.,
+   **Effect of plasma initialization on 3D PIC simulation of Hall thruster azimuthal instability**.
+   Physica Scripta, **99**, 095602, 2024.
+   `DOI:10.1088/1402-4896/ad69e5 <https://doi.org/10.1088/1402-4896/ad69e5>`__
+
+#. Marks T. A. and Gorodetsky A. A.,
+   **Hall thruster simulations in WarpX**.
+   38th International Electric Propulsion Conference, Toulouse, France, 2024.
+   `DOI:10.7302/234915 <https://doi.org/10.7302/234915>`__

From 0eb948a16cba961110c352932487969475c52800 Mon Sep 17 00:00:00 2001
From: Brian Naranjo
Date: Wed, 4 Sep 2024 11:33:30 -0700
Subject: [PATCH 08/91] Fix directional comparisons in assertions (#5201)

The previous checks summed signed component differences, which can cancel;
summing the squared differences instead tests the actual distance between
the two direction vectors.
---
 Source/Particles/LaserParticleContainer.cpp | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp
index bd266ab368a..e9509a1ef40 100644
--- a/Source/Particles/LaserParticleContainer.cpp
+++ b/Source/Particles/LaserParticleContainer.cpp
@@ -181,10 +181,11 @@ LaserParticleContainer::LaserParticleContainer (AmrCore* amr_core, int ispecies,
 
     if (WarpX::gamma_boost > 1.) {
         // Check that the laser direction is equal to the boost direction
-        AMREX_ALWAYS_ASSERT_WITH_MESSAGE( m_nvec[0]*WarpX::boost_direction[0]
-                                        + m_nvec[1]*WarpX::boost_direction[1]
-                                        + m_nvec[2]*WarpX::boost_direction[2] - 1. < 1.e-12,
-                                          "The Lorentz boost should be in the same direction as the laser propagation");
+        AMREX_ALWAYS_ASSERT_WITH_MESSAGE(
+            (m_nvec[0]-WarpX::boost_direction[0])*(m_nvec[0]-WarpX::boost_direction[0])
+          + (m_nvec[1]-WarpX::boost_direction[1])*(m_nvec[1]-WarpX::boost_direction[1])
+          + (m_nvec[2]-WarpX::boost_direction[2])*(m_nvec[2]-WarpX::boost_direction[2]) < 1.e-12,
+            "The Lorentz boost should be in the same direction as the laser propagation");
         // Get the position of the plane, along the boost direction, in the lab frame
         // and convert the position of the antenna to the boosted frame
         m_Z0_lab = m_nvec[0]*m_position[0] + m_nvec[1]*m_position[1] + m_nvec[2]*m_position[2];
@@ -246,8 +247,10 @@ LaserParticleContainer::LaserParticleContainer (AmrCore* amr_core, int ispecies,
         windir[dir] = 1.0;
 #endif
         AMREX_ALWAYS_ASSERT_WITH_MESSAGE(
-            (m_nvec[0]-windir[0]) + (m_nvec[1]-windir[1]) + (m_nvec[2]-windir[2])
-            < 1.e-12, "do_continous_injection for laser particle only works"
+
+            (m_nvec[0]-windir[0])*(m_nvec[0]-windir[0])
+          + (m_nvec[1]-windir[1])*(m_nvec[1]-windir[1])
+          + (m_nvec[2]-windir[2])*(m_nvec[2]-windir[2]) < 1.e-12,
+            "do_continous_injection for laser particle only works"
             + " if moving window direction and laser propagation direction are the same");
         if ( WarpX::gamma_boost>1 ){
             AMREX_ALWAYS_ASSERT_WITH_MESSAGE(

From e099a99e29e1c4da36099c410f65b1c11f5bdeaa Mon Sep 17 00:00:00 2001
From: Justin Ray Angus
Date: Wed, 4 Sep 2024 11:34:37 -0700
Subject: [PATCH 09/91] removing unnecessary limit check.
(#5188)

---
 .../Coulomb/UpdateMomentumPerezElastic.H      | 39 ++++++-------------
 1 file changed, 12 insertions(+), 27 deletions(-)

diff --git a/Source/Particles/Collision/BinaryCollision/Coulomb/UpdateMomentumPerezElastic.H b/Source/Particles/Collision/BinaryCollision/Coulomb/UpdateMomentumPerezElastic.H
index 51bac0c0820..f6403071aec 100644
--- a/Source/Particles/Collision/BinaryCollision/Coulomb/UpdateMomentumPerezElastic.H
+++ b/Source/Particles/Collision/BinaryCollision/Coulomb/UpdateMomentumPerezElastic.H
@@ -239,33 +239,18 @@ void UpdateMomentumPerezElastic (
     T_PR const p2fsz = -p1fsz;
 
     // Transform from COM to lab frame
-    T_PR p1fx;    T_PR p2fx;
-    T_PR p1fy;    T_PR p2fy;
-    T_PR p1fz;    T_PR p2fz;
-    if ( vcms > std::numeric_limits<T_PR>::min() )
-    {
-        T_PR const vcDp1fs = vcx*p1fsx + vcy*p1fsy + vcz*p1fsz;
-        T_PR const vcDp2fs = vcx*p2fsx + vcy*p2fsy + vcz*p2fsz;
-        /* factor = (gc-1.0)/vcms; Rewrite to avoid subtraction losing precision when gc is close to 1 */
-        T_PR const factor = gc*gc*inv_c2/(gc+T_PR(1.0));
-        T_PR const factor1 = factor*vcDp1fs + m1*g1s*gc;
-        T_PR const factor2 = factor*vcDp2fs + m2*g2s*gc;
-        p1fx = p1fsx + vcx * factor1;
-        p1fy = p1fsy + vcy * factor1;
-        p1fz = p1fsz + vcz * factor1;
-        p2fx = p2fsx + vcx * factor2;
-        p2fy = p2fsy + vcy * factor2;
-        p2fz = p2fsz + vcz * factor2;
-    }
-    else // If vcms = 0, don't do Lorentz-transform.
-    {
-        p1fx = p1fsx;
-        p1fy = p1fsy;
-        p1fz = p1fsz;
-        p2fx = p2fsx;
-        p2fy = p2fsy;
-        p2fz = p2fsz;
-    }
+    T_PR const vcDp1fs = vcx*p1fsx + vcy*p1fsy + vcz*p1fsz;
+    T_PR const vcDp2fs = vcx*p2fsx + vcy*p2fsy + vcz*p2fsz;
+    /* factor = (gc-1.0)/vcms; Rewrite to avoid subtraction losing precision when gc is close to 1 */
+    T_PR const factor = gc*gc*inv_c2/(gc+T_PR(1.0));
+    T_PR const factor1 = factor*vcDp1fs + m1*g1s*gc;
+    T_PR const factor2 = factor*vcDp2fs + m2*g2s*gc;
+    T_PR const p1fx = p1fsx + vcx * factor1;
+    T_PR const p1fy = p1fsy + vcy * factor1;
+    T_PR const p1fz = p1fsz + vcz * factor1;
+    T_PR const p2fx = p2fsx + vcx * factor2;
+    T_PR const p2fy = p2fsy + vcy * factor2;
+    T_PR const p2fz = p2fsz + vcz * factor2;
 
     // Rejection method
     r = amrex::Random(engine);

From a4fbb137583372d7d632d5317845f7a15fdc162d Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Wed, 4 Sep 2024 15:22:08 -0700
Subject: [PATCH 10/91] Doc: Conda-Forge w/ heFFTe (#4989)

Add heFFTe to the developer conda-forge environment that uses MPI.
---
 Docs/source/install/dependencies.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst
index fadd38699e2..34150e42b5a 100644
--- a/Docs/source/install/dependencies.rst
+++ b/Docs/source/install/dependencies.rst
@@ -81,7 +81,7 @@ Conda (Linux/macOS/Windows)
 
 ..
code-block:: bash - conda create -n warpx-cpu-mpich-dev -c conda-forge blaspp boost ccache cmake compilers git lapackpp "openpmd-api=*=mpi_mpich*" openpmd-viewer python make numpy pandas scipy yt "fftw=*=mpi_mpich*" pkg-config matplotlib mamba mpich mpi4py ninja pip virtualenv + conda create -n warpx-cpu-mpich-dev -c conda-forge blaspp boost ccache cmake compilers git "heffte=*=mpi_mpich*" lapackpp "openpmd-api=*=mpi_mpich*" openpmd-viewer python make numpy pandas scipy yt "fftw=*=mpi_mpich*" pkg-config matplotlib mamba mpich mpi4py ninja pip virtualenv conda activate warpx-cpu-mpich-dev # compile WarpX with -DWarpX_MPI=ON From 6834a0024610caf39021ebcc877b0754f2de3feb Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 4 Sep 2024 17:04:27 -0700 Subject: [PATCH 11/91] CMake: Replace `FetchContent_Populate` (#5179) * CMake: Replace `FetchContent_Populate` In CMake superbuilds, `FetchContent_Populate` is now deprecated. Use `FetchContent_MakeAvailable` instead. * CI: Catalyst Image Update (CMake) Update the Catalyst CI image to `kitware/paraview:ci-catalyst-amrex-warpx-20240828` to pull in a newer CMake version with it. * CI: SENSEI Hack CMake Update Hack into container image. * Doc: CMake 3.24+ - dependencies/requirements - system docs * HPC3: CMake via `pipx` for now Ticket still pending completion. --- .github/workflows/insitu.yml | 8 ++++++-- CMakeLists.txt | 9 +-------- Docs/source/install/dependencies.rst | 2 +- .../frontier_warpx.profile.example | 3 ++- .../hpc3-uci/hpc3_gpu_warpx.profile.example | 2 +- .../hpc3-uci/install_gpu_dependencies.sh | 2 ++ .../lassen_v100_warpx_toss3.profile.example | 2 +- .../ookami-sbu/ookami_warpx.profile.example | 2 +- .../polaris_gpu_warpx.profile.example | 18 +++++++++++------- .../taurus-zih/taurus_warpx.profile.example | 2 +- cmake/dependencies/AMReX.cmake | 17 ++++++----------- cmake/dependencies/PICSAR.cmake | 14 ++++---------- cmake/dependencies/openPMD.cmake | 7 +------ cmake/dependencies/pyAMReX.cmake | 7 +------ cmake/dependencies/pybind11.cmake | 7 +------ pyproject.toml | 2 +- setup.py | 6 +++--- 17 files changed, 44 insertions(+), 66 deletions(-) diff --git a/.github/workflows/insitu.yml b/.github/workflows/insitu.yml index c36900cbb7d..d6184e64d28 100644 --- a/.github/workflows/insitu.yml +++ b/.github/workflows/insitu.yml @@ -22,6 +22,8 @@ jobs: image: senseiinsitu/ci:fedora35-amrex-20220613 steps: - uses: actions/checkout@v4 + - name: Setup cmake + uses: jwlawson/actions-setup-cmake@v2 - name: Configure run: | cmake -S . 
-B build \ @@ -73,7 +75,7 @@ jobs: catalyst: name: Catalyst - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 if: github.event.pull_request.draft == false env: CXX: g++ @@ -83,8 +85,10 @@ jobs: CATALYST_IMPLEMENTATION_PATHS: /opt/paraview/lib/catalyst OMP_NUM_THREADS: 1 + # Container build scripts: + # https://gitlab.kitware.com/christos.tsolakis/catalyst-amrex-docker-images container: - image: kitware/paraview:ci-catalyst-amrex-warpx-20240701 + image: kitware/paraview:ci-catalyst-amrex-warpx-20240828 steps: - uses: actions/checkout@v4 - name: Configure diff --git a/CMakeLists.txt b/CMakeLists.txt index d94b684f3a4..9bbfba70c27 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,6 +1,6 @@ # Preamble #################################################################### # -cmake_minimum_required(VERSION 3.20.0) +cmake_minimum_required(VERSION 3.24.0) project(WarpX VERSION 24.08) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) @@ -29,13 +29,6 @@ if(POLICY CMP0104) cmake_policy(SET CMP0104 OLD) endif() -# We use simple syntax in cmake_dependent_option, so we are compatible with the -# extended syntax in CMake 3.22+ -# https://cmake.org/cmake/help/v3.22/policy/CMP0127.html -if(POLICY CMP0127) - cmake_policy(SET CMP0127 NEW) -endif() - # C++ Standard in Superbuilds ################################################# # diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index 34150e42b5a..f46dc3d1640 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -7,7 +7,7 @@ WarpX depends on the following popular third party software. Please see installation instructions below. - a mature `C++17 `__ compiler, e.g., GCC 8.4+, Clang 7, NVCC 11.0, MSVC 19.15 or newer -- `CMake 3.20.0+ `__ +- `CMake 3.24.0+ `__ - `Git 2.18+ `__ - `AMReX `__: we automatically download and compile a copy of AMReX - `PICSAR `__: we automatically download and compile a copy of PICSAR diff --git a/Tools/machines/frontier-olcf/frontier_warpx.profile.example b/Tools/machines/frontier-olcf/frontier_warpx.profile.example index f59f2d3d058..5ca6e1e1622 100644 --- a/Tools/machines/frontier-olcf/frontier_warpx.profile.example +++ b/Tools/machines/frontier-olcf/frontier_warpx.profile.example @@ -6,7 +6,8 @@ export MY_PROFILE=$(cd $(dirname $BASH_SOURCE) && pwd)"/"$(basename $BASH_SOURCE if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your $MY_PROFILE file! Please edit its line 2 to continue!"; return; fi # required dependencies -module load cmake/3.23.2 +module switch Core Core/24.07 +module load cmake/3.27.9 module load craype-accel-amd-gfx90a module load rocm/5.7.1 module load cray-mpich/8.1.28 diff --git a/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example b/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example index 017613f9d60..27b6a59592e 100644 --- a/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example +++ b/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example @@ -6,7 +6,7 @@ export MY_PROFILE=$(cd $(dirname $BASH_SOURCE) && pwd)"/"$(basename $BASH_SOURCE if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your $MY_PROFILE file! 
Please edit its line 2 to continue!"; return; fi # required dependencies -module load cmake/3.22.1 +module load cmake/3.22.1 # we need 3.24+ - installing via pipx until module is available module load gcc/11.2.0 module load cuda/11.7.1 module load openmpi/4.1.2/gcc.11.2.0 diff --git a/Tools/machines/hpc3-uci/install_gpu_dependencies.sh b/Tools/machines/hpc3-uci/install_gpu_dependencies.sh index b585c2702b6..56f2bff4025 100755 --- a/Tools/machines/hpc3-uci/install_gpu_dependencies.sh +++ b/Tools/machines/hpc3-uci/install_gpu_dependencies.sh @@ -118,6 +118,8 @@ python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade pipx +python3 -m pipx install --upgrade cmake python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/lassen-llnl/lassen_v100_warpx_toss3.profile.example b/Tools/machines/lassen-llnl/lassen_v100_warpx_toss3.profile.example index 98e8d6410b3..99e61a2fbf6 100644 --- a/Tools/machines/lassen-llnl/lassen_v100_warpx_toss3.profile.example +++ b/Tools/machines/lassen-llnl/lassen_v100_warpx_toss3.profile.example @@ -2,7 +2,7 @@ #export proj="" # edit this and comment in # required dependencies -module load cmake/3.23.1 +module load cmake/3.29.2 module load gcc/11.2.1 module load cuda/12.0.0 diff --git a/Tools/machines/ookami-sbu/ookami_warpx.profile.example b/Tools/machines/ookami-sbu/ookami_warpx.profile.example index 321e3ce1d59..dc8c7ac6639 100644 --- a/Tools/machines/ookami-sbu/ookami_warpx.profile.example +++ b/Tools/machines/ookami-sbu/ookami_warpx.profile.example @@ -2,7 +2,7 @@ #export proj= # required dependencies -module load cmake/3.19.0 +module load cmake/3.19.0 # please check for a 3.24+ module and report back module load gcc/10.3.0 module load openmpi/gcc10/4.1.0 diff --git a/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example b/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example index 333434c1b97..d5cb1ec7a07 100644 --- a/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example +++ b/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example @@ -3,20 +3,24 @@ export proj="" # change me! 
# swap to GNU programming environment (with gcc 11.2) module swap PrgEnv-nvhpc PrgEnv-gnu -module swap gcc/12.2.0 gcc/11.2.0 -module load nvhpc-mixed/22.11 +module load gcc-native/12.3 +module load nvhpc-mixed/23.9 # swap to the Milan cray package -module swap craype-x86-rome craype-x86-milan +module load craype-x86-milan + +# extra modules +module use /soft/modulefiles +module load spack-pe-gnu # required dependencies -module load cmake/3.23.2 +module load cmake/3.27.7 # optional: for QED support with detailed tables -# module load boost/1.81.0 +module load boost # optional: for openPMD and PSATD+RZ support -module load cray-hdf5-parallel/1.12.2.3 +module load cray-hdf5-parallel/1.12.2.9 export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/c-blosc-1.21.1:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/adios2-2.8.3:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/blaspp-2024.05.31:$CMAKE_PREFIX_PATH @@ -30,7 +34,7 @@ export LD_LIBRARY_PATH=/home/${USER}/sw/polaris/gpu/lapackpp-2024.05.31/lib64:$L export PATH=/home/${USER}/sw/polaris/gpu/adios2-2.8.3/bin:${PATH} # optional: for Python bindings or libEnsemble -module load cray-python/3.9.13.1 +module load python/3.10.9 if [ -d "/home/${USER}/sw/polaris/gpu/venvs/warpx" ] then diff --git a/Tools/machines/taurus-zih/taurus_warpx.profile.example b/Tools/machines/taurus-zih/taurus_warpx.profile.example index f564f696c4a..434d773067b 100644 --- a/Tools/machines/taurus-zih/taurus_warpx.profile.example +++ b/Tools/machines/taurus-zih/taurus_warpx.profile.example @@ -5,7 +5,7 @@ module load modenv/hiera module load foss/2021b module load CUDA/11.8.0 -module load CMake/3.22.1 +module load CMake/3.27.6 # optional: for QED support with detailed tables #module load Boost # TODO diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 605ca2d3fa6..91d3542008b 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -145,22 +145,17 @@ macro(find_amrex) endif() add_subdirectory(${WarpX_amrex_src} _deps/localamrex-build/) else() + if(WarpX_COMPUTE STREQUAL CUDA) + enable_language(CUDA) + # AMReX 21.06+ supports CUDA_ARCHITECTURES + endif() FetchContent_Declare(fetchedamrex GIT_REPOSITORY ${WarpX_amrex_repo} GIT_TAG ${WarpX_amrex_branch} BUILD_IN_SOURCE 0 ) - FetchContent_GetProperties(fetchedamrex) - - if(NOT fetchedamrex_POPULATED) - FetchContent_Populate(fetchedamrex) - list(APPEND CMAKE_MODULE_PATH "${fetchedamrex_SOURCE_DIR}/Tools/CMake") - if(WarpX_COMPUTE STREQUAL CUDA) - enable_language(CUDA) - # AMReX 21.06+ supports CUDA_ARCHITECTURES - endif() - add_subdirectory(${fetchedamrex_SOURCE_DIR} ${fetchedamrex_BINARY_DIR}) - endif() + FetchContent_MakeAvailable(fetchedamrex) + list(APPEND CMAKE_MODULE_PATH "${fetchedamrex_SOURCE_DIR}/Tools/CMake") # advanced fetch options mark_as_advanced(FETCHCONTENT_BASE_DIR) diff --git a/cmake/dependencies/PICSAR.cmake b/cmake/dependencies/PICSAR.cmake index 6f2fb4f0137..ca06cf42315 100644 --- a/cmake/dependencies/PICSAR.cmake +++ b/cmake/dependencies/PICSAR.cmake @@ -53,19 +53,13 @@ function(find_picsar) get_source_version(PXRMP_QED ${WarpX_picsar_src}) else() FetchContent_Declare(fetchedpicsar - GIT_REPOSITORY ${WarpX_picsar_repo} - GIT_TAG ${WarpX_picsar_branch} + GIT_REPOSITORY ${WarpX_picsar_repo} + GIT_TAG ${WarpX_picsar_branch} BUILD_IN_SOURCE 0 + SOURCE_SUBDIR multi_physics/QED ) - FetchContent_GetProperties(fetchedpicsar) + FetchContent_MakeAvailable(fetchedpicsar) - if(NOT fetchedpicsar_POPULATED) - 
FetchContent_Populate(fetchedpicsar) - add_subdirectory( - ${fetchedpicsar_SOURCE_DIR}/multi_physics/QED - ${fetchedpicsar_BINARY_DIR} - ) - endif() get_source_version(PXRMP_QED ${fetchedpicsar_SOURCE_DIR}) if(NOT PXRMP_QED_GIT_VERSION) set(PXRMP_QED_GIT_VERSION "${WarpX_picsar_branch}" CACHE INTERNAL "") diff --git a/cmake/dependencies/openPMD.cmake b/cmake/dependencies/openPMD.cmake index f58d37ee92e..ce6ec4d0967 100644 --- a/cmake/dependencies/openPMD.cmake +++ b/cmake/dependencies/openPMD.cmake @@ -32,12 +32,7 @@ function(find_openpmd) GIT_TAG ${WarpX_openpmd_branch} BUILD_IN_SOURCE 0 ) - FetchContent_GetProperties(fetchedopenpmd) - - if(NOT fetchedopenpmd_POPULATED) - FetchContent_Populate(fetchedopenpmd) - add_subdirectory(${fetchedopenpmd_SOURCE_DIR} ${fetchedopenpmd_BINARY_DIR}) - endif() + FetchContent_MakeAvailable(fetchedopenpmd) # advanced fetch options mark_as_advanced(FETCHCONTENT_BASE_DIR) diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 793c7cfe598..a803e47eb2f 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -47,12 +47,7 @@ function(find_pyamrex) GIT_TAG ${WarpX_pyamrex_branch} BUILD_IN_SOURCE 0 ) - FetchContent_GetProperties(fetchedpyamrex) - - if(NOT fetchedpyamrex_POPULATED) - FetchContent_Populate(fetchedpyamrex) - add_subdirectory(${fetchedpyamrex_SOURCE_DIR} ${fetchedpyamrex_BINARY_DIR}) - endif() + FetchContent_MakeAvailable(fetchedpyamrex) # advanced fetch options mark_as_advanced(FETCHCONTENT_BASE_DIR) diff --git a/cmake/dependencies/pybind11.cmake b/cmake/dependencies/pybind11.cmake index 0a7ec260493..94d38e69112 100644 --- a/cmake/dependencies/pybind11.cmake +++ b/cmake/dependencies/pybind11.cmake @@ -21,12 +21,7 @@ function(find_pybind11) GIT_TAG ${WarpX_pybind11_branch} BUILD_IN_SOURCE 0 ) - FetchContent_GetProperties(fetchedpybind11) - - if(NOT fetchedpybind11_POPULATED) - FetchContent_Populate(fetchedpybind11) - add_subdirectory(${fetchedpybind11_SOURCE_DIR} ${fetchedpybind11_BINARY_DIR}) - endif() + FetchContent_MakeAvailable(fetchedpybind11) # advanced fetch options mark_as_advanced(FETCHCONTENT_BASE_DIR) diff --git a/pyproject.toml b/pyproject.toml index 9d5e78a6cc4..6210388f6e0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ requires = [ "setuptools>=42", "wheel", - "cmake>=3.20.0,<4.0.0", + "cmake>=3.24.0,<4.0.0", "packaging>=23", ] build-backend = "setuptools.build_meta" diff --git a/setup.py b/setup.py index acf61165e98..713fb788319 100644 --- a/setup.py +++ b/setup.py @@ -63,14 +63,14 @@ def run(self): out = subprocess.check_output(["cmake", "--version"]) except OSError: raise RuntimeError( - "CMake 3.20.0+ must be installed to build the following " + "CMake 3.24.0+ must be installed to build the following " + "extensions: " + ", ".join(e.name for e in self.extensions) ) cmake_version = parse(re.search(r"version\s*([\d.]+)", out.decode()).group(1)) - if cmake_version < parse("3.20.0"): - raise RuntimeError("CMake >= 3.20.0 is required") + if cmake_version < parse("3.24.0"): + raise RuntimeError("CMake >= 3.24.0 is required") for ext in self.extensions: self.build_extension(ext) From c9d79ccf2de64b00f6be02432ad33bae641c50a8 Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Wed, 4 Sep 2024 19:32:22 -0500 Subject: [PATCH 12/91] Simplify cache cleanup actions (#5215) In our old approach, a workflow file contains a job that uploads the PR number as an artifact. 
While the PR is still open, the workflow_run triggered by it will download
the artifact and use the information to clean up all except the last used
cache associated with that original workflow. When a PR is merged or closed,
there will be a post-pr workflow that uploads the PR number as an artifact
and triggers a workflow_run that cleans up all caches associated with the PR.

The reason we did it this way was that, in the cache cleanup workflows, we
did not find an easy way to get the number of the PR triggering them. This
is not convenient because we have to add jobs uploading artifacts to
workflow files.

After some experiments, we have found a reliable way to find the PR number
without using artifacts. The workflow_run's payload always contains the head
SHA of the commit that triggers it, whether the PR comes from a fork or not.
We can then use `gh pr list` to search for that head and obtain the PR
number.
---
 .github/workflows/clang_sanitizers.yml     | 15 ---------------
 .github/workflows/clang_tidy.yml           | 15 ---------------
 .github/workflows/cleanup-cache-postpr.yml |  7 +++++--
 .github/workflows/cleanup-cache.yml        |  8 ++++++--
 .github/workflows/codeql.yml               | 15 ---------------
 .github/workflows/cuda.yml                 | 15 ---------------
 .github/workflows/hip.yml                  | 15 ---------------
 .github/workflows/intel.yml                | 15 ---------------
 .github/workflows/macos.yml                | 15 ---------------
 .github/workflows/post-pr.yml              | 18 +++++++-----------
 .github/workflows/ubuntu.yml               | 15 ---------------
 11 files changed, 18 insertions(+), 135 deletions(-)

diff --git a/.github/workflows/clang_sanitizers.yml b/.github/workflows/clang_sanitizers.yml
index ef005cbbc72..8efcdc9a431 100644
--- a/.github/workflows/clang_sanitizers.yml
+++ b/.github/workflows/clang_sanitizers.yml
@@ -160,18 +160,3 @@ jobs:
         ulimit -c unlimited
         mpirun -n 2 ../../../build_EB/bin/warpx.2d inputs_2d warpx.serialize_initial_conditions = 0
-
-  save_pr_number:
-    if: github.event_name == 'pull_request'
-    runs-on: ubuntu-latest
-    steps:
-      - name: Save PR number
-        env:
-          PR_NUMBER: ${{ github.event.number }}
-        run: |
-          echo $PR_NUMBER > pr_number.txt
-      - uses: actions/upload-artifact@v4
-        with:
-          name: pr_number
-          path: pr_number.txt
-          retention-days: 1
diff --git a/.github/workflows/clang_tidy.yml b/.github/workflows/clang_tidy.yml
index 96c7337a3f2..5a4f83f01f1 100644
--- a/.github/workflows/clang_tidy.yml
+++ b/.github/workflows/clang_tidy.yml
@@ -60,18 +60,3 @@ jobs:
 
         ccache -s
         du -hs ~/.cache/ccache
-
-  save_pr_number:
-    if: github.event_name == 'pull_request'
-    runs-on: ubuntu-latest
-    steps:
-      - name: Save PR number
-        env:
-          PR_NUMBER: ${{ github.event.number }}
-        run: |
-          echo $PR_NUMBER > pr_number.txt
-      - uses: actions/upload-artifact@v4
-        with:
-          name: pr_number
-          path: pr_number.txt
-          retention-days: 1
diff --git a/.github/workflows/cleanup-cache-postpr.yml b/.github/workflows/cleanup-cache-postpr.yml
index 9a2ffb0f61a..5e9a70cd5a4 100644
--- a/.github/workflows/cleanup-cache-postpr.yml
+++ b/.github/workflows/cleanup-cache-postpr.yml
@@ -23,8 +23,11 @@ jobs:
 
           REPO=${{ github.repository }}
 
-          gh run download ${{ github.event.workflow_run.id }} -n pr_number
-          pr_number=`cat pr_number.txt`
+          # For debugging, cat ${GITHUB_EVENT_PATH} to see the payload.
+
+          pr_head_sha=${{ github.event.workflow_run.head_sha }}
+          pr_number=$(gh pr list --state all --search $pr_head_sha --json number --jq '.[0].number')
+          echo "Post-PR cache cleanup for PR ${pr_number}"
           BRANCH=refs/pull/${pr_number}/merge
           # Setting this to not fail the workflow while deleting cache keys.
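Stripped of the workflow boilerplate, the lookup used in these files reduces
to the following sketch (assuming an authenticated GitHub CLI; the repository
name and SHA below are placeholder values):

    # resolve a PR number from the head SHA carried in the workflow_run payload
    REPO="ECP-WarpX/WarpX"    # placeholder repository
    pr_head_sha="0123abcd"    # e.g., github.event.workflow_run.head_sha
    pr_number=$(gh pr list -R ${REPO} --state all \
                --search ${pr_head_sha} --json number --jq '.[0].number')
    echo "Found PR #${pr_number} for head ${pr_head_sha}"
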
diff --git a/.github/workflows/cleanup-cache.yml b/.github/workflows/cleanup-cache.yml index bd1a518acf4..3abe232b879 100644 --- a/.github/workflows/cleanup-cache.yml +++ b/.github/workflows/cleanup-cache.yml @@ -29,9 +29,12 @@ jobs: # Triggering workflow run name (e.g., LinuxClang) WORKFLOW_NAME="${{ github.event.workflow_run.name }}" + # For debugging, cat ${GITHUB_EVENT_PATH} to see the payload. + if [[ $EVENT == "pull_request" ]]; then - gh run download ${{ github.event.workflow_run.id }} -n pr_number - pr_number=`cat pr_number.txt` + pr_head_sha=${{ github.event.workflow_run.head_sha }} + pr_number=$(gh pr list --search $pr_head_sha --json number --jq '.[0].number') + echo "Clean up cache for PR ${pr_number}" BRANCH=refs/pull/${pr_number}/merge else BRANCH=refs/heads/${{ github.event.workflow_run.head_branch }} @@ -54,6 +57,7 @@ jobs: IFS=$'\n' for j in $cached_jobs do + # Delete all entries except the last used one old_keys=$(gh actions-cache list -L 100 -R $REPO -B $BRANCH --key "${j}-git-" --sort last-used | cut -f 1 | tail -n +2) for k in $old_keys do diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index bc0bee545cc..5c36b9d9f21 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -110,18 +110,3 @@ jobs: uses: github/codeql-action/upload-sarif@v3 with: sarif_file: sarif-results/${{ matrix.language }}.sarif - - save_pr_number: - if: github.event_name == 'pull_request' - runs-on: ubuntu-latest - steps: - - name: Save PR number - env: - PR_NUMBER: ${{ github.event.number }} - run: | - echo $PR_NUMBER > pr_number.txt - - uses: actions/upload-artifact@v4 - with: - name: pr_number - path: pr_number.txt - retention-days: 1 diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 9554e1faa3a..010ce8090ac 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -203,18 +203,3 @@ jobs: ccache -s du -hs ~/.cache/ccache - - save_pr_number: - if: github.event_name == 'pull_request' - runs-on: ubuntu-latest - steps: - - name: Save PR number - env: - PR_NUMBER: ${{ github.event.number }} - run: | - echo $PR_NUMBER > pr_number.txt - - uses: actions/upload-artifact@v4 - with: - name: pr_number - path: pr_number.txt - retention-days: 1 diff --git a/.github/workflows/hip.yml b/.github/workflows/hip.yml index 8b89e3dc4d0..ba537e776d4 100644 --- a/.github/workflows/hip.yml +++ b/.github/workflows/hip.yml @@ -127,18 +127,3 @@ jobs: ccache -s du -hs ~/.cache/ccache - - save_pr_number: - if: github.event_name == 'pull_request' - runs-on: ubuntu-latest - steps: - - name: Save PR number - env: - PR_NUMBER: ${{ github.event.number }} - run: | - echo $PR_NUMBER > pr_number.txt - - uses: actions/upload-artifact@v4 - with: - name: pr_number - path: pr_number.txt - retention-days: 1 diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index 8a7867fcbc8..485a5229c6a 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -194,18 +194,3 @@ jobs: # python3 -m pip install --upgrade build packaging setuptools wheel # PYWARPX_LIB_DIR=$PWD/build_sp/lib/site-packages/pywarpx/ python3 -m pip wheel . 
# python3 -m pip install *.whl - - save_pr_number: - if: github.event_name == 'pull_request' - runs-on: ubuntu-latest - steps: - - name: Save PR number - env: - PR_NUMBER: ${{ github.event.number }} - run: | - echo $PR_NUMBER > pr_number.txt - - uses: actions/upload-artifact@v4 - with: - name: pr_number - path: pr_number.txt - retention-days: 1 diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index dfd32f459f0..124d26fa7f7 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -85,18 +85,3 @@ jobs: export OMP_NUM_THREADS=1 mpirun -n 2 Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py - - save_pr_number: - if: github.event_name == 'pull_request' - runs-on: ubuntu-latest - steps: - - name: Save PR number - env: - PR_NUMBER: ${{ github.event.number }} - run: | - echo $PR_NUMBER > pr_number.txt - - uses: actions/upload-artifact@v4 - with: - name: pr_number - path: pr_number.txt - retention-days: 1 diff --git a/.github/workflows/post-pr.yml b/.github/workflows/post-pr.yml index 2768ef376cc..5f0b1534970 100644 --- a/.github/workflows/post-pr.yml +++ b/.github/workflows/post-pr.yml @@ -4,17 +4,13 @@ on: types: - closed +# This workflow does not have the permission to clean up cache for PRs +# originated from a fork. The purpose here is to trigger a workflow_run +# cleanup-cache-postpr.yml that has the right permission. + jobs: - cleanup: + noop: runs-on: ubuntu-latest steps: - - name: Save PR number - env: - PR_NUMBER: ${{ github.event.number }} - run: | - echo $PR_NUMBER > pr_number.txt - - uses: actions/upload-artifact@v4 - with: - name: pr_number - path: pr_number.txt - retention-days: 1 + - name: No OP + run: echo "This workflow is going to trigger CleanUpCachePostPR." diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 05e53883534..bf6652e9c69 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -223,18 +223,3 @@ jobs: run: | export OMP_NUM_THREADS=1 mpirun -n 2 Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py - - save_pr_number: - if: github.event_name == 'pull_request' - runs-on: ubuntu-latest - steps: - - name: Save PR number - env: - PR_NUMBER: ${{ github.event.number }} - run: | - echo $PR_NUMBER > pr_number.txt - - uses: actions/upload-artifact@v4 - with: - name: pr_number - path: pr_number.txt - retention-days: 1 From 418b71ad922e6b85415169f93ecf4294bc030640 Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Wed, 4 Sep 2024 23:27:18 -0500 Subject: [PATCH 13/91] Use amrex::ParmParse::prettyPrintTable (#5216) Instead of amrex::ParmParse::dumpTable, we use prettyPrintTable that removes duplicates. 
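At each call site the switch is a one-line change; in a minimal sketch (the
function and file name here are illustrative, not the actual WarpX names):

    #include <AMReX_ParmParse.H>
    #include <fstream>

    // write the de-duplicated runtime parameter table to a file
    void write_used_inputs ()
    {
        std::ofstream ofs("used_inputs", std::ios::out);  // illustrative file name
        amrex::ParmParse::prettyPrintTable(ofs);          // formerly: ParmParse::dumpTable(ofs, true)
        ofs.close();
    }
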
--- Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp | 2 +- Source/ablastr/utils/UsedInputsFile.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp index 94b9901b06e..f6c73d9fa7e 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp @@ -226,7 +226,7 @@ FlushFormatPlotfile::WriteJobInfo(const std::string& dir) const jobInfoFile << " Inputs File Parameters\n"; jobInfoFile << PrettyLine; - ParmParse::dumpTable(jobInfoFile, true); + ParmParse::prettyPrintTable(jobInfoFile); jobInfoFile.close(); } diff --git a/Source/ablastr/utils/UsedInputsFile.cpp b/Source/ablastr/utils/UsedInputsFile.cpp index dfdc4bfa192..a7777556242 100644 --- a/Source/ablastr/utils/UsedInputsFile.cpp +++ b/Source/ablastr/utils/UsedInputsFile.cpp @@ -23,7 +23,7 @@ ablastr::utils::write_used_inputs_file (std::string const & filename) if (amrex::ParallelDescriptor::IOProcessor()) { std::ofstream jobInfoFile; jobInfoFile.open(filename.c_str(), std::ios::out); - amrex::ParmParse::dumpTable(jobInfoFile, true); + amrex::ParmParse::prettyPrintTable(jobInfoFile); jobInfoFile.close(); } } From 5b34b84dfc588c8f6cf8088e94cd884e850344f7 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 4 Sep 2024 22:18:52 -0700 Subject: [PATCH 14/91] Release 24.09 (#5214) * AMReX: 24.09 * PICSAR: 24.09 * pyAMReX: 24.09 * WarpX: 24.09 --- .github/workflows/cuda.yml | 2 +- CMakeLists.txt | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 4 ++-- cmake/dependencies/PICSAR.cmake | 4 ++-- cmake/dependencies/pyAMReX.cmake | 4 ++-- run_test.sh | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 010ce8090ac..eeec4abcd9f 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -131,7 +131,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 12002e7283284281503ed4ae5e79ae02e006b897 && cd - + cd ../amrex && git checkout --detach 24.09 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/CMakeLists.txt b/CMakeLists.txt index 9bbfba70c27..36e42433572 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.24.0) -project(WarpX VERSION 24.08) +project(WarpX VERSION 24.09) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 7d8edcb2c9c..ddc2173755b 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 12002e7283284281503ed4ae5e79ae02e006b897 +branch = 24.09 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 6e791199422..5b4036f5582 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 12002e7283284281503ed4ae5e79ae02e006b897 +branch = 24.09 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 91d3542008b..228bef37ca1 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -256,7 +256,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 24.08 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) + find_package(AMReX 24.09 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) # note: TINYP skipped because user-configured and optional # AMReX CMake helper scripts @@ -279,7 +279,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "12002e7283284281503ed4ae5e79ae02e006b897" +set(WarpX_amrex_branch "24.09" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/PICSAR.cmake b/cmake/dependencies/PICSAR.cmake index ca06cf42315..9eb9162238a 100644 --- a/cmake/dependencies/PICSAR.cmake +++ b/cmake/dependencies/PICSAR.cmake @@ -88,7 +88,7 @@ function(find_picsar) #message(STATUS "PICSAR: Using version '${PICSAR_VERSION}'") else() # not supported by PICSAR (yet) - #find_package(PICSAR 23.11 CONFIG REQUIRED QED) + #find_package(PICSAR 24.09 CONFIG REQUIRED QED) #message(STATUS "PICSAR: Found version '${PICSAR_VERSION}'") message(FATAL_ERROR "PICSAR: Cannot be used as externally installed " "library yet. 
" @@ -109,7 +109,7 @@ if(WarpX_QED) set(WarpX_picsar_repo "https://github.com/ECP-WarpX/picsar.git" CACHE STRING "Repository URI to pull and build PICSAR from if(WarpX_picsar_internal)") - set(WarpX_picsar_branch "44a2dfdf0f8cae93f12328664e055703989e7185" + set(WarpX_picsar_branch "24.09" CACHE STRING "Repository branch for WarpX_picsar_repo if(WarpX_picsar_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index a803e47eb2f..cfb92ea63e0 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -59,7 +59,7 @@ function(find_pyamrex) endif() elseif(NOT WarpX_pyamrex_internal) # TODO: MPI control - find_package(pyAMReX 24.08 CONFIG REQUIRED) + find_package(pyAMReX 24.09 CONFIG REQUIRED) message(STATUS "pyAMReX: Found version '${pyAMReX_VERSION}'") endif() endfunction() @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "6061d62ec1bd0d5c9a853f5005714fa79864707e" +set(WarpX_pyamrex_branch "24.09" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") diff --git a/run_test.sh b/run_test.sh index 9487e8015f1..b97d174f386 100755 --- a/run_test.sh +++ b/run_test.sh @@ -72,7 +72,7 @@ python3 -m pip cache purge # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 12002e7283284281503ed4ae5e79ae02e006b897 && cd - +cd amrex && git checkout --detach 24.09 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git # openPMD-example-datasets contains various required data sets From 14cd5e91f284a8085e852a6fceb1f34bc4756cd6 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 5 Sep 2024 12:56:02 -0700 Subject: [PATCH 15/91] AMReX/pyAMReX/PICSAR: Weekly Update (#5219) * AMReX: Weekly Update * pyAMReX: Weekly Update --- .github/workflows/cuda.yml | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 2 +- cmake/dependencies/pyAMReX.cmake | 2 +- run_test.sh | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index eeec4abcd9f..11765013bb7 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -131,7 +131,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex
-          cd ../amrex && git checkout --detach 24.09 && cd -
+          cd ../amrex && git checkout --detach 216ce6f37de4b65be57fc1006b3457b4fc318e03 && cd -
           make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4
 
           ccache -s
           du -hs ~/.cache/ccache
diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini
index ddc2173755b..fade8193140 100644
--- a/Regression/WarpX-GPU-tests.ini
+++ b/Regression/WarpX-GPU-tests.ini
@@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more
 
 [AMReX]
 dir = /home/regtester/git/amrex/
-branch = 24.09
+branch = 216ce6f37de4b65be57fc1006b3457b4fc318e03
 
 [source]
 dir = /home/regtester/git/WarpX
diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini
index 5b4036f5582..5fb937a0b94 100644
--- a/Regression/WarpX-tests.ini
+++ b/Regression/WarpX-tests.ini
@@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det
 
 [AMReX]
 dir = /home/regtester/AMReX_RegTesting/amrex/
-branch = 24.09
+branch = 216ce6f37de4b65be57fc1006b3457b4fc318e03
 
 [source]
 dir = /home/regtester/AMReX_RegTesting/warpx
diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake
index 228bef37ca1..f65f5d36cce 100644
--- a/cmake/dependencies/AMReX.cmake
+++ b/cmake/dependencies/AMReX.cmake
@@ -279,7 +279,7 @@ set(WarpX_amrex_src ""
 set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git"
     CACHE STRING
     "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)")
-set(WarpX_amrex_branch "24.09"
+set(WarpX_amrex_branch "216ce6f37de4b65be57fc1006b3457b4fc318e03"
     CACHE STRING
     "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)")
 
diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake
index cfb92ea63e0..4c92ffa99ba 100644
--- a/cmake/dependencies/pyAMReX.cmake
+++ b/cmake/dependencies/pyAMReX.cmake
@@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal   "Download & build pyAMReX" ON)
 set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git"
     CACHE STRING
     "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)")
-set(WarpX_pyamrex_branch "24.09"
+set(WarpX_pyamrex_branch "da2d5a000330395b3fcbcb43a519b3c8a318c584"
     CACHE STRING
     "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)")
 
diff --git a/run_test.sh b/run_test.sh
index b97d174f386..9e9f55d314e 100755
--- a/run_test.sh
+++ b/run_test.sh
@@ -72,7 +72,7 @@ python3 -m pip cache purge
 
 # Clone AMReX and warpx-data
 git clone https://github.com/AMReX-Codes/amrex.git
-cd amrex && git checkout --detach 24.09 && cd -
+cd amrex && git checkout --detach 216ce6f37de4b65be57fc1006b3457b4fc318e03 && cd -
 # warpx-data contains various required data sets
 git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git
 # openPMD-example-datasets contains various required data sets

From 952022e4716fd12eb27d99c10fbb503a65029d6c Mon Sep 17 00:00:00 2001
From: Weiqun Zhang
Date: Thu, 5 Sep 2024 17:59:24 -0500
Subject: [PATCH 16/91] GatherAndPush: Use CTO ParallelFor (#5217)

* GatherAndPush: Use CTO ParallelFor

The GPU kernel in GatherAndPush contains Parsers for computing external E
and B fields. It's known that even if the Parsers are not used at run time,
the occupancy on AMD GPUs is still affected. This is probably the reason
why this kernel is 50% slower on MI200 than on A100. Using CTO ParallelFor
should improve the performance.
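In miniature, the compile-time-options (CTO) pattern reads as follows (a
sketch distilled from the diff below; the enum, flag, box, and loop body
are illustrative):

    #include <AMReX_Box.H>
    #include <AMReX_GpuLaunch.H>   // amrex::ParallelFor with CompileTimeOptions

    // one kernel instantiation is compiled per option value; the runtime
    // flag selects which instantiation gets launched
    enum exte_flags : int { no_exte, has_exte };

    void gather_sketch (amrex::Box const& tile_box, bool external_e_fields)
    {
        const int exte_runtime_flag = external_e_fields ? has_exte : no_exte;
        amrex::ParallelFor(
            amrex::TypeList<amrex::CompileTimeOptions<no_exte, has_exte>>{},
            {exte_runtime_flag},
            tile_box,
            [=] AMREX_GPU_DEVICE (int i, int j, int k, auto exte_control) noexcept
            {
                amrex::ignore_unused(i, j, k);
                if constexpr (exte_control == has_exte) {
                    // parser evaluations are compiled only into this instantiation
                }
            });
    }
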
* Clang-Tidy: NOLINT(misc-const-correctness)

* Fix first-capture for cuda

---------

Co-authored-by: Axel Huebl
---
 Source/Fluids/WarpXFluidContainer.cpp | 36 +++++++++++++++++++++------
 1 file changed, 28 insertions(+), 8 deletions(-)

diff --git a/Source/Fluids/WarpXFluidContainer.cpp b/Source/Fluids/WarpXFluidContainer.cpp
index 11f25678dc0..99a1212ac90 100644
--- a/Source/Fluids/WarpXFluidContainer.cpp
+++ b/Source/Fluids/WarpXFluidContainer.cpp
@@ -1035,14 +1035,27 @@ void WarpXFluidContainer::GatherAndPush (
     // Here, we do not perform any coarsening.
     const amrex::GpuArray<int, 3> coarsening_ratio = {1, 1, 1};
 
-    amrex::ParallelFor(tile_box,
-        [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept
+    enum exte_flags : int { no_exte, has_exte };
+    enum extb_flags : int { no_extb, has_extb };
+    enum boost_flags : int { no_gamma_boost, has_gamma_boost };
+    const int exte_runtime_flag = external_e_fields ? has_exte : no_exte;
+    const int extb_runtime_flag = external_b_fields ? has_extb : no_extb;
+    const int boost_runtime_flag = (gamma_boost > 1._rt) ? has_gamma_boost : no_gamma_boost;
+
+    amrex::ParallelFor(TypeList<CompileTimeOptions<no_exte,has_exte>,
+                                CompileTimeOptions<no_extb,has_extb>,
+                                CompileTimeOptions<no_gamma_boost,has_gamma_boost>>{},
+                       {exte_runtime_flag, extb_runtime_flag, boost_runtime_flag},
+                       tile_box,
+        [=] AMREX_GPU_DEVICE(int i, int j, int k,
+                             auto exte_control, auto extb_control, auto boost_control) noexcept
         {
             // Only run if density is positive
             if (N_arr(i,j,k)>0.0) {
 
                 // Interpolate fields from tmp to Nodal points
+                // NOLINTBEGIN(misc-const-correctness)
                 amrex::Real Ex_Nodal = ablastr::coarsen::sample::Interp(Ex_arr,
                     Ex_type, Nodal_type, coarsening_ratio, i, j, k, 0);
                 amrex::Real Ey_Nodal = ablastr::coarsen::sample::Interp(Ey_arr,
@@ -1055,9 +1068,16 @@ void WarpXFluidContainer::GatherAndPush (
                     By_type, Nodal_type, coarsening_ratio, i, j, k, 0);
                 amrex::Real Bz_Nodal = ablastr::coarsen::sample::Interp(Bz_arr,
                     Bz_type, Nodal_type, coarsening_ratio, i, j, k, 0);
+                // NOLINTEND(misc-const-correctness)
+
+#ifdef AMREX_USE_CUDA
+                amrex::ignore_unused(Exfield_parser, Eyfield_parser, Ezfield_parser,
+                                     Bxfield_parser, Byfield_parser, Bzfield_parser,
+                                     gamma_boost, problo, dx, t, beta_boost);
+#endif
 
-                if (gamma_boost > 1._rt) { // Lorentz transform fields due to moving frame
-                    if ( ( external_b_fields ) || ( external_e_fields ) ){
+                if constexpr (boost_control == has_gamma_boost) { // Lorentz transform fields due to moving frame
+                    if constexpr (exte_control == has_exte || extb_control == has_extb) {
 
                         // Lorentz transform z (from boosted to lab frame)
                         amrex::Real Ex_ext_boost, Ey_ext_boost, Ez_ext_boost;
@@ -1086,7 +1106,7 @@ void WarpXFluidContainer::GatherAndPush (
                         const amrex::Real z_lab = gamma_boost*(z + beta_boost*PhysConst::c*t);
 
                         // Grab the external fields in the lab frame:
-                        if ( external_e_fields ) {
+                        if ( exte_control == has_exte ) {
                             Ex_ext_lab = Exfield_parser(x, y, z_lab, t_lab);
                             Ey_ext_lab = Eyfield_parser(x, y, z_lab, t_lab);
                             Ez_ext_lab = Ezfield_parser(x, y, z_lab, t_lab);
@@ -1095,7 +1115,7 @@ void WarpXFluidContainer::GatherAndPush (
                             Ey_ext_lab = 0.0;
                             Ez_ext_lab = 0.0;
                         }
-                        if ( external_b_fields ) {
+                        if ( extb_control == has_extb ) {
                             Bx_ext_lab = Bxfield_parser(x, y, z_lab, t_lab);
                             By_ext_lab = Byfield_parser(x, y, z_lab, t_lab);
                             Bz_ext_lab = Bzfield_parser(x, y, z_lab, t_lab);
@@ -1126,7 +1146,7 @@ void WarpXFluidContainer::GatherAndPush (
                     } else {
 
                         // Added external e fields:
-                        if ( external_e_fields ){
+                        if constexpr ( exte_control == has_exte ){
#if defined(WARPX_DIM_3D)
                             const amrex::Real x = problo[0] + i * dx[0];
                             const amrex::Real y = problo[1] + j * 
dx[1]; @@ -1147,7 +1167,7 @@ void WarpXFluidContainer::GatherAndPush ( } // Added external b fields: - if ( external_b_fields ){ + if ( extb_control == has_extb ){ #if defined(WARPX_DIM_3D) const amrex::Real x = problo[0] + i * dx[0]; const amrex::Real y = problo[1] + j * dx[1]; From e4cd1aa8c9314995afe04c4a1aa6e937088a1b74 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Thu, 5 Sep 2024 16:04:49 -0700 Subject: [PATCH 17/91] Set up CTest (#5068) * Set up CTest * Add argument for MPI procs, improve readability * Add missing input files * Apply suggestions from code review * Add more tests * Fix naming convention to unblock style check * Add more tests * Update source/style checks, use new input files in current CI workflows * Add more tests * Make additional runtime params input file optional * Add more tests * Fix broken test * Add more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix broken test * Do not enforce backward compatibility - Comment out from WarpX-tests.ini entries related to tests that have been migrated. - WarpX-tests.ini will be deleted at the end of the migration to CTest. * Testing Azure pipeline * Azure Update * Tests: Individual CMakeLists.txt per test Less collisions as people expand tests. * CI: CTest `registered_tests` Script Updates to search for `add_warpx_test` registration locations. * Individual CMakeLists.txt per test * Cleanup * Change interface of `add_warpx_test` * Set `PYTHONPATH` to import custom modules * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Restore relative paths of custom modules in analysis scripts * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Do not pass number of MPI procs - Hard-coded to 2 for all MPI tests - Run single-proc tests without MPI * Cleanup * `openpmd-viewer` required by checksum scripts * Cleanup * Install requirements, clone warpx-data repo * Cleanup * Fix broken dependencies * Fix broken dependencies (numpy, pandas) * Install correct requirements * Avoid duplicate runs with/without MPI * Build all dims for single-process tests * Fix path of warpx-data repo * Fix broken test * Cleanup * Add support for Python tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix `PYTHONPATH` environment variable * Build always with `WarpX_FFT=ON` * Fix BLAS++/LAPACK++ installation for RZ builds * Comment out Azure matrix elements until supported * Fix BLAS++/LAPACK++ installation for RZ builds * Cleanup * Fix logic for Python tests * Add more Python tests * Fix Azure pipeline YAML file * Add more tests * More Azure matrix elements * Add more tests * Exclude `pytest.AMReX` when running Python tests * Add `periodictable` to requirements * Add more tests * Add `picmistandard` to requirements * Add more tests * Fix broken test, remove build directory * Clear pip cache * Clean up input file names * Add more tests * Fix broken tests * Add more tests * Fix broken tests * Add more tests * Fix broken tests * Add more tests * Add more tests * Fix broken tests * Azure: always compile with Python, EB support * Add more tests * Simplify Azure matrix, fix broken tests * Add more tests * Separate Azure matrix element for EB tests * Cleanup * Add more tests * Add EB tests only if `WarpX_EB=ON` * Fix broken tests, add restart tests * Add more tests * Fix broken tests * Fix broken 
tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix broken tests * Add default AMReX CMake flags * Add more tests * Fix broken tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix codeQL warning, GitHub Action's YAML files * Fix codeQL warning * Add more tests * Fix GitHub Action's YAML files * Add more tests * Fix unbound variable in Azure pipelines * Fix GitHub Action's YAML files * Add more tests * Fix GitHub Action's YAML files * Remove unused checksum file * Add more tests * Add Python tests with custom command-line arguments * Add more tests * Cleanup * Cleanup * Fix broken tests * Fix style checks * WarpX_CI: Cleanup After Run * Fix: CTest independent of PWD/CWD Can run now from any working dir * Support CLI Args, Prep Script, Robust Python Startup * Compile tests with `-DWarpX_CI=ON` on Azure * Fix typo in CMakeLists.txt * Fix tests dependency cycle * Fix indentation in CMakeLists.txt * Clean up CMakeLists.txt * Fix Clang pywarpx build * Fix bug in CMakeLists.txt * Cleanup * Rewrite style checks in Python, cleanup * Update Python script for inputs check * Fix broken tests * CTest: `add_warpx_test` with Dependent Test * Fix calls to `add_warpx_test` * Fix restart tests * Cleanup * Fix restart tests * Fix Python tests * Fix GitHub Action's YAML files * Fix broken tests * Add more tests * Fix Python script for inputs check * Add more tests * Add LASY laser tests * Fix Python script for inputs check * Fix test names * Add LASY laser tests * Add more tests * Fix Langmuir tests * Add more tests * Fix Python script for inputs check * Add more tests * Remove obsolete scripts for inputs check * Debugging LASY tests * Add missing tests * Fix restart tests * Revert LASY debugging * Debugging LASY tests * CMake: Dependency Must Exist (run) Do not create a test that has an unfulfilled dependency. This can happen if the dependency has stricter compile-time requirements (e.g., needs Python) than the dependent (e.g., pure inputs file). * LASY Scripts: Recent 0.5.0 Changes Adopt inputs for breaking changes from 0.5.0 LASY update. 
* Restore all tests
* Less dependencies in Azure pipeline
* Add missing tests
* Quotation marks around restart file paths
* Cleanup
* Rename `WarpX_CI` flag
* Restore heFFTe dependency in Azure pipeline
* Add CMake flag `WarpX_TEST_FPETRAP`
* Address two Fixme Comments
* Docs first draft
* Use new `AMREX_INPUTS_FILE_PREFIX` env variable
* Update docs
* Fix temporary build command in Azure pipeline
* Split steps for build and test in Azure pipeline
* Fix indentation in docs
* Style fixes in CMakeLists.txt files
* Enable line to define `BUILD_TESTING` in CMakeLists.txt
* Remove repo/branch build workaround in Azure pipeline

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Axel Huebl
---
 .azure-pipelines.yml | 115 +++-- .github/workflows/clang_sanitizers.yml | 18 +- .github/workflows/insitu.yml | 22 +- .github/workflows/intel.yml | 2 +- .github/workflows/macos.yml | 2 +- .../scripts/checkQEDTableGenerator.sh | 4 +- .github/workflows/source.yml | 6 +- .github/workflows/source/check_inputs.py | 109 +++++ .github/workflows/source/inputsNotTested | 41 -- .../workflows/source/wrongFileNameInExamples | 44 -- .github/workflows/ubuntu.yml | 15 +- .github/workflows/windows.yml | 4 +- CMakeLists.txt | 22 +- Docs/source/developers/testing.rst | 215 +++++---- Examples/CMakeLists.txt | 237 ++++++++++ Examples/Physics_applications/CMakeLists.txt | 11 + .../beam_beam_collision/CMakeLists.txt | 13 + .../README.rst => beam_beam_collision/README} | 0 .../analysis_default_openpmd_regression.py | 1 + .../inputs_test_3d_beam_beam_collision} | 0 .../capacitive_discharge/CMakeLists.txt | 58 +++ .../{README.rst => README} | 0 .../capacitive_discharge/analysis_2d.py | 2 +- .../analysis_default_regression.py | 1 + ...I_inputs_1d.py => inputs_base_1d_picmi.py} | 12 - ...nputs_2d => inputs_test_2d_background_mcc} | 1 + ...=> inputs_test_2d_background_mcc_picmi.py} | 4 - .../laser_acceleration/CMakeLists.txt | 156 +++++++ .../laser_acceleration/{README.rst => README} | 0 ...ysis_1d_fluids.py => analysis_1d_fluid.py} | 0 ...oosted.py => analysis_1d_fluid_boosted.py} | 0 .../analysis_default_openpmd_regression.py | 1 + .../analysis_default_regression.py | 1 + .../analysis_openpmd_rz.py | 5 +- .../{inputs_2d => inputs_base_2d} | 0 .../{inputs_3d => inputs_base_3d} | 0 .../{inputs_rz => inputs_base_rz} | 0 ...s_1d => inputs_test_1d_laser_acceleration} | 0 ...> inputs_test_1d_laser_acceleration_fluid} | 0 ..._test_1d_laser_acceleration_fluid_boosted} | 0 ...nputs_test_1d_laser_acceleration_picmi.py} | 4 - ...inputs_test_2d_laser_acceleration_boosted} | 4 +- .../inputs_test_2d_laser_acceleration_mr | 2 + ...ts_test_2d_laser_acceleration_mr_picmi.py} | 4 - .../inputs_test_2d_refined_injection | 6 + .../inputs_test_3d_laser_acceleration | 2 + ...nputs_test_3d_laser_acceleration_picmi.py} | 4 - ..._laser_acceleration_single_precision_comms | 5 + .../inputs_test_rz_laser_acceleration | 6 + .../inputs_test_rz_laser_acceleration_opmd | 9 + ...nputs_test_rz_laser_acceleration_picmi.py} | 4 - .../laser_ion/CMakeLists.txt | 24 + .../laser_ion/{README.rst => README} | 0 .../analysis_default_openpmd_regression.py | 1 + ...inputs_2d => inputs_test_2d_laser_ion_acc} | 0 ... 
=> inputs_test_2d_laser_ion_acc_picmi.py} | 8 +- .../plasma_acceleration/CMakeLists.txt | 90 ++++ .../{README.rst => README} | 0 .../analysis_default_regression.py | 1 + .../{inputs_2d => inputs_base_2d} | 7 +- .../{inputs_3d_boost => inputs_base_3d} | 2 +- ...puts_test_1d_plasma_acceleration_picmi.py} | 2 - ...nputs_test_2d_plasma_acceleration_boosted} | 4 +- .../inputs_test_2d_plasma_acceleration_mr | 2 + ...plasma_acceleration_mr_momentum_conserving | 5 + ...inputs_test_3d_plasma_acceleration_boosted | 5 + ...test_3d_plasma_acceleration_boosted_hybrid | 7 + ...s_test_3d_plasma_acceleration_mr_picmi.py} | 2 - ...puts_test_3d_plasma_acceleration_picmi.py} | 2 - .../plasma_mirror/CMakeLists.txt | 13 + .../plasma_mirror/{README.rst => README} | 0 .../analysis_default_regression.py | 1 + ...inputs_2d => inputs_test_2d_plasma_mirror} | 4 +- .../spacecraft_charging/CMakeLists.txt | 15 + .../spacecraft_charging/analysis.py | 2 +- ...puts_test_rz_spacecraft_charging_picmi.py} | 4 - .../uniform_plasma/CMakeLists.txt | 35 ++ .../uniform_plasma/{README.rst => README} | 0 .../analysis_default_regression.py | 1 + .../analysis_default_restart.py | 1 + .../{inputs_3d => inputs_base_3d} | 0 ...nputs_2d => inputs_test_2d_uniform_plasma} | 0 .../inputs_test_3d_uniform_plasma | 2 + .../inputs_test_3d_uniform_plasma_restart | 5 + Examples/Tests/CMakeLists.txt | 78 ++++ .../Tests/accelerator_lattice/CMakeLists.txt | 35 ++ .../analysis.py | 0 .../inputs_test_3d_hard_edged_quadrupoles} | 0 ...ts_test_3d_hard_edged_quadrupoles_boosted} | 0 ...uts_test_3d_hard_edged_quadrupoles_moving} | 0 Examples/Tests/boosted_diags/CMakeLists.txt | 13 + ... => inputs_test_3d_laser_acceleration_btd} | 0 Examples/Tests/boundaries/CMakeLists.txt | 13 + ..._3d => inputs_test_3d_particle_boundaries} | 0 Examples/Tests/btd_rz/CMakeLists.txt | 13 + ...lysis_BTD_laser_antenna.py => analysis.py} | 0 ...ts_rz_z_boosted_BTD => inputs_test_rz_btd} | 0 .../collider_relevant_diags/CMakeLists.txt | 13 + ...ysis_multiple_particles.py => analysis.py} | 2 +- ...es => inputs_test_3d_collider_diagnostics} | 1 + Examples/Tests/collision/CMakeLists.txt | 68 +++ .../Tests/collision/analysis_collision_2d.py | 10 +- .../{inputs_1d => inputs_test_1d_collision_z} | 0 ...{inputs_2d => inputs_test_2d_collision_xz} | 0 ...y => inputs_test_2d_collision_xz_picmi.py} | 6 +- ...opization => inputs_test_3d_collision_iso} | 0 ...inputs_3d => inputs_test_3d_collision_xyz} | 0 .../{inputs_rz => inputs_test_rz_collision} | 0 Examples/Tests/diff_lumi_diag/CMakeLists.txt | 13 + .../{inputs => inputs_test_3d_diff_lumi_diag} | 0 Examples/Tests/divb_cleaning/CMakeLists.txt | 13 + Examples/Tests/divb_cleaning/analysis.py | 4 +- ...inputs_3d => inputs_test_3d_divb_cleaning} | 0 Examples/Tests/dive_cleaning/CMakeLists.txt | 24 + .../inputs_test_2d_dive_cleaning | 35 ++ ...inputs_3d => inputs_test_3d_dive_cleaning} | 2 +- .../electrostatic_dirichlet_bc/CMakeLists.txt | 24 + .../electrostatic_dirichlet_bc/analysis.py | 4 +- ...{inputs_2d => inputs_test_2d_dirichlet_bc} | 1 + ...y => inputs_test_2d_dirichlet_bc_picmi.py} | 6 +- .../Tests/electrostatic_sphere/CMakeLists.txt | 57 +++ .../analysis_electrostatic_sphere.py | 4 +- .../{inputs_3d => inputs_base_3d} | 0 .../inputs_test_3d_electrostatic_sphere | 5 + ...uts_test_3d_electrostatic_sphere_lab_frame | 6 + ...electrostatic_sphere_lab_frame_mr_emass_10 | 13 + ...uts_test_3d_electrostatic_sphere_rel_nodal | 6 + ...rz => inputs_test_rz_electrostatic_sphere} | 1 + .../electrostatic_sphere_eb/CMakeLists.txt | 67 +++ 
.../analysis_default_regression.py | 1 + ...=> inputs_test_3d_electrostatic_sphere_eb} | 1 + ..._test_3d_electrostatic_sphere_eb_mixed_bc} | 1 + ..._test_3d_electrostatic_sphere_eb_picmi.py} | 4 - ...=> inputs_test_rz_electrostatic_sphere_eb} | 1 + ...inputs_test_rz_electrostatic_sphere_eb_mr} | 2 + .../embedded_boundary_cube/CMakeLists.txt | 41 ++ .../embedded_boundary_cube/analysis_fields.py | 6 +- .../{inputs_3d => inputs_base_3d} | 1 + ... => inputs_test_2d_embedded_boundary_cube} | 1 + .../inputs_test_3d_embedded_boundary_cube | 2 + ...test_3d_embedded_boundary_cube_macroscopic | 8 + .../CMakeLists.txt | 15 + .../analysis_fields.py | 4 +- ...uts_test_rz_embedded_boundary_diffraction} | 0 .../CMakeLists.txt | 15 + ...inputs_test_3d_embedded_boundary_picmi.py} | 4 - .../CMakeLists.txt | 28 ++ ...alysis_fields.py => analysis_fields_3d.py} | 0 ...ts_test_2d_embedded_boundary_rotated_cube} | 1 + ...ts_test_3d_embedded_boundary_rotated_cube} | 1 + Examples/Tests/embedded_circle/CMakeLists.txt | 15 + ...puts_2d => inputs_test_2d_embedded_circle} | 0 .../CMakeLists.txt | 13 + ..._test_2d_energy_conserving_thermal_plasma} | 0 Examples/Tests/field_probe/CMakeLists.txt | 15 + .../{analysis_field_probe.py => analysis.py} | 0 .../{inputs_2d => inputs_test_2d_field_probe} | 0 Examples/Tests/flux_injection/CMakeLists.txt | 24 + ...nputs_3d => inputs_test_3d_flux_injection} | 0 ...nputs_rz => inputs_test_rz_flux_injection} | 0 Examples/Tests/gaussian_beam/CMakeLists.txt | 24 + .../gaussian_beam/{README.rst => README} | 0 ...{analysis_focusing_beam.py => analysis.py} | 0 .../analysis_default_regression.py | 1 + ... => inputs_test_3d_focusing_gaussian_beam} | 0 ... => inputs_test_3d_gaussian_beam_picmi.py} | 2 - Examples/Tests/implicit/CMakeLists.txt | 46 ++ .../{Implicit => implicit}/analysis_1d.py | 5 +- .../analysis_vandb_jfnk_2d.py | 0 .../inputs_test_1d_semi_implicit_picard} | 2 + .../inputs_test_1d_theta_implicit_picard} | 2 + .../inputs_test_2d_theta_implicit_jfnk_vandb} | 1 + ...est_2d_theta_implicit_jfnk_vandb_picmi.py} | 4 - .../Tests/initial_distribution/CMakeLists.txt | 13 + .../{analysis_distribution.py => analysis.py} | 0 ...ts => inputs_test_3d_initial_distribution} | 0 .../initial_plasma_profile/CMakeLists.txt | 13 + ..._test_2d_parabolic_channel_initialization} | 0 Examples/Tests/ion_stopping/CMakeLists.txt | 13 + .../{analysis_ion_stopping.py => analysis.py} | 15 +- ...{inputs_3d => inputs_test_3d_ion_stopping} | 1 + Examples/Tests/ionization/CMakeLists.txt | 35 ++ .../{analysis_ionization.py => analysis.py} | 0 ..._bf_rt => inputs_test_2d_ionization_boost} | 0 ...ts_2d_rt => inputs_test_2d_ionization_lab} | 0 ....py => inputs_test_2d_ionization_picmi.py} | 4 - Examples/Tests/langmuir/CMakeLists.txt | 435 ++++++++++++++++++ .../Tests/langmuir/{README.rst => README} | 0 Examples/Tests/langmuir/analysis_1d.py | 8 +- Examples/Tests/langmuir/analysis_2d.py | 13 +- Examples/Tests/langmuir/analysis_3d.py | 17 +- .../langmuir/analysis_default_regression.py | 1 + Examples/Tests/langmuir/analysis_rz.py | 3 +- .../langmuir/{inputs_2d => inputs_base_2d} | 0 .../langmuir/{inputs_3d => inputs_base_3d} | 0 .../langmuir/{inputs_rz => inputs_base_rz} | 0 ...nputs_1d => inputs_test_1d_langmuir_multi} | 3 + .../langmuir/inputs_test_2d_langmuir_multi_mr | 12 + ...puts_test_2d_langmuir_multi_mr_anisotropic | 12 + ...t_2d_langmuir_multi_mr_momentum_conserving | 13 + .../inputs_test_2d_langmuir_multi_mr_psatd | 14 + .../inputs_test_2d_langmuir_multi_nodal | 8 + ...=> inputs_test_2d_langmuir_multi_picmi.py} 
| 2 - .../inputs_test_2d_langmuir_multi_psatd | 11 + ...2d_langmuir_multi_psatd_current_correction | 13 + ...gmuir_multi_psatd_current_correction_nodal | 14 + ...d_langmuir_multi_psatd_momentum_conserving | 12 + ...inputs_test_2d_langmuir_multi_psatd_multiJ | 12 + ..._test_2d_langmuir_multi_psatd_multiJ_nodal | 13 + .../inputs_test_2d_langmuir_multi_psatd_nodal | 13 + ...est_2d_langmuir_multi_psatd_vay_deposition | 11 + ..._langmuir_multi_psatd_vay_deposition_nodal | 12 + ...ulti_psatd_vay_deposition_particle_shape_4 | 12 + .../langmuir/inputs_test_3d_langmuir_multi | 2 + .../inputs_test_3d_langmuir_multi_nodal | 6 + ...=> inputs_test_3d_langmuir_multi_picmi.py} | 2 - .../inputs_test_3d_langmuir_multi_psatd | 6 + ...3d_langmuir_multi_psatd_current_correction | 10 + ...gmuir_multi_psatd_current_correction_nodal | 11 + ..._test_3d_langmuir_multi_psatd_div_cleaning | 13 + ...d_langmuir_multi_psatd_momentum_conserving | 7 + ...inputs_test_3d_langmuir_multi_psatd_multiJ | 13 + ..._test_3d_langmuir_multi_psatd_multiJ_nodal | 14 + .../inputs_test_3d_langmuir_multi_psatd_nodal | 10 + ...est_3d_langmuir_multi_psatd_vay_deposition | 8 + ..._langmuir_multi_psatd_vay_deposition_nodal | 9 + .../langmuir/inputs_test_rz_langmuir_multi | 7 + ...=> inputs_test_rz_langmuir_multi_picmi.py} | 2 - .../inputs_test_rz_langmuir_multi_psatd | 15 + ...rz_langmuir_multi_psatd_current_correction | 16 + ...inputs_test_rz_langmuir_multi_psatd_multiJ | 22 + Examples/Tests/langmuir_fluids/CMakeLists.txt | 46 ++ ...nputs_1d => inputs_test_1d_langmuir_fluid} | 0 ...nputs_2d => inputs_test_2d_langmuir_fluid} | 0 ...nputs_3d => inputs_test_3d_langmuir_fluid} | 0 ...nputs_rz => inputs_test_rz_langmuir_fluid} | 0 Examples/Tests/larmor/CMakeLists.txt | 13 + .../larmor/analysis_default_regression.py | 1 + .../{inputs_2d_mr => inputs_test_2d_larmor} | 2 +- Examples/Tests/laser_injection/CMakeLists.txt | 35 ++ .../{analysis_laser.py => analysis_3d.py} | 0 ...s_1d_rt => inputs_test_1d_laser_injection} | 0 ...s_2d_rt => inputs_test_2d_laser_injection} | 0 ...s_3d_rt => inputs_test_3d_laser_injection} | 2 +- .../laser_injection_from_file/CMakeLists.txt | 156 +++++++ .../laser_injection_from_file/analysis_1d.py | 155 +++---- .../analysis_1d_boost.py | 161 +++---- .../laser_injection_from_file/analysis_2d.py | 195 ++++---- .../analysis_2d_binary.py | 238 ++++------ .../laser_injection_from_file/analysis_3d.py | 207 ++++----- .../laser_injection_from_file/analysis_RZ.py | 181 -------- .../analysis_from_RZ_file.py | 209 ++++----- .../laser_injection_from_file/analysis_rz.py | 144 ++++++ ...ts_test_1d_laser_injection_from_lasy_file} | 2 +- ...t_1d_laser_injection_from_lasy_file_boost} | 2 +- ..._injection_from_lasy_file_boost_prepare.py | 38 ++ ..._laser_injection_from_lasy_file_prepare.py | 37 ++ ..._test_2d_laser_injection_from_binary_file} | 2 +- ...aser_injection_from_binary_file_prepare.py | 107 +++++ ...ts_test_2d_laser_injection_from_lasy_file} | 2 +- ..._laser_injection_from_lasy_file_prepare.py | 38 ++ ...ts_test_3d_laser_injection_from_lasy_file} | 2 +- ..._laser_injection_from_lasy_file_prepare.py | 41 ++ ...test_rz_laser_injection_from_RZ_lasy_file} | 2 +- ...ser_injection_from_RZ_lasy_file_prepare.py | 49 ++ ...ts_test_rz_laser_injection_from_lasy_file} | 2 +- ..._laser_injection_from_lasy_file_prepare.py | 41 ++ Examples/Tests/laser_on_fine/CMakeLists.txt | 13 + .../analysis_default_regression.py | 1 + ...inputs_2d => inputs_test_2d_laser_on_fine} | 2 +- .../Tests/load_external_field/CMakeLists.txt | 68 +++ 
.../analysis_3d.py | 0 .../analysis_default_restart.py | 1 + .../analysis_rz.py | 0 ...test_3d_load_external_field_grid_picmi.py} | 4 - ..._3d_load_external_field_particle_picmi.py} | 4 - .../inputs_test_rz_load_external_field_grid} | 1 + ...s_test_rz_load_external_field_grid_restart | 5 + ...uts_test_rz_load_external_field_particles} | 1 + ...t_rz_load_external_field_particles_restart | 5 + .../Tests/magnetostatic_eb/CMakeLists.txt | 41 ++ .../analysis_default_regression.py | 1 + ...uts_3d => inputs_test_3d_magnetostatic_eb} | 1 - ... inputs_test_3d_magnetostatic_eb_picmi.py} | 4 - ... inputs_test_rz_magnetostatic_eb_picmi.py} | 4 - .../Tests/maxwell_hybrid_qed/CMakeLists.txt | 15 + ...ysis_Maxwell_QED_Hybrid.py => analysis.py} | 0 ... inputs_test_2d_maxwell_hybrid_qed_solver} | 2 +- .../Tests/nci_fdtd_stability/CMakeLists.txt | 24 + .../{inputs_2d => inputs_base_2d} | 0 .../inputs_test_2d_nci_corrector | 6 + .../inputs_test_2d_nci_corrector_mr | 9 + .../Tests/nci_psatd_stability/CMakeLists.txt | 223 +++++++++ .../analysis_default_regression.py | 1 + .../{inputs_2d => inputs_base_2d} | 0 ...{inputs_avg_2d => inputs_base_2d_averaged} | 0 .../{inputs_3d => inputs_base_3d} | 0 ...{inputs_avg_3d => inputs_base_3d_averaged} | 0 .../{inputs_rz => inputs_base_rz} | 0 .../inputs_test_2d_averaged_galilean_psatd | 6 + ...uts_test_2d_averaged_galilean_psatd_hybrid | 9 + .../inputs_test_2d_comoving_psatd_hybrid} | 2 + .../inputs_test_2d_galilean_psatd | 8 + ..._test_2d_galilean_psatd_current_correction | 10 + ...t_2d_galilean_psatd_current_correction_psb | 8 + ...d => inputs_test_2d_galilean_psatd_hybrid} | 2 + .../inputs_test_3d_averaged_galilean_psatd | 6 + ...uts_test_3d_averaged_galilean_psatd_hybrid | 7 + .../inputs_test_3d_galilean_psatd | 7 + ..._test_3d_galilean_psatd_current_correction | 10 + ...t_3d_galilean_psatd_current_correction_psb | 10 + .../inputs_test_3d_uniform_plasma_multiJ | 13 + .../inputs_test_rz_galilean_psatd | 8 + ..._test_rz_galilean_psatd_current_correction | 10 + ...t_rz_galilean_psatd_current_correction_psb | 8 + .../inputs_test_rz_multiJ_psatd} | 2 + .../Tests/nodal_electrostatic/CMakeLists.txt | 13 + .../{analysis_3d.py => analysis.py} | 0 ...inputs_test_3d_nodal_electrostatic_solver} | 1 + Examples/Tests/nuclear_fusion/CMakeLists.txt | 68 +++ .../analysis_proton_boron_fusion.py | 5 +- ..._2d => inputs_test_2d_proton_boron_fusion} | 0 ...inputs_test_3d_deuterium_deuterium_fusion} | 0 ...d_deuterium_deuterium_fusion_intraspecies} | 0 ...> inputs_test_3d_deuterium_tritium_fusion} | 0 ..._3d => inputs_test_3d_proton_boron_fusion} | 0 ...> inputs_test_rz_deuterium_tritium_fusion} | 0 .../Tests/ohm_solver_em_modes/CMakeLists.txt | 24 + .../README.rst => ohm_solver_em_modes/README} | 0 .../analysis.py | 0 .../analysis_rz.py | 0 ...puts_test_1d_ohm_solver_em_modes_picmi.py} | 4 - ...puts_test_rz_ohm_solver_em_modes_picmi.py} | 2 - .../CMakeLists.txt | 13 + .../{README.rst => README} | 0 ...est_2d_ohm_solver_landau_damping_picmi.py} | 4 - .../CMakeLists.txt | 13 + .../{README.rst => README} | 0 ...puts_test_1d_ohm_solver_ion_beam_picmi.py} | 4 - .../CMakeLists.txt | 13 + .../{README.rst => README} | 0 ...ohm_solver_magnetic_reconnection_picmi.py} | 4 - .../open_bc_poisson_solver/CMakeLists.txt | 15 + .../analysis.py | 0 .../inputs_test_3d_open_bc_poisson_solver} | 0 .../CMakeLists.txt | 15 + .../particle_boundary_interaction/analysis.py | 2 +- ...rz_particle_boundary_interaction_picmi.py} | 4 - .../particle_boundary_process/CMakeLists.txt | 26 ++ .../analysis_absorption.py | 7 
+- ...puts_test_2d_particle_reflection_picmi.py} | 0 ...ion => inputs_test_3d_particle_absorption} | 0 .../particle_boundary_scrape/CMakeLists.txt | 28 ++ .../analysis_scrape.py | 13 +- ..._scrape => inputs_test_3d_particle_scrape} | 0 ...> inputs_test_3d_particle_scrape_picmi.py} | 4 - .../Tests/particle_data_python/CMakeLists.txt | 35 ++ .../analysis_default_regression.py | 1 + ...uts_test_2d_particle_attr_access_picmi.py} | 4 - ...=> inputs_test_2d_prev_positions_picmi.py} | 4 - .../particle_fields_diags/CMakeLists.txt | 25 + ...s => inputs_test_3d_particle_fields_diags} | 0 Examples/Tests/particle_pusher/CMakeLists.txt | 13 + .../{analysis_pusher.py => analysis.py} | 0 ...puts_3d => inputs_test_3d_particle_pusher} | 0 .../particle_thermal_boundary/CMakeLists.txt | 13 + .../{analysis_2d.py => analysis.py} | 0 ... inputs_test_2d_particle_thermal_boundary} | 0 .../Tests/particles_in_pml/CMakeLists.txt | 46 ++ ...uts_2d => inputs_test_2d_particles_in_pml} | 0 ..._2d => inputs_test_2d_particles_in_pml_mr} | 0 ...uts_3d => inputs_test_3d_particles_in_pml} | 0 ..._3d => inputs_test_3d_particles_in_pml_mr} | 0 .../pass_mpi_communicator/CMakeLists.txt | 17 + ... => inputs_test_2d_pass_mpi_comm_picmi.py} | 1 - Examples/Tests/pec/CMakeLists.txt | 35 ++ .../Tests/pec/analysis_default_regression.py | 1 + ..._field_PEC_3d => inputs_test_3d_pec_field} | 0 ..._PEC_mr_3d => inputs_test_3d_pec_field_mr} | 0 ...cle_PEC_3d => inputs_test_3d_pec_particle} | 0 Examples/Tests/photon_pusher/CMakeLists.txt | 13 + ...{analysis_photon_pusher.py => analysis.py} | 0 ...inputs_3d => inputs_test_3d_photon_pusher} | 0 Examples/Tests/plasma_lens/CMakeLists.txt | 57 +++ Examples/Tests/plasma_lens/analysis.py | 9 +- .../{inputs_3d => inputs_test_3d_plasma_lens} | 0 ..._3d => inputs_test_3d_plasma_lens_boosted} | 0 ... 
=> inputs_test_3d_plasma_lens_hard_edged} | 0 ...py => inputs_test_3d_plasma_lens_picmi.py} | 4 - ...rt_3d => inputs_test_3d_plasma_lens_short} | 0 Examples/Tests/pml/CMakeLists.txt | 100 ++++ .../Tests/pml/analysis_default_regression.py | 1 + .../Tests/pml/analysis_default_restart.py | 1 + Examples/Tests/pml/analysis_pml_psatd.py | 13 +- Examples/Tests/pml/analysis_pml_yee.py | 6 - .../Tests/pml/{inputs_2d => inputs_base_2d} | 0 Examples/Tests/pml/inputs_test_2d_pml_x_ckc | 5 + .../Tests/pml/inputs_test_2d_pml_x_galilean | 14 + Examples/Tests/pml/inputs_test_2d_pml_x_psatd | 12 + .../pml/inputs_test_2d_pml_x_psatd_restart | 5 + Examples/Tests/pml/inputs_test_2d_pml_x_yee | 5 + .../pml/inputs_test_2d_pml_x_yee_restart | 5 + ...puts_test_3d_pml_psatd_dive_divb_cleaning} | 3 + .../{inputs_rz => inputs_test_rz_pml_psatd} | 3 + .../Tests/point_of_contact_eb/CMakeLists.txt | 28 ++ .../analysis.py | 0 .../inputs_test_3d_point_of_contact_eb} | 0 .../inputs_test_rz_point_of_contact_eb} | 0 .../projection_divb_cleaner/CMakeLists.txt | 35 ++ .../{analysis_rz.py => analysis.py} | 0 .../analysis_default_regression.py | 1 + ...projection_divb_cleaner_callback_picmi.py} | 4 +- ..._test_3d_projection_divb_cleaner_picmi.py} | 4 +- ...=> inputs_test_rz_projection_divb_cleaner} | 1 - Examples/Tests/python_wrappers/CMakeLists.txt | 15 + .../analysis_default_regression.py | 1 + ...> inputs_test_2d_python_wrappers_picmi.py} | 4 - Examples/Tests/qed/CMakeLists.txt | 112 +++++ ...core.py => analysis_breit_wheeler_core.py} | 0 ...opmd.py => analysis_breit_wheeler_opmd.py} | 4 +- ...sis_yt.py => analysis_breit_wheeler_yt.py} | 4 +- .../analysis.py => analysis_quantum_sync.py} | 0 .../qed/{schwinger => }/analysis_schwinger.py | 3 +- ...inputs_2d => inputs_base_2d_breit_wheeler} | 1 + ...inputs_3d => inputs_base_3d_breit_wheeler} | 0 ..._3d_schwinger => inputs_base_3d_schwinger} | 0 .../qed/inputs_test_2d_qed_breit_wheeler | 2 + .../qed/inputs_test_2d_qed_breit_wheeler_opmd | 6 + ...uts_2d => inputs_test_2d_qed_quantum_sync} | 1 + .../qed/inputs_test_3d_qed_breit_wheeler | 2 + .../qed/inputs_test_3d_qed_breit_wheeler_opmd | 6 + ...uts_3d => inputs_test_3d_qed_quantum_sync} | 1 + .../Tests/qed/inputs_test_3d_qed_schwinger_1 | 6 + .../Tests/qed/inputs_test_3d_qed_schwinger_2 | 8 + .../Tests/qed/inputs_test_3d_qed_schwinger_3 | 5 + .../Tests/qed/inputs_test_3d_qed_schwinger_4 | 8 + .../Tests/radiation_reaction/CMakeLists.txt | 13 + .../analysis_classicalRR.py => analysis.py} | 0 ...s_3d => inputs_test_3d_radiation_reaction} | 0 Examples/Tests/reduced_diags/CMakeLists.txt | 59 +++ ...lysis_reduced_diags_load_balance_costs.py} | 7 +- ...inputs_loadbalancecosts => inputs_base_3d} | 0 .../{inputs => inputs_test_3d_reduced_diags} | 0 ...reduced_diags_load_balance_costs_heuristic | 5 + ...3d_reduced_diags_load_balance_costs_timers | 5 + ..._diags_load_balance_costs_timers_picmi.py} | 5 +- ...uced_diags_load_balance_costs_timers_psatd | 5 + .../CMakeLists.txt | 13 + ..._relativistic_space_charge_initialization} | 0 .../Tests/repelling_particles/CMakeLists.txt | 13 + .../{analysis_repelling.py => analysis.py} | 0 ..._2d => inputs_test_2d_repelling_particles} | 0 Examples/Tests/resampling/CMakeLists.txt | 35 ++ ...lysis_leveling_thinning.py => analysis.py} | 0 .../resampling/analysis_default_regression.py | 1 + ...1d_resample_velocity_coincidence_thinning} | 0 ...e_velocity_coincidence_thinning_cartesian} | 0 ...nning => inputs_test_2d_leveling_thinning} | 0 Examples/Tests/restart/CMakeLists.txt | 115 +++++ 
.../restart/analysis_default_regression.py | 1 + .../Tests/restart/analysis_default_restart.py | 1 + .../Tests/restart/{inputs => inputs_base_3d} | 1 - ...py => inputs_test_2d_id_cpu_read_picmi.py} | 4 - ...nputs_test_2d_runtime_components_picmi.py} | 12 +- .../Tests/restart/inputs_test_3d_acceleration | 2 + .../restart/inputs_test_3d_acceleration_psatd | 11 + .../inputs_test_3d_acceleration_psatd_restart | 5 + ...inputs_test_3d_acceleration_psatd_time_avg | 12 + ...est_3d_acceleration_psatd_time_avg_restart | 5 + .../inputs_test_3d_acceleration_restart | 5 + Examples/Tests/restart_eb/CMakeLists.txt | 29 ++ .../restart_eb/analysis_default_regression.py | 1 + .../restart_eb/analysis_default_restart.py | 1 + ...start_eb.py => inputs_test_3d_eb_picmi.py} | 9 +- Examples/Tests/rigid_injection/CMakeLists.txt | 24 + ...ame.py => analysis_rigid_injection_btd.py} | 0 ...ame.py => analysis_rigid_injection_lab.py} | 0 ...ame => inputs_test_2d_rigid_injection_btd} | 0 ...ame => inputs_test_2d_rigid_injection_lab} | 0 Examples/Tests/scraping/CMakeLists.txt | 28 ++ .../{inputs_rz => inputs_test_rz_scraping} | 1 + ..._filter => inputs_test_rz_scraping_filter} | 1 + Examples/Tests/silver_mueller/CMakeLists.txt | 46 ++ ...analysis_silver_mueller.py => analysis.py} | 0 ...nputs_1d => inputs_test_1d_silver_mueller} | 0 ...s_2d_x => inputs_test_2d_silver_mueller_x} | 0 ...s_2d_z => inputs_test_2d_silver_mueller_z} | 0 ...s_rz_z => inputs_test_rz_silver_mueller_z} | 0 Examples/Tests/single_particle/CMakeLists.txt | 13 + ...nalysis_bilinear_filter.py => analysis.py} | 0 ...puts_2d => inputs_test_2d_bilinear_filter} | 2 + .../CMakeLists.txt | 24 + ...inputs_test_2d_space_charge_initialization | 37 ++ ...nputs_test_3d_space_charge_initialization} | 1 + Examples/Tests/subcycling/CMakeLists.txt | 13 + .../subcycling/analysis_default_regression.py | 1 + ...inputs_2d => inputs_test_2d_subcycling_mr} | 0 Examples/Tests/vay_deposition/CMakeLists.txt | 28 ++ ...nputs_2d => inputs_test_2d_vay_deposition} | 0 ...nputs_3d => inputs_test_3d_vay_deposition} | 0 Examples/analysis_default_regression.py | 1 + Examples/analysis_default_restart.py | 23 +- .../benchmarks_json/LaserIonAcc3d.json | 32 -- .../TwoParticle_electrostatic.json | 26 -- ...llisionZ.json => test_1d_collision_z.json} | 0 ...n_dsmc_1d.json => test_1d_dsmc_picmi.json} | 0 ...id_1D.json => test_1d_langmuir_fluid.json} | 0 ...ti_1d.json => test_1d_langmuir_multi.json} | 0 ...d.json => test_1d_laser_acceleration.json} | 0 ... => test_1d_laser_acceleration_fluid.json} | 0 ..._1d_laser_acceleration_fluid_boosted.json} | 0 ... 
=> test_1d_laser_acceleration_picmi.json} | 0 ...n_1d.json => test_1d_laser_injection.json} | 0 ...st_1d_laser_injection_from_lasy_file.json} | 0 ...laser_injection_from_lasy_file_boost.json} | 0 ...=> test_1d_ohm_solver_em_modes_picmi.json} | 0 ...=> test_1d_ohm_solver_ion_beam_picmi.json} | 0 ...=> test_1d_plasma_acceleration_picmi.json} | 0 ...sample_velocity_coincidence_thinning.json} | 0 ...ocity_coincidence_thinning_cartesian.json} | 0 ...json => test_1d_semi_implicit_picard.json} | 0 ...er_1d.json => test_1d_silver_mueller.json} | 0 ...son => test_1d_theta_implicit_picard.json} | 0 ...n => test_2d_averaged_galilean_psatd.json} | 0 ...st_2d_averaged_galilean_psatd_hybrid.json} | 0 ...d_mcc.json => test_2d_background_mcc.json} | 0 ...son => test_2d_background_mcc_dp_psp.json} | 0 ...lter.json => test_2d_bilinear_filter.json} | 0 ...isionXZ.json => test_2d_collision_xz.json} | 0 ...son => test_2d_comoving_psatd_hybrid.json} | 0 ...ing_2d.json => test_2d_dive_cleaning.json} | 0 ...on => test_2d_embedded_boundary_cube.json} | 0 ...st_2d_embedded_boundary_rotated_cube.json} | 0 ...rcle.json => test_2d_embedded_circle.json} | 0 ..._2d_energy_conserving_thermal_plasma.json} | 0 ...psatd.json => test_2d_galilean_psatd.json} | 0 ...2d_galilean_psatd_current_correction.json} | 0 ...alilean_psatd_current_correction_psb.json} | 0 ...son => test_2d_galilean_psatd_hybrid.json} | 0 .../test_2d_id_cpu_read_picmi.json | 14 + ...ost.json => test_2d_ionization_boost.json} | 0 ...n_lab.json => test_2d_ionization_lab.json} | 0 ...ion.json => test_2d_ionization_picmi.json} | 0 ...id_2D.json => test_2d_langmuir_fluid.json} | 0 ...MR.json => test_2d_langmuir_multi_mr.json} | 0 ...est_2d_langmuir_multi_mr_anisotropic.json} | 0 ...angmuir_multi_mr_momentum_conserving.json} | 0 ...n => test_2d_langmuir_multi_mr_psatd.json} | 0 ...json => test_2d_langmuir_multi_nodal.json} | 0 ...json => test_2d_langmuir_multi_picmi.json} | 0 ...json => test_2d_langmuir_multi_psatd.json} | 0 ...gmuir_multi_psatd_current_correction.json} | 0 ...multi_psatd_current_correction_nodal.json} | 0 ...muir_multi_psatd_momentum_conserving.json} | 0 ... test_2d_langmuir_multi_psatd_multiJ.json} | 0 ...2d_langmuir_multi_psatd_multiJ_nodal.json} | 0 ...> test_2d_langmuir_multi_psatd_nodal.json} | 0 ..._langmuir_multi_psatd_vay_deposition.json} | 0 ...uir_multi_psatd_vay_deposition_nodal.json} | 0 ...satd_vay_deposition_particle_shape_4.json} | 0 .../{Larmor.json => test_2d_larmor.json} | 0 ...> test_2d_laser_acceleration_boosted.json} | 0 ...son => test_2d_laser_acceleration_mr.json} | 0 ... test_2d_laser_acceleration_mr_picmi.json} | 0 ...n_2d.json => test_2d_laser_injection.json} | 0 ..._2d_laser_injection_from_binary_file.json} | 0 ...st_2d_laser_injection_from_lasy_file.json} | 0 ...nAcc2d.json => test_2d_laser_ion_acc.json} | 0 ... test_2d_laser_ion_acc_no_field_diag.json} | 0 ....json => test_2d_laser_ion_acc_picmi.json} | 0 ...OnFine.json => test_2d_laser_on_fine.json} | 0 ...ng.json => test_2d_leveling_thinning.json} | 0 ...=> test_2d_maxwell_hybrid_qed_solver.json} | 0 ...rector.json => test_2d_nci_corrector.json} | 0 ...rMR.json => test_2d_nci_corrector_mr.json} | 0 ...t_2d_ohm_solver_landau_damping_picmi.json} | 0 ...m_solver_magnetic_reconnection_picmi.json} | 0 ..._2d_parabolic_channel_initialization.json} | 0 ...=> test_2d_particle_thermal_boundary.json} | 0 ..._2d.json => test_2d_particles_in_pml.json} | 0 ....json => test_2d_particles_in_pml_mr.json} | 0 ... 
test_2d_plasma_acceleration_boosted.json} | 0 ...on => test_2d_plasma_acceleration_mr.json} | 0 ..._acceleration_mr_momentum_conserving.json} | 0 ...Mirror.json => test_2d_plasma_mirror.json} | 0 ...{pml_x_ckc.json => test_2d_pml_x_ckc.json} | 0 ...ilean.json => test_2d_pml_x_galilean.json} | 0 ..._x_psatd.json => test_2d_pml_x_psatd.json} | 0 ...{pml_x_yee.json => test_2d_pml_x_yee.json} | 0 ..._yee_eb.json => test_2d_pml_x_yee_eb.json} | 0 ...json => test_2d_prev_positions_picmi.json} | 0 ....json => test_2d_proton_boron_fusion.json} | 0 ...son => test_2d_python_wrappers_picmi.json} | 0 ...2d.json => test_2d_qed_breit_wheeler.json} | 0 ..._2d.json => test_2d_qed_quantum_sync.json} | 0 ...on.json => test_2d_refined_injection.json} | 0 ....json => test_2d_repelling_particles.json} | 0 ....json => test_2d_rigid_injection_btd.json} | 0 ....json => test_2d_rigid_injection_lab.json} | 0 ...d_x.json => test_2d_silver_mueller_x.json} | 0 ...d_z.json => test_2d_silver_mueller_z.json} | 0 ... test_2d_space_charge_initialization.json} | 0 ...lingMR.json => test_2d_subcycling_mr.json} | 0 ...=> test_2d_theta_implicit_jfnk_vandb.json} | 0 ...t_2d_theta_implicit_jfnk_vandb_picmi.json} | 0 ...rm_2d.json => test_2d_uniform_plasma.json} | 0 ...ion2D.json => test_2d_vay_deposition.json} | 0 ...restart.json => test_3d_acceleration.json} | 0 ...d.json => test_3d_acceleration_psatd.json} | 0 ... test_3d_acceleration_psatd_time_avg.json} | 0 ...n => test_3d_averaged_galilean_psatd.json} | 0 ...st_3d_averaged_galilean_psatd_hybrid.json} | 0 ....json => test_3d_beam_beam_collision.json} | 0 ...json => test_3d_collider_diagnostics.json} | 0 ...ionISO.json => test_3d_collision_iso.json} | 0 ...ionXYZ.json => test_3d_collision_xyz.json} | 0 ...> test_3d_deuterium_deuterium_fusion.json} | 0 ...terium_deuterium_fusion_intraspecies.json} | 0 ... 
=> test_3d_deuterium_tritium_fusion.json} | 0 ..._diag.json => test_3d_diff_lumi_diag.json} | 0 ...ing_3d.json => test_3d_divb_cleaning.json} | 0 ...ing_3d.json => test_3d_dive_cleaning.json} | 0 ..._restart_eb.json => test_3d_eb_picmi.json} | 0 ...json => test_3d_electrostatic_sphere.json} | 0 ...n => test_3d_electrostatic_sphere_eb.json} | 0 ..._3d_electrostatic_sphere_eb_mixed_bc.json} | 0 ...est_3d_electrostatic_sphere_eb_picmi.json} | 0 ...st_3d_electrostatic_sphere_lab_frame.json} | 0 ...ostatic_sphere_lab_frame_mr_emass_10.json} | 0 ...st_3d_electrostatic_sphere_rel_nodal.json} | 0 ...on => test_3d_embedded_boundary_cube.json} | 0 ...d_embedded_boundary_cube_macroscopic.json} | 0 ...st_3d_embedded_boundary_rotated_cube.json} | 0 ...ion3D.json => test_3d_flux_injection.json} | 0 ...on => test_3d_focusing_gaussian_beam.json} | 0 ...psatd.json => test_3d_galilean_psatd.json} | 0 ...3d_galilean_psatd_current_correction.json} | 0 ...alilean_psatd_current_correction_psb.json} | 0 ....json => test_3d_gaussian_beam_picmi.json} | 0 ...on => test_3d_hard_edged_quadrupoles.json} | 0 ...st_3d_hard_edged_quadrupoles_boosted.json} | 0 ...est_3d_hard_edged_quadrupoles_moving.json} | 0 ...json => test_3d_initial_distribution.json} | 0 ...topping.json => test_3d_ion_stopping.json} | 0 ...multi.json => test_3d_langmuir_fluid.json} | 0 ...multi.json => test_3d_langmuir_multi.json} | 0 ...json => test_3d_langmuir_multi_nodal.json} | 0 ...json => test_3d_langmuir_multi_picmi.json} | 0 ...json => test_3d_langmuir_multi_psatd.json} | 0 ...gmuir_multi_psatd_current_correction.json} | 0 ...multi_psatd_current_correction_nodal.json} | 0 ...3d_langmuir_multi_psatd_div_cleaning.json} | 0 ...muir_multi_psatd_momentum_conserving.json} | 0 ... test_3d_langmuir_multi_psatd_multiJ.json} | 0 ...3d_langmuir_multi_psatd_multiJ_nodal.json} | 0 ...> test_3d_langmuir_multi_psatd_nodal.json} | 0 ...angmuir_multi_psatd_single_precision.json} | 0 ..._langmuir_multi_psatd_vay_deposition.json} | 0 ...uir_multi_psatd_vay_deposition_nodal.json} | 0 ...t_3d_langmuir_multi_single_precision.json} | 0 ...n.json => test_3d_laser_acceleration.json} | 0 ...on => test_3d_laser_acceleration_btd.json} | 0 ... => test_3d_laser_acceleration_picmi.json} | 0 ..._acceleration_single_precision_comms.json} | 0 ...tion.json => test_3d_laser_injection.json} | 0 ...st_3d_laser_injection_from_lasy_file.json} | 0 ...st_3d_load_external_field_grid_picmi.json} | 0 ...d_load_external_field_particle_picmi.json} | 0 ..._3d.json => test_3d_magnetostatic_eb.json} | 0 ...on => test_3d_magnetostatic_eb_picmi.json} | 0 ...> test_3d_nodal_electrostatic_solver.json} | 0 ...on => test_3d_open_bc_poisson_solver.json} | 0 ....json => test_3d_particle_boundaries.json} | 0 ...son => test_3d_particle_fields_diags.json} | 0 ...rticle_fields_diags_single_precision.json} | 0 ...sher.json => test_3d_particle_pusher.json} | 0 ...pml.json => test_3d_particles_in_pml.json} | 0 ....json => test_3d_particles_in_pml_mr.json} | 0 ...{PEC_field.json => test_3d_pec_field.json} | 0 ...ield_mr.json => test_3d_pec_field_mr.json} | 0 ...article.json => test_3d_pec_particle.json} | 0 ...pusher.json => test_3d_photon_pusher.json} | 0 ... 
test_3d_plasma_acceleration_boosted.json} | 0 ...d_plasma_acceleration_boosted_hybrid.json} | 0 ...test_3d_plasma_acceleration_mr_picmi.json} | 0 ...=> test_3d_plasma_acceleration_picmi.json} | 0 ...sma_lens.json => test_3d_plasma_lens.json} | 0 ....json => test_3d_plasma_lens_boosted.json} | 0 ...on => test_3d_plasma_lens_hard_edged.json} | 0 ...rt.json => test_3d_plasma_lens_short.json} | 0 ...test_3d_pml_psatd_dive_divb_cleaning.json} | 0 ....json => test_3d_point_of_contact_eb.json} | 0 ...ojection_divb_cleaner_callback_picmi.json} | 0 ...est_3d_projection_divb_cleaner_picmi.json} | 0 ....json => test_3d_proton_boron_fusion.json} | 0 ...3d.json => test_3d_qed_breit_wheeler.json} | 0 ..._3d.json => test_3d_qed_quantum_sync.json} | 0 ...ger1.json => test_3d_qed_schwinger_1.json} | 0 ...ger2.json => test_3d_qed_schwinger_2.json} | 0 ...ger3.json => test_3d_qed_schwinger_3.json} | 0 ...ger4.json => test_3d_qed_schwinger_4.json} | 0 ...n.json => test_3d_radiation_reaction.json} | 0 ..._diags.json => test_3d_reduced_diags.json} | 0 ...d_diags_load_balance_costs_heuristic.json} | 0 ...uced_diags_load_balance_costs_timers.json} | 0 ...iags_load_balance_costs_timers_psatd.json} | 0 ...st_3d_reduced_diags_single_precision.json} | 0 ...tivistic_space_charge_initialization.json} | 0 ... test_3d_space_charge_initialization.json} | 0 ...start.json => test_3d_uniform_plasma.json} | 0 ...son => test_3d_uniform_plasma_multiJ.json} | 0 ...ion3D.json => test_3d_vay_deposition.json} | 0 .../{BTD_rz.json => test_rz_btd.json} | 0 ...ollisionRZ.json => test_rz_collision.json} | 0 ... => test_rz_deuterium_tritium_fusion.json} | 0 ...json => test_rz_electrostatic_sphere.json} | 0 ...n => test_rz_electrostatic_sphere_eb.json} | 0 ...> test_rz_electrostatic_sphere_eb_mr.json} | 0 ...est_rz_embedded_boundary_diffraction.json} | 0 ...ction.json => test_rz_flux_injection.json} | 0 ...psatd.json => test_rz_galilean_psatd.json} | 0 ...rz_galilean_psatd_current_correction.json} | 0 ...alilean_psatd_current_correction_psb.json} | 0 ...id_RZ.json => test_rz_langmuir_fluid.json} | 0 ...ti_rz.json => test_rz_langmuir_multi.json} | 0 ...json => test_rz_langmuir_multi_picmi.json} | 0 ...json => test_rz_langmuir_multi_psatd.json} | 0 ...gmuir_multi_psatd_current_correction.json} | 0 ... test_rz_langmuir_multi_psatd_multiJ.json} | 0 ...Z.json => test_rz_laser_acceleration.json} | 0 ... => test_rz_laser_acceleration_picmi.json} | 0 ...rz_laser_injection_from_RZ_lasy_file.json} | 0 ...st_rz_laser_injection_from_lasy_file.json} | 0 ... 
=> test_rz_load_external_field_grid.json} | 0 ...est_rz_load_external_field_particles.json} | 0 ...on => test_rz_magnetostatic_eb_picmi.json} | 0 ...z_psatd.json => test_rz_multiJ_psatd.json} | 0 ...=> test_rz_ohm_solver_em_modes_picmi.json} | 0 ..._particle_boundary_interaction_picmi.json} | 0 ...l_psatd_rz.json => test_rz_pml_psatd.json} | 0 ....json => test_rz_point_of_contact_eb.json} | 0 ...n => test_rz_projection_divb_cleaner.json} | 0 .../{scraping.json => test_rz_scraping.json} | 0 ...z_z.json => test_rz_silver_mueller_z.json} | 0 ...=> test_rz_spacecraft_charging_picmi.json} | 0 Regression/PostProcessingUtils/__init__.py | 0 Regression/requirements.txt | 4 +- 747 files changed, 6023 insertions(+), 1514 deletions(-) create mode 100755 .github/workflows/source/check_inputs.py delete mode 100755 .github/workflows/source/inputsNotTested delete mode 100755 .github/workflows/source/wrongFileNameInExamples create mode 100644 Examples/CMakeLists.txt create mode 100644 Examples/Physics_applications/CMakeLists.txt create mode 100644 Examples/Physics_applications/beam_beam_collision/CMakeLists.txt rename Examples/Physics_applications/{beam-beam_collision/README.rst => beam_beam_collision/README} (100%) create mode 120000 Examples/Physics_applications/beam_beam_collision/analysis_default_openpmd_regression.py rename Examples/Physics_applications/{beam-beam_collision/inputs => beam_beam_collision/inputs_test_3d_beam_beam_collision} (100%) create mode 100644 Examples/Physics_applications/capacitive_discharge/CMakeLists.txt rename Examples/Physics_applications/capacitive_discharge/{README.rst => README} (100%) create mode 120000 Examples/Physics_applications/capacitive_discharge/analysis_default_regression.py rename Examples/Physics_applications/capacitive_discharge/{PICMI_inputs_1d.py => inputs_base_1d_picmi.py} (97%) rename Examples/Physics_applications/capacitive_discharge/{inputs_2d => inputs_test_2d_background_mcc} (98%) rename Examples/Physics_applications/capacitive_discharge/{PICMI_inputs_2d.py => inputs_test_2d_background_mcc_picmi.py} (98%) create mode 100644 Examples/Physics_applications/laser_acceleration/CMakeLists.txt rename Examples/Physics_applications/laser_acceleration/{README.rst => README} (100%) rename Examples/Physics_applications/laser_acceleration/{analysis_1d_fluids.py => analysis_1d_fluid.py} (100%) rename Examples/Physics_applications/laser_acceleration/{analysis_1d_fluids_boosted.py => analysis_1d_fluid_boosted.py} (100%) create mode 120000 Examples/Physics_applications/laser_acceleration/analysis_default_openpmd_regression.py create mode 120000 Examples/Physics_applications/laser_acceleration/analysis_default_regression.py rename Examples/{Tests/openpmd_rz => Physics_applications/laser_acceleration}/analysis_openpmd_rz.py (94%) rename Examples/Physics_applications/laser_acceleration/{inputs_2d => inputs_base_2d} (100%) rename Examples/Physics_applications/laser_acceleration/{inputs_3d => inputs_base_3d} (100%) rename Examples/Physics_applications/laser_acceleration/{inputs_rz => inputs_base_rz} (100%) rename Examples/Physics_applications/laser_acceleration/{inputs_1d => inputs_test_1d_laser_acceleration} (100%) rename Examples/Physics_applications/laser_acceleration/{inputs_1d_fluids => inputs_test_1d_laser_acceleration_fluid} (100%) rename Examples/Physics_applications/laser_acceleration/{inputs_1d_fluids_boosted => inputs_test_1d_laser_acceleration_fluid_boosted} (100%) rename Examples/Physics_applications/laser_acceleration/{PICMI_inputs_1d.py => 
inputs_test_1d_laser_acceleration_picmi.py} (95%) rename Examples/Physics_applications/laser_acceleration/{inputs_2d_boost => inputs_test_2d_laser_acceleration_boosted} (98%) create mode 100644 Examples/Physics_applications/laser_acceleration/inputs_test_2d_laser_acceleration_mr rename Examples/Physics_applications/laser_acceleration/{PICMI_inputs_2d.py => inputs_test_2d_laser_acceleration_mr_picmi.py} (96%) create mode 100644 Examples/Physics_applications/laser_acceleration/inputs_test_2d_refined_injection create mode 100644 Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration rename Examples/Physics_applications/laser_acceleration/{PICMI_inputs_3d.py => inputs_test_3d_laser_acceleration_picmi.py} (96%) create mode 100644 Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_single_precision_comms create mode 100644 Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration create mode 100644 Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration_opmd rename Examples/Physics_applications/laser_acceleration/{PICMI_inputs_rz.py => inputs_test_rz_laser_acceleration_picmi.py} (96%) create mode 100644 Examples/Physics_applications/laser_ion/CMakeLists.txt rename Examples/Physics_applications/laser_ion/{README.rst => README} (100%) create mode 120000 Examples/Physics_applications/laser_ion/analysis_default_openpmd_regression.py rename Examples/Physics_applications/laser_ion/{inputs_2d => inputs_test_2d_laser_ion_acc} (100%) rename Examples/Physics_applications/laser_ion/{PICMI_inputs_2d.py => inputs_test_2d_laser_ion_acc_picmi.py} (98%) create mode 100644 Examples/Physics_applications/plasma_acceleration/CMakeLists.txt rename Examples/Physics_applications/plasma_acceleration/{README.rst => README} (100%) create mode 120000 Examples/Physics_applications/plasma_acceleration/analysis_default_regression.py rename Examples/Physics_applications/plasma_acceleration/{inputs_2d => inputs_base_2d} (96%) rename Examples/Physics_applications/plasma_acceleration/{inputs_3d_boost => inputs_base_3d} (99%) rename Examples/Physics_applications/plasma_acceleration/{PICMI_inputs_plasma_acceleration_1d.py => inputs_test_1d_plasma_acceleration_picmi.py} (96%) rename Examples/Physics_applications/plasma_acceleration/{inputs_2d_boost => inputs_test_2d_plasma_acceleration_boosted} (98%) create mode 100644 Examples/Physics_applications/plasma_acceleration/inputs_test_2d_plasma_acceleration_mr create mode 100644 Examples/Physics_applications/plasma_acceleration/inputs_test_2d_plasma_acceleration_mr_momentum_conserving create mode 100644 Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_boosted create mode 100644 Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_boosted_hybrid rename Examples/Physics_applications/plasma_acceleration/{PICMI_inputs_plasma_acceleration_mr.py => inputs_test_3d_plasma_acceleration_mr_picmi.py} (97%) rename Examples/Physics_applications/plasma_acceleration/{PICMI_inputs_plasma_acceleration.py => inputs_test_3d_plasma_acceleration_picmi.py} (97%) create mode 100644 Examples/Physics_applications/plasma_mirror/CMakeLists.txt rename Examples/Physics_applications/plasma_mirror/{README.rst => README} (100%) create mode 120000 Examples/Physics_applications/plasma_mirror/analysis_default_regression.py rename Examples/Physics_applications/plasma_mirror/{inputs_2d => inputs_test_2d_plasma_mirror} (98%) create mode 100644 
Examples/Physics_applications/spacecraft_charging/CMakeLists.txt rename Examples/Physics_applications/spacecraft_charging/{PICMI_inputs_rz.py => inputs_test_rz_spacecraft_charging_picmi.py} (98%) create mode 100644 Examples/Physics_applications/uniform_plasma/CMakeLists.txt rename Examples/Physics_applications/uniform_plasma/{README.rst => README} (100%) create mode 120000 Examples/Physics_applications/uniform_plasma/analysis_default_regression.py create mode 120000 Examples/Physics_applications/uniform_plasma/analysis_default_restart.py rename Examples/Physics_applications/uniform_plasma/{inputs_3d => inputs_base_3d} (100%) rename Examples/Physics_applications/uniform_plasma/{inputs_2d => inputs_test_2d_uniform_plasma} (100%) create mode 100644 Examples/Physics_applications/uniform_plasma/inputs_test_3d_uniform_plasma create mode 100644 Examples/Physics_applications/uniform_plasma/inputs_test_3d_uniform_plasma_restart create mode 100644 Examples/Tests/CMakeLists.txt create mode 100644 Examples/Tests/accelerator_lattice/CMakeLists.txt rename Examples/Tests/{AcceleratorLattice => accelerator_lattice}/analysis.py (100%) rename Examples/Tests/{AcceleratorLattice/inputs_quad_3d => accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles} (100%) rename Examples/Tests/{AcceleratorLattice/inputs_quad_boosted_3d => accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted} (100%) rename Examples/Tests/{AcceleratorLattice/inputs_quad_moving_3d => accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_moving} (100%) create mode 100644 Examples/Tests/boosted_diags/CMakeLists.txt rename Examples/Tests/boosted_diags/{inputs_3d => inputs_test_3d_laser_acceleration_btd} (100%) create mode 100644 Examples/Tests/boundaries/CMakeLists.txt rename Examples/Tests/boundaries/{inputs_3d => inputs_test_3d_particle_boundaries} (100%) create mode 100644 Examples/Tests/btd_rz/CMakeLists.txt rename Examples/Tests/btd_rz/{analysis_BTD_laser_antenna.py => analysis.py} (100%) rename Examples/Tests/btd_rz/{inputs_rz_z_boosted_BTD => inputs_test_rz_btd} (100%) create mode 100644 Examples/Tests/collider_relevant_diags/CMakeLists.txt rename Examples/Tests/collider_relevant_diags/{analysis_multiple_particles.py => analysis.py} (99%) rename Examples/Tests/collider_relevant_diags/{inputs_3d_multiple_particles => inputs_test_3d_collider_diagnostics} (99%) create mode 100644 Examples/Tests/collision/CMakeLists.txt rename Examples/Tests/collision/{inputs_1d => inputs_test_1d_collision_z} (100%) rename Examples/Tests/collision/{inputs_2d => inputs_test_2d_collision_xz} (100%) rename Examples/Tests/collision/{PICMI_inputs_2d.py => inputs_test_2d_collision_xz_picmi.py} (94%) rename Examples/Tests/collision/{inputs_3d_isotropization => inputs_test_3d_collision_iso} (100%) rename Examples/Tests/collision/{inputs_3d => inputs_test_3d_collision_xyz} (100%) rename Examples/Tests/collision/{inputs_rz => inputs_test_rz_collision} (100%) create mode 100644 Examples/Tests/diff_lumi_diag/CMakeLists.txt rename Examples/Tests/diff_lumi_diag/{inputs => inputs_test_3d_diff_lumi_diag} (100%) create mode 100644 Examples/Tests/divb_cleaning/CMakeLists.txt rename Examples/Tests/divb_cleaning/{inputs_3d => inputs_test_3d_divb_cleaning} (100%) create mode 100644 Examples/Tests/dive_cleaning/CMakeLists.txt create mode 100644 Examples/Tests/dive_cleaning/inputs_test_2d_dive_cleaning rename Examples/Tests/dive_cleaning/{inputs_3d => inputs_test_3d_dive_cleaning} (100%) create mode 100644 Examples/Tests/electrostatic_dirichlet_bc/CMakeLists.txt 
rename Examples/Tests/electrostatic_dirichlet_bc/{inputs_2d => inputs_test_2d_dirichlet_bc} (93%) rename Examples/Tests/electrostatic_dirichlet_bc/{PICMI_inputs_2d.py => inputs_test_2d_dirichlet_bc_picmi.py} (90%) create mode 100644 Examples/Tests/electrostatic_sphere/CMakeLists.txt rename Examples/Tests/electrostatic_sphere/{inputs_3d => inputs_base_3d} (100%) create mode 100644 Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere create mode 100644 Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_lab_frame create mode 100644 Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_lab_frame_mr_emass_10 create mode 100644 Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_rel_nodal rename Examples/Tests/electrostatic_sphere/{inputs_rz => inputs_test_rz_electrostatic_sphere} (96%) create mode 100644 Examples/Tests/electrostatic_sphere_eb/CMakeLists.txt create mode 120000 Examples/Tests/electrostatic_sphere_eb/analysis_default_regression.py rename Examples/Tests/electrostatic_sphere_eb/{inputs_3d => inputs_test_3d_electrostatic_sphere_eb} (96%) rename Examples/Tests/electrostatic_sphere_eb/{inputs_3d_mixed_BCs => inputs_test_3d_electrostatic_sphere_eb_mixed_bc} (94%) rename Examples/Tests/electrostatic_sphere_eb/{PICMI_inputs_3d.py => inputs_test_3d_electrostatic_sphere_eb_picmi.py} (94%) rename Examples/Tests/electrostatic_sphere_eb/{inputs_rz => inputs_test_rz_electrostatic_sphere_eb} (94%) rename Examples/Tests/electrostatic_sphere_eb/{inputs_rz_mr => inputs_test_rz_electrostatic_sphere_eb_mr} (92%) create mode 100644 Examples/Tests/embedded_boundary_cube/CMakeLists.txt rename Examples/Tests/embedded_boundary_cube/{inputs_3d => inputs_base_3d} (97%) rename Examples/Tests/embedded_boundary_cube/{inputs_2d => inputs_test_2d_embedded_boundary_cube} (97%) create mode 100644 Examples/Tests/embedded_boundary_cube/inputs_test_3d_embedded_boundary_cube create mode 100644 Examples/Tests/embedded_boundary_cube/inputs_test_3d_embedded_boundary_cube_macroscopic create mode 100644 Examples/Tests/embedded_boundary_diffraction/CMakeLists.txt rename Examples/Tests/embedded_boundary_diffraction/{inputs_rz => inputs_test_rz_embedded_boundary_diffraction} (100%) create mode 100644 Examples/Tests/embedded_boundary_python_api/CMakeLists.txt rename Examples/Tests/embedded_boundary_python_api/{PICMI_inputs_EB_API.py => inputs_test_3d_embedded_boundary_picmi.py} (97%) create mode 100644 Examples/Tests/embedded_boundary_rotated_cube/CMakeLists.txt rename Examples/Tests/embedded_boundary_rotated_cube/{analysis_fields.py => analysis_fields_3d.py} (100%) rename Examples/Tests/embedded_boundary_rotated_cube/{inputs_2d => inputs_test_2d_embedded_boundary_rotated_cube} (96%) rename Examples/Tests/embedded_boundary_rotated_cube/{inputs_3d => inputs_test_3d_embedded_boundary_rotated_cube} (98%) create mode 100644 Examples/Tests/embedded_circle/CMakeLists.txt rename Examples/Tests/embedded_circle/{inputs_2d => inputs_test_2d_embedded_circle} (100%) create mode 100644 Examples/Tests/energy_conserving_thermal_plasma/CMakeLists.txt rename Examples/Tests/energy_conserving_thermal_plasma/{inputs_2d_electrostatic => inputs_test_2d_energy_conserving_thermal_plasma} (100%) create mode 100644 Examples/Tests/field_probe/CMakeLists.txt rename Examples/Tests/field_probe/{analysis_field_probe.py => analysis.py} (100%) rename Examples/Tests/field_probe/{inputs_2d => inputs_test_2d_field_probe} (100%) create mode 100644 
 Examples/Tests/flux_injection/CMakeLists.txt
 rename Examples/Tests/flux_injection/{inputs_3d => inputs_test_3d_flux_injection} (100%)
 rename Examples/Tests/flux_injection/{inputs_rz => inputs_test_rz_flux_injection} (100%)
 create mode 100644 Examples/Tests/gaussian_beam/CMakeLists.txt
 rename Examples/Tests/gaussian_beam/{README.rst => README} (100%)
 rename Examples/Tests/gaussian_beam/{analysis_focusing_beam.py => analysis.py} (100%)
 create mode 120000 Examples/Tests/gaussian_beam/analysis_default_regression.py
 rename Examples/Tests/gaussian_beam/{inputs_focusing_beam => inputs_test_3d_focusing_gaussian_beam} (100%)
 rename Examples/Tests/gaussian_beam/{PICMI_inputs_gaussian_beam.py => inputs_test_3d_gaussian_beam_picmi.py} (97%)
 create mode 100644 Examples/Tests/implicit/CMakeLists.txt
 rename Examples/Tests/{Implicit => implicit}/analysis_1d.py (87%)
 rename Examples/Tests/{Implicit => implicit}/analysis_vandb_jfnk_2d.py (100%)
 rename Examples/Tests/{Implicit/inputs_1d_semiimplicit => implicit/inputs_test_1d_semi_implicit_picard} (97%)
 rename Examples/Tests/{Implicit/inputs_1d => implicit/inputs_test_1d_theta_implicit_picard} (97%)
 rename Examples/Tests/{Implicit/inputs_vandb_jfnk_2d => implicit/inputs_test_2d_theta_implicit_jfnk_vandb} (98%)
 rename Examples/Tests/{Implicit/PICMI_inputs_vandb_jfnk_2d.py => implicit/inputs_test_2d_theta_implicit_jfnk_vandb_picmi.py} (96%)
 create mode 100644 Examples/Tests/initial_distribution/CMakeLists.txt
 rename Examples/Tests/initial_distribution/{analysis_distribution.py => analysis.py} (100%)
 rename Examples/Tests/initial_distribution/{inputs => inputs_test_3d_initial_distribution} (100%)
 create mode 100644 Examples/Tests/initial_plasma_profile/CMakeLists.txt
 rename Examples/Tests/initial_plasma_profile/{inputs => inputs_test_2d_parabolic_channel_initialization} (100%)
 create mode 100644 Examples/Tests/ion_stopping/CMakeLists.txt
 rename Examples/Tests/ion_stopping/{analysis_ion_stopping.py => analysis.py} (93%)
 rename Examples/Tests/ion_stopping/{inputs_3d => inputs_test_3d_ion_stopping} (99%)
 create mode 100644 Examples/Tests/ionization/CMakeLists.txt
 rename Examples/Tests/ionization/{analysis_ionization.py => analysis.py} (100%)
 rename Examples/Tests/ionization/{inputs_2d_bf_rt => inputs_test_2d_ionization_boost} (100%)
 rename Examples/Tests/ionization/{inputs_2d_rt => inputs_test_2d_ionization_lab} (100%)
 rename Examples/Tests/ionization/{PICMI_inputs_2d.py => inputs_test_2d_ionization_picmi.py} (96%)
 create mode 100644 Examples/Tests/langmuir/CMakeLists.txt
 rename Examples/Tests/langmuir/{README.rst => README} (100%)
 create mode 120000 Examples/Tests/langmuir/analysis_default_regression.py
 rename Examples/Tests/langmuir/{inputs_2d => inputs_base_2d} (100%)
 rename Examples/Tests/langmuir/{inputs_3d => inputs_base_3d} (100%)
 rename Examples/Tests/langmuir/{inputs_rz => inputs_base_rz} (100%)
 rename Examples/Tests/langmuir/{inputs_1d => inputs_test_1d_langmuir_multi} (95%)
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr_anisotropic
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr_momentum_conserving
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr_psatd
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_nodal
 rename Examples/Tests/langmuir/{PICMI_inputs_2d.py => inputs_test_2d_langmuir_multi_picmi.py} (97%)
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_current_correction
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_current_correction_nodal
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_momentum_conserving
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_multiJ
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_multiJ_nodal
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_nodal
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_vay_deposition
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_vay_deposition_nodal
 create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_vay_deposition_particle_shape_4
 create mode 100644 Examples/Tests/langmuir/inputs_test_3d_langmuir_multi
 create mode 100644 Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_nodal
 rename Examples/Tests/langmuir/{PICMI_inputs_3d.py => inputs_test_3d_langmuir_multi_picmi.py} (97%)
 create mode 100644 Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd
 create mode 100644 Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction
 create mode 100644 Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction_nodal
 create mode 100644 Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_div_cleaning
 create mode 100644 Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_momentum_conserving
 create mode 100644 Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_multiJ
 create mode 100644 Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_multiJ_nodal
 create mode 100644 Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_nodal
 create mode 100644 Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_vay_deposition
 create mode 100644 Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_vay_deposition_nodal
 create mode 100644 Examples/Tests/langmuir/inputs_test_rz_langmuir_multi
 rename Examples/Tests/langmuir/{PICMI_inputs_rz.py => inputs_test_rz_langmuir_multi_picmi.py} (99%)
 create mode 100644 Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_psatd
 create mode 100644 Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_psatd_current_correction
 create mode 100644 Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_psatd_multiJ
 create mode 100644 Examples/Tests/langmuir_fluids/CMakeLists.txt
 rename Examples/Tests/langmuir_fluids/{inputs_1d => inputs_test_1d_langmuir_fluid} (100%)
 rename Examples/Tests/langmuir_fluids/{inputs_2d => inputs_test_2d_langmuir_fluid} (100%)
 rename Examples/Tests/langmuir_fluids/{inputs_3d => inputs_test_3d_langmuir_fluid} (100%)
 rename Examples/Tests/langmuir_fluids/{inputs_rz => inputs_test_rz_langmuir_fluid} (100%)
 create mode 100644 Examples/Tests/larmor/CMakeLists.txt
 create mode 120000 Examples/Tests/larmor/analysis_default_regression.py
 rename Examples/Tests/larmor/{inputs_2d_mr => inputs_test_2d_larmor} (99%)
 create mode 100644 Examples/Tests/laser_injection/CMakeLists.txt
 rename Examples/Tests/laser_injection/{analysis_laser.py => analysis_3d.py} (100%)
 rename Examples/Tests/laser_injection/{inputs_1d_rt => inputs_test_1d_laser_injection} (100%)
 rename Examples/Tests/laser_injection/{inputs_2d_rt => inputs_test_2d_laser_injection} (100%)
 rename Examples/Tests/laser_injection/{inputs_3d_rt => inputs_test_3d_laser_injection} (99%)
 create mode 100644 Examples/Tests/laser_injection_from_file/CMakeLists.txt
 delete mode 100755 Examples/Tests/laser_injection_from_file/analysis_RZ.py
 create mode 100755 Examples/Tests/laser_injection_from_file/analysis_rz.py
 rename Examples/Tests/laser_injection_from_file/{inputs.1d_test => inputs_test_1d_laser_injection_from_lasy_file} (93%)
 rename Examples/Tests/laser_injection_from_file/{inputs.1d_boost_test => inputs_test_1d_laser_injection_from_lasy_file_boost} (93%)
 create mode 100755 Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file_boost_prepare.py
 create mode 100755 Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file_prepare.py
 rename Examples/Tests/laser_injection_from_file/{inputs.2d_test_binary => inputs_test_2d_laser_injection_from_binary_file} (94%)
 create mode 100755 Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_binary_file_prepare.py
 rename Examples/Tests/laser_injection_from_file/{inputs.2d_test => inputs_test_2d_laser_injection_from_lasy_file} (93%)
 create mode 100755 Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_lasy_file_prepare.py
 rename Examples/Tests/laser_injection_from_file/{inputs.3d_test => inputs_test_3d_laser_injection_from_lasy_file} (93%)
 create mode 100755 Examples/Tests/laser_injection_from_file/inputs_test_3d_laser_injection_from_lasy_file_prepare.py
 rename Examples/Tests/laser_injection_from_file/{inputs.from_RZ_file_test => inputs_test_rz_laser_injection_from_RZ_lasy_file} (93%)
 create mode 100755 Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_RZ_lasy_file_prepare.py
 rename Examples/Tests/laser_injection_from_file/{inputs.RZ_test => inputs_test_rz_laser_injection_from_lasy_file} (93%)
 create mode 100755 Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_lasy_file_prepare.py
 create mode 100644 Examples/Tests/laser_on_fine/CMakeLists.txt
 create mode 120000 Examples/Tests/laser_on_fine/analysis_default_regression.py
 rename Examples/Tests/laser_on_fine/{inputs_2d => inputs_test_2d_laser_on_fine} (99%)
 create mode 100644 Examples/Tests/load_external_field/CMakeLists.txt
 rename Examples/Tests/{LoadExternalField => load_external_field}/analysis_3d.py (100%)
 create mode 120000 Examples/Tests/load_external_field/analysis_default_restart.py
 rename Examples/Tests/{LoadExternalField => load_external_field}/analysis_rz.py (100%)
 rename Examples/Tests/{LoadExternalField/PICMI_inputs_3d_grid_fields.py => load_external_field/inputs_test_3d_load_external_field_grid_picmi.py} (95%)
 rename Examples/Tests/{LoadExternalField/PICMI_inputs_3d_particle_fields.py => load_external_field/inputs_test_3d_load_external_field_particle_picmi.py} (95%)
 rename Examples/Tests/{LoadExternalField/inputs_rz_grid_fields => load_external_field/inputs_test_rz_load_external_field_grid} (97%)
 create mode 100644 Examples/Tests/load_external_field/inputs_test_rz_load_external_field_grid_restart
 rename Examples/Tests/{LoadExternalField/inputs_rz_particle_fields => load_external_field/inputs_test_rz_load_external_field_particles} (97%)
 create mode 100644 Examples/Tests/load_external_field/inputs_test_rz_load_external_field_particles_restart
 create mode 100644 Examples/Tests/magnetostatic_eb/CMakeLists.txt
 create mode 120000 Examples/Tests/magnetostatic_eb/analysis_default_regression.py
 rename Examples/Tests/magnetostatic_eb/{inputs_3d => inputs_test_3d_magnetostatic_eb} (96%)
 rename Examples/Tests/magnetostatic_eb/{PICMI_inputs_3d.py => inputs_test_3d_magnetostatic_eb_picmi.py} (97%)
 rename Examples/Tests/magnetostatic_eb/{PICMI_inputs_rz.py => inputs_test_rz_magnetostatic_eb_picmi.py} (97%)
 create mode 100644 Examples/Tests/maxwell_hybrid_qed/CMakeLists.txt
 rename Examples/Tests/maxwell_hybrid_qed/{analysis_Maxwell_QED_Hybrid.py => analysis.py} (100%)
 rename Examples/Tests/maxwell_hybrid_qed/{inputs_2d => inputs_test_2d_maxwell_hybrid_qed_solver} (98%)
 create mode 100644 Examples/Tests/nci_fdtd_stability/CMakeLists.txt
 rename Examples/Tests/nci_fdtd_stability/{inputs_2d => inputs_base_2d} (100%)
 create mode 100644 Examples/Tests/nci_fdtd_stability/inputs_test_2d_nci_corrector
 create mode 100644 Examples/Tests/nci_fdtd_stability/inputs_test_2d_nci_corrector_mr
 create mode 100644 Examples/Tests/nci_psatd_stability/CMakeLists.txt
 create mode 120000 Examples/Tests/nci_psatd_stability/analysis_default_regression.py
 rename Examples/Tests/nci_psatd_stability/{inputs_2d => inputs_base_2d} (100%)
 rename Examples/Tests/nci_psatd_stability/{inputs_avg_2d => inputs_base_2d_averaged} (100%)
 rename Examples/Tests/nci_psatd_stability/{inputs_3d => inputs_base_3d} (100%)
 rename Examples/Tests/nci_psatd_stability/{inputs_avg_3d => inputs_base_3d_averaged} (100%)
 rename Examples/Tests/nci_psatd_stability/{inputs_rz => inputs_base_rz} (100%)
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_2d_averaged_galilean_psatd
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_2d_averaged_galilean_psatd_hybrid
 rename Examples/Tests/{comoving/inputs_2d_hybrid => nci_psatd_stability/inputs_test_2d_comoving_psatd_hybrid} (97%)
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd_current_correction
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd_current_correction_psb
 rename Examples/Tests/nci_psatd_stability/{inputs_2d_hybrid => inputs_test_2d_galilean_psatd_hybrid} (97%)
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_3d_averaged_galilean_psatd
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_3d_averaged_galilean_psatd_hybrid
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_3d_galilean_psatd
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_3d_galilean_psatd_current_correction
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_3d_galilean_psatd_current_correction_psb
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_3d_uniform_plasma_multiJ
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_rz_galilean_psatd
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_rz_galilean_psatd_current_correction
 create mode 100644 Examples/Tests/nci_psatd_stability/inputs_test_rz_galilean_psatd_current_correction_psb
 rename Examples/Tests/{multi_j/inputs_rz => nci_psatd_stability/inputs_test_rz_multiJ_psatd} (97%)
 create mode 100644 Examples/Tests/nodal_electrostatic/CMakeLists.txt
 rename Examples/Tests/nodal_electrostatic/{analysis_3d.py => analysis.py} (100%)
 rename Examples/Tests/nodal_electrostatic/{inputs_3d => inputs_test_3d_nodal_electrostatic_solver} (98%)
 create mode 100644 Examples/Tests/nuclear_fusion/CMakeLists.txt
 rename Examples/Tests/nuclear_fusion/{inputs_proton_boron_2d => inputs_test_2d_proton_boron_fusion} (100%)
 rename Examples/Tests/nuclear_fusion/{inputs_deuterium_deuterium_3d => inputs_test_3d_deuterium_deuterium_fusion} (100%)
 rename Examples/Tests/nuclear_fusion/{inputs_deuterium_deuterium_3d_intraspecies => inputs_test_3d_deuterium_deuterium_fusion_intraspecies} (100%)
 rename Examples/Tests/nuclear_fusion/{inputs_deuterium_tritium_3d => inputs_test_3d_deuterium_tritium_fusion} (100%)
 rename Examples/Tests/nuclear_fusion/{inputs_proton_boron_3d => inputs_test_3d_proton_boron_fusion} (100%)
 rename Examples/Tests/nuclear_fusion/{inputs_deuterium_tritium_rz => inputs_test_rz_deuterium_tritium_fusion} (100%)
 create mode 100644 Examples/Tests/ohm_solver_em_modes/CMakeLists.txt
 rename Examples/Tests/{ohm_solver_EM_modes/README.rst => ohm_solver_em_modes/README} (100%)
 rename Examples/Tests/{ohm_solver_EM_modes => ohm_solver_em_modes}/analysis.py (100%)
 rename Examples/Tests/{ohm_solver_EM_modes => ohm_solver_em_modes}/analysis_rz.py (100%)
 rename Examples/Tests/{ohm_solver_EM_modes/PICMI_inputs.py => ohm_solver_em_modes/inputs_test_1d_ohm_solver_em_modes_picmi.py} (98%)
 rename Examples/Tests/{ohm_solver_EM_modes/PICMI_inputs_rz.py => ohm_solver_em_modes/inputs_test_rz_ohm_solver_em_modes_picmi.py} (98%)
 create mode 100644 Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt
 rename Examples/Tests/ohm_solver_ion_Landau_damping/{README.rst => README} (100%)
 rename Examples/Tests/ohm_solver_ion_Landau_damping/{PICMI_inputs.py => inputs_test_2d_ohm_solver_landau_damping_picmi.py} (98%)
 create mode 100644 Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt
 rename Examples/Tests/ohm_solver_ion_beam_instability/{README.rst => README} (100%)
 rename Examples/Tests/ohm_solver_ion_beam_instability/{PICMI_inputs.py => inputs_test_1d_ohm_solver_ion_beam_picmi.py} (98%)
 create mode 100644 Examples/Tests/ohm_solver_magnetic_reconnection/CMakeLists.txt
 rename Examples/Tests/ohm_solver_magnetic_reconnection/{README.rst => README} (100%)
 rename Examples/Tests/ohm_solver_magnetic_reconnection/{PICMI_inputs.py => inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py} (98%)
 create mode 100644 Examples/Tests/open_bc_poisson_solver/CMakeLists.txt
 rename Examples/Tests/{openbc_poisson_solver => open_bc_poisson_solver}/analysis.py (100%)
 rename Examples/Tests/{openbc_poisson_solver/inputs_3d => open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver} (100%)
 create mode 100644 Examples/Tests/particle_boundary_interaction/CMakeLists.txt
 rename Examples/Tests/particle_boundary_interaction/{PICMI_inputs_rz.py => inputs_test_rz_particle_boundary_interaction_picmi.py} (97%)
 create mode 100644 Examples/Tests/particle_boundary_process/CMakeLists.txt
 rename Examples/Tests/particle_boundary_process/{PICMI_inputs_reflection.py => inputs_test_2d_particle_reflection_picmi.py} (100%)
 rename Examples/Tests/particle_boundary_process/{inputs_absorption => inputs_test_3d_particle_absorption} (100%)
 create mode 100644 Examples/Tests/particle_boundary_scrape/CMakeLists.txt
 rename Examples/Tests/particle_boundary_scrape/{inputs_scrape => inputs_test_3d_particle_scrape} (100%)
 rename Examples/Tests/particle_boundary_scrape/{PICMI_inputs_scrape.py => inputs_test_3d_particle_scrape_picmi.py} (96%)
 create mode 100644 Examples/Tests/particle_data_python/CMakeLists.txt
 create mode 120000 Examples/Tests/particle_data_python/analysis_default_regression.py
 rename Examples/Tests/particle_data_python/{PICMI_inputs_2d.py => inputs_test_2d_particle_attr_access_picmi.py} (94%)
 rename Examples/Tests/particle_data_python/{PICMI_inputs_prev_pos_2d.py => inputs_test_2d_prev_positions_picmi.py} (95%)
 create mode 100644 Examples/Tests/particle_fields_diags/CMakeLists.txt
 rename Examples/Tests/particle_fields_diags/{inputs => inputs_test_3d_particle_fields_diags} (100%)
 create mode 100644 Examples/Tests/particle_pusher/CMakeLists.txt
 rename Examples/Tests/particle_pusher/{analysis_pusher.py => analysis.py} (100%)
 rename Examples/Tests/particle_pusher/{inputs_3d => inputs_test_3d_particle_pusher} (100%)
 create mode 100644 Examples/Tests/particle_thermal_boundary/CMakeLists.txt
 rename Examples/Tests/particle_thermal_boundary/{analysis_2d.py => analysis.py} (100%)
 rename Examples/Tests/particle_thermal_boundary/{inputs_2d => inputs_test_2d_particle_thermal_boundary} (100%)
 create mode 100644 Examples/Tests/particles_in_pml/CMakeLists.txt
 rename Examples/Tests/particles_in_pml/{inputs_2d => inputs_test_2d_particles_in_pml} (100%)
 rename Examples/Tests/particles_in_pml/{inputs_mr_2d => inputs_test_2d_particles_in_pml_mr} (100%)
 rename Examples/Tests/particles_in_pml/{inputs_3d => inputs_test_3d_particles_in_pml} (100%)
 rename Examples/Tests/particles_in_pml/{inputs_mr_3d => inputs_test_3d_particles_in_pml_mr} (100%)
 create mode 100644 Examples/Tests/pass_mpi_communicator/CMakeLists.txt
 rename Examples/Tests/pass_mpi_communicator/{PICMI_inputs_2d.py => inputs_test_2d_pass_mpi_comm_picmi.py} (99%)
 create mode 100644 Examples/Tests/pec/CMakeLists.txt
 create mode 120000 Examples/Tests/pec/analysis_default_regression.py
 rename Examples/Tests/pec/{inputs_field_PEC_3d => inputs_test_3d_pec_field} (100%)
 rename Examples/Tests/pec/{inputs_field_PEC_mr_3d => inputs_test_3d_pec_field_mr} (100%)
 rename Examples/Tests/pec/{inputs_particle_PEC_3d => inputs_test_3d_pec_particle} (100%)
 create mode 100644 Examples/Tests/photon_pusher/CMakeLists.txt
 rename Examples/Tests/photon_pusher/{analysis_photon_pusher.py => analysis.py} (100%)
 rename Examples/Tests/photon_pusher/{inputs_3d => inputs_test_3d_photon_pusher} (100%)
 create mode 100644 Examples/Tests/plasma_lens/CMakeLists.txt
 rename Examples/Tests/plasma_lens/{inputs_3d => inputs_test_3d_plasma_lens} (100%)
 rename Examples/Tests/plasma_lens/{inputs_boosted_3d => inputs_test_3d_plasma_lens_boosted} (100%)
 rename Examples/Tests/plasma_lens/{inputs_lattice_3d => inputs_test_3d_plasma_lens_hard_edged} (100%)
 rename Examples/Tests/plasma_lens/{PICMI_inputs_3d.py => inputs_test_3d_plasma_lens_picmi.py} (94%)
 rename Examples/Tests/plasma_lens/{inputs_short_3d => inputs_test_3d_plasma_lens_short} (100%)
 create mode 100644 Examples/Tests/pml/CMakeLists.txt
 create mode 120000 Examples/Tests/pml/analysis_default_regression.py
 create mode 120000 Examples/Tests/pml/analysis_default_restart.py
 rename Examples/Tests/pml/{inputs_2d => inputs_base_2d} (100%)
 create mode 100644 Examples/Tests/pml/inputs_test_2d_pml_x_ckc
 create mode 100644 Examples/Tests/pml/inputs_test_2d_pml_x_galilean
 create mode 100644 Examples/Tests/pml/inputs_test_2d_pml_x_psatd
 create mode 100644 Examples/Tests/pml/inputs_test_2d_pml_x_psatd_restart
 create mode 100644 Examples/Tests/pml/inputs_test_2d_pml_x_yee
 create mode 100644 Examples/Tests/pml/inputs_test_2d_pml_x_yee_restart
 rename Examples/Tests/pml/{inputs_3d => inputs_test_3d_pml_psatd_dive_divb_cleaning} (94%)
 rename Examples/Tests/pml/{inputs_rz => inputs_test_rz_pml_psatd} (93%)
 create mode 100644 Examples/Tests/point_of_contact_eb/CMakeLists.txt
 rename Examples/Tests/{point_of_contact_EB => point_of_contact_eb}/analysis.py (100%)
 rename Examples/Tests/{point_of_contact_EB/inputs_3d => point_of_contact_eb/inputs_test_3d_point_of_contact_eb} (100%)
 rename Examples/Tests/{point_of_contact_EB/inputs_rz => point_of_contact_eb/inputs_test_rz_point_of_contact_eb} (100%)
 create mode 100644 Examples/Tests/projection_divb_cleaner/CMakeLists.txt
 rename Examples/Tests/projection_divb_cleaner/{analysis_rz.py => analysis.py} (100%)
 create mode 120000 Examples/Tests/projection_divb_cleaner/analysis_default_regression.py
 rename Examples/Tests/projection_divb_cleaner/{PICMI_inputs_3D_pyload.py => inputs_test_3d_projection_divb_cleaner_callback_picmi.py} (98%)
 rename Examples/Tests/projection_divb_cleaner/{PICMI_inputs_3d.py => inputs_test_3d_projection_divb_cleaner_picmi.py} (95%)
 rename Examples/Tests/projection_divb_cleaner/{inputs_rz => inputs_test_rz_projection_divb_cleaner} (96%)
 create mode 100644 Examples/Tests/python_wrappers/CMakeLists.txt
 create mode 120000 Examples/Tests/python_wrappers/analysis_default_regression.py
 rename Examples/Tests/python_wrappers/{PICMI_inputs_2d.py => inputs_test_2d_python_wrappers_picmi.py} (99%)
 create mode 100644 Examples/Tests/qed/CMakeLists.txt
 rename Examples/Tests/qed/{breit_wheeler/analysis_core.py => analysis_breit_wheeler_core.py} (100%)
 rename Examples/Tests/qed/{breit_wheeler/analysis_opmd.py => analysis_breit_wheeler_opmd.py} (95%)
 rename Examples/Tests/qed/{breit_wheeler/analysis_yt.py => analysis_breit_wheeler_yt.py} (94%)
 rename Examples/Tests/qed/{quantum_synchrotron/analysis.py => analysis_quantum_sync.py} (100%)
 rename Examples/Tests/qed/{schwinger => }/analysis_schwinger.py (97%)
 rename Examples/Tests/qed/{breit_wheeler/inputs_2d => inputs_base_2d_breit_wheeler} (99%)
 rename Examples/Tests/qed/{breit_wheeler/inputs_3d => inputs_base_3d_breit_wheeler} (100%)
 rename Examples/Tests/qed/{schwinger/inputs_3d_schwinger => inputs_base_3d_schwinger} (100%)
 create mode 100644 Examples/Tests/qed/inputs_test_2d_qed_breit_wheeler
 create mode 100644 Examples/Tests/qed/inputs_test_2d_qed_breit_wheeler_opmd
 rename Examples/Tests/qed/{quantum_synchrotron/inputs_2d => inputs_test_2d_qed_quantum_sync} (99%)
 create mode 100644 Examples/Tests/qed/inputs_test_3d_qed_breit_wheeler
 create mode 100644 Examples/Tests/qed/inputs_test_3d_qed_breit_wheeler_opmd
 rename Examples/Tests/qed/{quantum_synchrotron/inputs_3d => inputs_test_3d_qed_quantum_sync} (99%)
 create mode 100644 Examples/Tests/qed/inputs_test_3d_qed_schwinger_1
 create mode 100644 Examples/Tests/qed/inputs_test_3d_qed_schwinger_2
 create mode 100644 Examples/Tests/qed/inputs_test_3d_qed_schwinger_3
 create mode 100644 Examples/Tests/qed/inputs_test_3d_qed_schwinger_4
 create mode 100644 Examples/Tests/radiation_reaction/CMakeLists.txt
 rename Examples/Tests/radiation_reaction/{test_const_B_analytical/analysis_classicalRR.py => analysis.py} (100%)
 rename Examples/Tests/radiation_reaction/{test_const_B_analytical/inputs_3d => inputs_test_3d_radiation_reaction} (100%)
 create mode 100644 Examples/Tests/reduced_diags/CMakeLists.txt
 rename Examples/Tests/reduced_diags/{analysis_reduced_diags_loadbalancecosts.py => analysis_reduced_diags_load_balance_costs.py} (92%)
 rename Examples/Tests/reduced_diags/{inputs_loadbalancecosts => inputs_base_3d} (100%)
 rename Examples/Tests/reduced_diags/{inputs => inputs_test_3d_reduced_diags} (100%)
 create mode 100644 Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_heuristic
 create mode 100644 Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_timers
 rename Examples/Tests/reduced_diags/{PICMI_inputs_loadbalancecosts.py => inputs_test_3d_reduced_diags_load_balance_costs_timers_picmi.py} (93%)
 create mode 100644 Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_timers_psatd
 create mode 100644 Examples/Tests/relativistic_space_charge_initialization/CMakeLists.txt
 rename Examples/Tests/relativistic_space_charge_initialization/{inputs_3d => inputs_test_3d_relativistic_space_charge_initialization} (100%)
 create mode 100644 Examples/Tests/repelling_particles/CMakeLists.txt
 rename Examples/Tests/repelling_particles/{analysis_repelling.py => analysis.py} (100%)
 rename Examples/Tests/repelling_particles/{inputs_2d => inputs_test_2d_repelling_particles} (100%)
 create mode 100644 Examples/Tests/resampling/CMakeLists.txt
 rename Examples/Tests/resampling/{analysis_leveling_thinning.py => analysis.py} (100%)
 create mode 120000 Examples/Tests/resampling/analysis_default_regression.py
 rename Examples/Tests/resampling/{inputs_1d_velocity_coincidence_thinning => inputs_test_1d_resample_velocity_coincidence_thinning} (100%)
 rename Examples/Tests/resampling/{inputs_1d_velocity_coincidence_thinning_cartesian => inputs_test_1d_resample_velocity_coincidence_thinning_cartesian} (100%)
 rename Examples/Tests/resampling/{inputs_leveling_thinning => inputs_test_2d_leveling_thinning} (100%)
 create mode 100644 Examples/Tests/restart/CMakeLists.txt
 create mode 120000 Examples/Tests/restart/analysis_default_regression.py
 create mode 120000 Examples/Tests/restart/analysis_default_restart.py
 rename Examples/Tests/restart/{inputs => inputs_base_3d} (99%)
 rename Examples/Tests/restart/{PICMI_inputs_id_cpu_read.py => inputs_test_2d_id_cpu_read_picmi.py} (95%)
 rename Examples/Tests/restart/{PICMI_inputs_runtime_component_analyze.py => inputs_test_2d_runtime_components_picmi.py} (91%)
 create mode 100644 Examples/Tests/restart/inputs_test_3d_acceleration
 create mode 100644 Examples/Tests/restart/inputs_test_3d_acceleration_psatd
 create mode 100644 Examples/Tests/restart/inputs_test_3d_acceleration_psatd_restart
 create mode 100644 Examples/Tests/restart/inputs_test_3d_acceleration_psatd_time_avg
 create mode 100644 Examples/Tests/restart/inputs_test_3d_acceleration_psatd_time_avg_restart
 create mode 100644 Examples/Tests/restart/inputs_test_3d_acceleration_restart
 create mode 100644 Examples/Tests/restart_eb/CMakeLists.txt
 create mode 120000 Examples/Tests/restart_eb/analysis_default_regression.py
 create mode 120000 Examples/Tests/restart_eb/analysis_default_restart.py
 rename Examples/Tests/restart_eb/{PICMI_inputs_restart_eb.py => inputs_test_3d_eb_picmi.py} (92%)
 create mode 100644 Examples/Tests/rigid_injection/CMakeLists.txt
 rename Examples/Tests/rigid_injection/{analysis_rigid_injection_BoostedFrame.py => analysis_rigid_injection_btd.py} (100%)
 rename Examples/Tests/rigid_injection/{analysis_rigid_injection_LabFrame.py => analysis_rigid_injection_lab.py} (100%)
 rename Examples/Tests/rigid_injection/{inputs_2d_BoostedFrame => inputs_test_2d_rigid_injection_btd} (100%)
 rename Examples/Tests/rigid_injection/{inputs_2d_LabFrame => inputs_test_2d_rigid_injection_lab} (100%)
 create mode 100644 Examples/Tests/scraping/CMakeLists.txt
 rename Examples/Tests/scraping/{inputs_rz => inputs_test_rz_scraping} (97%)
 rename Examples/Tests/scraping/{inputs_rz_filter => inputs_test_rz_scraping_filter} (97%)
 create mode 100644 Examples/Tests/silver_mueller/CMakeLists.txt
 rename Examples/Tests/silver_mueller/{analysis_silver_mueller.py => analysis.py} (100%)
 rename Examples/Tests/silver_mueller/{inputs_1d => inputs_test_1d_silver_mueller} (100%)
 rename Examples/Tests/silver_mueller/{inputs_2d_x => inputs_test_2d_silver_mueller_x} (100%)
 rename Examples/Tests/silver_mueller/{inputs_2d_z => inputs_test_2d_silver_mueller_z} (100%)
 rename Examples/Tests/silver_mueller/{inputs_rz_z => inputs_test_rz_silver_mueller_z} (100%)
 create mode 100644 Examples/Tests/single_particle/CMakeLists.txt
 rename Examples/Tests/single_particle/{analysis_bilinear_filter.py => analysis.py} (100%)
 rename Examples/Tests/single_particle/{inputs_2d => inputs_test_2d_bilinear_filter} (93%)
 create mode 100644 Examples/Tests/space_charge_initialization/CMakeLists.txt
 create mode 100644 Examples/Tests/space_charge_initialization/inputs_test_2d_space_charge_initialization
 rename Examples/Tests/space_charge_initialization/{inputs_3d => inputs_test_3d_space_charge_initialization} (97%)
 create mode 100644 Examples/Tests/subcycling/CMakeLists.txt
 create mode 120000 Examples/Tests/subcycling/analysis_default_regression.py
 rename Examples/Tests/subcycling/{inputs_2d => inputs_test_2d_subcycling_mr} (100%)
 create mode 100644 Examples/Tests/vay_deposition/CMakeLists.txt
 rename Examples/Tests/vay_deposition/{inputs_2d => inputs_test_2d_vay_deposition} (100%)
 rename Examples/Tests/vay_deposition/{inputs_3d => inputs_test_3d_vay_deposition} (100%)
 delete mode 100644 Regression/Checksum/benchmarks_json/LaserIonAcc3d.json
 delete mode 100644 Regression/Checksum/benchmarks_json/TwoParticle_electrostatic.json
 rename Regression/Checksum/benchmarks_json/{collisionZ.json => test_1d_collision_z.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_dsmc_1d.json => test_1d_dsmc_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_fluid_1D.json => test_1d_langmuir_fluid.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_1d.json => test_1d_langmuir_multi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserAcceleration_1d.json => test_1d_laser_acceleration.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserAcceleration_1d_fluid.json => test_1d_laser_acceleration_fluid.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserAcceleration_1d_fluid_boosted.json => test_1d_laser_acceleration_fluid_boosted.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_LaserAcceleration_1d.json => test_1d_laser_acceleration_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserInjection_1d.json => test_1d_laser_injection.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserInjectionFromLASYFile_1d.json => test_1d_laser_injection_from_lasy_file.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserInjectionFromLASYFile_1d_boost.json => test_1d_laser_injection_from_lasy_file_boost.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_ohms_law_solver_EM_modes_1d.json => test_1d_ohm_solver_em_modes_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_ohms_law_solver_ion_beam_1d.json => test_1d_ohm_solver_ion_beam_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_PlasmaAcceleration1d.json => test_1d_plasma_acceleration_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{resample_velocity_coincidence_thinning.json => test_1d_resample_velocity_coincidence_thinning.json} (100%)
 rename Regression/Checksum/benchmarks_json/{resample_velocity_coincidence_thinning_cartesian.json => test_1d_resample_velocity_coincidence_thinning_cartesian.json} (100%)
 rename Regression/Checksum/benchmarks_json/{SemiImplicitPicard_1d.json => test_1d_semi_implicit_picard.json} (100%)
 rename Regression/Checksum/benchmarks_json/{silver_mueller_1d.json => test_1d_silver_mueller.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ThetaImplicitPicard_1d.json => test_1d_theta_implicit_picard.json} (100%)
 rename Regression/Checksum/benchmarks_json/{averaged_galilean_2d_psatd.json => test_2d_averaged_galilean_psatd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{averaged_galilean_2d_psatd_hybrid.json => test_2d_averaged_galilean_psatd_hybrid.json} (100%)
 rename Regression/Checksum/benchmarks_json/{background_mcc.json => test_2d_background_mcc.json} (100%)
 rename Regression/Checksum/benchmarks_json/{background_mcc_dp_psp.json => test_2d_background_mcc_dp_psp.json} (100%)
 rename Regression/Checksum/benchmarks_json/{bilinear_filter.json => test_2d_bilinear_filter.json} (100%)
 rename Regression/Checksum/benchmarks_json/{collisionXZ.json => test_2d_collision_xz.json} (100%)
 rename Regression/Checksum/benchmarks_json/{comoving_2d_psatd_hybrid.json => test_2d_comoving_psatd_hybrid.json} (100%)
 rename Regression/Checksum/benchmarks_json/{dive_cleaning_2d.json => test_2d_dive_cleaning.json} (100%)
 rename Regression/Checksum/benchmarks_json/{embedded_boundary_cube_2d.json => test_2d_embedded_boundary_cube.json} (100%)
 rename Regression/Checksum/benchmarks_json/{embedded_boundary_rotated_cube_2d.json => test_2d_embedded_boundary_rotated_cube.json} (100%)
 rename Regression/Checksum/benchmarks_json/{embedded_circle.json => test_2d_embedded_circle.json} (100%)
 rename Regression/Checksum/benchmarks_json/{EnergyConservingThermalPlasma.json => test_2d_energy_conserving_thermal_plasma.json} (100%)
 rename Regression/Checksum/benchmarks_json/{galilean_2d_psatd.json => test_2d_galilean_psatd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{galilean_2d_psatd_current_correction.json => test_2d_galilean_psatd_current_correction.json} (100%)
 rename Regression/Checksum/benchmarks_json/{galilean_2d_psatd_current_correction_psb.json => test_2d_galilean_psatd_current_correction_psb.json} (100%)
 rename Regression/Checksum/benchmarks_json/{galilean_2d_psatd_hybrid.json => test_2d_galilean_psatd_hybrid.json} (100%)
 create mode 100644 Regression/Checksum/benchmarks_json/test_2d_id_cpu_read_picmi.json
 rename Regression/Checksum/benchmarks_json/{ionization_boost.json => test_2d_ionization_boost.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ionization_lab.json => test_2d_ionization_lab.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_ionization.json => test_2d_ionization_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_fluid_2D.json => test_2d_langmuir_fluid.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_MR.json => test_2d_langmuir_multi_mr.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_MR_anisotropic.json => test_2d_langmuir_multi_mr_anisotropic.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_MR_momentum_conserving.json => test_2d_langmuir_multi_mr_momentum_conserving.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_MR_psatd.json => test_2d_langmuir_multi_mr_psatd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_nodal.json => test_2d_langmuir_multi_nodal.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_Langmuir_2d.json => test_2d_langmuir_multi_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_psatd.json => test_2d_langmuir_multi_psatd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_psatd_current_correction.json => test_2d_langmuir_multi_psatd_current_correction.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_psatd_current_correction_nodal.json => test_2d_langmuir_multi_psatd_current_correction_nodal.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_psatd_momentum_conserving.json => test_2d_langmuir_multi_psatd_momentum_conserving.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_psatd_multiJ.json => test_2d_langmuir_multi_psatd_multiJ.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_psatd_multiJ_nodal.json => test_2d_langmuir_multi_psatd_multiJ_nodal.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_psatd_nodal.json => test_2d_langmuir_multi_psatd_nodal.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_psatd_Vay_deposition.json => test_2d_langmuir_multi_psatd_vay_deposition.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_psatd_Vay_deposition_nodal.json => test_2d_langmuir_multi_psatd_vay_deposition_nodal.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_2d_psatd_Vay_deposition_particle_shape_4.json => test_2d_langmuir_multi_psatd_vay_deposition_particle_shape_4.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Larmor.json => test_2d_larmor.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserAccelerationBoost.json => test_2d_laser_acceleration_boosted.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserAccelerationMR.json => test_2d_laser_acceleration_mr.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_LaserAccelerationMR.json => test_2d_laser_acceleration_mr_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserInjection_2d.json => test_2d_laser_injection.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserInjectionFromBINARYFile.json => test_2d_laser_injection_from_binary_file.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserInjectionFromLASYFile_2d.json => test_2d_laser_injection_from_lasy_file.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserIonAcc2d.json => test_2d_laser_ion_acc.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserIonAcc2d_no_field_diag.json => test_2d_laser_ion_acc_no_field_diag.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_LaserIonAcc2d.json => test_2d_laser_ion_acc_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserOnFine.json => test_2d_laser_on_fine.json} (100%)
 rename Regression/Checksum/benchmarks_json/{leveling_thinning.json => test_2d_leveling_thinning.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Maxwell_Hybrid_QED_solver.json => test_2d_maxwell_hybrid_qed_solver.json} (100%)
 rename Regression/Checksum/benchmarks_json/{nci_corrector.json => test_2d_nci_corrector.json} (100%)
 rename Regression/Checksum/benchmarks_json/{nci_correctorMR.json => test_2d_nci_corrector_mr.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_ohms_law_solver_landau_damping_2d.json => test_2d_ohm_solver_landau_damping_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_ohms_law_solver_magnetic_reconnection_2d.json => test_2d_ohm_solver_magnetic_reconnection_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{parabolic_channel_initialization_2d_single_precision.json => test_2d_parabolic_channel_initialization.json} (100%)
 rename Regression/Checksum/benchmarks_json/{particle_thermal_boundary.json => test_2d_particle_thermal_boundary.json} (100%)
 rename Regression/Checksum/benchmarks_json/{particles_in_pml_2d.json => test_2d_particles_in_pml.json} (100%)
 rename Regression/Checksum/benchmarks_json/{particles_in_pml_2d_MR.json => test_2d_particles_in_pml_mr.json} (100%)
 rename Regression/Checksum/benchmarks_json/{PlasmaAccelerationBoost2d.json => test_2d_plasma_acceleration_boosted.json} (100%)
 rename Regression/Checksum/benchmarks_json/{PlasmaAccelerationMR.json => test_2d_plasma_acceleration_mr.json} (100%)
 rename Regression/Checksum/benchmarks_json/{momentum-conserving-gather.json => test_2d_plasma_acceleration_mr_momentum_conserving.json} (100%)
 rename Regression/Checksum/benchmarks_json/{PlasmaMirror.json => test_2d_plasma_mirror.json} (100%)
 rename Regression/Checksum/benchmarks_json/{pml_x_ckc.json => test_2d_pml_x_ckc.json} (100%)
 rename Regression/Checksum/benchmarks_json/{pml_x_galilean.json => test_2d_pml_x_galilean.json} (100%)
 rename Regression/Checksum/benchmarks_json/{pml_x_psatd.json => test_2d_pml_x_psatd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{pml_x_yee.json => test_2d_pml_x_yee.json} (100%)
 rename Regression/Checksum/benchmarks_json/{pml_x_yee_eb.json => test_2d_pml_x_yee_eb.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_prev_positions.json => test_2d_prev_positions_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Proton_Boron_Fusion_2D.json => test_2d_proton_boron_fusion.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_wrappers.json => test_2d_python_wrappers_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{qed_breit_wheeler_2d.json => test_2d_qed_breit_wheeler.json} (100%)
 rename Regression/Checksum/benchmarks_json/{qed_quantum_sync_2d.json => test_2d_qed_quantum_sync.json} (100%)
 rename Regression/Checksum/benchmarks_json/{RefinedInjection.json => test_2d_refined_injection.json} (100%)
 rename Regression/Checksum/benchmarks_json/{RepellingParticles.json => test_2d_repelling_particles.json} (100%)
 rename Regression/Checksum/benchmarks_json/{RigidInjection_BTD.json => test_2d_rigid_injection_btd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{RigidInjection_lab.json => test_2d_rigid_injection_lab.json} (100%)
 rename Regression/Checksum/benchmarks_json/{silver_mueller_2d_x.json => test_2d_silver_mueller_x.json} (100%)
 rename Regression/Checksum/benchmarks_json/{silver_mueller_2d_z.json => test_2d_silver_mueller_z.json} (100%)
 rename Regression/Checksum/benchmarks_json/{space_charge_initialization_2d.json => test_2d_space_charge_initialization.json} (100%)
 rename Regression/Checksum/benchmarks_json/{subcyclingMR.json => test_2d_subcycling_mr.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ThetaImplicitJFNK_VandB_2d.json => test_2d_theta_implicit_jfnk_vandb.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ThetaImplicitJFNK_VandB_2d_PICMI.json => test_2d_theta_implicit_jfnk_vandb_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Uniform_2d.json => test_2d_uniform_plasma.json} (100%)
 rename Regression/Checksum/benchmarks_json/{VayDeposition2D.json => test_2d_vay_deposition.json} (100%)
 rename Regression/Checksum/benchmarks_json/{restart.json => test_3d_acceleration.json} (100%)
 rename Regression/Checksum/benchmarks_json/{restart_psatd.json => test_3d_acceleration_psatd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{restart_psatd_time_avg.json => test_3d_acceleration_psatd_time_avg.json} (100%)
 rename Regression/Checksum/benchmarks_json/{averaged_galilean_3d_psatd.json => test_3d_averaged_galilean_psatd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{averaged_galilean_3d_psatd_hybrid.json => test_3d_averaged_galilean_psatd_hybrid.json} (100%)
 rename Regression/Checksum/benchmarks_json/{BeamBeamCollision.json => test_3d_beam_beam_collision.json} (100%)
 rename Regression/Checksum/benchmarks_json/{collider_diagnostics.json => test_3d_collider_diagnostics.json} (100%)
 rename Regression/Checksum/benchmarks_json/{collisionISO.json => test_3d_collision_iso.json} (100%)
 rename Regression/Checksum/benchmarks_json/{collisionXYZ.json => test_3d_collision_xyz.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Deuterium_Deuterium_Fusion_3D.json => test_3d_deuterium_deuterium_fusion.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Deuterium_Deuterium_Fusion_3D_intraspecies.json => test_3d_deuterium_deuterium_fusion_intraspecies.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Deuterium_Tritium_Fusion_3D.json => test_3d_deuterium_tritium_fusion.json} (100%)
 rename Regression/Checksum/benchmarks_json/{diff_lumi_diag.json => test_3d_diff_lumi_diag.json} (100%)
 rename Regression/Checksum/benchmarks_json/{divb_cleaning_3d.json => test_3d_divb_cleaning.json} (100%)
 rename Regression/Checksum/benchmarks_json/{dive_cleaning_3d.json => test_3d_dive_cleaning.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_restart_eb.json => test_3d_eb_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ElectrostaticSphere.json => test_3d_electrostatic_sphere.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ElectrostaticSphereEB.json => test_3d_electrostatic_sphere_eb.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ElectrostaticSphereEB_mixedBCs.json => test_3d_electrostatic_sphere_eb_mixed_bc.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_ElectrostaticSphereEB.json => test_3d_electrostatic_sphere_eb_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ElectrostaticSphereLabFrame.json => test_3d_electrostatic_sphere_lab_frame.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ElectrostaticSphereLabFrame_MR_emass_10.json => test_3d_electrostatic_sphere_lab_frame_mr_emass_10.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ElectrostaticSphereRelNodal.json => test_3d_electrostatic_sphere_rel_nodal.json} (100%)
 rename Regression/Checksum/benchmarks_json/{embedded_boundary_cube.json => test_3d_embedded_boundary_cube.json} (100%)
 rename Regression/Checksum/benchmarks_json/{embedded_boundary_cube_macroscopic.json => test_3d_embedded_boundary_cube_macroscopic.json} (100%)
 rename Regression/Checksum/benchmarks_json/{embedded_boundary_rotated_cube.json => test_3d_embedded_boundary_rotated_cube.json} (100%)
 rename Regression/Checksum/benchmarks_json/{FluxInjection3D.json => test_3d_flux_injection.json} (100%)
 rename Regression/Checksum/benchmarks_json/{focusing_gaussian_beam.json => test_3d_focusing_gaussian_beam.json} (100%)
 rename Regression/Checksum/benchmarks_json/{galilean_3d_psatd.json => test_3d_galilean_psatd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{galilean_3d_psatd_current_correction.json => test_3d_galilean_psatd_current_correction.json} (100%)
 rename Regression/Checksum/benchmarks_json/{galilean_3d_psatd_current_correction_psb.json => test_3d_galilean_psatd_current_correction_psb.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_gaussian_beam.json => test_3d_gaussian_beam_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{hard_edged_quadrupoles.json => test_3d_hard_edged_quadrupoles.json} (100%)
 rename Regression/Checksum/benchmarks_json/{hard_edged_quadrupoles_boosted.json => test_3d_hard_edged_quadrupoles_boosted.json} (100%)
 rename Regression/Checksum/benchmarks_json/{hard_edged_quadrupoles_moving.json => test_3d_hard_edged_quadrupoles_moving.json} (100%)
 rename Regression/Checksum/benchmarks_json/{initial_distribution.json => test_3d_initial_distribution.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ion_stopping.json => test_3d_ion_stopping.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_fluid_multi.json => test_3d_langmuir_fluid.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi.json => test_3d_langmuir_multi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_nodal.json => test_3d_langmuir_multi_nodal.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_Langmuir.json => test_3d_langmuir_multi_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_psatd.json => test_3d_langmuir_multi_psatd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_psatd_current_correction.json => test_3d_langmuir_multi_psatd_current_correction.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_psatd_current_correction_nodal.json => test_3d_langmuir_multi_psatd_current_correction_nodal.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_psatd_div_cleaning.json => test_3d_langmuir_multi_psatd_div_cleaning.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_psatd_momentum_conserving.json => test_3d_langmuir_multi_psatd_momentum_conserving.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_psatd_multiJ.json => test_3d_langmuir_multi_psatd_multiJ.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_psatd_multiJ_nodal.json => test_3d_langmuir_multi_psatd_multiJ_nodal.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_psatd_nodal.json => test_3d_langmuir_multi_psatd_nodal.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_psatd_single_precision.json => test_3d_langmuir_multi_psatd_single_precision.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_psatd_Vay_deposition.json => test_3d_langmuir_multi_psatd_vay_deposition.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_psatd_Vay_deposition_nodal.json => test_3d_langmuir_multi_psatd_vay_deposition_nodal.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Langmuir_multi_single_precision.json => test_3d_langmuir_multi_single_precision.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserAcceleration.json => test_3d_laser_acceleration.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserAcceleration_BTD.json => test_3d_laser_acceleration_btd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_LaserAcceleration.json => test_3d_laser_acceleration_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserAcceleration_single_precision_comms.json => test_3d_laser_acceleration_single_precision_comms.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserInjection.json => test_3d_laser_injection.json} (100%)
 rename Regression/Checksum/benchmarks_json/{LaserInjectionFromLASYFile.json => test_3d_laser_injection_from_lasy_file.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_LoadExternalGridField3D.json => test_3d_load_external_field_grid_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_LoadExternalParticleField3D.json => test_3d_load_external_field_particle_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{magnetostatic_eb_3d.json => test_3d_magnetostatic_eb.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_magnetostatic_eb_3d.json => test_3d_magnetostatic_eb_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{NodalElectrostaticSolver.json => test_3d_nodal_electrostatic_solver.json} (100%)
 rename Regression/Checksum/benchmarks_json/{openbc_poisson_solver.json => test_3d_open_bc_poisson_solver.json} (100%)
 rename Regression/Checksum/benchmarks_json/{particle_boundaries_3d.json => test_3d_particle_boundaries.json} (100%)
 rename Regression/Checksum/benchmarks_json/{particle_fields_diags.json => test_3d_particle_fields_diags.json} (100%)
 rename Regression/Checksum/benchmarks_json/{particle_fields_diags_single_precision.json => test_3d_particle_fields_diags_single_precision.json} (100%)
 rename Regression/Checksum/benchmarks_json/{particle_pusher.json => test_3d_particle_pusher.json} (100%)
 rename Regression/Checksum/benchmarks_json/{particles_in_pml.json => test_3d_particles_in_pml.json} (100%)
 rename Regression/Checksum/benchmarks_json/{particles_in_pml_3d_MR.json => test_3d_particles_in_pml_mr.json} (100%)
 rename Regression/Checksum/benchmarks_json/{PEC_field.json => test_3d_pec_field.json} (100%)
 rename Regression/Checksum/benchmarks_json/{PEC_field_mr.json => test_3d_pec_field_mr.json} (100%)
 rename Regression/Checksum/benchmarks_json/{PEC_particle.json => test_3d_pec_particle.json} (100%)
 rename Regression/Checksum/benchmarks_json/{photon_pusher.json => test_3d_photon_pusher.json} (100%)
 rename Regression/Checksum/benchmarks_json/{PlasmaAccelerationBoost3d.json => test_3d_plasma_acceleration_boosted.json} (100%)
 rename Regression/Checksum/benchmarks_json/{PlasmaAccelerationBoost3d_hybrid.json => test_3d_plasma_acceleration_boosted_hybrid.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_PlasmaAccelerationMR.json => test_3d_plasma_acceleration_mr_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_PlasmaAcceleration.json => test_3d_plasma_acceleration_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Plasma_lens.json => test_3d_plasma_lens.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Plasma_lens_boosted.json => test_3d_plasma_lens_boosted.json} (100%)
 rename Regression/Checksum/benchmarks_json/{hard_edged_plasma_lens.json => test_3d_plasma_lens_hard_edged.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Plasma_lens_short.json => test_3d_plasma_lens_short.json} (100%)
 rename Regression/Checksum/benchmarks_json/{pml_psatd_dive_divb_cleaning.json => test_3d_pml_psatd_dive_divb_cleaning.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Point_of_contact_EB_3d.json => test_3d_point_of_contact_eb.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_projection_divb_cleaner_callback_3d.json => test_3d_projection_divb_cleaner_callback_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Python_projection_divb_cleaner_3d.json => test_3d_projection_divb_cleaner_picmi.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Proton_Boron_Fusion_3D.json => test_3d_proton_boron_fusion.json} (100%)
 rename Regression/Checksum/benchmarks_json/{qed_breit_wheeler_3d.json => test_3d_qed_breit_wheeler.json} (100%)
 rename Regression/Checksum/benchmarks_json/{qed_quantum_sync_3d.json => test_3d_qed_quantum_sync.json} (100%)
 rename Regression/Checksum/benchmarks_json/{qed_schwinger1.json => test_3d_qed_schwinger_1.json} (100%)
 rename Regression/Checksum/benchmarks_json/{qed_schwinger2.json => test_3d_qed_schwinger_2.json} (100%)
 rename Regression/Checksum/benchmarks_json/{qed_schwinger3.json => test_3d_qed_schwinger_3.json} (100%)
 rename Regression/Checksum/benchmarks_json/{qed_schwinger4.json => test_3d_qed_schwinger_4.json} (100%)
 rename Regression/Checksum/benchmarks_json/{radiation_reaction.json => test_3d_radiation_reaction.json} (100%)
 rename Regression/Checksum/benchmarks_json/{reduced_diags.json => test_3d_reduced_diags.json} (100%)
 rename Regression/Checksum/benchmarks_json/{reduced_diags_loadbalancecosts_heuristic.json => test_3d_reduced_diags_load_balance_costs_heuristic.json} (100%)
 rename Regression/Checksum/benchmarks_json/{reduced_diags_loadbalancecosts_timers.json => test_3d_reduced_diags_load_balance_costs_timers.json} (100%)
 rename Regression/Checksum/benchmarks_json/{reduced_diags_loadbalancecosts_timers_psatd.json => test_3d_reduced_diags_load_balance_costs_timers_psatd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{reduced_diags_single_precision.json => test_3d_reduced_diags_single_precision.json} (100%)
 rename Regression/Checksum/benchmarks_json/{relativistic_space_charge_initialization.json => test_3d_relativistic_space_charge_initialization.json} (100%)
 rename Regression/Checksum/benchmarks_json/{space_charge_initialization.json => test_3d_space_charge_initialization.json} (100%)
 rename Regression/Checksum/benchmarks_json/{uniform_plasma_restart.json => test_3d_uniform_plasma.json} (100%)
 rename Regression/Checksum/benchmarks_json/{uniform_plasma_multiJ.json => test_3d_uniform_plasma_multiJ.json} (100%)
 rename Regression/Checksum/benchmarks_json/{VayDeposition3D.json => test_3d_vay_deposition.json} (100%)
 rename Regression/Checksum/benchmarks_json/{BTD_rz.json => test_rz_btd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{collisionRZ.json => test_rz_collision.json} (100%)
 rename Regression/Checksum/benchmarks_json/{Deuterium_Tritium_Fusion_RZ.json => test_rz_deuterium_tritium_fusion.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ElectrostaticSphereRZ.json => test_rz_electrostatic_sphere.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ElectrostaticSphereEB_RZ.json => test_rz_electrostatic_sphere_eb.json} (100%)
 rename Regression/Checksum/benchmarks_json/{ElectrostaticSphereEB_RZ_MR.json => test_rz_electrostatic_sphere_eb_mr.json} (100%)
 rename Regression/Checksum/benchmarks_json/{EmbeddedBoundaryDiffraction.json => test_rz_embedded_boundary_diffraction.json} (100%)
 rename Regression/Checksum/benchmarks_json/{FluxInjection.json => test_rz_flux_injection.json} (100%)
 rename Regression/Checksum/benchmarks_json/{galilean_rz_psatd.json => test_rz_galilean_psatd.json} (100%)
 rename Regression/Checksum/benchmarks_json/{galilean_rz_psatd_current_correction.json => test_rz_galilean_psatd_current_correction.json} (100%)
 rename Regression/Checksum/benchmarks_json/{galilean_rz_psatd_current_correction_psb.json => test_rz_galilean_psatd_current_correction_psb.json} (100%)
Regression/Checksum/benchmarks_json/{Langmuir_fluid_RZ.json => test_rz_langmuir_fluid.json} (100%) rename Regression/Checksum/benchmarks_json/{Langmuir_multi_rz.json => test_rz_langmuir_multi.json} (100%) rename Regression/Checksum/benchmarks_json/{Python_Langmuir_rz_multimode.json => test_rz_langmuir_multi_picmi.json} (100%) rename Regression/Checksum/benchmarks_json/{Langmuir_multi_rz_psatd.json => test_rz_langmuir_multi_psatd.json} (100%) rename Regression/Checksum/benchmarks_json/{Langmuir_multi_rz_psatd_current_correction.json => test_rz_langmuir_multi_psatd_current_correction.json} (100%) rename Regression/Checksum/benchmarks_json/{Langmuir_multi_rz_psatd_multiJ.json => test_rz_langmuir_multi_psatd_multiJ.json} (100%) rename Regression/Checksum/benchmarks_json/{LaserAccelerationRZ.json => test_rz_laser_acceleration.json} (100%) rename Regression/Checksum/benchmarks_json/{Python_LaserAccelerationRZ.json => test_rz_laser_acceleration_picmi.json} (100%) rename Regression/Checksum/benchmarks_json/{LaserInjectionFromRZLASYFile.json => test_rz_laser_injection_from_RZ_lasy_file.json} (100%) rename Regression/Checksum/benchmarks_json/{LaserInjectionFromLASYFile_RZ.json => test_rz_laser_injection_from_lasy_file.json} (100%) rename Regression/Checksum/benchmarks_json/{LoadExternalFieldRZGrid.json => test_rz_load_external_field_grid.json} (100%) rename Regression/Checksum/benchmarks_json/{LoadExternalFieldRZParticles.json => test_rz_load_external_field_particles.json} (100%) rename Regression/Checksum/benchmarks_json/{Python_magnetostatic_eb_rz.json => test_rz_magnetostatic_eb_picmi.json} (100%) rename Regression/Checksum/benchmarks_json/{multi_J_rz_psatd.json => test_rz_multiJ_psatd.json} (100%) rename Regression/Checksum/benchmarks_json/{Python_ohms_law_solver_EM_modes_rz.json => test_rz_ohm_solver_em_modes_picmi.json} (100%) rename Regression/Checksum/benchmarks_json/{particle_boundary_interaction.json => test_rz_particle_boundary_interaction_picmi.json} (100%) rename Regression/Checksum/benchmarks_json/{pml_psatd_rz.json => test_rz_pml_psatd.json} (100%) rename Regression/Checksum/benchmarks_json/{Point_of_contact_EB_rz.json => test_rz_point_of_contact_eb.json} (100%) rename Regression/Checksum/benchmarks_json/{projection_divb_cleaner_rz.json => test_rz_projection_divb_cleaner.json} (100%) rename Regression/Checksum/benchmarks_json/{scraping.json => test_rz_scraping.json} (100%) rename Regression/Checksum/benchmarks_json/{silver_mueller_rz_z.json => test_rz_silver_mueller_z.json} (100%) rename Regression/Checksum/benchmarks_json/{spacecraft_charging.json => test_rz_spacecraft_charging_picmi.json} (100%) create mode 100644 Regression/PostProcessingUtils/__init__.py diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index fa14902283c..6e9884966fe 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -9,6 +9,7 @@ pr: jobs: - job: + # FIXME remove unused variables variables: BLASPP_HOME: '/usr/local' CEI_SUDO: 'sudo' @@ -16,30 +17,33 @@ jobs: CMAKE_GENERATOR: 'Ninja' FFTW_HOME: '/usr' LAPACKPP_HOME: '/usr/local' - OMP_NUM_THREADS: 1 WARPX_CI_CCACHE: 'TRUE' - WARPX_CI_CLEAN_TESTS: 'TRUE' - WARPX_CI_NUM_MAKE_JOBS: 2 - WARPX_CI_OPENPMD: 'TRUE' - WARPX_CI_TMP: '/tmp/ci' + #WARPX_OPENPMD: 'TRUE' strategy: matrix: - cartesian1d: - WARPX_CI_REGULAR_CARTESIAN_1D: 'TRUE' - WARPX_CI_PSATD: 'FALSE' - cartesian2d: - WARPX_CI_REGULAR_CARTESIAN_2D: 'TRUE' - cartesian3d: - WARPX_CI_REGULAR_CARTESIAN_3D: 'TRUE' - single_precision: - WARPX_CI_SINGLE_PRECISION: 'TRUE' - rz_or_nompi: - 
WARPX_CI_RZ_OR_NOMPI: 'TRUE' - qed: - WARPX_CI_QED: 'TRUE' - embedded_boundary: - WARPX_CI_EB: 'TRUE' + # Cartesian 1D + cartesian_1d: + WARPX_CMAKE_FLAGS: -DWarpX_DIMS=1 -DWarpX_FFT=ON -DWarpX_PYTHON=ON + # Cartesian 2D + cartesian_2d: + WARPX_CMAKE_FLAGS: -DWarpX_DIMS=2 -DWarpX_FFT=ON -DWarpX_PYTHON=ON + # Cartesian 3D + cartesian_3d: + WARPX_CMAKE_FLAGS: -DWarpX_DIMS=3 -DWarpX_FFT=ON -DWarpX_PYTHON=ON + WARPX_HEFFTE: 'TRUE' + # Cylindrical RZ + cylindrical_rz: + WARPX_CMAKE_FLAGS: -DWarpX_DIMS=RZ -DWarpX_FFT=ON -DWarpX_PYTHON=ON + WARPX_RZ_FFT: 'TRUE' + # embedded boundaries + embedded_boundaries: + WARPX_CMAKE_FLAGS: -DWarpX_DIMS='1;2;3;RZ' -DWarpX_FFT=ON -DWarpX_PYTHON=ON -DWarpX_EB=ON + WARPX_RZ_FFT: 'TRUE' + # single precision + #single_precision: + # WARPX_CMAKE_FLAGS: -DWarpX_DIMS='1;2;3;RZ' -DWarpX_FFT=ON -DWarpX_PYTHON=ON -DWarpX_PRECISION=SINGLE + # WARPX_RZ_FFT: 'TRUE' # default: 60; maximum: 360 timeoutInMinutes: 240 @@ -51,9 +55,8 @@ jobs: - task: Cache@2 continueOnError: true inputs: - key: 'Ccache | "$(System.JobName)" | .azure-pipelines.yml | cmake/dependencies/AMReX.cmake | run_test.sh' + key: 'Ccache | "$(System.JobName)" | .azure-pipelines.yml | cmake/dependencies/AMReX.cmake' restoreKeys: | - Ccache | "$(System.JobName)" | .azure-pipelines.yml | cmake/dependencies/AMReX.cmake | run_test.sh Ccache | "$(System.JobName)" | .azure-pipelines.yml | cmake/dependencies/AMReX.cmake Ccache | "$(System.JobName)" | .azure-pipelines.yml path: /home/vsts/.ccache @@ -63,9 +66,8 @@ jobs: - task: Cache@2 continueOnError: true inputs: - key: 'Python3 | "$(System.JobName)" | .azure-pipelines.yml | run_test.sh' + key: 'Python3 | "$(System.JobName)" | .azure-pipelines.yml' restoreKeys: | - Python3 | "$(System.JobName)" | .azure-pipelines.yml | run_test.sh Python3 | "$(System.JobName)" | .azure-pipelines.yml path: /home/vsts/.local/lib/python3.8 cacheHitVar: PYTHON38_CACHE_RESTORED @@ -83,6 +85,8 @@ jobs: python3 python3-pandas python3-pip python3-venv python3-setuptools libblas-dev liblapack-dev ccache --set-config=max_size=10.0G python3 -m pip install --upgrade pip + python3 -m pip install --upgrade build + python3 -m pip install --upgrade packaging python3 -m pip install --upgrade setuptools python3 -m pip install --upgrade wheel python3 -m pip install --upgrade virtualenv @@ -92,25 +96,29 @@ jobs: export PATH="$HOME/.local/bin:$PATH" sudo curl -L -o /usr/local/bin/cmake-easyinstall https://raw.githubusercontent.com/ax3l/cmake-easyinstall/main/cmake-easyinstall sudo chmod a+x /usr/local/bin/cmake-easyinstall - if [ "${WARPX_CI_OPENPMD:-FALSE}" == "TRUE" ]; then - cmake-easyinstall --prefix=/usr/local \ - git+https://github.com/openPMD/openPMD-api.git@0.14.3 \ - -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ - -DCMAKE_VERBOSE_MAKEFILE=ON \ - -DopenPMD_USE_PYTHON=OFF -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF -DBUILD_CLI_TOOLS=OFF - python3 -m pip install --upgrade openpmd-api - fi - if [[ "${WARPX_CI_RZ_OR_NOMPI:-FALSE}" == "TRUE" ]]; then - cmake-easyinstall --prefix=/usr/local git+https://github.com/icl-utk-edu/blaspp.git \ + #if [ "${WARPX_OPENPMD:-FALSE}" == "TRUE" ]; then + # cmake-easyinstall --prefix=/usr/local \ + # git+https://github.com/openPMD/openPMD-api.git@0.14.3 \ + # -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ + # -DCMAKE_VERBOSE_MAKEFILE=ON \ + # -DopenPMD_USE_PYTHON=OFF -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF -DBUILD_CLI_TOOLS=OFF + # #python3 -m pip install --upgrade openpmd-api + #fi + if [ "${WARPX_RZ_FFT:-FALSE}" == "TRUE" ]; then + # BLAS++ + cmake-easyinstall 
--prefix=/usr/local \ + git+https://github.com/icl-utk-edu/blaspp.git \ -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ -DCMAKE_CXX_STANDARD=17 \ -Duse_openmp=OFF -Dbuild_tests=OFF -DCMAKE_VERBOSE_MAKEFILE=ON - cmake-easyinstall --prefix=/usr/local git+https://github.com/icl-utk-edu/lapackpp.git \ - -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ - -DCMAKE_CXX_STANDARD=17 \ + # LAPACK++ + cmake-easyinstall --prefix=/usr/local \ + git+https://github.com/icl-utk-edu/lapackpp.git \ + -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ + -DCMAKE_CXX_STANDARD=17 \ -Duse_cmake_find_lapack=ON -Dbuild_tests=OFF -DCMAKE_VERBOSE_MAKEFILE=ON fi - if [[ "${WARPX_CI_REGULAR_CARTESIAN_3D:-FALSE}" == "TRUE" ]]; then + if [ "${WARPX_HEFFTE:-FALSE}" == "TRUE" ]; then cmake-easyinstall --prefix=/usr/local git+https://github.com/icl-utk-edu/heffte.git@v2.4.0 \ -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ -DCMAKE_CXX_STANDARD=17 -DHeffte_ENABLE_DOXYGEN=OFF \ @@ -121,6 +129,15 @@ jobs: -DHeffte_ENABLE_MAGMA=OFF \ -DCMAKE_VERBOSE_MAKEFILE=ON fi + # Python modules required for test analysis + python3 -m pip install --upgrade -r Regression/requirements.txt + python3 -m pip cache purge + # external repositories required for test analysis + cd .. + git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git + # TODO select only specific datasets? + git clone --depth 1 https://github.com/openPMD/openPMD-example-datasets.git + cd - rm -rf ${CEI_TMP} df -h displayName: 'Install dependencies' @@ -128,7 +145,23 @@ jobs: - bash: | set -eu -o pipefail df -h - ./run_test.sh - rm -rf ${WARPX_CI_TMP} + + # configure + export AMReX_CMAKE_FLAGS="-DAMReX_ASSERTIONS=ON -DAMReX_TESTING=ON" + cmake -S . -B build \ + ${AMReX_CMAKE_FLAGS} \ + ${WARPX_CMAKE_FLAGS} \ + -DWarpX_TEST_CLEANUP=ON \ + -DWarpX_TEST_FPETRAP=ON + + # build + cmake --build build -j 2 df -h - displayName: 'Build & test' + displayName: 'Build' + + - bash: | + set -eu -o pipefail + + # run tests (exclude pytest.AMReX when running Python tests) + ctest --test-dir build --output-on-failure -E AMReX + displayName: 'Test' diff --git a/.github/workflows/clang_sanitizers.yml b/.github/workflows/clang_sanitizers.yml index 8efcdc9a431..119a893eb72 100644 --- a/.github/workflows/clang_sanitizers.yml +++ b/.github/workflows/clang_sanitizers.yml @@ -68,10 +68,10 @@ jobs: #MPI implementations often leak memory export "ASAN_OPTIONS=detect_leaks=0" - mpirun -n 2 ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_rz - mpirun -n 2 ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_1d - mpirun -n 2 ./build/bin/warpx.2d Examples/Physics_applications/laser_acceleration/inputs_2d - mpirun -n 2 ./build/bin/warpx.3d Examples/Physics_applications/laser_acceleration/inputs_3d + mpirun -n 2 ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_base_rz + mpirun -n 2 ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration + mpirun -n 2 ./build/bin/warpx.2d Examples/Physics_applications/laser_acceleration/inputs_base_2d + mpirun -n 2 ./build/bin/warpx.3d Examples/Physics_applications/laser_acceleration/inputs_base_3d build_thread_sanitizer: name: Clang thread sanitizer @@ -149,14 +149,14 @@ jobs: export OMP_NUM_THREADS=2 - mpirun -n 2 ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_rz warpx.serialize_initial_conditions = 0 - mpirun -n 2 ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_1d 
warpx.serialize_initial_conditions = 0 - mpirun -n 2 ./build/bin/warpx.2d Examples/Physics_applications/laser_acceleration/inputs_2d warpx.serialize_initial_conditions = 0 - mpirun -n 2 ./build/bin/warpx.3d Examples/Physics_applications/laser_acceleration/inputs_3d warpx.serialize_initial_conditions = 0 + mpirun -n 2 ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_base_rz warpx.serialize_initial_conditions = 0 + mpirun -n 2 ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration warpx.serialize_initial_conditions = 0 + mpirun -n 2 ./build/bin/warpx.2d Examples/Physics_applications/laser_acceleration/inputs_base_2d warpx.serialize_initial_conditions = 0 + mpirun -n 2 ./build/bin/warpx.3d Examples/Physics_applications/laser_acceleration/inputs_base_3d warpx.serialize_initial_conditions = 0 git clone https://github.com/ECP-WarpX/warpx-data ../warpx-data cd Examples/Tests/embedded_circle ulimit -c unlimited - mpirun -n 2 ../../../build_EB/bin/warpx.2d inputs_2d warpx.serialize_initial_conditions = 0 + mpirun -n 2 ../../../build_EB/bin/warpx.2d inputs_test_2d_embedded_circle warpx.serialize_initial_conditions = 0 diff --git a/.github/workflows/insitu.yml b/.github/workflows/insitu.yml index d6184e64d28..be93dfb9beb 100644 --- a/.github/workflows/insitu.yml +++ b/.github/workflows/insitu.yml @@ -58,10 +58,10 @@ jobs: cmake --build build -j 4 - name: Test run: | - cp Examples/Physics_applications/laser_acceleration/inputs_3d . + cp Examples/Physics_applications/laser_acceleration/inputs_base_3d . cp Examples/Physics_applications/laser_acceleration/3d_ascent_actions.yaml ascent_actions.yaml mpiexec -n 2 ./build/bin/warpx.3d \ - inputs_3d \ + inputs_base_3d \ max_step = 40 \ diag1.intervals = 30:40:10 \ diag1.format = ascent @@ -101,10 +101,10 @@ jobs: cmake --build build -j 10 - name: 2D Test run: | - cp Examples/Tests/ionization/inputs_2d_rt . + cp Examples/Tests/ionization/inputs_test_2d_ionization_lab . cp Examples/Tests/ionization/catalyst_pipeline.py . mpiexec -n 2 ./build/bin/warpx.2d \ - inputs_2d_rt \ + inputs_test_2d_ionization_lab \ catalyst.script_paths = catalyst_pipeline.py\ catalyst.implementation = paraview\ diag1.intervals = 16\ @@ -112,15 +112,15 @@ jobs: diag1.format = catalyst - name: 3D Test run: | - cp Examples/Tests/electrostatic_sphere/inputs_3d . + cp Examples/Tests/electrostatic_sphere/inputs_base_3d . cp Examples/Tests/electrostatic_sphere/catalyst_pipeline.py . 
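+          # run the base 3D input deck, overriding the diagnostics on the command line to use Catalyst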
mpiexec -n 2 ./build/bin/warpx.3d \ - inputs_3d \ - catalyst.script_paths = catalyst_pipeline.py \ - catalyst.implementation = paraview \ - diagnostics.diags_names = diag1 \ - diag1.format = catalyst\ - diag1.intervals = 3 + inputs_base_3d \ + catalyst.script_paths = catalyst_pipeline.py \ + catalyst.implementation = paraview \ + diagnostics.diags_names = diag1 \ + diag1.format = catalyst\ + diag1.intervals = 3 - uses: actions/upload-artifact@v4 with: name: catalyst-test-artifacts diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index 485a5229c6a..4d0b9ebe9c6 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -132,7 +132,7 @@ jobs: source /opt/intel/oneapi/setvars.sh set -e export OMP_NUM_THREADS=2 - Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py + Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_picmi.py build_dpcc: name: oneAPI DPC++ SP diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 124d26fa7f7..596920a3911 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -84,4 +84,4 @@ jobs: source py-venv/bin/activate export OMP_NUM_THREADS=1 - mpirun -n 2 Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py + mpirun -n 2 Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_picmi.py diff --git a/.github/workflows/scripts/checkQEDTableGenerator.sh b/.github/workflows/scripts/checkQEDTableGenerator.sh index e14a7a2d6f2..a773015c6c7 100755 --- a/.github/workflows/scripts/checkQEDTableGenerator.sh +++ b/.github/workflows/scripts/checkQEDTableGenerator.sh @@ -29,7 +29,7 @@ export OMP_NUM_THREADS=2 # Generate QED lookup tables using WarpX # ./build/bin/warpx.2d \ - ./Examples/Tests/qed/quantum_synchrotron/inputs_2d \ + ./Examples/Tests/qed/inputs_test_2d_qed_quantum_sync \ qed_bw.lookup_table_mode = "generate" \ qed_bw.tab_dndt_chi_min = 0.01 \ qed_bw.tab_dndt_chi_max = 100.0 \ @@ -70,7 +70,7 @@ diff qs_table_dndt qs_table_tool_dndt # Run a WarpX simulation using the lookup tables generated by the external tool # ./build/bin/warpx.2d \ - ./Examples/Tests/qed/quantum_synchrotron/inputs_2d \ + ./Examples/Tests/qed/inputs_test_2d_qed_quantum_sync \ qed_bw.lookup_table_mode = "load" \ qed_bw.load_table_from = "bw_table_tool" \ qed_qs.lookup_table_mode = "load" \ diff --git a/.github/workflows/source.yml b/.github/workflows/source.yml index a1c29416b3e..143be1971fb 100644 --- a/.github/workflows/source.yml +++ b/.github/workflows/source.yml @@ -25,10 +25,8 @@ jobs: run: .github/workflows/source/hasTabs - name: End-of-Line whitespaces run: .github/workflows/source/hasEOLwhiteSpace - - name: Proper file names in Examples - run: .github/workflows/source/wrongFileNameInExamples - - name: Examples are tested - run: .github/workflows/source/inputsNotTested + - name: Check test input files + run: .github/workflows/source/check_inputs.py - name: Check that the test matrix for CI includes all tests run: .github/workflows/source/test_ci_matrix.sh - name: Doxygen diff --git a/.github/workflows/source/check_inputs.py b/.github/workflows/source/check_inputs.py new file mode 100755 index 00000000000..3cb2d8f735e --- /dev/null +++ b/.github/workflows/source/check_inputs.py @@ -0,0 +1,109 @@ +#! 
/usr/bin/env python3 + +import os +import re +import sys + +# mandatory prefixes for test names +testname_prefix = ["test_1d_", "test_2d_", "test_3d_", "test_rz_"] + +# collect all test names and test input filenames from CMakeLists.txt files +tests = [] +# walk through all files under Examples/, including subdirectories +for dirpath, dirnames, filenames in os.walk(top="./Examples"): + # loop over CMakeLists.txt files + for name in [filename for filename in filenames if filename == "CMakeLists.txt"]: + filepath = os.path.join(dirpath, name) + # open CMakeLists.txt file + with open(filepath) as f: + # loop over lines of CMakeLists.txt + for line in f: + # strip leading whitespaces + line = line.lstrip() + # find lines where 'add_warpx_test' is called + if re.match("add_warpx_test", line): + # strip leading whitespaces, remove end-of-line comments + testname = next(f).lstrip().split(" ")[0] + # skip lines related to other function arguments + # NOTE: update range call to reflect changes + # in the interface of 'add_warpx_test' + for _ in range(3): + next(f) + # strip leading whitespaces, remove end-of-line comments + testinput = next(f).lstrip().split(" ")[0] + # some Python input scripts are quoted + # to account for command-line arguments: + # strip initial quotation mark from string + if testinput.startswith('"'): + testinput = re.sub('"', "", testinput) + # extract filename from path + testinput = os.path.split(testinput)[1] + tests.append( + {"name": testname, "input": testinput, "path": filepath} + ) + +# check consistency of test names and test input filenames +print("\nCheck that test names and input names are correct...") +wrong_testname = False +wrong_testinput = False +for test in tests: + testname = test["name"].rstrip() + testinput = test["input"].rstrip() + testpath = test["path"].rstrip() + if not testname.startswith(tuple(testname_prefix)): + print(f"Wrong test name: {testname}") + print(f"(from {testpath})") + wrong_testname = True + # PICMI tests + if "picmi" in testname: + if not testname.endswith("_picmi") and not testname.endswith("_picmi_restart"): + print(f"Wrong test name: {testname}") + print(f"(from {testpath})") + wrong_testname = True + # restart tests + if "restart" in testname: + if not testname.endswith("_restart"): + print(f"Wrong test name: {testname}") + print(f"(from {testpath})") + wrong_testname = True + # test input file names + if ( + not testinput == f"inputs_{testname}" + and not testinput == f"inputs_{testname}.py" + ): + # we may be running a base input file/script or a restart PICMI test + if not testinput.startswith("inputs_base") and not testinput.endswith( + "_picmi.py" + ): + print(f"Wrong input name: {testinput}") + print(f"(from {testpath})") + wrong_testinput = True + +if wrong_testname: + print(f"NOTE: Test names must start with one of {testname_prefix}.") + print(" Test names must end with '_restart' for restart tests.") + print(" Test names must end with '_picmi' for PICMI tests.") +if wrong_testinput: + print("NOTE: Test input names must start with 'inputs_' followed by the test name.") + print(" Test input names must end with '.py' for PICMI tests.") + +# check that all input files in Examples/ are tested +print("\nCheck that all test input files are tested...") +missing_input = False +# walk through all files under Examples/, including subdirectories +for dirpath, dirnames, filenames in os.walk(top="./Examples"): + # loop over files starting with "inputs_test_" + for name in [ + filename for filename in filenames if 
filename.startswith("inputs_test_") + ]: + if name not in [test["input"] for test in tests]: + print(f"Input not tested: {os.path.join(dirpath, name)}") + missing_input = True + +if missing_input: + print("NOTE: All test input files must be tested.\n") +else: + print() + +if wrong_testname or wrong_testinput or missing_input: + sys.exit("FAILED\n") diff --git a/.github/workflows/source/inputsNotTested b/.github/workflows/source/inputsNotTested deleted file mode 100755 index 497d322a610..00000000000 --- a/.github/workflows/source/inputsNotTested +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -# Search input files in Examples/ and verify if all input files are tested - -set -eu -o pipefail - -ok=0 - -for file in $(find Examples -type f) -do - # Name of file without path - filename=$(basename $file) - # If file is an input file - if [[ ${filename:0:6 } =~ inputs ]] || - [[ ${filename:0:12} =~ PICMI_inputs ]] - then - cr=$'$' - file_cr="$file$cr" - # Search file name in test list - string_match=$(grep -m1 "$file_cr" Regression/WarpX-tests.ini || echo "") - # If match is empty, inputs examples is not tested - if [[ -z $string_match ]] - then - echo "$file is not tested!" - ok=1 - fi - fi -done - -if [ $ok -ne 0 ] -then - echo "" - echo "All files in Examples that start with one of" - echo " - inputs" - echo " - PICMI_inputs" - echo "must have an automated test." - echo "Please add a test in Regression/WarpX-tests.ini" - echo "for all files listed above." -fi - -exit $ok diff --git a/.github/workflows/source/wrongFileNameInExamples b/.github/workflows/source/wrongFileNameInExamples deleted file mode 100755 index 23ba1c7abb7..00000000000 --- a/.github/workflows/source/wrongFileNameInExamples +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash -# -# Search inside Examples/ and check that file names start with -# inputs -# PICMI_inputs -# analysis -# README - -set -eu -o pipefail - -ok=0 -files=() - -for pathtofile in $(find Examples -type f) -do - file=$(basename $pathtofile) - if [[ ${file:0:6 } != inputs ]] && - [[ ${file:0:12} != PICMI_inputs ]] && - [[ ${file:0:8 } != analysis ]] && - [[ ${file:0:8 } != catalyst ]] && - [[ ${file: -4} != yaml ]] && - [[ ${file:0:4 } != plot ]] && - [[ ${file:0:6 } != README ]] - then - files+=($file) - echo "$pathtofile does not have a proper name!" - ok=1 - fi -done - -if [ $ok -ne 0 ] -then - echo "" - echo "Files in Examples/ must start with one of:" - echo " - inputs : for WarpX input files" - echo " - PICMI_inputs : for PICMI-compliant input scripts" - echo " - analysis : for scripts testing the accuracy of a test" - echo " - *.yaml : for third-party input, e.g. 
Ascent in situ visualization" - echo " - README : for readme files" - echo "" - echo "Please rename the file(s) to comply, or move to another folder" -fi - -exit $ok diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index bf6652e9c69..68d2b2156e9 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -39,8 +39,8 @@ jobs: -DWarpX_MPI=OFF \ -DWarpX_QED=OFF cmake --build build -j 4 - ./build/bin/warpx.3d Examples/Physics_applications/laser_acceleration/inputs_3d - ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_rz + ./build/bin/warpx.3d Examples/Physics_applications/laser_acceleration/inputs_base_3d + ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_base_rz ccache -s du -hs ~/.cache/ccache @@ -82,8 +82,8 @@ jobs: -DWarpX_QED_TOOLS=ON cmake --build build -j 4 - ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_1d - ./build/bin/warpx.2d Examples/Physics_applications/laser_acceleration/inputs_2d + ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration + ./build/bin/warpx.2d Examples/Physics_applications/laser_acceleration/inputs_base_2d ccache -s du -hs ~/.cache/ccache @@ -133,8 +133,8 @@ jobs: -DWarpX_QED_TABLE_GEN=ON cmake --build build -j 4 - ./build/bin/warpx.3d Examples/Physics_applications/laser_acceleration/inputs_3d - ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_rz + ./build/bin/warpx.3d Examples/Physics_applications/laser_acceleration/inputs_base_3d + ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_base_rz ccache -s du -hs ~/.cache/ccache @@ -210,7 +210,6 @@ jobs: cmake -S . -B build \ -DCMAKE_VERBOSE_MAKEFILE=ON \ - -DWarpX_APP=OFF \ -DWarpX_FFT=ON \ -DWarpX_PYTHON=ON \ -DWarpX_QED_TABLE_GEN=ON @@ -222,4 +221,4 @@ jobs: - name: run pywarpx run: | export OMP_NUM_THREADS=1 - mpirun -n 2 Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py + mpirun -n 2 Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_picmi.py diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 3c0faaf3636..d6030743524 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -54,7 +54,7 @@ jobs: run: | $env:PATH += ';C:/Program Files (x86)/WarpX/bin/' - python3 Examples\Tests\gaussian_beam\PICMI_inputs_gaussian_beam.py + python3 Examples/Tests/gaussian_beam/inputs_test_3d_gaussian_beam_picmi.py # JSON writes are currently very slow (50min) with MSVC # --diagformat=openpmd @@ -118,5 +118,5 @@ jobs: call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\vc\Auxiliary\build\vcvarsall.bat" x64 set "PATH=C:/Program Files (x86)/WarpX/bin/;%PATH%" - python3 Examples\Tests\gaussian_beam\PICMI_inputs_gaussian_beam.py --diagformat=openpmd + python3 Examples/Tests/gaussian_beam/inputs_test_3d_gaussian_beam_picmi.py --diagformat=openpmd if errorlevel 1 exit 1 diff --git a/CMakeLists.txt b/CMakeLists.txt index 36e42433572..d20de57f81c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -82,6 +82,14 @@ option(WarpX_QED_TABLE_GEN "QED table generation (requires PICSAR and Boost)" option(WarpX_QED_TOOLS "Build external tool to generate QED lookup tables (requires PICSAR and Boost)" OFF) +# Advanced option to automatically clean up CI test directories +option(WarpX_TEST_CLEANUP "Clean up CI test directories" OFF) +mark_as_advanced(WarpX_TEST_CLEANUP) + +# Advanced option to run CI tests 
with FPE-trapping runtime parameters +option(WarpX_TEST_FPETRAP "Run CI tests with FPE-trapping runtime parameters" OFF) +mark_as_advanced(WarpX_TEST_FPETRAP) + set(WarpX_DIMS_VALUES 1 2 3 RZ) set(WarpX_DIMS 3 CACHE STRING "Simulation dimensionality <1;2;3;RZ>") list(REMOVE_DUPLICATES WarpX_DIMS) @@ -188,7 +196,7 @@ if(WarpX_HEFFTE) endif() # this defined the variable BUILD_TESTING which is ON by default -#include(CTest) +include(CTest) # Dependencies ################################################################ @@ -787,12 +795,12 @@ endif() # Tests ####################################################################### # - -#if(BUILD_TESTING) -# enable_testing() -# -# add_test(...) -#endif() +if(BUILD_TESTING) + enable_testing() + if(WarpX_APP OR WarpX_PYTHON) + add_subdirectory(Examples) + endif() +endif() # Status Summary for Build Options ############################################ diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/testing.rst index c6a09d970df..fd57b61fa17 100644 --- a/Docs/source/developers/testing.rst +++ b/Docs/source/developers/testing.rst @@ -3,7 +3,7 @@ Testing the code ================ -When adding a new feature, you want to make sure that (i) you did not break the existing code and (ii) your contribution gives correct results. While existing capabilities are tested regularly remotely (when commits are pushed to an open PR on CI, and every night on local clusters), it can also be useful to run tests on your custom input file. This section details how to use both automated and custom tests. +When adding a new feature, you want to make sure that (i) you did not break the existing code and (ii) your contribution gives correct results. While the code is tested regularly remotely (on the cloud when commits are pushed to an open PR, and every night on local clusters), it can also be useful to run tests on your custom input file. This section details how to use both automated and custom tests. Continuous Integration in WarpX ------------------------------- @@ -11,31 +11,14 @@ Continuous Integration in WarpX Configuration ^^^^^^^^^^^^^ -Our regression tests are using the suite published and documented at `AMReX-Codes/regression_testing `__. +Our regression tests are run with `CTest `__, an executable that comes with CMake. -Most of the configuration of our regression tests happens in ``Regression/Warpx-tests.ini``. -We slightly modify this file in ``Regression/prepare_file_ci.py``. +The test suite is ready to run once you have configured and built WarpX with CMake, following the instructions that you find in our :ref:`Users ` or :ref:`Developers ` sections. -For example, if you like to change the compiler to compilation to build on Nvidia GPUs, modify this block to add ``-DWarpX_COMPUTE=CUDA``: +A test that requires a build option that was not configured and built will be skipped automatically. For example, if you configure and build WarpX in 1D only, any test of dimensionality other than 1D, which would require WarpX to be configured and built in the corresponding dimensionality, will be skipped automatically. -.. code-block:: ini - - [source] - dir = /home/regtester/AMReX_RegTesting/warpx - branch = development - cmakeSetupOpts = -DAMReX_ASSERTIONS=ON -DAMReX_TESTING=ON -DWarpX_COMPUTE=CUDA - -We also support changing compilation options via the usual :ref:`build environment variables `. -For instance, compiling with ``clang++ -Werror`` would be: - -.. 
code-block:: sh - - export CXX=$(which clang++) - export CXXFLAGS="-Werror" - - -Run Pre-Commit Tests Locally ----------------------------- +How to run pre-commit tests locally +----------------------------------- When proposing code changes to Warpx, we perform a couple of automated stylistic and correctness checks on the code change. You can run those locally before you push to save some time, install them once like this: @@ -47,97 +30,151 @@ You can run those locally before you push to save some time, install them once l See `pre-commit.com `__ and our ``.pre-commit-config.yaml`` file in the repository for more details. +How to run automated tests locally +---------------------------------- + +Once your new feature is ready, there are ways to check that you did not break anything. +WarpX has automated tests running every time a commit is pushed to an open pull request. +The input files and scripts used by the automated tests can be found in the `Examples `__ directory, either under `Physics_applications `__ or `Tests `__. + +For easier debugging, it can be convenient to run the tests on your local machine by executing CTest as illustrated in the examples below (where we assume that WarpX was configured and built in the directory ``build``): + +* List tests available for the current build options: + + .. code-block:: sh + + ctest --test-dir build -N + +* Run tests available for the current build options: + + .. code-block:: sh + + ctest --test-dir build + +* Run tests available for the current build options in parallel (while preserving existing dependencies between tests): + + .. code-block:: sh + + ctest --test-dir build -j 2 -Run the test suite locally +* Run tests available for the current build options and output anything outputted by the test program if the test should fail: + + .. code-block:: sh + + ctest --test-dir build --output-on-failure + +* Run tests available for the current build options with verbose output: + + .. code-block:: sh + + ctest --test-dir build --verbose + +* Run tests matching the regular expression ``laser_acceleration``: + + .. code-block:: sh + + ctest --test-dir build -R laser_acceleration + +* Run tests except those matching the regular expression ``laser_acceleration``: + + .. code-block:: sh + + ctest --test-dir build -E laser_acceleration + +Once the execution of CTest is completed, you can find all files associated with each test in its corresponding directory under ``build/bin/``. +For example, if you run the single test ``test_3d_laser_acceleration``, you can find all files associated with this test in the directory ``build/bin/test_3d_laser_acceleration/``. + +If you modify the code base locally and want to assess the effects of your code changes on the automated tests, you need to first rebuild WarpX including your code changes and then rerun CTest. + +How to add automated tests -------------------------- -Once your new feature is ready, there are ways to check that you did not break anything. -WarpX has automated tests running every time a commit is added to an open pull request. -The list of automated tests is defined in `./Regression/WarpX-tests.ini `__. +As mentioned above, the input files and scripts used by the automated tests can be found in the `Examples `__ directory, either under `Physics_applications `__ or `Tests `__. 
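+
+For example, the ``laser_acceleration`` test directory contains, among other files (listing abridged):
+
+.. code-block:: sh
+
+   $ ls Examples/Physics_applications/laser_acceleration/
+   CMakeLists.txt
+   README
+   analysis_default_regression.py
+   inputs_base_3d
+   inputs_test_1d_laser_acceleration
+   inputs_test_3d_laser_acceleration_picmi.py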
-For easier debugging, it can be convenient to run the tests on your local machine by executing the script -`./run_test.sh `__ from WarpX's root folder, as illustrated in the examples below: +Each test directory must contain a file named ``CMakeLists.txt`` where all tests associated with the input files and scripts in that directory must be listed. -.. code-block:: sh +A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as illustrated in the examples below: - # Example: - # run all tests defined in ./Regression/WarpX-tests.ini - ./run_test.sh +* Add the **regular test** ``test_1d_laser_acceleration``: - # Example: - # run only the test named 'pml_x_yee' - ./run_test.sh pml_x_yee + .. code-block:: cmake - # Example: - # run only the tests named 'pml_x_yee', 'pml_x_ckc' and 'pml_x_psatd' - ./run_test.sh pml_x_yee pml_x_ckc pml_x_psatd + add_warpx_test( + test_1d_laser_acceleration # name + 1 # dims + 2 # nprocs + OFF # eb + inputs_test_1d_laser_acceleration # inputs + analysis.py # analysis + diags/diag1000100 # output (plotfile) + OFF # dependency + ) -Note that the script `./run_test.sh `__ runs the tests with the exact same compile-time options and runtime options used to run the tests remotely. +* Add the **PICMI test** ``test_2d_laser_acceleration_picmi``: -Moreover, the script `./run_test.sh `__ compiles all the executables that are necessary in order to run the chosen tests. -The default number of threads allotted for compiling is set with ``numMakeJobs = 8`` in `./Regression/WarpX-tests.ini `__. -However, when running the tests on a local machine, it is usually possible and convenient to allot more threads for compiling, in order to speed up the builds. -This can be accomplished by setting the environment variable ``WARPX_CI_NUM_MAKE_JOBS``, with the preferred number of threads that fits your local machine, e.g. ``export WARPX_CI_NUM_MAKE_JOBS=16`` (or less if your machine is smaller). -On public CI, we overwrite the value to ``WARPX_CI_NUM_MAKE_JOBS=2``, in order to avoid overloading the available remote resources. -Note that this will not change the number of threads used to run each test, but only the number of threads used to compile each executable necessary to run the tests. + .. code-block:: cmake -Once the execution of `./run_test.sh `__ is completed, you can find all the relevant files associated with each test in one single directory. -For example, if you run the single test ``pml_x_yee``, as shown above, on 04/30/2021, you can find all relevant files in the directory ``./test_dir/rt-WarpX/WarpX-tests/2021-04-30/pml_x_yee/``. -The content of this directory will look like the following (possibly including backtraces if the test crashed at runtime): + add_warpx_test( + test_2d_laser_acceleration_picmi # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_laser_acceleration_picmi.py # inputs + analysis.py # analysis + diags/diag1000100 # output (plotfile) + OFF # dependency + ) -.. code-block:: sh +* Add the **restart test** ``test_3d_laser_acceleration_restart``: + + .. 
code-block:: cmake + + add_warpx_test( + test_3d_laser_acceleration_restart # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_laser_acceleration_restart # inputs + analysis_default_restart.py # analysis + diags/diag1000100 # output (plotfile) + test_3d_laser_acceleration # dependency + ) - $ ls ./test_dir/rt-WarpX/WarpX-tests/2021-04-30/pml_x_yee/ - analysis_pml_yee.py # Python analysis script - inputs_2d # input file - main2d.gnu.TEST.TPROF.MTMPI.OMP.QED.ex # executable - pml_x_yee.analysis.out # Python analysis output - pml_x_yee.err.out # error output - pml_x_yee.make.out # build output - pml_x_yee_plt00000/ # data output (initialization) - pml_x_yee_plt00300/ # data output (last time step) - pml_x_yee.run.out # test output + Note that the restart has an explicit dependency, namely it can run only provided that the original test, from which the restart checkpoint files will be read, runs first. +* A more complex example. Add the **PICMI test** ``test_rz_laser_acceleration_picmi``, with custom command-line arguments ``--test`` and ``dir``, and openPMD time series output: -Add a test to the suite ------------------------ + .. code-block:: cmake -There are three steps to follow to add a new automated test (illustrated here for PML boundary conditions): + add_warpx_test( + test_rz_laser_acceleration_picmi # name + RZ # dims + 2 # nprocs + OFF # eb + "inputs_test_rz_laser_acceleration_picmi.py --test --dir 1" # inputs + analysis.py # analysis + diags/diag1/ # output (openPMD time series) + OFF # dependency + ) -* An input file for your test, in folder `Example/Tests/...`. For the PML test, the input file is at ``Examples/Tests/pml/inputs_2d``. You can also re-use an existing input file (even better!) and pass specific parameters at runtime (see below). -* A Python script that reads simulation output and tests correctness versus theory or calibrated results. For the PML test, see ``Examples/Tests/pml/analysis_pml_yee.py``. It typically ends with Python statement ``assert( error<0.01 )``. -* If you need a new Python package dependency for testing, add it in ``Regression/requirements.txt`` -* Add an entry to ``Regression/WarpX-tests.ini``, so that a WarpX simulation runs your test in the continuous integration process, and the Python script is executed to assess the correctness. For the PML test, the entry is +If you need a new Python package dependency for testing, please add it in `Regression/requirements.txt `__. -.. code-block:: +Sometimes, two tests share a large number of input parameters. The shared input parameters can be collected in a "base" input file that can be passed as a runtime parameter in the actual test input files through the parameter ``FILE``. - [pml_x_yee] - buildDir = . - inputFile = Examples/Tests/pml/inputs2d - runtime_params = warpx.do_dynamic_scheduling=0 algo.maxwell_solver=yee - dim = 2 - addToCompileString = - cmakeSetupOpts = -DWarpX_DIMS=2 - restartTest = 0 - useMPI = 1 - numprocs = 2 - useOMP = 1 - numthreads = 1 - compileTest = 0 - doVis = 0 - analysisRoutine = Examples/Tests/pml/analysis_pml_yee.py +Naming conventions for automated tests +-------------------------------------- -If you re-use an existing input file, you can add arguments to ``runtime_params``, like ``runtime_params = amr.max_level=1 amr.n_cell=32 512 max_step=100 plasma_e.zmin=-200.e-6``. 
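+As a concrete example, combining the ``FILE`` mechanism described above with the naming conventions listed below, the test input file of ``test_3d_laser_acceleration_single_precision_comms`` reuses a base input file and overrides a single parameter:
+
+.. code-block:: sh
+
+   $ cat Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_single_precision_comms
+   # base input parameters
+   FILE = inputs_base_3d
+
+   # test input parameters
+   warpx.do_single_precision_comms = 1
+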
+Note that we currently obey the following snake\_case naming conventions for test names and test input files (which make automation tasks easier, e.g., parsing visually, parsing through code, sorting alphabetically, filtering tests in CTest via ``-R``, etc.): -.. note:: +#. **Regular test names** start with the string ``test_1d_``, ``test_2d_``, ``test_3d_`` or ``test_rz_``, followed by a string that is descriptive of the test. For example, ``test_3d_laser_acceleration``. - If you added ``analysisRoutine = Examples/analysis_default_regression.py``, then run the new test case locally and add the :ref:`checksum ` file for the expected output. +#. **PICMI test names** start with the string ``test_1d_``, ``test_2d_``, ``test_3d_`` or ``test_rz_``, followed by a string that is descriptive of the test, and end with the string ``_picmi``. For example, ``test_3d_laser_acceleration_picmi``. -.. note:: +#. **Restart test names** end with the string ``_restart``. For example, ``test_3d_laser_acceleration_restart``. - We run those tests on our continuous integration services, which at the moment only have 2 virtual CPU cores. - Thus, make sure that the product of ``numprocs`` and ``numthreads`` for a test is ``<=2``. +#. **Test input files** start with the string ``inputs_`` followed by the test name. For example, ``inputs_test_3d_laser_acceleration`` or ``inputs_test_3d_laser_acceleration_picmi.py`` or ``inputs_test_3d_laser_acceleration_restart``. +#. **Base input files** (that is, files collecting input parameters shared between two or more tests) are typically named ``inputs_base_1d``, ``inputs_base_2d``, ``inputs_base_3d`` or ``inputs_base_rz``, possibly followed by additional strings if need be. Useful tool for plotfile comparison: ``fcompare`` ------------------------------------------------- diff --git a/Examples/CMakeLists.txt b/Examples/CMakeLists.txt new file mode 100644 index 00000000000..f2898b557f4 --- /dev/null +++ b/Examples/CMakeLists.txt @@ -0,0 +1,237 @@ +# Configuration ############################################################### +# +if(WarpX_MPI) + # OpenMPI root guard: https://github.com/open-mpi/ompi/issues/4451 + if("$ENV{USER}" STREQUAL "root") + # calling even --help as root will abort and warn on stderr + execute_process( + COMMAND ${MPIEXEC_EXECUTABLE} --help + ERROR_VARIABLE MPIEXEC_HELP_TEXT + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + if(${MPIEXEC_HELP_TEXT} MATCHES "^.*allow-run-as-root.*$") + set(MPI_ALLOW_ROOT --allow-run-as-root) + endif() + endif() +endif() + +# Add a WarpX test set (with sub-tests) +# +# name: unique name of this test +# dims: 1,2,RZ,3 +# nprocs: 1 or 2 (maybe refactor later on to just depend on WarpX_MPI) +# eb: needs EB support? 
(temporary until handled as runtime parameter) +# inputs: inputs file or PICMI script, WarpX_MPI decides w/ or w/o MPI +# analysis: analysis script, always run without MPI +# output: output file(s) to analyze +# dependency: name of base test that must run first +# +function(add_warpx_test + name + dims + nprocs + eb + inputs + analysis + output + dependency +) + # cannot run MPI tests w/o MPI build + if(nprocs GREATER_EQUAL 2 AND NOT WarpX_MPI) + message(WARNING "${name}: cannot run MPI tests without MPI build") + return() + endif() + + # cannot run EB tests w/o EB build + if(eb AND NOT WarpX_EB) + message(WARNING "${name}: cannot run EB tests without EB build") + return() + endif() + + # do not run no-EB tests w/ EB build + if(NOT eb AND WarpX_EB) + return() + endif() + + # cannot run tests with unsupported geometry + if(NOT dims IN_LIST WarpX_DIMS) + return() + endif() + + # cannot run tests with unfulfilled dependencies + if(dependency AND NOT TEST ${dependency}.run) + return() + endif() + + # set dimension suffix + warpx_set_suffix_dims(SD ${dims}) + + # make a unique run directory + file(MAKE_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${name}) + set(THIS_WORKING_DIR ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${name}) + + # get input file/script and optional command-line arguments + separate_arguments(INPUTS_LIST UNIX_COMMAND "${inputs}") + list(GET INPUTS_LIST 0 INPUTS_FILE) + list(LENGTH INPUTS_LIST INPUTS_LIST_LENGTH) + if(INPUTS_LIST_LENGTH GREATER 1) + list(SUBLIST INPUTS_LIST 1 -1 INPUTS_ARGS) + list(JOIN INPUTS_ARGS " " INPUTS_ARGS) + else() + set(INPUTS_ARGS "") + endif() + + # get analysis script and optional command-line arguments + separate_arguments(ANALYSIS_LIST UNIX_COMMAND "${analysis}") + list(GET ANALYSIS_LIST 0 ANALYSIS_FILE) + cmake_path(SET ANALYSIS_FILE "${CMAKE_CURRENT_SOURCE_DIR}/${ANALYSIS_FILE}") + # TODO Enable lines below to handle command-line arguments + #list(LENGTH ANALYSIS_LIST ANALYSIS_LIST_LENGTH) + #if(ANALYSIS_LIST_LENGTH GREATER 1) + # list(SUBLIST ANALYSIS_LIST 1 -1 ANALYSIS_ARGS) + # list(JOIN ANALYSIS_ARGS " " ANALYSIS_ARGS) + #else() + # set(ANALYSIS_ARGS "") + #endif() + + # Python test? 
+ set(python OFF) + if(${INPUTS_FILE} MATCHES ".*\.py$") + set(python ON) + cmake_path(SET INPUTS_FILE "${CMAKE_CURRENT_SOURCE_DIR}/${INPUTS_FILE}") + endif() + + # cannot run Python tests w/o Python support + if(python AND NOT WarpX_PYTHON) + return() + endif() + + # set MPI executable + set(THIS_MPI_TEST_EXE + ${MPIEXEC_EXECUTABLE} + ${MPI_ALLOW_ROOT} + ${MPIEXEC_NUMPROC_FLAG} ${nprocs} + ${MPIEXEC_POSTFLAGS} + ${MPIEXEC_PREFLAGS} + ) + + # set Python executable + if(python) + set(THIS_Python_EXE ${Python_EXECUTABLE}) + else() + set(THIS_Python_EXE "") + endif() + + # test run + if(python) + # for argparse, do not pass command-line arguments as one quoted string + separate_arguments(INPUTS_ARGS UNIX_COMMAND "${INPUTS_ARGS}") + add_test( + NAME ${name}.run + COMMAND + ${THIS_MPI_TEST_EXE} + ${THIS_Python_EXE} + ${INPUTS_FILE} + ${INPUTS_ARGS} + WORKING_DIRECTORY ${THIS_WORKING_DIR} + ) + # FIXME Use helper function to handle Windows exceptions + set_property(TEST ${name}.run APPEND PROPERTY ENVIRONMENT "PYTHONPATH=$ENV{PYTHONPATH}:${CMAKE_PYTHON_OUTPUT_DIRECTORY}") + else() + # TODO Use these for Python tests too + set(runtime_params + "amrex.abort_on_unused_inputs = 1" + "amrex.throw_exception = 1" + "warpx.always_warn_immediately = 1" + "warpx.do_dynamic_scheduling = 0" + "warpx.serialize_initial_conditions = 1" + # FIXME should go before input file + #"warpx.abort_on_warning_threshold = low" + ) + set(runtime_params_fpetrap "") + if(WarpX_TEST_FPETRAP) + set(runtime_params_fpetrap + "amrex.fpe_trap_invalid = 1" + "amrex.fpe_trap_overflow = 1" + "amrex.fpe_trap_zero = 1" + ) + endif() + add_test( + NAME ${name}.run + COMMAND + ${THIS_MPI_TEST_EXE} + $ + ${INPUTS_FILE} + ${runtime_params} + ${runtime_params_fpetrap} + ${INPUTS_ARGS} + WORKING_DIRECTORY ${THIS_WORKING_DIR} + ) + endif() + + # AMReX ParmParse prefix: FILE = + set_property(TEST ${name}.run APPEND PROPERTY ENVIRONMENT "AMREX_INPUTS_FILE_PREFIX=${CMAKE_CURRENT_SOURCE_DIR}/") + + # run all tests with 1 OpenMP thread by default + set_property(TEST ${name}.run APPEND PROPERTY ENVIRONMENT "OMP_NUM_THREADS=1") + + if(python OR WIN32) + set(THIS_Python_SCRIPT_EXE ${Python_EXECUTABLE}) + else() + set(THIS_Python_SCRIPT_EXE "") + endif() + + # test analysis + if(analysis) + add_test( + NAME ${name}.analysis + COMMAND + ${THIS_Python_SCRIPT_EXE} ${ANALYSIS_FILE} + ${output} + WORKING_DIRECTORY ${THIS_WORKING_DIR} + ) + # test analysis depends on test run + set_property(TEST ${name}.analysis APPEND PROPERTY DEPENDS "${name}.run") + # FIXME Use helper function to handle Windows exceptions + set(PYTHONPATH "$ENV{PYTHONPATH}:${CMAKE_PYTHON_OUTPUT_DIRECTORY}") + # add paths for custom Python modules + set(PYTHONPATH "${PYTHONPATH}:${WarpX_SOURCE_DIR}/Regression/Checksum") + set(PYTHONPATH "${PYTHONPATH}:${WarpX_SOURCE_DIR}/Regression/PostProcessingUtils") + set(PYTHONPATH "${PYTHONPATH}:${WarpX_SOURCE_DIR}/Tools/Parser") + set(PYTHONPATH "${PYTHONPATH}:${WarpX_SOURCE_DIR}/Tools/PostProcessing") + set_property(TEST ${name}.analysis APPEND PROPERTY ENVIRONMENT "PYTHONPATH=${PYTHONPATH}") + endif() + + # CI: remove test directory after run + if(WarpX_TEST_CLEANUP) + add_test( + NAME ${name}.cleanup + COMMAND ${CMAKE_COMMAND} -E rm -rf ${THIS_WORKING_DIR} + ) + # test cleanup depends on test run + set_property(TEST ${name}.cleanup APPEND PROPERTY DEPENDS "${name}.run") + if(analysis) + # test cleanup depends on test analysis + set_property(TEST ${name}.cleanup APPEND PROPERTY DEPENDS "${name}.analysis") + endif() + endif() + + # Do we depend 
on another test? + if(dependency) + # current test depends on dependency test run (and analysis) + set_property(TEST ${name}.run APPEND PROPERTY DEPENDS "${dependency}.run") + if(analysis) + set_property(TEST ${name}.run APPEND PROPERTY DEPENDS "${dependency}.analysis") + endif() + if(WarpX_TEST_CLEANUP) + # do not clean up dependency test before current test is completed + set_property(TEST ${dependency}.cleanup APPEND PROPERTY DEPENDS "${name}.cleanup") + endif() + endif() +endfunction() + +# Add tests (alphabetical order) ############################################## +# + +add_subdirectory(Tests) +add_subdirectory(Physics_applications) diff --git a/Examples/Physics_applications/CMakeLists.txt b/Examples/Physics_applications/CMakeLists.txt new file mode 100644 index 00000000000..e4f8565a140 --- /dev/null +++ b/Examples/Physics_applications/CMakeLists.txt @@ -0,0 +1,11 @@ +# Add tests (alphabetical order) ############################################## +# + +add_subdirectory(beam_beam_collision) +add_subdirectory(capacitive_discharge) +add_subdirectory(laser_acceleration) +add_subdirectory(laser_ion) +add_subdirectory(plasma_acceleration) +add_subdirectory(plasma_mirror) +add_subdirectory(spacecraft_charging) +add_subdirectory(uniform_plasma) diff --git a/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt b/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt new file mode 100644 index 00000000000..793675efaba --- /dev/null +++ b/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_beam_beam_collision # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_beam_beam_collision # inputs + analysis_default_openpmd_regression.py # analysis + diags/diag1/ # output + OFF # dependency +) diff --git a/Examples/Physics_applications/beam-beam_collision/README.rst b/Examples/Physics_applications/beam_beam_collision/README similarity index 100% rename from Examples/Physics_applications/beam-beam_collision/README.rst rename to Examples/Physics_applications/beam_beam_collision/README diff --git a/Examples/Physics_applications/beam_beam_collision/analysis_default_openpmd_regression.py b/Examples/Physics_applications/beam_beam_collision/analysis_default_openpmd_regression.py new file mode 120000 index 00000000000..73e5ec47001 --- /dev/null +++ b/Examples/Physics_applications/beam_beam_collision/analysis_default_openpmd_regression.py @@ -0,0 +1 @@ +../../analysis_default_openpmd_regression.py \ No newline at end of file diff --git a/Examples/Physics_applications/beam-beam_collision/inputs b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision similarity index 100% rename from Examples/Physics_applications/beam-beam_collision/inputs rename to Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision diff --git a/Examples/Physics_applications/capacitive_discharge/CMakeLists.txt b/Examples/Physics_applications/capacitive_discharge/CMakeLists.txt new file mode 100644 index 00000000000..4f67131556e --- /dev/null +++ b/Examples/Physics_applications/capacitive_discharge/CMakeLists.txt @@ -0,0 +1,58 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_1d_background_mcc_picmi # name + 1 # dims + 2 # nprocs + OFF # eb + "inputs_base_1d_picmi.py --test --pythonsolver" # inputs + analysis_1d.py # analysis + diags/diag1000050 # 
output + OFF # dependency +) + +add_warpx_test( + test_1d_dsmc_picmi # name + 1 # dims + 2 # nprocs + OFF # eb + "inputs_base_1d_picmi.py --test --dsmc" # inputs + analysis_dsmc.py # analysis + diags/diag1000050 # output + OFF # dependency +) + +add_warpx_test( + test_2d_background_mcc # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_background_mcc # inputs + analysis_default_regression.py # analysis + diags/diag1000050 # output + OFF # dependency +) + +# FIXME: can we make this a single precision for now? +#add_warpx_test( +# test_2d_background_mcc_dp_psp # name +# 2 # dims +# 2 # nprocs +# OFF # eb +# inputs_test_2d_background_mcc_dp_psp # inputs +# analysis_default_regression.py # analysis +# diags/diag1000050 # output +# OFF # dependency +#) + +add_warpx_test( + test_2d_background_mcc_picmi # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_background_mcc_picmi.py # inputs + analysis_2d.py # analysis + diags/diag1000050 # output + OFF # dependency +) diff --git a/Examples/Physics_applications/capacitive_discharge/README.rst b/Examples/Physics_applications/capacitive_discharge/README similarity index 100% rename from Examples/Physics_applications/capacitive_discharge/README.rst rename to Examples/Physics_applications/capacitive_discharge/README diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_2d.py b/Examples/Physics_applications/capacitive_discharge/analysis_2d.py index 21f5c7714c4..e9782fabe23 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_2d.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_2d.py @@ -14,5 +14,5 @@ import checksumAPI my_check = checksumAPI.evaluate_checksum( - "background_mcc", "Python_background_mcc_plt000050", do_particles=True, rtol=5e-3 + "test_2d_background_mcc", "diags/diag1000050", do_particles=True, rtol=5e-3 ) diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_default_regression.py b/Examples/Physics_applications/capacitive_discharge/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Physics_applications/capacitive_discharge/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py b/Examples/Physics_applications/capacitive_discharge/inputs_base_1d_picmi.py similarity index 97% rename from Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py rename to Examples/Physics_applications/capacitive_discharge/inputs_base_1d_picmi.py index 2477eaf68dd..3de88f3b3cb 100644 --- a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py +++ b/Examples/Physics_applications/capacitive_discharge/inputs_base_1d_picmi.py @@ -359,14 +359,6 @@ def setup_run(self): # Add diagnostics for the CI test to be happy # ####################################################################### - if self.dsmc: - file_prefix = "Python_dsmc_1d_plt" - else: - if self.pythonsolver: - file_prefix = "Python_background_mcc_1d_plt" - else: - file_prefix = "Python_background_mcc_1d_tridiag_plt" - species = [self.electrons, self.ions] if self.dsmc: species.append(self.neutrals) @@ -374,16 +366,12 @@ def setup_run(self): species=species, name="diag1", period=0, - write_dir=".", - warpx_file_prefix=file_prefix, ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=self.grid, period=0, data_list=["rho_electrons", "rho_he_ions"], - write_dir=".", - 
warpx_file_prefix=file_prefix, ) self.sim.add_diagnostic(particle_diag) self.sim.add_diagnostic(field_diag) diff --git a/Examples/Physics_applications/capacitive_discharge/inputs_2d b/Examples/Physics_applications/capacitive_discharge/inputs_test_2d_background_mcc similarity index 98% rename from Examples/Physics_applications/capacitive_discharge/inputs_2d rename to Examples/Physics_applications/capacitive_discharge/inputs_test_2d_background_mcc index 2b11fd12978..e42e531c9e2 100644 --- a/Examples/Physics_applications/capacitive_discharge/inputs_2d +++ b/Examples/Physics_applications/capacitive_discharge/inputs_test_2d_background_mcc @@ -13,6 +13,7 @@ warpx.const_dt = 1.0/(400*freq) warpx.do_electrostatic = labframe warpx.self_fields_required_precision = 1e-06 warpx.use_filter = 0 +warpx.abort_on_warning_threshold = high amr.n_cell = 128 8 amr.max_grid_size = 128 diff --git a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py b/Examples/Physics_applications/capacitive_discharge/inputs_test_2d_background_mcc_picmi.py similarity index 98% rename from Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py rename to Examples/Physics_applications/capacitive_discharge/inputs_test_2d_background_mcc_picmi.py index 094a9cc8881..7879239d5ce 100755 --- a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py +++ b/Examples/Physics_applications/capacitive_discharge/inputs_test_2d_background_mcc_picmi.py @@ -311,16 +311,12 @@ def solve(self): particle_diag = picmi.ParticleDiagnostic( name="diag1", period=diagnostic_intervals, - write_dir=".", - warpx_file_prefix="Python_background_mcc_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, period=diagnostic_intervals, data_list=["rho_electrons", "rho_he_ions"], - write_dir=".", - warpx_file_prefix="Python_background_mcc_plt", ) ########################## diff --git a/Examples/Physics_applications/laser_acceleration/CMakeLists.txt b/Examples/Physics_applications/laser_acceleration/CMakeLists.txt new file mode 100644 index 00000000000..9f4a5f1dc58 --- /dev/null +++ b/Examples/Physics_applications/laser_acceleration/CMakeLists.txt @@ -0,0 +1,156 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_1d_laser_acceleration # name + 1 # dims + 2 # nprocs + OFF # eb + inputs_test_1d_laser_acceleration # inputs + analysis_default_regression.py # analysis + diags/diag1000100 # output + OFF # dependency +) + +add_warpx_test( + test_1d_laser_acceleration_fluid # name + 1 # dims + 2 # nprocs + OFF # eb + inputs_test_1d_laser_acceleration_fluid # inputs + analysis_1d_fluid.py # analysis + diags/diag1040000 # output + OFF # dependency +) + +add_warpx_test( + test_1d_laser_acceleration_fluid_boosted # name + 1 # dims + 2 # nprocs + OFF # eb + inputs_test_1d_laser_acceleration_fluid_boosted # inputs + analysis_1d_fluid_boosted.py # analysis + diags/diag1000001 # output + OFF # dependency +) + +add_warpx_test( + test_1d_laser_acceleration_picmi # name + 1 # dims + 2 # nprocs + OFF # eb + inputs_test_1d_laser_acceleration_picmi.py # inputs + analysis_default_regression.py # analysis + diags/diag1000100 # output + OFF # dependency +) + +add_warpx_test( + test_2d_laser_acceleration_boosted # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_laser_acceleration_boosted # inputs + analysis_default_regression.py # analysis + diags/diag1000002 # output + OFF # dependency +) + +add_warpx_test( + test_2d_laser_acceleration_mr # name + 2 # dims + 2 # 
nprocs + OFF # eb + inputs_test_2d_laser_acceleration_mr # inputs + analysis_default_regression.py # analysis + diags/diag1000200 # output + OFF # dependency +) + +add_warpx_test( + test_2d_laser_acceleration_mr_picmi # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_laser_acceleration_mr_picmi.py # inputs + analysis_default_regression.py # analysis + diags/diag1000200 # output + OFF # dependency +) + +add_warpx_test( + test_2d_refined_injection # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_refined_injection # inputs + analysis_refined_injection.py # analysis + diags/diag1000200 # output + OFF # dependency +) + +add_warpx_test( + test_3d_laser_acceleration # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_laser_acceleration # inputs + analysis_default_openpmd_regression.py # analysis + diags/diag1/ # output + OFF # dependency +) + +add_warpx_test( + test_3d_laser_acceleration_picmi # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_laser_acceleration_picmi.py # inputs + analysis_default_regression.py # analysis + diags/diag1000100 # output + OFF # dependency +) + +add_warpx_test( + test_3d_laser_acceleration_single_precision_comms # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_laser_acceleration_single_precision_comms # inputs + analysis_default_openpmd_regression.py # analysis + diags/diag1/ # output + OFF # dependency +) + +add_warpx_test( + test_rz_laser_acceleration # name + RZ # dims + 2 # nprocs + OFF # eb + inputs_test_rz_laser_acceleration # inputs + analysis_default_regression.py # analysis + diags/diag1000010 # output + OFF # dependency +) + +add_warpx_test( + test_rz_laser_acceleration_opmd # name + RZ # dims + 2 # nprocs + OFF # eb + inputs_test_rz_laser_acceleration_opmd # inputs + analysis_openpmd_rz.py # analysis + diags/diag1/ # output + OFF # dependency +) + +add_warpx_test( + test_rz_laser_acceleration_picmi # name + RZ # dims + 2 # nprocs + OFF # eb + inputs_test_rz_laser_acceleration_picmi.py # inputs + analysis_default_regression.py # analysis + diags/diag1000010 # output + OFF # dependency +) diff --git a/Examples/Physics_applications/laser_acceleration/README.rst b/Examples/Physics_applications/laser_acceleration/README similarity index 100% rename from Examples/Physics_applications/laser_acceleration/README.rst rename to Examples/Physics_applications/laser_acceleration/README diff --git a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluids.py b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid.py similarity index 100% rename from Examples/Physics_applications/laser_acceleration/analysis_1d_fluids.py rename to Examples/Physics_applications/laser_acceleration/analysis_1d_fluid.py diff --git a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluids_boosted.py b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py similarity index 100% rename from Examples/Physics_applications/laser_acceleration/analysis_1d_fluids_boosted.py rename to Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py diff --git a/Examples/Physics_applications/laser_acceleration/analysis_default_openpmd_regression.py b/Examples/Physics_applications/laser_acceleration/analysis_default_openpmd_regression.py new file mode 120000 index 00000000000..73e5ec47001 --- /dev/null +++ b/Examples/Physics_applications/laser_acceleration/analysis_default_openpmd_regression.py @@ -0,0 +1 @@ +../../analysis_default_openpmd_regression.py \ No newline at end of file diff 
--git a/Examples/Physics_applications/laser_acceleration/analysis_default_regression.py b/Examples/Physics_applications/laser_acceleration/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Physics_applications/laser_acceleration/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/openpmd_rz/analysis_openpmd_rz.py b/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py similarity index 94% rename from Examples/Tests/openpmd_rz/analysis_openpmd_rz.py rename to Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py index 13dcd0016a9..f136ffeb1d4 100755 --- a/Examples/Tests/openpmd_rz/analysis_openpmd_rz.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py @@ -1,9 +1,12 @@ #!/usr/bin/env python3 +import sys + import numpy as np import openpmd_api as io -series = io.Series("LaserAccelerationRZ_opmd_plt/openpmd_%T.h5", io.Access.read_only) +filename = sys.argv[1] +series = io.Series(f"{filename}/openpmd_%T.h5", io.Access.read_only) assert len(series.iterations) == 3, "improper number of iterations stored" diff --git a/Examples/Physics_applications/laser_acceleration/inputs_2d b/Examples/Physics_applications/laser_acceleration/inputs_base_2d similarity index 100% rename from Examples/Physics_applications/laser_acceleration/inputs_2d rename to Examples/Physics_applications/laser_acceleration/inputs_base_2d diff --git a/Examples/Physics_applications/laser_acceleration/inputs_3d b/Examples/Physics_applications/laser_acceleration/inputs_base_3d similarity index 100% rename from Examples/Physics_applications/laser_acceleration/inputs_3d rename to Examples/Physics_applications/laser_acceleration/inputs_base_3d diff --git a/Examples/Physics_applications/laser_acceleration/inputs_rz b/Examples/Physics_applications/laser_acceleration/inputs_base_rz similarity index 100% rename from Examples/Physics_applications/laser_acceleration/inputs_rz rename to Examples/Physics_applications/laser_acceleration/inputs_base_rz diff --git a/Examples/Physics_applications/laser_acceleration/inputs_1d b/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration similarity index 100% rename from Examples/Physics_applications/laser_acceleration/inputs_1d rename to Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration diff --git a/Examples/Physics_applications/laser_acceleration/inputs_1d_fluids b/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration_fluid similarity index 100% rename from Examples/Physics_applications/laser_acceleration/inputs_1d_fluids rename to Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration_fluid diff --git a/Examples/Physics_applications/laser_acceleration/inputs_1d_fluids_boosted b/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration_fluid_boosted similarity index 100% rename from Examples/Physics_applications/laser_acceleration/inputs_1d_fluids_boosted rename to Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration_fluid_boosted diff --git a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_1d.py b/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration_picmi.py similarity index 95% rename from Examples/Physics_applications/laser_acceleration/PICMI_inputs_1d.py rename to 
Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration_picmi.py index 328817c7b49..b7b86b47821 100755 --- a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_1d.py +++ b/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration_picmi.py @@ -82,16 +82,12 @@ particle_diag = picmi.ParticleDiagnostic( name="diag1", period=100, - write_dir=".", - warpx_file_prefix="Python_LaserAcceleration_1d_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, period=100, data_list=diag_field_list, - write_dir=".", - warpx_file_prefix="Python_LaserAcceleration_1d_plt", ) # Set up simulation diff --git a/Examples/Physics_applications/laser_acceleration/inputs_2d_boost b/Examples/Physics_applications/laser_acceleration/inputs_test_2d_laser_acceleration_boosted similarity index 98% rename from Examples/Physics_applications/laser_acceleration/inputs_2d_boost rename to Examples/Physics_applications/laser_acceleration/inputs_test_2d_laser_acceleration_boosted index c2aa92c3634..1997054e885 100644 --- a/Examples/Physics_applications/laser_acceleration/inputs_2d_boost +++ b/Examples/Physics_applications/laser_acceleration/inputs_test_2d_laser_acceleration_boosted @@ -1,9 +1,9 @@ ################################# ######### BOX PARAMETERS ######## ################################# -max_step = 2700 +max_step = 300 #2700 # stop_time = 1.9e-12 -amr.n_cell = 128 1024 +amr.n_cell = 64 512 #128 1024 amr.max_grid_size = 64 amr.blocking_factor = 32 amr.max_level = 0 diff --git a/Examples/Physics_applications/laser_acceleration/inputs_test_2d_laser_acceleration_mr b/Examples/Physics_applications/laser_acceleration/inputs_test_2d_laser_acceleration_mr new file mode 100644 index 00000000000..5a98fa590ee --- /dev/null +++ b/Examples/Physics_applications/laser_acceleration/inputs_test_2d_laser_acceleration_mr @@ -0,0 +1,2 @@ +# base input parameters +FILE = inputs_base_2d diff --git a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_2d.py b/Examples/Physics_applications/laser_acceleration/inputs_test_2d_laser_acceleration_mr_picmi.py similarity index 96% rename from Examples/Physics_applications/laser_acceleration/PICMI_inputs_2d.py rename to Examples/Physics_applications/laser_acceleration/inputs_test_2d_laser_acceleration_mr_picmi.py index 5e961fea826..8d112c0ac09 100755 --- a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_2d.py +++ b/Examples/Physics_applications/laser_acceleration/inputs_test_2d_laser_acceleration_mr_picmi.py @@ -117,16 +117,12 @@ particle_diag = picmi.ParticleDiagnostic( name="diag1", period=200, - write_dir=".", - warpx_file_prefix="Python_LaserAccelerationMR_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, period=200, data_list=diag_field_list, - write_dir=".", - warpx_file_prefix="Python_LaserAccelerationMR_plt", ) # Set up simulation diff --git a/Examples/Physics_applications/laser_acceleration/inputs_test_2d_refined_injection b/Examples/Physics_applications/laser_acceleration/inputs_test_2d_refined_injection new file mode 100644 index 00000000000..ed836e87e6b --- /dev/null +++ b/Examples/Physics_applications/laser_acceleration/inputs_test_2d_refined_injection @@ -0,0 +1,6 @@ +# base input parameters +FILE = inputs_base_2d + +# test input parameters +amr.ref_ratio_vect = 2 1 +warpx.refine_plasma = 1 diff --git a/Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration b/Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration new 
diff --git a/Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration b/Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration
new file mode 100644
index 00000000000..7665a846eef
--- /dev/null
+++ b/Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration
@@ -0,0 +1,2 @@
+# base input parameters
+FILE = inputs_base_3d
diff --git a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py b/Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_picmi.py
similarity index 96%
rename from Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py
rename to Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_picmi.py
index 4a736b7cc2b..999c92600e2 100755
--- a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py
+++ b/Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_picmi.py
@@ -117,16 +117,12 @@
 particle_diag = picmi.ParticleDiagnostic(
     name="diag1",
     period=100,
-    write_dir=".",
-    warpx_file_prefix="Python_LaserAcceleration_plt",
 )
 field_diag = picmi.FieldDiagnostic(
     name="diag1",
     grid=grid,
     period=100,
     data_list=diag_field_list,
-    write_dir=".",
-    warpx_file_prefix="Python_LaserAcceleration_plt",
 )
 
 # Set up simulation
diff --git a/Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_single_precision_comms b/Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_single_precision_comms
new file mode 100644
index 00000000000..99155ed0ecc
--- /dev/null
+++ b/Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_single_precision_comms
@@ -0,0 +1,5 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+warpx.do_single_precision_comms = 1
diff --git a/Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration b/Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration
new file mode 100644
index 00000000000..5879688b00a
--- /dev/null
+++ b/Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration
@@ -0,0 +1,6 @@
+# base input parameters
+FILE = inputs_base_rz
+
+# test input parameters
+diag1.dump_rz_modes = 1
+warpx.abort_on_warning_threshold = high
diff --git a/Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration_opmd b/Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration_opmd
new file mode 100644
index 00000000000..16a84950996
--- /dev/null
+++ b/Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration_opmd
@@ -0,0 +1,9 @@
+# base input parameters
+FILE = inputs_base_rz
+
+# test input parameters
+diag1.fields_to_plot = Er Bt Bz jr jt jz rho part_per_cell part_per_grid rho_beam rho_electrons
+diag1.format = openpmd
+diag1.openpmd_backend = h5
+max_step = 20
+warpx.abort_on_warning_threshold = high
diff --git a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_rz.py b/Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration_picmi.py
similarity index 96%
rename from Examples/Physics_applications/laser_acceleration/PICMI_inputs_rz.py
rename to Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration_picmi.py
index c19dc09dcb1..cfbf9879ed4 100644
--- a/Examples/Physics_applications/laser_acceleration/PICMI_inputs_rz.py
+++ b/Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration_picmi.py
@@ -116,8 +116,6 @@
     period=10,
     data_list=diag_field_list,
     warpx_dump_rz_modes=1,
-    write_dir=".",
warpx_file_prefix="Python_LaserAccelerationRZ_plt", ) diag_particle_list = ["weighting", "momentum"] particle_diag = picmi.ParticleDiagnostic( @@ -125,8 +123,6 @@ period=10, species=[electrons, beam], data_list=diag_particle_list, - write_dir=".", - warpx_file_prefix="Python_LaserAccelerationRZ_plt", ) # Set up simulation diff --git a/Examples/Physics_applications/laser_ion/CMakeLists.txt b/Examples/Physics_applications/laser_ion/CMakeLists.txt new file mode 100644 index 00000000000..ba51e4d1398 --- /dev/null +++ b/Examples/Physics_applications/laser_ion/CMakeLists.txt @@ -0,0 +1,24 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_laser_ion_acc # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_laser_ion_acc # inputs + analysis_default_openpmd_regression.py # analysis + diags/diag1/ # output + OFF # dependency +) + +add_warpx_test( + test_2d_laser_ion_acc_picmi # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_laser_ion_acc_picmi.py # inputs + analysis_default_openpmd_regression.py # analysis + diags/diag1/ # output + OFF # dependency +) diff --git a/Examples/Physics_applications/laser_ion/README.rst b/Examples/Physics_applications/laser_ion/README similarity index 100% rename from Examples/Physics_applications/laser_ion/README.rst rename to Examples/Physics_applications/laser_ion/README diff --git a/Examples/Physics_applications/laser_ion/analysis_default_openpmd_regression.py b/Examples/Physics_applications/laser_ion/analysis_default_openpmd_regression.py new file mode 120000 index 00000000000..73e5ec47001 --- /dev/null +++ b/Examples/Physics_applications/laser_ion/analysis_default_openpmd_regression.py @@ -0,0 +1 @@ +../../analysis_default_openpmd_regression.py \ No newline at end of file diff --git a/Examples/Physics_applications/laser_ion/inputs_2d b/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc similarity index 100% rename from Examples/Physics_applications/laser_ion/inputs_2d rename to Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc diff --git a/Examples/Physics_applications/laser_ion/PICMI_inputs_2d.py b/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py similarity index 98% rename from Examples/Physics_applications/laser_ion/PICMI_inputs_2d.py rename to Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py index e268d1d6c69..04f9111ec5f 100755 --- a/Examples/Physics_applications/laser_ion/PICMI_inputs_2d.py +++ b/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py @@ -140,9 +140,8 @@ # Diagnostics particle_diag = picmi.ParticleDiagnostic( - name="Python_LaserIonAcc2d_plt", + name="diag1", period=100, - write_dir="./diags", warpx_format="openpmd", warpx_openpmd_backend="h5", # demonstration of a spatial and momentum filter @@ -154,12 +153,11 @@ for ncell_comp, cr in zip([nx, nz], coarsening_ratio): ncell_field.append(int(ncell_comp / cr)) field_diag = picmi.FieldDiagnostic( - name="Python_LaserIonAcc2d_plt", + name="diag1", grid=grid, period=100, number_of_cells=ncell_field, data_list=["B", "E", "J", "rho", "rho_electrons", "rho_hydrogen"], - write_dir="./diags", warpx_format="openpmd", warpx_openpmd_backend="h5", ) @@ -167,7 +165,6 @@ particle_fw_diag = picmi.ParticleDiagnostic( name="openPMDfw", period=100, - write_dir="./diags", warpx_format="openpmd", warpx_openpmd_backend="h5", warpx_plot_filter_function="(uz>=0) * (x<1.0e-6) * (x>-1.0e-6)", @@ -176,7 +173,6 @@ 
diff --git a/Examples/Physics_applications/laser_ion/PICMI_inputs_2d.py b/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py
similarity index 98%
rename from Examples/Physics_applications/laser_ion/PICMI_inputs_2d.py
rename to Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py
index e268d1d6c69..04f9111ec5f 100755
--- a/Examples/Physics_applications/laser_ion/PICMI_inputs_2d.py
+++ b/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py
@@ -140,9 +140,8 @@
 
 # Diagnostics
 particle_diag = picmi.ParticleDiagnostic(
-    name="Python_LaserIonAcc2d_plt",
+    name="diag1",
     period=100,
-    write_dir="./diags",
     warpx_format="openpmd",
     warpx_openpmd_backend="h5",
     # demonstration of a spatial and momentum filter
@@ -154,12 +153,11 @@
 for ncell_comp, cr in zip([nx, nz], coarsening_ratio):
     ncell_field.append(int(ncell_comp / cr))
 field_diag = picmi.FieldDiagnostic(
-    name="Python_LaserIonAcc2d_plt",
+    name="diag1",
     grid=grid,
     period=100,
     number_of_cells=ncell_field,
     data_list=["B", "E", "J", "rho", "rho_electrons", "rho_hydrogen"],
-    write_dir="./diags",
     warpx_format="openpmd",
     warpx_openpmd_backend="h5",
 )
@@ -167,7 +165,6 @@
 particle_fw_diag = picmi.ParticleDiagnostic(
     name="openPMDfw",
     period=100,
-    write_dir="./diags",
     warpx_format="openpmd",
     warpx_openpmd_backend="h5",
     warpx_plot_filter_function="(uz>=0) * (x<1.0e-6) * (x>-1.0e-6)",
@@ -176,7 +173,6 @@
 particle_bw_diag = picmi.ParticleDiagnostic(
     name="openPMDbw",
     period=100,
-    write_dir="./diags",
     warpx_format="openpmd",
     warpx_openpmd_backend="h5",
     warpx_plot_filter_function="(uz<0)",
diff --git a/Examples/Physics_applications/plasma_acceleration/CMakeLists.txt b/Examples/Physics_applications/plasma_acceleration/CMakeLists.txt
new file mode 100644
index 00000000000..ec3e4b09563
--- /dev/null
+++ b/Examples/Physics_applications/plasma_acceleration/CMakeLists.txt
@@ -0,0 +1,90 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_1d_plasma_acceleration_picmi  # name
+    1  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_1d_plasma_acceleration_picmi.py  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1001000  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_plasma_acceleration_boosted  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_plasma_acceleration_boosted  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000020  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_plasma_acceleration_mr  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_plasma_acceleration_mr  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000400  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_plasma_acceleration_mr_momentum_conserving  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_plasma_acceleration_mr_momentum_conserving  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000400  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_plasma_acceleration_boosted  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_plasma_acceleration_boosted  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000005  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_plasma_acceleration_boosted_hybrid  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_plasma_acceleration_boosted_hybrid  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000025  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_plasma_acceleration_mr_picmi  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_plasma_acceleration_mr_picmi.py  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000002  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_plasma_acceleration_picmi  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_plasma_acceleration_picmi.py  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000010  # output
+    OFF  # dependency
+)
diff --git a/Examples/Physics_applications/plasma_acceleration/README.rst b/Examples/Physics_applications/plasma_acceleration/README
similarity index 100%
rename from Examples/Physics_applications/plasma_acceleration/README.rst
rename to Examples/Physics_applications/plasma_acceleration/README
diff --git a/Examples/Physics_applications/plasma_acceleration/analysis_default_regression.py b/Examples/Physics_applications/plasma_acceleration/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Physics_applications/plasma_acceleration/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Physics_applications/plasma_acceleration/inputs_2d b/Examples/Physics_applications/plasma_acceleration/inputs_base_2d
similarity index 96%
rename from Examples/Physics_applications/plasma_acceleration/inputs_2d
rename to Examples/Physics_applications/plasma_acceleration/inputs_base_2d
index 7e11ae7b3de..769e1ebce37 100644
--- a/Examples/Physics_applications/plasma_acceleration/inputs_2d
+++ b/Examples/Physics_applications/plasma_acceleration/inputs_base_2d
@@ -1,11 +1,12 @@
 #################################
 ####### GENERAL PARAMETERS ######
 #################################
-stop_time = 3.7e-12
-amr.n_cell = 64 128
+#stop_time = 3.7e-12
+max_step = 400
+amr.n_cell = 32 512 #64 128
 amr.max_grid_size = 128
 amr.blocking_factor = 32
-amr.max_level = 0
+amr.max_level = 1
 geometry.dims = 2
 geometry.prob_lo = -125.e-6 -149.e-6
 geometry.prob_hi = 125.e-6 1.e-6
diff --git a/Examples/Physics_applications/plasma_acceleration/inputs_3d_boost b/Examples/Physics_applications/plasma_acceleration/inputs_base_3d
similarity index 99%
rename from Examples/Physics_applications/plasma_acceleration/inputs_3d_boost
rename to Examples/Physics_applications/plasma_acceleration/inputs_base_3d
index 2264872ec43..66debc4f99f 100644
--- a/Examples/Physics_applications/plasma_acceleration/inputs_3d_boost
+++ b/Examples/Physics_applications/plasma_acceleration/inputs_base_3d
@@ -2,7 +2,7 @@
 ####### GENERAL PARAMETERS ######
 #################################
 stop_time = 3.93151387287e-11
-amr.n_cell = 32 32 256
+amr.n_cell = 64 64 128 #32 32 256
 amr.max_grid_size = 64
 amr.blocking_factor = 32
 amr.max_level = 0
diff --git a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_1d.py b/Examples/Physics_applications/plasma_acceleration/inputs_test_1d_plasma_acceleration_picmi.py
similarity index 96%
rename from Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_1d.py
rename to Examples/Physics_applications/plasma_acceleration/inputs_test_1d_plasma_acceleration_picmi.py
index 7bb08bc2e8e..4bde8cb1343 100755
--- a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_1d.py
+++ b/Examples/Physics_applications/plasma_acceleration/inputs_test_1d_plasma_acceleration_picmi.py
@@ -78,8 +78,6 @@
     grid=grid,
     period=max_steps,
     data_list=["Ex", "Ey", "Ez", "Jx", "Jy", "Jz", "part_per_cell"],
-    write_dir=".",
-    warpx_file_prefix="Python_PlasmaAcceleration1d_plt",
 )
 
 part_diag = picmi.ParticleDiagnostic(
diff --git a/Examples/Physics_applications/plasma_acceleration/inputs_2d_boost b/Examples/Physics_applications/plasma_acceleration/inputs_test_2d_plasma_acceleration_boosted
similarity index 98%
rename from Examples/Physics_applications/plasma_acceleration/inputs_2d_boost
rename to Examples/Physics_applications/plasma_acceleration/inputs_test_2d_plasma_acceleration_boosted
index 76dcd3ee286..5d65649353c 100644
--- a/Examples/Physics_applications/plasma_acceleration/inputs_2d_boost
+++ b/Examples/Physics_applications/plasma_acceleration/inputs_test_2d_plasma_acceleration_boosted
@@ -1,8 +1,8 @@
 #################################
 ####### GENERAL PARAMETERS ######
 #################################
-max_step = 2500
-amr.n_cell = 64 640
+max_step = 20 #2500
+amr.n_cell = 64 256 #64 640
 amr.max_grid_size = 128
 amr.blocking_factor = 32
 amr.max_level = 0
diff --git a/Examples/Physics_applications/plasma_acceleration/inputs_test_2d_plasma_acceleration_mr b/Examples/Physics_applications/plasma_acceleration/inputs_test_2d_plasma_acceleration_mr
new file mode 100644
index 00000000000..5a98fa590ee
--- /dev/null
+++ b/Examples/Physics_applications/plasma_acceleration/inputs_test_2d_plasma_acceleration_mr
@@ -0,0 +1,2 @@
+# base input parameters
+FILE = inputs_base_2d
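The one-line test inputs rely on FILE = <base> pulling in a shared base input deck, with any remaining lines overriding it per test. A hypothetical helper (not part of WarpX) that expands such files, assuming FILE acts as a plain textual include:

    # Hypothetical helper, not part of WarpX: expand the FILE = <base>
    # line of a test input, assuming FILE acts as a textual include.
    def expand_inputs(path):
        lines = []
        with open(path) as f:
            for raw in f:
                line = raw.rstrip("\n")
                if line.strip().startswith("FILE"):
                    base = line.split("=", 1)[1].strip()
                    lines.extend(expand_inputs(base))  # inline the base deck
                else:
                    lines.append(line)
        return lines

    # e.g. print("\n".join(expand_inputs("inputs_test_2d_plasma_acceleration_mr")))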
diff --git a/Examples/Physics_applications/plasma_acceleration/inputs_test_2d_plasma_acceleration_mr_momentum_conserving b/Examples/Physics_applications/plasma_acceleration/inputs_test_2d_plasma_acceleration_mr_momentum_conserving
new file mode 100644
index 00000000000..c21068325a0
--- /dev/null
+++ b/Examples/Physics_applications/plasma_acceleration/inputs_test_2d_plasma_acceleration_mr_momentum_conserving
@@ -0,0 +1,5 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.field_gathering = momentum-conserving
diff --git a/Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_boosted b/Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_boosted
new file mode 100644
index 00000000000..62abe8e9df8
--- /dev/null
+++ b/Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_boosted
@@ -0,0 +1,5 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+max_step = 5
diff --git a/Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_boosted_hybrid b/Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_boosted_hybrid
new file mode 100644
index 00000000000..3c085b64b1b
--- /dev/null
+++ b/Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_boosted_hybrid
@@ -0,0 +1,7 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+max_step = 25
+warpx.do_current_centering = 0
+warpx.grid_type = hybrid
diff --git a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_mr.py b/Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_mr_picmi.py
similarity index 97%
rename from Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_mr.py
rename to Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_mr_picmi.py
index df5e9e9808c..9eb640ade95 100755
--- a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_mr.py
+++ b/Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_mr_picmi.py
@@ -89,8 +89,6 @@
     grid=grid,
     period=2,
     data_list=["Ex", "Ey", "Ez", "Jx", "Jy", "Jz", "part_per_cell"],
-    write_dir=".",
-    warpx_file_prefix="Python_PlasmaAccelerationMR_plt",
 )
 
 part_diag = picmi.ParticleDiagnostic(
diff --git a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration.py b/Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_picmi.py
similarity index 97%
rename from Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration.py
rename to Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_picmi.py
index 596f6962618..d5b99dbed97 100755
--- a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration.py
+++ b/Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_picmi.py
@@ -84,8 +84,6 @@
     grid=grid,
     period=max_steps,
     data_list=["Ex", "Ey", "Ez", "Jx", "Jy", "Jz", "part_per_cell"],
-    write_dir=".",
-    warpx_file_prefix="Python_PlasmaAcceleration_plt",
 )
 
 part_diag = picmi.ParticleDiagnostic(
diff --git a/Examples/Physics_applications/plasma_mirror/CMakeLists.txt b/Examples/Physics_applications/plasma_mirror/CMakeLists.txt
new file mode 100644
index 00000000000..b90e775a4b5
--- /dev/null
+++ b/Examples/Physics_applications/plasma_mirror/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_2d_plasma_mirror  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_plasma_mirror  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000020  # output
+    OFF  # dependency
+)
diff --git a/Examples/Physics_applications/plasma_mirror/README.rst b/Examples/Physics_applications/plasma_mirror/README
similarity index 100%
rename from Examples/Physics_applications/plasma_mirror/README.rst
rename to Examples/Physics_applications/plasma_mirror/README
diff --git a/Examples/Physics_applications/plasma_mirror/analysis_default_regression.py b/Examples/Physics_applications/plasma_mirror/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Physics_applications/plasma_mirror/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Physics_applications/plasma_mirror/inputs_2d b/Examples/Physics_applications/plasma_mirror/inputs_test_2d_plasma_mirror
similarity index 98%
rename from Examples/Physics_applications/plasma_mirror/inputs_2d
rename to Examples/Physics_applications/plasma_mirror/inputs_test_2d_plasma_mirror
index 714af80affe..c2c67fe928c 100644
--- a/Examples/Physics_applications/plasma_mirror/inputs_2d
+++ b/Examples/Physics_applications/plasma_mirror/inputs_test_2d_plasma_mirror
@@ -1,8 +1,8 @@
 #################################
 ####### GENERAL PARAMETERS ######
 #################################
-max_step = 1000
-amr.n_cell = 1024 512
+max_step = 20 #1000
+amr.n_cell = 256 128 #1024 512
 amr.max_grid_size = 128
 amr.blocking_factor = 32
 amr.max_level = 0
diff --git a/Examples/Physics_applications/spacecraft_charging/CMakeLists.txt b/Examples/Physics_applications/spacecraft_charging/CMakeLists.txt
new file mode 100644
index 00000000000..181304e9193
--- /dev/null
+++ b/Examples/Physics_applications/spacecraft_charging/CMakeLists.txt
@@ -0,0 +1,15 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+if(WarpX_EB)
+add_warpx_test(
+    test_rz_spacecraft_charging_picmi  # name
+    RZ  # dims
+    2  # nprocs
+    ON  # eb
+    inputs_test_rz_spacecraft_charging_picmi.py  # inputs
+    analysis.py  # analysis
+    diags/diag1/  # output
+    OFF  # dependency
+)
+endif()
diff --git a/Examples/Physics_applications/spacecraft_charging/analysis.py b/Examples/Physics_applications/spacecraft_charging/analysis.py
index 11374d9fc95..6528a3bde65 100755
--- a/Examples/Physics_applications/spacecraft_charging/analysis.py
+++ b/Examples/Physics_applications/spacecraft_charging/analysis.py
@@ -30,7 +30,7 @@
 test_name = os.path.split(os.getcwd())[1]
 checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd")
 
-ts = OpenPMDTimeSeries("./spacecraft_charging_plt")
+ts = OpenPMDTimeSeries(filename)
 dt = 1.27e-8
 t = []
 phi = []
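With the hard-coded ./spacecraft_charging_plt prefix gone, the time series is opened from the path handed to the analysis. A sketch of reading that openPMD output with openpmd-viewer, using the phi field this test plots; the diags/diag1/ path is taken from the CTest entry above:

    # Sketch: read the test output with openpmd-viewer, as the analysis
    # above now does via OpenPMDTimeSeries(filename).
    from openpmd_viewer import OpenPMDTimeSeries

    ts = OpenPMDTimeSeries("diags/diag1/")
    for it in ts.iterations:
        phi, info = ts.get_field(field="phi", iteration=it)
        print(it, phi.min(), phi.max())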
diff --git a/Examples/Physics_applications/spacecraft_charging/PICMI_inputs_rz.py b/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py
similarity index 98%
rename from Examples/Physics_applications/spacecraft_charging/PICMI_inputs_rz.py
rename to Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py
index b44158284fe..e3bc888f600 100644
--- a/Examples/Physics_applications/spacecraft_charging/PICMI_inputs_rz.py
+++ b/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py
@@ -262,8 +262,6 @@ def compute_actual_charge_on_spacecraft():
     period=diagnostic_interval,
     data_list=["Er", "Ez", "phi", "rho", "rho_electrons", "rho_protons"],
     warpx_format="openpmd",
-    write_dir=".",
-    warpx_file_prefix="spacecraft_charging_plt",
 )
 
 part_diag = picmi.ParticleDiagnostic(
@@ -271,8 +269,6 @@ def compute_actual_charge_on_spacecraft():
     period=diagnostic_interval,
     species=[electrons, protons],
     warpx_format="openpmd",
-    write_dir=".",
-    warpx_file_prefix="spacecraft_charging_plt",
 )
 
 part_scraping_boundary_diag = picmi.ParticleBoundaryScrapingDiagnostic(
diff --git a/Examples/Physics_applications/uniform_plasma/CMakeLists.txt b/Examples/Physics_applications/uniform_plasma/CMakeLists.txt
new file mode 100644
index 00000000000..f654dc79063
--- /dev/null
+++ b/Examples/Physics_applications/uniform_plasma/CMakeLists.txt
@@ -0,0 +1,35 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_2d_uniform_plasma  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_uniform_plasma  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000010  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_uniform_plasma  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_uniform_plasma  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000010  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_uniform_plasma_restart  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_uniform_plasma_restart  # inputs
+    analysis_default_restart.py  # analysis
+    diags/diag1000010  # output
+    test_3d_uniform_plasma  # dependency
+)
diff --git a/Examples/Physics_applications/uniform_plasma/README.rst b/Examples/Physics_applications/uniform_plasma/README
similarity index 100%
rename from Examples/Physics_applications/uniform_plasma/README.rst
rename to Examples/Physics_applications/uniform_plasma/README
diff --git a/Examples/Physics_applications/uniform_plasma/analysis_default_regression.py b/Examples/Physics_applications/uniform_plasma/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Physics_applications/uniform_plasma/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Physics_applications/uniform_plasma/analysis_default_restart.py b/Examples/Physics_applications/uniform_plasma/analysis_default_restart.py
new file mode 120000
index 00000000000..0459986eebc
--- /dev/null
+++ b/Examples/Physics_applications/uniform_plasma/analysis_default_restart.py
@@ -0,0 +1 @@
+../../analysis_default_restart.py
\ No newline at end of file
diff --git a/Examples/Physics_applications/uniform_plasma/inputs_3d b/Examples/Physics_applications/uniform_plasma/inputs_base_3d
similarity index 100%
rename from Examples/Physics_applications/uniform_plasma/inputs_3d
rename to Examples/Physics_applications/uniform_plasma/inputs_base_3d
diff --git a/Examples/Physics_applications/uniform_plasma/inputs_2d b/Examples/Physics_applications/uniform_plasma/inputs_test_2d_uniform_plasma
similarity index 100%
rename from Examples/Physics_applications/uniform_plasma/inputs_2d
rename to Examples/Physics_applications/uniform_plasma/inputs_test_2d_uniform_plasma
diff --git a/Examples/Physics_applications/uniform_plasma/inputs_test_3d_uniform_plasma b/Examples/Physics_applications/uniform_plasma/inputs_test_3d_uniform_plasma
new file mode 100644
index 00000000000..7665a846eef
--- /dev/null
+++ b/Examples/Physics_applications/uniform_plasma/inputs_test_3d_uniform_plasma
@@ -0,0 +1,2 @@
+# base input parameters
+FILE = inputs_base_3d
diff --git a/Examples/Physics_applications/uniform_plasma/inputs_test_3d_uniform_plasma_restart b/Examples/Physics_applications/uniform_plasma/inputs_test_3d_uniform_plasma_restart
new file mode 100644
index 00000000000..4711ece3843
--- /dev/null
+++ b/Examples/Physics_applications/uniform_plasma/inputs_test_3d_uniform_plasma_restart
@@ -0,0 +1,5 @@
+# base input parameters
+FILE = inputs_test_3d_uniform_plasma
+
+# test input parameters
+amr.restart = "../test_3d_uniform_plasma/diags/chk000006"
diff --git a/Examples/Tests/CMakeLists.txt b/Examples/Tests/CMakeLists.txt
new file mode 100644
index 00000000000..108a28a6539
--- /dev/null
+++ b/Examples/Tests/CMakeLists.txt
@@ -0,0 +1,78 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_subdirectory(accelerator_lattice)
+add_subdirectory(boosted_diags)
+add_subdirectory(boundaries)
+add_subdirectory(btd_rz)
+add_subdirectory(collider_relevant_diags)
+add_subdirectory(collision)
+add_subdirectory(diff_lumi_diag)
+add_subdirectory(divb_cleaning)
+add_subdirectory(dive_cleaning)
+add_subdirectory(electrostatic_dirichlet_bc)
+add_subdirectory(electrostatic_sphere)
+add_subdirectory(electrostatic_sphere_eb)
+add_subdirectory(embedded_boundary_cube)
+add_subdirectory(embedded_boundary_diffraction)
+add_subdirectory(embedded_boundary_python_api)
+add_subdirectory(embedded_boundary_rotated_cube)
+add_subdirectory(embedded_circle)
+add_subdirectory(energy_conserving_thermal_plasma)
+add_subdirectory(field_probe)
+add_subdirectory(flux_injection)
+add_subdirectory(gaussian_beam)
+add_subdirectory(implicit)
+add_subdirectory(initial_distribution)
+add_subdirectory(initial_plasma_profile)
+add_subdirectory(ionization)
+add_subdirectory(ion_stopping)
+add_subdirectory(langmuir)
+add_subdirectory(langmuir_fluids)
+add_subdirectory(larmor)
+add_subdirectory(laser_injection)
+add_subdirectory(laser_injection_from_file)
+add_subdirectory(laser_on_fine)
+add_subdirectory(load_external_field)
+add_subdirectory(magnetostatic_eb)
+add_subdirectory(maxwell_hybrid_qed)
+add_subdirectory(nci_fdtd_stability)
+add_subdirectory(nci_psatd_stability)
+add_subdirectory(nodal_electrostatic)
+add_subdirectory(nuclear_fusion)
+add_subdirectory(ohm_solver_em_modes)
+add_subdirectory(ohm_solver_ion_beam_instability)
+add_subdirectory(ohm_solver_ion_Landau_damping)
+add_subdirectory(ohm_solver_magnetic_reconnection)
+add_subdirectory(open_bc_poisson_solver)
+add_subdirectory(particle_boundary_interaction)
+add_subdirectory(particle_boundary_process)
+add_subdirectory(particle_boundary_scrape)
+add_subdirectory(particle_data_python)
+add_subdirectory(particle_fields_diags)
+add_subdirectory(particle_pusher)
+add_subdirectory(particle_thermal_boundary)
+add_subdirectory(particles_in_pml)
+add_subdirectory(pass_mpi_communicator)
+add_subdirectory(pec)
+add_subdirectory(photon_pusher)
+add_subdirectory(plasma_lens)
+add_subdirectory(pml)
+add_subdirectory(point_of_contact_eb)
+add_subdirectory(projection_divb_cleaner)
+add_subdirectory(python_wrappers)
+add_subdirectory(qed)
+add_subdirectory(radiation_reaction)
+add_subdirectory(reduced_diags)
+add_subdirectory(relativistic_space_charge_initialization)
+add_subdirectory(repelling_particles)
+add_subdirectory(resampling)
+add_subdirectory(restart)
+add_subdirectory(restart_eb)
+add_subdirectory(rigid_injection)
+add_subdirectory(scraping)
+add_subdirectory(silver_mueller)
+add_subdirectory(single_particle)
+add_subdirectory(space_charge_initialization)
+add_subdirectory(subcycling)
+add_subdirectory(vay_deposition)
diff --git a/Examples/Tests/accelerator_lattice/CMakeLists.txt b/Examples/Tests/accelerator_lattice/CMakeLists.txt
new file mode 100644
index 00000000000..7fc6b4dc8e4
--- /dev/null
+++ b/Examples/Tests/accelerator_lattice/CMakeLists.txt
@@ -0,0 +1,35 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_3d_hard_edged_quadrupoles  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_hard_edged_quadrupoles  # inputs
+    analysis.py  # analysis
+    diags/diag1000050  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_hard_edged_quadrupoles_boosted  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_hard_edged_quadrupoles_boosted  # inputs
+    analysis.py  # analysis
+    diags/diag1000050  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_hard_edged_quadrupoles_moving  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_hard_edged_quadrupoles_moving  # inputs
+    analysis.py  # analysis
+    diags/diag1000050  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/AcceleratorLattice/analysis.py b/Examples/Tests/accelerator_lattice/analysis.py
similarity index 100%
rename from Examples/Tests/AcceleratorLattice/analysis.py
rename to Examples/Tests/accelerator_lattice/analysis.py
diff --git a/Examples/Tests/AcceleratorLattice/inputs_quad_3d b/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles
similarity index 100%
rename from Examples/Tests/AcceleratorLattice/inputs_quad_3d
rename to Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles
diff --git a/Examples/Tests/AcceleratorLattice/inputs_quad_boosted_3d b/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted
similarity index 100%
rename from Examples/Tests/AcceleratorLattice/inputs_quad_boosted_3d
rename to Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted
diff --git a/Examples/Tests/AcceleratorLattice/inputs_quad_moving_3d b/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_moving
similarity index 100%
rename from Examples/Tests/AcceleratorLattice/inputs_quad_moving_3d
rename to Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_moving
diff --git a/Examples/Tests/boosted_diags/CMakeLists.txt b/Examples/Tests/boosted_diags/CMakeLists.txt
new file mode 100644
index 00000000000..f0a6ceaf397
--- /dev/null
+++ b/Examples/Tests/boosted_diags/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_3d_laser_acceleration_btd  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_laser_acceleration_btd  # inputs
+    analysis.py  # analysis
+    diags/diag1000003  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/boosted_diags/inputs_3d b/Examples/Tests/boosted_diags/inputs_test_3d_laser_acceleration_btd
similarity index 100%
rename from Examples/Tests/boosted_diags/inputs_3d
rename to Examples/Tests/boosted_diags/inputs_test_3d_laser_acceleration_btd
diff --git a/Examples/Tests/boundaries/CMakeLists.txt b/Examples/Tests/boundaries/CMakeLists.txt
new file mode 100644
index 00000000000..928b4b95071
--- /dev/null
+++ b/Examples/Tests/boundaries/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_3d_particle_boundaries  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_particle_boundaries  # inputs
+    analysis.py  # analysis
+    diags/diag1000008  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/boundaries/inputs_3d b/Examples/Tests/boundaries/inputs_test_3d_particle_boundaries
similarity index 100%
rename from Examples/Tests/boundaries/inputs_3d
rename to Examples/Tests/boundaries/inputs_test_3d_particle_boundaries
diff --git a/Examples/Tests/btd_rz/CMakeLists.txt b/Examples/Tests/btd_rz/CMakeLists.txt
new file mode 100644
index 00000000000..15a01eb1680
--- /dev/null
+++ b/Examples/Tests/btd_rz/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_rz_btd  # name
+    RZ  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_rz_btd  # inputs
+    analysis.py  # analysis
+    diags/diag1000289  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py b/Examples/Tests/btd_rz/analysis.py
similarity index 100%
rename from Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py
rename to Examples/Tests/btd_rz/analysis.py
diff --git a/Examples/Tests/btd_rz/inputs_rz_z_boosted_BTD b/Examples/Tests/btd_rz/inputs_test_rz_btd
similarity index 100%
rename from Examples/Tests/btd_rz/inputs_rz_z_boosted_BTD
rename to Examples/Tests/btd_rz/inputs_test_rz_btd
diff --git a/Examples/Tests/collider_relevant_diags/CMakeLists.txt b/Examples/Tests/collider_relevant_diags/CMakeLists.txt
new file mode 100644
index 00000000000..ad999477507
--- /dev/null
+++ b/Examples/Tests/collider_relevant_diags/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_3d_collider_diagnostics  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_collider_diagnostics  # inputs
+    analysis.py  # analysis
+    diags/diag1000001  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/collider_relevant_diags/analysis_multiple_particles.py b/Examples/Tests/collider_relevant_diags/analysis.py
similarity index 99%
rename from Examples/Tests/collider_relevant_diags/analysis_multiple_particles.py
rename to Examples/Tests/collider_relevant_diags/analysis.py
index ab624bdac7e..f6eb9de124f 100755
--- a/Examples/Tests/collider_relevant_diags/analysis_multiple_particles.py
+++ b/Examples/Tests/collider_relevant_diags/analysis.py
@@ -61,7 +61,7 @@ def dL_dt():
     return lumi
 
-input_dict = parse_input_file("inputs_3d_multiple_particles")
+input_dict = parse_input_file("warpx_used_inputs")
 
 Ex, Ey, Ez = [float(w) for w in input_dict["particles.E_external_particle"]]
 Bx, By, Bz = [float(w) for w in input_dict["particles.B_external_particle"]]
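The analysis now reads warpx_used_inputs, the parameter dump WarpX writes into the run directory, instead of a hard-coded inputs file name. A minimal sketch of a parse_input_file() consistent with how the script above consumes its result (a dict mapping keys to lists of string tokens); the project's actual helper may differ:

    # Minimal sketch of a parse_input_file(); the analysis above indexes
    # the result like input_dict["particles.E_external_particle"] and
    # converts the tokens with float(), so return lists of strings.
    def parse_input_file(path):
        input_dict = {}
        with open(path) as f:
            for raw in f:
                line = raw.split("#", 1)[0].strip()  # strip comments
                if "=" in line:
                    key, value = line.split("=", 1)
                    input_dict[key.strip()] = value.split()
        return input_dict

    # input_dict = parse_input_file("warpx_used_inputs")
    # Ex, Ey, Ez = [float(w) for w in input_dict["particles.E_external_particle"]]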
diff --git a/Examples/Tests/collider_relevant_diags/inputs_3d_multiple_particles b/Examples/Tests/collider_relevant_diags/inputs_test_3d_collider_diagnostics
similarity index 99%
rename from Examples/Tests/collider_relevant_diags/inputs_3d_multiple_particles
rename to Examples/Tests/collider_relevant_diags/inputs_test_3d_collider_diagnostics
index 1efc68c33b0..d88e0b767d6 100644
--- a/Examples/Tests/collider_relevant_diags/inputs_3d_multiple_particles
+++ b/Examples/Tests/collider_relevant_diags/inputs_test_3d_collider_diagnostics
@@ -18,6 +18,7 @@ geometry.prob_lo = 0 0 0
 geometry.prob_hi = 8 8 8
 particles.do_tiling = 0
 warpx.use_filter = 0
+warpx.abort_on_warning_threshold = high
 
 #################################
 ######## BOUNDARY CONDITION #####
diff --git a/Examples/Tests/collision/CMakeLists.txt b/Examples/Tests/collision/CMakeLists.txt
new file mode 100644
index 00000000000..4293ba248e7
--- /dev/null
+++ b/Examples/Tests/collision/CMakeLists.txt
@@ -0,0 +1,68 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_1d_collision_z  # name
+    1  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_1d_collision_z  # inputs
+    analysis_collision_1d.py  # analysis
+    diags/diag1000600  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_collision_xz  # name
+    2  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_2d_collision_xz  # inputs
+    analysis_collision_2d.py  # analysis
+    diags/diag1000150  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_collision_xz_picmi  # name
+    2  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_2d_collision_xz_picmi.py  # inputs
+    analysis_collision_2d.py  # analysis
+    diags/diag1000150  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_collision_iso  # name
+    3  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_3d_collision_iso  # inputs
+    analysis_collision_3d_isotropization.py  # analysis
+    diags/diag1000100  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_collision_xyz  # name
+    3  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_3d_collision_xyz  # inputs
+    analysis_collision_3d.py  # analysis
+    diags/diag1000150  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_rz_collision  # name
+    RZ  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_rz_collision  # inputs
+    analysis_collision_rz.py  # analysis
+    diags/diag1000150  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/collision/analysis_collision_2d.py b/Examples/Tests/collision/analysis_collision_2d.py
index 92153f0870e..7e1d74001a3 100755
--- a/Examples/Tests/collision/analysis_collision_2d.py
+++ b/Examples/Tests/collision/analysis_collision_2d.py
@@ -35,6 +35,8 @@
 sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
 import checksumAPI
 
+test_name = os.path.split(os.getcwd())[1]
+
 tolerance = 0.001
 
 ng = 64
@@ -62,6 +64,11 @@
 # Collect all output files in fn_list (names match pattern prefix + arbitrary number)
 fn_list = glob.glob(prefix + "*[0-9]")
 
+print(last_fn)
+print(last_it)
+print(prefix)
+print(fn_list)
+
 error = 0.0
 nt = 0
 for fn in fn_list:
@@ -87,7 +94,7 @@
 # The second part of the analysis is not done for the Python test
 # since the particle filter function is not accessible from PICMI yet
-if "Python" in last_fn:
+if "picmi" in test_name:
     exit()
 
 ## In the second part of the test, we verify that the diagnostic particle filter function works as
@@ -114,5 +121,4 @@
     last_fn, random_filter_fn, random_fraction, dim, species_name
 )
 
-test_name = os.path.split(os.getcwd())[1]
 checksumAPI.evaluate_checksum(test_name, last_fn)
diff --git a/Examples/Tests/collision/inputs_1d b/Examples/Tests/collision/inputs_test_1d_collision_z
similarity index 100%
rename from Examples/Tests/collision/inputs_1d
rename to Examples/Tests/collision/inputs_test_1d_collision_z
diff --git a/Examples/Tests/collision/inputs_2d b/Examples/Tests/collision/inputs_test_2d_collision_xz
similarity index 100%
rename from Examples/Tests/collision/inputs_2d
rename to Examples/Tests/collision/inputs_test_2d_collision_xz
diff --git a/Examples/Tests/collision/PICMI_inputs_2d.py b/Examples/Tests/collision/inputs_test_2d_collision_xz_picmi.py
similarity index 94%
rename from Examples/Tests/collision/PICMI_inputs_2d.py
rename to Examples/Tests/collision/inputs_test_2d_collision_xz_picmi.py
index 2a66bea5046..f1b3e8d3b28 100755
--- a/Examples/Tests/collision/PICMI_inputs_2d.py
+++ b/Examples/Tests/collision/inputs_test_2d_collision_xz_picmi.py
@@ -102,16 +102,12 @@
 ######### DIAGNOSTICS ###########
 #################################
 
-particle_diag = picmi.ParticleDiagnostic(
-    name="diag1", period=10, write_dir=".", warpx_file_prefix="Python_collisionXZ_plt"
-)
+particle_diag = picmi.ParticleDiagnostic(name="diag1", period=10)
 field_diag = picmi.FieldDiagnostic(
     name="diag1",
     grid=grid,
     period=10,
     data_list=[],
-    write_dir=".",
-    warpx_file_prefix="Python_collisionXZ_plt",
 )
 
 #################################
diff --git a/Examples/Tests/collision/inputs_3d_isotropization b/Examples/Tests/collision/inputs_test_3d_collision_iso
similarity index 100%
rename from Examples/Tests/collision/inputs_3d_isotropization
rename to Examples/Tests/collision/inputs_test_3d_collision_iso
diff --git a/Examples/Tests/collision/inputs_3d b/Examples/Tests/collision/inputs_test_3d_collision_xyz
similarity index 100%
rename from Examples/Tests/collision/inputs_3d
rename to Examples/Tests/collision/inputs_test_3d_collision_xyz
diff --git a/Examples/Tests/collision/inputs_rz b/Examples/Tests/collision/inputs_test_rz_collision
similarity index 100%
rename from Examples/Tests/collision/inputs_rz
rename to Examples/Tests/collision/inputs_test_rz_collision
diff --git a/Examples/Tests/diff_lumi_diag/CMakeLists.txt b/Examples/Tests/diff_lumi_diag/CMakeLists.txt
new file mode 100644
index 00000000000..2385a758fb6
--- /dev/null
+++ b/Examples/Tests/diff_lumi_diag/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_3d_diff_lumi_diag  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_diff_lumi_diag  # inputs
+    analysis.py  # analysis
+    diags/diag1000080  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/diff_lumi_diag/inputs b/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag
similarity index 100%
rename from Examples/Tests/diff_lumi_diag/inputs
rename to Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag
diff --git a/Examples/Tests/divb_cleaning/CMakeLists.txt b/Examples/Tests/divb_cleaning/CMakeLists.txt
new file mode 100644
index 00000000000..f0a8162212f
--- /dev/null
+++ b/Examples/Tests/divb_cleaning/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_3d_divb_cleaning  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_divb_cleaning  # inputs
+    analysis.py  # analysis
+    diags/diag1000400  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/divb_cleaning/analysis.py b/Examples/Tests/divb_cleaning/analysis.py
index 1692d14b632..e534e5b0d59 100755
--- a/Examples/Tests/divb_cleaning/analysis.py
+++ b/Examples/Tests/divb_cleaning/analysis.py
@@ -24,8 +24,8 @@
 fn = sys.argv[1]
 
 # Load yt data
-ds_old = yt.load("divb_cleaning_3d_plt000398")
-ds_mid = yt.load("divb_cleaning_3d_plt000399")
+ds_old = yt.load("diags/diag1000398")
+ds_mid = yt.load("diags/diag1000399")
 ds_new = yt.load(fn)  # this is the last plotfile
 
 ad_old = ds_old.covering_grid(
diff --git a/Examples/Tests/divb_cleaning/inputs_3d b/Examples/Tests/divb_cleaning/inputs_test_3d_divb_cleaning
similarity index 100%
rename from Examples/Tests/divb_cleaning/inputs_3d
rename to Examples/Tests/divb_cleaning/inputs_test_3d_divb_cleaning
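The plotfile paths above follow one convention: the diagnostic name plus a zero-padded iteration number under diags/. A small sketch of deriving neighboring outputs from an iteration number, matching the paths used in this series (the six-digit padding is inferred from these examples):

    # Plotfile naming as used by these tests: diagnostic name plus a
    # zero-padded iteration number under diags/.
    def plotfile_path(iteration, name="diag1", ndigits=6):
        return f"diags/{name}{iteration:0{ndigits}d}"

    assert plotfile_path(398) == "diags/diag1000398"
    assert plotfile_path(399) == "diags/diag1000399"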
diff --git a/Examples/Tests/dive_cleaning/CMakeLists.txt b/Examples/Tests/dive_cleaning/CMakeLists.txt
new file mode 100644
index 00000000000..1e72305b673
--- /dev/null
+++ b/Examples/Tests/dive_cleaning/CMakeLists.txt
@@ -0,0 +1,24 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_2d_dive_cleaning  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_dive_cleaning  # inputs
+    analysis.py  # analysis
+    diags/diag1000128  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_dive_cleaning  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_dive_cleaning  # inputs
+    analysis.py  # analysis
+    diags/diag1000128  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/dive_cleaning/inputs_test_2d_dive_cleaning b/Examples/Tests/dive_cleaning/inputs_test_2d_dive_cleaning
new file mode 100644
index 00000000000..48cabee4495
--- /dev/null
+++ b/Examples/Tests/dive_cleaning/inputs_test_2d_dive_cleaning
@@ -0,0 +1,35 @@
+max_step = 128
+amr.n_cell = 64 64
+amr.max_grid_size = 32
+amr.max_level = 0
+
+geometry.prob_lo = -50.e-6 -50.e-6
+geometry.prob_hi = 50.e-6 50.e-6
+geometry.dims = 2
+
+boundary.field_lo = pml pml
+boundary.field_hi = pml pml
+
+warpx.do_dive_cleaning = 1
+warpx.use_filter = 0
+
+# Order of particle shape factors
+algo.particle_shape = 1
+
+particles.species_names = beam
+beam.charge = -q_e
+beam.mass = 1.e30
+beam.injection_style = "gaussian_beam"
+beam.x_rms = 2.e-6
+beam.y_rms = 2.e-6
+beam.z_rms = 2.e-6
+beam.x_m = 0.
+beam.y_m = 0.
+beam.z_m = 0.e-6
+beam.npart = 20000
+beam.q_tot = -1.e-20
+beam.momentum_distribution_type = "at_rest"
+
+diagnostics.diags_names = diag1
+diag1.intervals = 8
+diag1.diag_type = Full
diff --git a/Examples/Tests/dive_cleaning/inputs_3d b/Examples/Tests/dive_cleaning/inputs_test_3d_dive_cleaning
similarity index 100%
rename from Examples/Tests/dive_cleaning/inputs_3d
rename to Examples/Tests/dive_cleaning/inputs_test_3d_dive_cleaning
index c3f83ddbdd9..3f22a2206cf 100644
--- a/Examples/Tests/dive_cleaning/inputs_3d
+++ b/Examples/Tests/dive_cleaning/inputs_test_3d_dive_cleaning
@@ -3,9 +3,9 @@ amr.n_cell = 64 64 64
 amr.max_grid_size = 32
 amr.max_level = 0
 
-geometry.dims = 3
 geometry.prob_lo = -50.e-6 -50.e-6 -50.e-6
 geometry.prob_hi = 50.e-6 50.e-6 50.e-6
+geometry.dims = 3
 
 boundary.field_lo = pml pml pml
 boundary.field_hi = pml pml pml
diff --git a/Examples/Tests/electrostatic_dirichlet_bc/CMakeLists.txt b/Examples/Tests/electrostatic_dirichlet_bc/CMakeLists.txt
new file mode 100644
index 00000000000..93e837d4b59
--- /dev/null
+++ b/Examples/Tests/electrostatic_dirichlet_bc/CMakeLists.txt
@@ -0,0 +1,24 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_2d_dirichlet_bc  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_dirichlet_bc  # inputs
+    analysis.py  # analysis
+    diags/diag1000100  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_dirichlet_bc_picmi  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_dirichlet_bc_picmi.py  # inputs
+    analysis.py  # analysis
+    diags/diag1000100  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/electrostatic_dirichlet_bc/analysis.py b/Examples/Tests/electrostatic_dirichlet_bc/analysis.py
index 91e84fd8864..82fe061c3a8 100755
--- a/Examples/Tests/electrostatic_dirichlet_bc/analysis.py
+++ b/Examples/Tests/electrostatic_dirichlet_bc/analysis.py
@@ -18,9 +18,7 @@
 import numpy as np
 import yt
 
-files = sorted(glob.glob("dirichletbc_plt*"))[1:]
-if len(files) == 0:
-    files = sorted(glob.glob("Python_dirichletbc_plt*"))[1:]
+files = sorted(glob.glob("diags/diag1*"))[1:]
 assert len(files) > 0
 
 times = np.ones(len(files))
diff --git a/Examples/Tests/electrostatic_dirichlet_bc/inputs_2d b/Examples/Tests/electrostatic_dirichlet_bc/inputs_test_2d_dirichlet_bc
similarity index 93%
rename from Examples/Tests/electrostatic_dirichlet_bc/inputs_2d
rename to Examples/Tests/electrostatic_dirichlet_bc/inputs_test_2d_dirichlet_bc
index d501dac7d0c..46b00819926 100644
--- a/Examples/Tests/electrostatic_dirichlet_bc/inputs_2d
+++ b/Examples/Tests/electrostatic_dirichlet_bc/inputs_test_2d_dirichlet_bc
@@ -1,5 +1,6 @@
 max_step = 100
 warpx.verbose = 0
+warpx.abort_on_warning_threshold = medium
 warpx.const_dt = 7.5e-10
 warpx.do_electrostatic = labframe
 warpx.self_fields_required_precision = 1e-06
diff --git a/Examples/Tests/electrostatic_dirichlet_bc/PICMI_inputs_2d.py b/Examples/Tests/electrostatic_dirichlet_bc/inputs_test_2d_dirichlet_bc_picmi.py
similarity index 90%
rename from Examples/Tests/electrostatic_dirichlet_bc/PICMI_inputs_2d.py
rename to Examples/Tests/electrostatic_dirichlet_bc/inputs_test_2d_dirichlet_bc_picmi.py
index 5a1c531fe3a..0c30e84d781 100755
--- a/Examples/Tests/electrostatic_dirichlet_bc/PICMI_inputs_2d.py
+++ b/Examples/Tests/electrostatic_dirichlet_bc/inputs_test_2d_dirichlet_bc_picmi.py
@@ -58,16 +58,12 @@
 # diagnostics
 ##########################
 
-particle_diag = picmi.ParticleDiagnostic(
-    name="diag1", period=4, write_dir=".", warpx_file_prefix="Python_dirichletbc_plt"
-)
+particle_diag = picmi.ParticleDiagnostic(name="diag1", period=4)
 field_diag = picmi.FieldDiagnostic(
     name="diag1",
     grid=grid,
     period=4,
     data_list=["phi"],
-    write_dir=".",
-    warpx_file_prefix="Python_dirichletbc_plt",
 )
 
 ##########################
diff --git a/Examples/Tests/electrostatic_sphere/CMakeLists.txt b/Examples/Tests/electrostatic_sphere/CMakeLists.txt
new file mode 100644
index 00000000000..e80beb08e97
--- /dev/null
+++ b/Examples/Tests/electrostatic_sphere/CMakeLists.txt
@@ -0,0 +1,57 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_3d_electrostatic_sphere  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_electrostatic_sphere  # inputs
+    analysis_electrostatic_sphere.py  # analysis
+    diags/diag1000030  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_electrostatic_sphere_lab_frame  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_electrostatic_sphere_lab_frame  # inputs
+    analysis_electrostatic_sphere.py  # analysis
+    diags/diag1000030  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_electrostatic_sphere_lab_frame_mr_emass_10  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_electrostatic_sphere_lab_frame_mr_emass_10  # inputs
+    analysis_electrostatic_sphere.py  # analysis
+    diags/diag1000002  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_electrostatic_sphere_rel_nodal  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_electrostatic_sphere_rel_nodal  # inputs
+    analysis_electrostatic_sphere.py  # analysis
+    diags/diag1000030  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_rz_electrostatic_sphere  # name
+    RZ  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_rz_electrostatic_sphere  # inputs
+    analysis_electrostatic_sphere.py  # analysis
+    diags/diag1000030  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py
index 4acd868a148..33842058b0b 100755
--- a/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py
+++ b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py
@@ -34,12 +34,13 @@
 yt.funcs.mylog.setLevel(0)
 
 # Open plotfile specified in command line
+test_name = os.path.split(os.getcwd())[1]
 filename = sys.argv[1]
 ds = yt.load(filename)
 t_max = ds.current_time.item()  # time of simulation
 
 # Parse test name and check if particle_shape = 4 is used
-emass_10 = True if re.search("emass_10", filename) else False
+emass_10 = True if re.search("emass_10", test_name) else False
 
 if emass_10:
     l2_tolerance = 0.096
@@ -193,5 +194,4 @@ def return_energies(iteration):
 )  # Check conservation of energy
 
 # Checksum regression analysis
-test_name = os.path.split(os.getcwd())[1]
 checksumAPI.evaluate_checksum(test_name, filename)
diff --git a/Examples/Tests/electrostatic_sphere/inputs_3d b/Examples/Tests/electrostatic_sphere/inputs_base_3d
similarity index 100%
rename from Examples/Tests/electrostatic_sphere/inputs_3d
rename to Examples/Tests/electrostatic_sphere/inputs_base_3d
diff --git a/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere b/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere
new file mode 100644
index 00000000000..d89395e9d74
--- /dev/null
+++ b/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere
@@ -0,0 +1,5 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+warpx.abort_on_warning_threshold = medium
diff --git a/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_lab_frame b/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_lab_frame
new file mode 100644
index 00000000000..da97ae8afe7
--- /dev/null
+++ b/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_lab_frame
@@ -0,0 +1,6 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+diag2.electron.variables = x y z ux uy uz w phi
+warpx.do_electrostatic = labframe
diff --git a/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_lab_frame_mr_emass_10 b/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_lab_frame_mr_emass_10
new file mode 100644
index 00000000000..481cc65f030
--- /dev/null
+++ b/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_lab_frame_mr_emass_10
@@ -0,0 +1,13 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+amr.max_level = 1
+amr.ref_ratio_vect = 2 2 2
+diag2.electron.variables = x y z ux uy uz w
+electron.mass = 10
+max_step = 2
+warpx.abort_on_warning_threshold = medium
+warpx.do_electrostatic = labframe
+warpx.fine_tag_hi = 0.5 0.5 0.5
+warpx.fine_tag_lo = -0.5 -0.5 -0.5
diff --git a/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_rel_nodal b/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_rel_nodal
new file mode 100644
index 00000000000..96bff8aa9c7
--- /dev/null
+++ b/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_rel_nodal
@@ -0,0 +1,6 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+warpx.abort_on_warning_threshold = medium
+warpx.grid_type = collocated
diff --git a/Examples/Tests/electrostatic_sphere/inputs_rz b/Examples/Tests/electrostatic_sphere/inputs_test_rz_electrostatic_sphere
similarity index 96%
rename from Examples/Tests/electrostatic_sphere/inputs_rz
rename to Examples/Tests/electrostatic_sphere/inputs_test_rz_electrostatic_sphere
index 2b6151e6d8c..a1c71c58fc3 100644
--- a/Examples/Tests/electrostatic_sphere/inputs_rz
+++ b/Examples/Tests/electrostatic_sphere/inputs_test_rz_electrostatic_sphere
@@ -11,6 +11,7 @@ boundary.field_hi = pec pec
 warpx.const_dt = 1e-6
 warpx.do_electrostatic = labframe
 warpx.use_filter = 0
+warpx.abort_on_warning_threshold = medium
 
 particles.species_names = electron
diff --git a/Examples/Tests/electrostatic_sphere_eb/CMakeLists.txt b/Examples/Tests/electrostatic_sphere_eb/CMakeLists.txt
new file mode 100644
index 00000000000..ad5e8974225
--- /dev/null
+++ b/Examples/Tests/electrostatic_sphere_eb/CMakeLists.txt
@@ -0,0 +1,67 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_3d_electrostatic_sphere_eb  # name
+        3  # dims
+        2  # nprocs
+        ON  # eb
+        inputs_test_3d_electrostatic_sphere_eb  # inputs
+        analysis.py  # analysis
+        diags/diag1000001  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_3d_electrostatic_sphere_eb_mixed_bc  # name
+        3  # dims
+        2  # nprocs
+        ON  # eb
+        inputs_test_3d_electrostatic_sphere_eb_mixed_bc  # inputs
+        analysis_default_regression.py  # analysis
+        diags/diag1000001  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_3d_electrostatic_sphere_eb_picmi  # name
+        3  # dims
+        2  # nprocs
+        ON  # eb
+        inputs_test_3d_electrostatic_sphere_eb_picmi.py  # inputs
+        analysis.py  # analysis
+        diags/diag1000002  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_rz_electrostatic_sphere_eb  # name
+        RZ  # dims
+        2  # nprocs
+        ON  # eb
+        inputs_test_rz_electrostatic_sphere_eb  # inputs
+        analysis_rz.py  # analysis
+        diags/diag1000001  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_rz_electrostatic_sphere_eb_mr  # name
+        RZ  # dims
+        2  # nprocs
+        ON  # eb
+        inputs_test_rz_electrostatic_sphere_eb_mr  # inputs
+        analysis_rz_mr.py  # analysis
+        diags/diag1/  # output
+        OFF  # dependency
+    )
+endif()
diff --git a/Examples/Tests/electrostatic_sphere_eb/analysis_default_regression.py b/Examples/Tests/electrostatic_sphere_eb/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/electrostatic_sphere_eb/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/electrostatic_sphere_eb/inputs_3d b/Examples/Tests/electrostatic_sphere_eb/inputs_test_3d_electrostatic_sphere_eb
similarity index 96%
rename from Examples/Tests/electrostatic_sphere_eb/inputs_3d
rename to Examples/Tests/electrostatic_sphere_eb/inputs_test_3d_electrostatic_sphere_eb
index 13ad42da070..f738e1c5d3a 100644
--- a/Examples/Tests/electrostatic_sphere_eb/inputs_3d
+++ b/Examples/Tests/electrostatic_sphere_eb/inputs_test_3d_electrostatic_sphere_eb
@@ -20,6 +20,7 @@
 warpx.do_electrostatic = labframe
 warpx.eb_implicit_function = "-(x**2+y**2+z**2-0.1**2)"
 warpx.eb_potential(x,y,z,t) = "1."
 warpx.self_fields_required_precision = 1.e-7
+warpx.abort_on_warning_threshold = medium
 
 algo.field_gathering = momentum-conserving
diff --git a/Examples/Tests/electrostatic_sphere_eb/inputs_3d_mixed_BCs b/Examples/Tests/electrostatic_sphere_eb/inputs_test_3d_electrostatic_sphere_eb_mixed_bc
similarity index 94%
rename from Examples/Tests/electrostatic_sphere_eb/inputs_3d_mixed_BCs
rename to Examples/Tests/electrostatic_sphere_eb/inputs_test_3d_electrostatic_sphere_eb_mixed_bc
index 0c1b9130ded..de2c0d0646c 100644
--- a/Examples/Tests/electrostatic_sphere_eb/inputs_3d_mixed_BCs
+++ b/Examples/Tests/electrostatic_sphere_eb/inputs_test_3d_electrostatic_sphere_eb_mixed_bc
@@ -17,6 +17,7 @@
 warpx.do_electrostatic = labframe
 warpx.eb_implicit_function = "-(x**2+y**2+z**2-0.3**2)"
 warpx.eb_potential(x,y,z,t) = "1."
 warpx.self_fields_required_precision = 1.e-7
+warpx.abort_on_warning_threshold = medium
 
 algo.field_gathering = momentum-conserving
diff --git a/Examples/Tests/electrostatic_sphere_eb/PICMI_inputs_3d.py b/Examples/Tests/electrostatic_sphere_eb/inputs_test_3d_electrostatic_sphere_eb_picmi.py
similarity index 94%
rename from Examples/Tests/electrostatic_sphere_eb/PICMI_inputs_3d.py
rename to Examples/Tests/electrostatic_sphere_eb/inputs_test_3d_electrostatic_sphere_eb_picmi.py
index 97f52a69c72..37d280e77d2 100755
--- a/Examples/Tests/electrostatic_sphere_eb/PICMI_inputs_3d.py
+++ b/Examples/Tests/electrostatic_sphere_eb/inputs_test_3d_electrostatic_sphere_eb_picmi.py
@@ -73,16 +73,12 @@
 particle_diag = picmi.ParticleDiagnostic(
     name="diag1",
     period=1,
-    write_dir=".",
-    warpx_file_prefix="Python_ElectrostaticSphereEB_plt",
 )
 field_diag = picmi.FieldDiagnostic(
     name="diag1",
     grid=grid,
     period=1,
     data_list=["Ex", "Ey", "Ez", "phi", "rho"],
-    write_dir=".",
-    warpx_file_prefix="Python_ElectrostaticSphereEB_plt",
 )
 
 reduced_diag = picmi.ReducedDiagnostic(
diff --git a/Examples/Tests/electrostatic_sphere_eb/inputs_rz b/Examples/Tests/electrostatic_sphere_eb/inputs_test_rz_electrostatic_sphere_eb
similarity index 94%
rename from Examples/Tests/electrostatic_sphere_eb/inputs_rz
rename to Examples/Tests/electrostatic_sphere_eb/inputs_test_rz_electrostatic_sphere_eb
index 28ebadb1cc7..8ace9cd9b4a 100644
--- a/Examples/Tests/electrostatic_sphere_eb/inputs_rz
+++ b/Examples/Tests/electrostatic_sphere_eb/inputs_test_rz_electrostatic_sphere_eb
@@ -20,6 +20,7 @@
 warpx.do_electrostatic = labframe
 warpx.eb_implicit_function = "-(x**2-0.1**2)"
 warpx.eb_potential(x,y,z,t) = "1."
 warpx.self_fields_required_precision = 1.e-7
+warpx.abort_on_warning_threshold = medium
 
 algo.field_gathering = momentum-conserving
diff --git a/Examples/Tests/electrostatic_sphere_eb/inputs_rz_mr b/Examples/Tests/electrostatic_sphere_eb/inputs_test_rz_electrostatic_sphere_eb_mr
similarity index 92%
rename from Examples/Tests/electrostatic_sphere_eb/inputs_rz_mr
rename to Examples/Tests/electrostatic_sphere_eb/inputs_test_rz_electrostatic_sphere_eb_mr
index 722fc916416..d984ba35b5d 100644
--- a/Examples/Tests/electrostatic_sphere_eb/inputs_rz_mr
+++ b/Examples/Tests/electrostatic_sphere_eb/inputs_test_rz_electrostatic_sphere_eb_mr
@@ -6,6 +6,7 @@ max_step = 1
 amr.n_cell = 64 64
 amr.blocking_factor = 8
 amr.max_grid_size = 128
+amr.ref_ratio_vect = 2 2 2
 boundary.field_lo = none periodic
 boundary.field_hi = pec periodic
 boundary.potential_lo_x = 0
@@ -23,6 +24,7 @@
 warpx.do_electrostatic = labframe
 warpx.eb_implicit_function = "-(x**2-0.1**2)"
 warpx.eb_potential(x,y,z,t) = "1."
 warpx.self_fields_required_precision = 1.e-7
+warpx.abort_on_warning_threshold = medium
 
 algo.field_gathering = momentum-conserving
diff --git a/Examples/Tests/embedded_boundary_cube/CMakeLists.txt b/Examples/Tests/embedded_boundary_cube/CMakeLists.txt
new file mode 100644
index 00000000000..3fd0a0f4c3b
--- /dev/null
+++ b/Examples/Tests/embedded_boundary_cube/CMakeLists.txt
@@ -0,0 +1,41 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_2d_embedded_boundary_cube  # name
+        2  # dims
+        1  # nprocs
+        ON  # eb
+        inputs_test_2d_embedded_boundary_cube  # inputs
+        analysis_fields_2d.py  # analysis
+        diags/diag1000114  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_3d_embedded_boundary_cube  # name
+        3  # dims
+        1  # nprocs
+        ON  # eb
+        inputs_test_3d_embedded_boundary_cube  # inputs
+        analysis_fields.py  # analysis
+        diags/diag1000208  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_3d_embedded_boundary_cube_macroscopic  # name
+        3  # dims
+        1  # nprocs
+        ON  # eb
+        inputs_test_3d_embedded_boundary_cube_macroscopic  # inputs
+        analysis_fields.py  # analysis
+        diags/diag1000208  # output
+        OFF  # dependency
+    )
+endif()
diff --git a/Examples/Tests/embedded_boundary_cube/analysis_fields.py b/Examples/Tests/embedded_boundary_cube/analysis_fields.py
index 1890c1d9aea..49da1a76edd 100755
--- a/Examples/Tests/embedded_boundary_cube/analysis_fields.py
+++ b/Examples/Tests/embedded_boundary_cube/analysis_fields.py
@@ -23,6 +23,8 @@
 # $$ k_y = \frac{n\pi}{L}$$
 # $$ k_z = \frac{p\pi}{L}$$
 
+test_name = os.path.split(os.getcwd())[1]
+
 hi = [0.8, 0.8, 0.8]
 lo = [-0.8, -0.8, -0.8]
 ncells = [48, 48, 48]
@@ -46,7 +48,7 @@
 
 # Parse test name and check whether this use the macroscopic solver
 # (i.e. solving the equation in a dielectric)
-macroscopic = True if re.search("macroscopic", filename) else False
+macroscopic = True if re.search("macroscopic", test_name) else False
 
 # Calculate frequency of the mode oscillation
 omega = np.sqrt(h_2) * c
@@ -108,6 +110,4 @@
 rel_err_z = np.sqrt(np.sum(np.square(Bz_sim - Bz_th)) / np.sum(np.square(Bz_th)))
 assert rel_err_z < rel_tol_err
 
-test_name = os.path.split(os.getcwd())[1]
-
 checksumAPI.evaluate_checksum(test_name, filename)
diff --git a/Examples/Tests/embedded_boundary_cube/inputs_3d b/Examples/Tests/embedded_boundary_cube/inputs_base_3d
similarity index 97%
rename from Examples/Tests/embedded_boundary_cube/inputs_3d
rename to Examples/Tests/embedded_boundary_cube/inputs_base_3d
index 61eb1192e04..9710701d871 100644
--- a/Examples/Tests/embedded_boundary_cube/inputs_3d
+++ b/Examples/Tests/embedded_boundary_cube/inputs_base_3d
@@ -7,6 +7,7 @@ geometry.dims = 3
 geometry.prob_lo = -0.8 -0.8 -0.8
 geometry.prob_hi = 0.8 0.8 0.8
 warpx.cfl = 1
+warpx.abort_on_warning_threshold = medium
 
 boundary.field_lo = pec pec pec
 boundary.field_hi = pec pec pec
diff --git a/Examples/Tests/embedded_boundary_cube/inputs_2d b/Examples/Tests/embedded_boundary_cube/inputs_test_2d_embedded_boundary_cube
similarity index 97%
rename from Examples/Tests/embedded_boundary_cube/inputs_2d
rename to Examples/Tests/embedded_boundary_cube/inputs_test_2d_embedded_boundary_cube
index 372e0dc0340..684325dc030 100644
--- a/Examples/Tests/embedded_boundary_cube/inputs_2d
+++ b/Examples/Tests/embedded_boundary_cube/inputs_test_2d_embedded_boundary_cube
@@ -7,6 +7,7 @@ geometry.dims = 2
 geometry.prob_lo = -0.8 -0.8
 geometry.prob_hi = 0.8 0.8
 warpx.cfl = 1
+warpx.abort_on_warning_threshold = medium
 
 boundary.field_lo = pec pec
 boundary.field_hi = pec pec
diff --git a/Examples/Tests/embedded_boundary_cube/inputs_test_3d_embedded_boundary_cube b/Examples/Tests/embedded_boundary_cube/inputs_test_3d_embedded_boundary_cube
new file mode 100644
index 00000000000..9d612bd62da
--- /dev/null
+++ b/Examples/Tests/embedded_boundary_cube/inputs_test_3d_embedded_boundary_cube
@@ -0,0 +1,2 @@
+# base input parameters
+FILE = inputs_base_3d
diff --git a/Examples/Tests/embedded_boundary_cube/inputs_test_3d_embedded_boundary_cube_macroscopic b/Examples/Tests/embedded_boundary_cube/inputs_test_3d_embedded_boundary_cube_macroscopic
new file mode 100644
index 00000000000..1bcb49dec54
--- /dev/null
+++ b/Examples/Tests/embedded_boundary_cube/inputs_test_3d_embedded_boundary_cube_macroscopic
@@ -0,0 +1,8 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+algo.em_solver_medium = macroscopic
+macroscopic.epsilon = 1.5*8.8541878128e-12
+macroscopic.mu = 1.25663706212e-06
+macroscopic.sigma = 0
diff --git a/Examples/Tests/embedded_boundary_diffraction/CMakeLists.txt b/Examples/Tests/embedded_boundary_diffraction/CMakeLists.txt
new file mode 100644
index 00000000000..d91a94b539b
--- /dev/null
+++ b/Examples/Tests/embedded_boundary_diffraction/CMakeLists.txt
@@ -0,0 +1,15 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_rz_embedded_boundary_diffraction  # name
+        RZ  # dims
+        2  # nprocs
+        ON  # eb
+        inputs_test_rz_embedded_boundary_diffraction  # inputs
+        analysis_fields.py  # analysis
+        diags/diag1/  # output
+        OFF  # dependency
+    )
+endif()
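With the generic plotfile names produced by the new harness (every test writes diags/diag1...), analysis scripts can no longer infer a test variant from the output path, so the hunks above switch to keying off the test directory name. A short Python sketch of the pattern these scripts converge on (names follow the hunks above):

    import os
    import re
    import sys

    fn = sys.argv[1]  # plotfile path, now identical across test variants
    test_name = os.path.split(os.getcwd())[1]  # e.g. test_3d_embedded_boundary_cube_macroscopic

    # Variant flags are parsed from the test name, not from the plotfile path
    macroscopic = bool(re.search("macroscopic", test_name))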
diff --git a/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py b/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py
index bef85259f17..84dfacbb505 100755
--- a/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py
+++ b/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py
@@ -17,7 +17,8 @@
 sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
 import checksumAPI
 
-ts = OpenPMDTimeSeries("./EmbeddedBoundaryDiffraction_plt/")
+filename = sys.argv[1]
+ts = OpenPMDTimeSeries(filename)
 
 # Extract the intensity as a function of r and z
 Ex, info = ts.get_field("E", "x", iteration=300)
@@ -42,6 +43,5 @@ def r_first_minimum(iz):
 assert np.all(abs(r[50:] - theta_diffraction * info.z[50:]) < 0.03)
 
 # Open the right plot file
-filename = sys.argv[1]
 test_name = os.path.split(os.getcwd())[1]
 checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd")
diff --git a/Examples/Tests/embedded_boundary_diffraction/inputs_rz b/Examples/Tests/embedded_boundary_diffraction/inputs_test_rz_embedded_boundary_diffraction
similarity index 100%
rename from Examples/Tests/embedded_boundary_diffraction/inputs_rz
rename to Examples/Tests/embedded_boundary_diffraction/inputs_test_rz_embedded_boundary_diffraction
diff --git a/Examples/Tests/embedded_boundary_python_api/CMakeLists.txt b/Examples/Tests/embedded_boundary_python_api/CMakeLists.txt
new file mode 100644
index 00000000000..cf45d9d56f3
--- /dev/null
+++ b/Examples/Tests/embedded_boundary_python_api/CMakeLists.txt
@@ -0,0 +1,15 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_3d_embedded_boundary_picmi  # name
+        3  # dims
+        1  # nprocs
+        ON  # eb
+        inputs_test_3d_embedded_boundary_picmi.py  # inputs
+        analysis.py  # analysis
+        diags/diag1000002  # output
+        OFF  # dependency
+    )
+endif()
diff --git a/Examples/Tests/embedded_boundary_python_api/PICMI_inputs_EB_API.py b/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py
similarity index 97%
rename from Examples/Tests/embedded_boundary_python_api/PICMI_inputs_EB_API.py
rename to Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py
index 45d57e606b4..80ce483f2c7 100755
--- a/Examples/Tests/embedded_boundary_python_api/PICMI_inputs_EB_API.py
+++ b/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py
@@ -61,16 +61,12 @@
 particle_diag = picmi.ParticleDiagnostic(
     name="diag1",
     period=1,
-    write_dir=".",
-    warpx_file_prefix="embedded_boundary_python_API_plt",
 )
 field_diag = picmi.FieldDiagnostic(
     name="diag1",
     grid=grid,
     period=1,
     data_list=["Ex"],
-    write_dir=".",
-    warpx_file_prefix="embedded_boundary_python_API_plt",
 )
 
 ##########################
diff --git a/Examples/Tests/embedded_boundary_rotated_cube/CMakeLists.txt b/Examples/Tests/embedded_boundary_rotated_cube/CMakeLists.txt
new file mode 100644
index 00000000000..c9d3b47cece
--- /dev/null
+++ b/Examples/Tests/embedded_boundary_rotated_cube/CMakeLists.txt
@@ -0,0 +1,28 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_2d_embedded_boundary_rotated_cube  # name
+        2  # dims
+        1  # nprocs
+        ON  # eb
+        inputs_test_2d_embedded_boundary_rotated_cube  # inputs
+        analysis_fields_2d.py  # analysis
+        diags/diag1000068  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_3d_embedded_boundary_rotated_cube  # name
+        3  # dims
+        1  # nprocs
+        ON  # eb
+        inputs_test_3d_embedded_boundary_rotated_cube  # inputs
+        analysis_fields_3d.py  # analysis
+        diags/diag1000111  # output
+        OFF  # dependency
+    )
+endif()
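The diffraction test now reads whatever openPMD output directory the harness passes in, via openPMD-viewer's OpenPMDTimeSeries. For context, a minimal sketch of that access pattern (iteration number and field component follow the script above; the output path is whatever the harness supplies):

    import sys

    from openpmd_viewer import OpenPMDTimeSeries

    ts = OpenPMDTimeSeries(sys.argv[1])  # e.g. diags/diag1/

    # get_field returns the data array plus a metadata object carrying the grid axes
    Ex, info = ts.get_field("E", "x", iteration=300)
    print(Ex.shape, info.z.shape)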
diff --git a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields.py b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py
similarity index 100%
rename from Examples/Tests/embedded_boundary_rotated_cube/analysis_fields.py
rename to Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py
diff --git a/Examples/Tests/embedded_boundary_rotated_cube/inputs_2d b/Examples/Tests/embedded_boundary_rotated_cube/inputs_test_2d_embedded_boundary_rotated_cube
similarity index 96%
rename from Examples/Tests/embedded_boundary_rotated_cube/inputs_2d
rename to Examples/Tests/embedded_boundary_rotated_cube/inputs_test_2d_embedded_boundary_rotated_cube
index e7e03168824..24fb2938c2d 100644
--- a/Examples/Tests/embedded_boundary_rotated_cube/inputs_2d
+++ b/Examples/Tests/embedded_boundary_rotated_cube/inputs_test_2d_embedded_boundary_rotated_cube
@@ -7,6 +7,7 @@ geometry.dims = 2
 geometry.prob_lo = -0.8 -0.8
 geometry.prob_hi = 0.8 0.8
 warpx.cfl = 1
+warpx.abort_on_warning_threshold = medium
 
 boundary.field_lo = pec pec
 boundary.field_hi = pec pec
diff --git a/Examples/Tests/embedded_boundary_rotated_cube/inputs_3d b/Examples/Tests/embedded_boundary_rotated_cube/inputs_test_3d_embedded_boundary_rotated_cube
similarity index 98%
rename from Examples/Tests/embedded_boundary_rotated_cube/inputs_3d
rename to Examples/Tests/embedded_boundary_rotated_cube/inputs_test_3d_embedded_boundary_rotated_cube
index 77e259e8975..faefeec2206 100644
--- a/Examples/Tests/embedded_boundary_rotated_cube/inputs_3d
+++ b/Examples/Tests/embedded_boundary_rotated_cube/inputs_test_3d_embedded_boundary_rotated_cube
@@ -7,6 +7,7 @@ geometry.dims = 3
 geometry.prob_lo = -0.8 -0.8 -0.8
 geometry.prob_hi = 0.8 0.8 0.8
 warpx.cfl = 1
+warpx.abort_on_warning_threshold = medium
 
 boundary.field_lo = pec pec pec
 boundary.field_hi = pec pec pec
diff --git a/Examples/Tests/embedded_circle/CMakeLists.txt b/Examples/Tests/embedded_circle/CMakeLists.txt
new file mode 100644
index 00000000000..9eb8f23460b
--- /dev/null
+++ b/Examples/Tests/embedded_circle/CMakeLists.txt
@@ -0,0 +1,15 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_2d_embedded_circle  # name
+        2  # dims
+        2  # nprocs
+        ON  # eb
+        inputs_test_2d_embedded_circle  # inputs
+        analysis.py  # analysis
+        diags/diag1000011  # output
+        OFF  # dependency
+    )
+endif()
diff --git a/Examples/Tests/embedded_circle/inputs_2d b/Examples/Tests/embedded_circle/inputs_test_2d_embedded_circle
similarity index 100%
rename from Examples/Tests/embedded_circle/inputs_2d
rename to Examples/Tests/embedded_circle/inputs_test_2d_embedded_circle
diff --git a/Examples/Tests/energy_conserving_thermal_plasma/CMakeLists.txt b/Examples/Tests/energy_conserving_thermal_plasma/CMakeLists.txt
new file mode 100644
index 00000000000..13012e7605b
--- /dev/null
+++ b/Examples/Tests/energy_conserving_thermal_plasma/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_2d_energy_conserving_thermal_plasma  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_energy_conserving_thermal_plasma  # inputs
+    analysis.py  # analysis
+    diags/diag1000500  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/energy_conserving_thermal_plasma/inputs_2d_electrostatic b/Examples/Tests/energy_conserving_thermal_plasma/inputs_test_2d_energy_conserving_thermal_plasma
similarity index 100%
rename from Examples/Tests/energy_conserving_thermal_plasma/inputs_2d_electrostatic
rename to Examples/Tests/energy_conserving_thermal_plasma/inputs_test_2d_energy_conserving_thermal_plasma
diff --git a/Examples/Tests/field_probe/CMakeLists.txt b/Examples/Tests/field_probe/CMakeLists.txt
new file mode 100644
index 00000000000..4ef61237775
--- /dev/null
+++ b/Examples/Tests/field_probe/CMakeLists.txt
@@ -0,0 +1,15 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_2d_field_probe  # name
+        2  # dims
+        2  # nprocs
+        ON  # eb
+        inputs_test_2d_field_probe  # inputs
+        analysis.py  # analysis
+        diags/diag1000544  # output
+        OFF  # dependency
+    )
+endif()
diff --git a/Examples/Tests/field_probe/analysis_field_probe.py b/Examples/Tests/field_probe/analysis.py
similarity index 100%
rename from Examples/Tests/field_probe/analysis_field_probe.py
rename to Examples/Tests/field_probe/analysis.py
diff --git a/Examples/Tests/field_probe/inputs_2d b/Examples/Tests/field_probe/inputs_test_2d_field_probe
similarity index 100%
rename from Examples/Tests/field_probe/inputs_2d
rename to Examples/Tests/field_probe/inputs_test_2d_field_probe
diff --git a/Examples/Tests/flux_injection/CMakeLists.txt b/Examples/Tests/flux_injection/CMakeLists.txt
new file mode 100644
index 00000000000..306ff2018bc
--- /dev/null
+++ b/Examples/Tests/flux_injection/CMakeLists.txt
@@ -0,0 +1,24 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_3d_flux_injection  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_flux_injection  # inputs
+    analysis_flux_injection_3d.py  # analysis
+    diags/diag1000002  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_rz_flux_injection  # name
+    RZ  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_rz_flux_injection  # inputs
+    analysis_flux_injection_rz.py  # analysis
+    diags/diag1000120  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/flux_injection/inputs_3d b/Examples/Tests/flux_injection/inputs_test_3d_flux_injection
similarity index 100%
rename from Examples/Tests/flux_injection/inputs_3d
rename to Examples/Tests/flux_injection/inputs_test_3d_flux_injection
diff --git a/Examples/Tests/flux_injection/inputs_rz b/Examples/Tests/flux_injection/inputs_test_rz_flux_injection
similarity index 100%
rename from Examples/Tests/flux_injection/inputs_rz
rename to Examples/Tests/flux_injection/inputs_test_rz_flux_injection
diff --git a/Examples/Tests/gaussian_beam/CMakeLists.txt b/Examples/Tests/gaussian_beam/CMakeLists.txt
new file mode 100644
index 00000000000..35ec08c10e3
--- /dev/null
+++ b/Examples/Tests/gaussian_beam/CMakeLists.txt
@@ -0,0 +1,24 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_3d_focusing_gaussian_beam  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_focusing_gaussian_beam  # inputs
+    analysis.py  # analysis
+    diags/diag1000000  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_gaussian_beam_picmi  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_gaussian_beam_picmi.py  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000010  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/gaussian_beam/README.rst b/Examples/Tests/gaussian_beam/README
similarity index 100%
rename from Examples/Tests/gaussian_beam/README.rst
rename to Examples/Tests/gaussian_beam/README
diff --git a/Examples/Tests/gaussian_beam/analysis_focusing_beam.py b/Examples/Tests/gaussian_beam/analysis.py
similarity index 100%
rename from Examples/Tests/gaussian_beam/analysis_focusing_beam.py
rename to Examples/Tests/gaussian_beam/analysis.py
diff --git a/Examples/Tests/gaussian_beam/analysis_default_regression.py b/Examples/Tests/gaussian_beam/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/gaussian_beam/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/gaussian_beam/inputs_focusing_beam b/Examples/Tests/gaussian_beam/inputs_test_3d_focusing_gaussian_beam
similarity index 100%
rename from Examples/Tests/gaussian_beam/inputs_focusing_beam
rename to Examples/Tests/gaussian_beam/inputs_test_3d_focusing_gaussian_beam
diff --git a/Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py b/Examples/Tests/gaussian_beam/inputs_test_3d_gaussian_beam_picmi.py
similarity index 97%
rename from Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py
rename to Examples/Tests/gaussian_beam/inputs_test_3d_gaussian_beam_picmi.py
index 9ad2fd6b82b..cd169110f8a 100755
--- a/Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py
+++ b/Examples/Tests/gaussian_beam/inputs_test_3d_gaussian_beam_picmi.py
@@ -87,8 +87,6 @@
     period=10,
     data_list=args.fields_to_plot,
     warpx_format=args.diagformat,
-    write_dir=".",
-    warpx_file_prefix="Python_gaussian_beam_plt",
 )
 
 part_diag1 = picmi.ParticleDiagnostic(
diff --git a/Examples/Tests/implicit/CMakeLists.txt b/Examples/Tests/implicit/CMakeLists.txt
new file mode 100644
index 00000000000..11881ae4972
--- /dev/null
+++ b/Examples/Tests/implicit/CMakeLists.txt
@@ -0,0 +1,46 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_1d_semi_implicit_picard  # name
+    1  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_1d_semi_implicit_picard  # inputs
+    analysis_1d.py  # analysis
+    diags/diag1000100  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_1d_theta_implicit_picard  # name
+    1  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_1d_theta_implicit_picard  # inputs
+    analysis_1d.py  # analysis
+    diags/diag1000100  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_theta_implicit_jfnk_vandb  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_theta_implicit_jfnk_vandb  # inputs
+    analysis_vandb_jfnk_2d.py  # analysis
+    diags/diag1000020  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_theta_implicit_jfnk_vandb_picmi  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_theta_implicit_jfnk_vandb_picmi.py  # inputs
+    analysis_vandb_jfnk_2d.py  # analysis
+    diags/diag1000020  # output
+    OFF  # dependency
+)
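All of the PICMI scripts in this patch drop write_dir and warpx_file_prefix so their output lands in the default diags/diag1... location that the CMake entries point at. A minimal sketch of the resulting diagnostic setup, assuming pywarpx's picmi module as used in the scripts above (the grid parameters here are illustrative only):

    from pywarpx import picmi

    grid = picmi.Cartesian3DGrid(
        number_of_cells=[16, 16, 16],
        lower_bound=[-1.0, -1.0, -1.0],
        upper_bound=[1.0, 1.0, 1.0],
        lower_boundary_conditions=["periodic"] * 3,
        upper_boundary_conditions=["periodic"] * 3,
    )

    # With write_dir/warpx_file_prefix omitted, output goes to the default
    # location (diags/diag1<step>), which is what the CMake entries expect.
    field_diag = picmi.FieldDiagnostic(
        name="diag1",
        grid=grid,
        period=1,
        data_list=["Ex", "Ey", "Ez"],
    )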
diff --git a/Examples/Tests/Implicit/analysis_1d.py b/Examples/Tests/implicit/analysis_1d.py
similarity index 87%
rename from Examples/Tests/Implicit/analysis_1d.py
rename to Examples/Tests/implicit/analysis_1d.py
index af4515968f9..bbbbb8db9b2 100755
--- a/Examples/Tests/Implicit/analysis_1d.py
+++ b/Examples/Tests/implicit/analysis_1d.py
@@ -29,9 +29,10 @@
 delta_E = (total_energy - total_energy[0]) / total_energy[0]
 max_delta_E = np.abs(delta_E).max()
 
-if re.match("SemiImplicitPicard_1d", fn):
+test_name = os.path.split(os.getcwd())[1]
+if re.match("test_1d_semi_implicit_picard", test_name):
     tolerance_rel = 2.5e-5
-elif re.match("ThetaImplicitPicard_1d", fn):
+elif re.match("test_1d_theta_implicit_picard", test_name):
     # This case should have near machine precision conservation of energy
     tolerance_rel = 1.0e-14
diff --git a/Examples/Tests/Implicit/analysis_vandb_jfnk_2d.py b/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py
similarity index 100%
rename from Examples/Tests/Implicit/analysis_vandb_jfnk_2d.py
rename to Examples/Tests/implicit/analysis_vandb_jfnk_2d.py
diff --git a/Examples/Tests/Implicit/inputs_1d_semiimplicit b/Examples/Tests/implicit/inputs_test_1d_semi_implicit_picard
similarity index 97%
rename from Examples/Tests/Implicit/inputs_1d_semiimplicit
rename to Examples/Tests/implicit/inputs_test_1d_semi_implicit_picard
index 07460e08be8..8ef0304bebb 100644
--- a/Examples/Tests/Implicit/inputs_1d_semiimplicit
+++ b/Examples/Tests/implicit/inputs_test_1d_semi_implicit_picard
@@ -17,6 +17,7 @@ my_constants.dt = 0.1/wpe # time step size, s
 max_step = 100
 
 amr.n_cell = nz
+amr.max_grid_size = 32
 amr.max_level = 0
 
 geometry.dims = 1
@@ -31,6 +32,7 @@ boundary.particle_hi = periodic
 ############ NUMERICS ###########
 #################################
 
+warpx.abort_on_warning_threshold = high
 warpx.verbose = 1
 warpx.const_dt = dt
 algo.evolve_scheme = semi_implicit_em
diff --git a/Examples/Tests/Implicit/inputs_1d b/Examples/Tests/implicit/inputs_test_1d_theta_implicit_picard
similarity index 97%
rename from Examples/Tests/Implicit/inputs_1d
rename to Examples/Tests/implicit/inputs_test_1d_theta_implicit_picard
index 3e57689b723..2ed4d746708 100644
--- a/Examples/Tests/Implicit/inputs_1d
+++ b/Examples/Tests/implicit/inputs_test_1d_theta_implicit_picard
@@ -17,6 +17,7 @@ my_constants.dt = 0.1/wpe # time step size, s
 max_step = 100
 
 amr.n_cell = nz
+amr.max_grid_size = 32
 amr.max_level = 0
 
 geometry.dims = 1
@@ -31,6 +32,7 @@ boundary.particle_hi = periodic
 ############ NUMERICS ###########
 #################################
 
+warpx.abort_on_warning_threshold = high
 warpx.verbose = 1
 warpx.const_dt = dt
 algo.evolve_scheme = theta_implicit_em
diff --git a/Examples/Tests/Implicit/inputs_vandb_jfnk_2d b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb
similarity index 98%
rename from Examples/Tests/Implicit/inputs_vandb_jfnk_2d
rename to Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb
index 393a9d90330..0cdf2ebe40d 100644
--- a/Examples/Tests/Implicit/inputs_vandb_jfnk_2d
+++ b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb
@@ -31,6 +31,7 @@ boundary.field_hi = periodic periodic
 #################################
 ############ NUMERICS ###########
 #################################
+warpx.abort_on_warning_threshold = high
 warpx.serialize_initial_conditions = 1
 warpx.verbose = 1
 warpx.const_dt = dt
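The implicit tests pass or fail on relative energy drift, as in the analysis_1d.py hunk above; the whole check reduces to a few lines. A self-contained sketch (total_energy here is a synthetic stand-in for the series read from the reduced diagnostics):

    import numpy as np

    # Stand-in for the energy time series read from the reduced diagnostics
    total_energy = np.array([1.0, 1.0 + 3e-15, 1.0 - 2e-15])

    # Relative drift with respect to the initial energy
    delta_E = (total_energy - total_energy[0]) / total_energy[0]
    max_delta_E = np.abs(delta_E).max()

    # The theta-implicit scheme is expected to conserve energy to near machine precision
    tolerance_rel = 1.0e-14
    assert max_delta_E < tolerance_rel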
 particle_diag = picmi.ParticleDiagnostic(
@@ -120,8 +118,6 @@
     period=diagnostic_intervals,
     species=[electrons, protons],
     data_list=["weighting", "position", "momentum"],
-    write_dir=".",
-    warpx_file_prefix="ThetaImplicitJFNK_VandB_2d_PICMI_plt",
 )
 
 particle_energy_diag = picmi.ReducedDiagnostic(
diff --git a/Examples/Tests/initial_distribution/CMakeLists.txt b/Examples/Tests/initial_distribution/CMakeLists.txt
new file mode 100644
index 00000000000..14dabd7a67c
--- /dev/null
+++ b/Examples/Tests/initial_distribution/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_3d_initial_distribution  # name
+    3  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_3d_initial_distribution  # inputs
+    analysis.py  # analysis
+    diags/diag1000001  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/initial_distribution/analysis_distribution.py b/Examples/Tests/initial_distribution/analysis.py
similarity index 100%
rename from Examples/Tests/initial_distribution/analysis_distribution.py
rename to Examples/Tests/initial_distribution/analysis.py
diff --git a/Examples/Tests/initial_distribution/inputs b/Examples/Tests/initial_distribution/inputs_test_3d_initial_distribution
similarity index 100%
rename from Examples/Tests/initial_distribution/inputs
rename to Examples/Tests/initial_distribution/inputs_test_3d_initial_distribution
diff --git a/Examples/Tests/initial_plasma_profile/CMakeLists.txt b/Examples/Tests/initial_plasma_profile/CMakeLists.txt
new file mode 100644
index 00000000000..fab15e8b97f
--- /dev/null
+++ b/Examples/Tests/initial_plasma_profile/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_2d_parabolic_channel_initialization  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_parabolic_channel_initialization  # inputs
+    analysis.py  # analysis
+    diags/diag1000001  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/initial_plasma_profile/inputs b/Examples/Tests/initial_plasma_profile/inputs_test_2d_parabolic_channel_initialization
similarity index 100%
rename from Examples/Tests/initial_plasma_profile/inputs
rename to Examples/Tests/initial_plasma_profile/inputs_test_2d_parabolic_channel_initialization
diff --git a/Examples/Tests/ion_stopping/CMakeLists.txt b/Examples/Tests/ion_stopping/CMakeLists.txt
new file mode 100644
index 00000000000..1f203d76fa1
--- /dev/null
+++ b/Examples/Tests/ion_stopping/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_3d_ion_stopping  # name
+    3  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_3d_ion_stopping  # inputs
+    analysis.py  # analysis
+    diags/diag1000010  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/ion_stopping/analysis_ion_stopping.py b/Examples/Tests/ion_stopping/analysis.py
similarity index 93%
rename from Examples/Tests/ion_stopping/analysis_ion_stopping.py
rename to Examples/Tests/ion_stopping/analysis.py
index f1ad3bc8b2b..e343bd23fdd 100755
--- a/Examples/Tests/ion_stopping/analysis_ion_stopping.py
+++ b/Examples/Tests/ion_stopping/analysis.py
@@ -12,7 +12,6 @@
 # particle energies.
 
 import os
-import re
 import sys
 
 import numpy as np
@@ -30,13 +29,7 @@
 tolerance = 1.0e-7
 
 last_filename = sys.argv[1]
-
-# Remove trailing '/' from file name, if necessary
-last_filename.rstrip("/")
-# Find last iteration in file name, such as 'test_name_plt000001' (last_it = '000001')
-last_it = re.search("\d+$", last_filename).group()
-# Find output prefix in file name, such as 'test_name_plt000001' (prefix = 'test_name_plt')
-prefix = last_filename[: -len(last_it)]
+last_it = 10
 
 
 def stopping_from_electrons(ne, Te, Zb, ion_mass):
@@ -94,7 +87,7 @@ def stopping_from_ions(dt, ni, Ti, mi, Zi, Zb, ion_mass, ion_energy):
 
 # Fetch background parameters and initial particle data
-ds0 = yt.load(f'{prefix}{len(last_it)*"0"}')
+ds0 = yt.load("diags/diag1000000")
 ad0 = ds0.all_data()
 
 Zb = 1.0  # Ion charge state
@@ -150,14 +143,14 @@ def stopping_from_ions(dt, ni, Ti, mi, Zi, Zb, ion_mass, ion_energy):
 ds = yt.load(last_filename)
 ad = ds.all_data()
 
-dt = ds.current_time.to_value() / int(last_it)
+dt = ds.current_time.to_value() / last_it
 
 # Step through the same number of time steps
 a_EE1 = EE1
 a_EE2 = EE2
 a_EE3 = EE3
 a_EE4 = EE4
-for it in range(int(last_it)):
+for it in range(last_it):
     dEdt1 = stopping_from_electrons(ne, Te, Zb, ion_mass12)
     a_EE1 *= np.exp(dEdt1 * dt)
     dEdt2 = stopping_from_electrons(ne2, Te2, Zb, ion_mass12)
diff --git a/Examples/Tests/ion_stopping/inputs_3d b/Examples/Tests/ion_stopping/inputs_test_3d_ion_stopping
similarity index 99%
rename from Examples/Tests/ion_stopping/inputs_3d
rename to Examples/Tests/ion_stopping/inputs_test_3d_ion_stopping
index 291e1ca0a9e..93b59bbde4a 100644
--- a/Examples/Tests/ion_stopping/inputs_3d
+++ b/Examples/Tests/ion_stopping/inputs_test_3d_ion_stopping
@@ -14,6 +14,7 @@ boundary.field_hi = periodic periodic periodic
 boundary.particle_lo = periodic periodic periodic
 boundary.particle_hi = periodic periodic periodic
 algo.particle_shape = 1
+warpx.cfl = 0.7
 
 particles.species_names = ions1 ions2 ions3 ions4
diff --git a/Examples/Tests/ionization/CMakeLists.txt b/Examples/Tests/ionization/CMakeLists.txt
new file mode 100644
index 00000000000..32da653f301
--- /dev/null
+++ b/Examples/Tests/ionization/CMakeLists.txt
@@ -0,0 +1,35 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_2d_ionization_boost  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_ionization_boost  # inputs
+    analysis.py  # analysis
+    diags/diag1000420  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_ionization_lab  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_ionization_lab  # inputs
+    analysis.py  # analysis
+    diags/diag1001600  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_ionization_picmi  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_ionization_picmi.py  # inputs
+    analysis.py  # analysis
+    diags/diag1001600  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/ionization/analysis_ionization.py b/Examples/Tests/ionization/analysis.py
similarity index 100%
rename from Examples/Tests/ionization/analysis_ionization.py
rename to Examples/Tests/ionization/analysis.py
diff --git a/Examples/Tests/ionization/inputs_2d_bf_rt b/Examples/Tests/ionization/inputs_test_2d_ionization_boost
similarity index 100%
rename from Examples/Tests/ionization/inputs_2d_bf_rt
rename to Examples/Tests/ionization/inputs_test_2d_ionization_boost
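The rewritten loop above integrates the stopping power in time: each step multiplies the energy by exp(dEdt * dt), which treats the value returned by stopping_from_electrons as a relative (per-unit-energy) loss rate. A toy version of that update with a constant rate and illustrative numbers (not the test's physical parameters):

    import numpy as np

    dt = 1.0e-12  # step size in seconds (illustrative)
    last_it = 10  # number of steps, matching the hard-coded value above

    rate = -2.0e9  # stand-in for the relative loss rate, 1/s
    E = 1.0e6  # initial ion energy, arbitrary units

    for it in range(last_it):
        # dE/dt = rate * E  =>  E(t + dt) = E(t) * exp(rate * dt)
        E *= np.exp(rate * dt)

    # E now equals E0 * exp(rate * dt * last_it) up to round-off
    print(E)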
diff --git a/Examples/Tests/ionization/inputs_2d_rt b/Examples/Tests/ionization/inputs_test_2d_ionization_lab
similarity index 100%
rename from Examples/Tests/ionization/inputs_2d_rt
rename to Examples/Tests/ionization/inputs_test_2d_ionization_lab
diff --git a/Examples/Tests/ionization/PICMI_inputs_2d.py b/Examples/Tests/ionization/inputs_test_2d_ionization_picmi.py
similarity index 96%
rename from Examples/Tests/ionization/PICMI_inputs_2d.py
rename to Examples/Tests/ionization/inputs_test_2d_ionization_picmi.py
index 00db8c83ad1..6d1d485cc8c 100644
--- a/Examples/Tests/ionization/PICMI_inputs_2d.py
+++ b/Examples/Tests/ionization/inputs_test_2d_ionization_picmi.py
@@ -94,16 +94,12 @@
     period=10000,
     species=[electrons, ions],
     data_list=["ux", "uy", "uz", "x", "z", "weighting", "orig_z"],
-    write_dir=".",
-    warpx_file_prefix="Python_ionization_plt",
 )
 field_diag = picmi.FieldDiagnostic(
     name="diag1",
     grid=grid,
     period=10000,
     data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"],
-    write_dir=".",
-    warpx_file_prefix="Python_ionization_plt",
 )
 
 # Set up simulation
diff --git a/Examples/Tests/langmuir/CMakeLists.txt b/Examples/Tests/langmuir/CMakeLists.txt
new file mode 100644
index 00000000000..1223a23e4d2
--- /dev/null
+++ b/Examples/Tests/langmuir/CMakeLists.txt
@@ -0,0 +1,435 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_1d_langmuir_multi  # name
+    1  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_1d_langmuir_multi  # inputs
+    analysis_1d.py  # analysis
+    diags/diag1000080  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_langmuir_multi_mr  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_langmuir_multi_mr  # inputs
+    analysis_2d.py  # analysis
+    diags/diag1000080  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_langmuir_multi_mr_anisotropic  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_langmuir_multi_mr_anisotropic  # inputs
+    analysis_2d.py  # analysis
+    diags/diag1000080  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_langmuir_multi_mr_momentum_conserving  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_langmuir_multi_mr_momentum_conserving  # inputs
+    analysis_2d.py  # analysis
+    diags/diag1000080  # output
+    OFF  # dependency
+)
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_2d_langmuir_multi_mr_psatd  # name
+        2  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_2d_langmuir_multi_mr_psatd  # inputs
+        analysis_2d.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+add_warpx_test(
+    test_2d_langmuir_multi_nodal  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_langmuir_multi_nodal  # inputs
+    analysis_2d.py  # analysis
+    diags/diag1000080  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_langmuir_multi_picmi  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_langmuir_multi_picmi.py  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000040  # output
+    OFF  # dependency
+)
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_2d_langmuir_multi_psatd  # name
+        2  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_2d_langmuir_multi_psatd  # inputs
+        analysis_2d.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_2d_langmuir_multi_psatd_current_correction  # name
+        2  # dims
+        1  # nprocs
+        OFF  # eb
+        inputs_test_2d_langmuir_multi_psatd_current_correction  # inputs
+        analysis_2d.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_2d_langmuir_multi_psatd_current_correction_nodal  # name
+        2  # dims
+        1  # nprocs
+        OFF  # eb
+        inputs_test_2d_langmuir_multi_psatd_current_correction_nodal  # inputs
+        analysis_2d.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_2d_langmuir_multi_psatd_momentum_conserving  # name
+        2  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_2d_langmuir_multi_psatd_momentum_conserving  # inputs
+        analysis_2d.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_2d_langmuir_multi_psatd_multiJ  # name
+        2  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_2d_langmuir_multi_psatd_multiJ  # inputs
+        analysis_2d.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_2d_langmuir_multi_psatd_multiJ_nodal  # name
+        2  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_2d_langmuir_multi_psatd_multiJ_nodal  # inputs
+        analysis_2d.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_2d_langmuir_multi_psatd_nodal  # name
+        2  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_2d_langmuir_multi_psatd_nodal  # inputs
+        analysis_2d.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_2d_langmuir_multi_psatd_vay_deposition  # name
+        2  # dims
+        1  # nprocs
+        OFF  # eb
+        inputs_test_2d_langmuir_multi_psatd_vay_deposition  # inputs
+        analysis_2d.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_2d_langmuir_multi_psatd_vay_deposition_nodal  # name
+        2  # dims
+        1  # nprocs
+        OFF  # eb
+        inputs_test_2d_langmuir_multi_psatd_vay_deposition_nodal  # inputs
+        analysis_2d.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_2d_langmuir_multi_psatd_vay_deposition_particle_shape_4  # name
+        2  # dims
+        1  # nprocs
+        OFF  # eb
+        inputs_test_2d_langmuir_multi_psatd_vay_deposition_particle_shape_4  # inputs
+        analysis_2d.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+add_warpx_test(
+    test_3d_langmuir_multi  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_langmuir_multi  # inputs
+    analysis_3d.py  # analysis
+    diags/diag1000040  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_langmuir_multi_nodal  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_langmuir_multi_nodal  # inputs
+    analysis_3d.py  # analysis
+    diags/diag1000040  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_langmuir_multi_picmi  # name
+    3  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_3d_langmuir_multi_picmi.py  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000040  # output
+    OFF  # dependency
+)
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_3d_langmuir_multi_psatd  # name
+        3  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_3d_langmuir_multi_psatd  # inputs
+        analysis_3d.py  # analysis
+        diags/diag1000040  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_3d_langmuir_multi_psatd_current_correction  # name
+        3  # dims
+        1  # nprocs
+        OFF  # eb
+        inputs_test_3d_langmuir_multi_psatd_current_correction  # inputs
+        analysis_3d.py  # analysis
+        diags/diag1000040  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_3d_langmuir_multi_psatd_current_correction_nodal  # name
+        3  # dims
+        1  # nprocs
+        OFF  # eb
+        inputs_test_3d_langmuir_multi_psatd_current_correction_nodal  # inputs
+        analysis_3d.py  # analysis
+        diags/diag1000040  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_3d_langmuir_multi_psatd_div_cleaning  # name
+        3  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_3d_langmuir_multi_psatd_div_cleaning  # inputs
+        analysis_3d.py  # analysis
+        diags/diag1000040  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_3d_langmuir_multi_psatd_momentum_conserving  # name
+        3  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_3d_langmuir_multi_psatd_momentum_conserving  # inputs
+        analysis_3d.py  # analysis
+        diags/diag1000040  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_3d_langmuir_multi_psatd_multiJ  # name
+        3  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_3d_langmuir_multi_psatd_multiJ  # inputs
+        analysis_3d.py  # analysis
+        diags/diag1000040  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_3d_langmuir_multi_psatd_multiJ_nodal  # name
+        3  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_3d_langmuir_multi_psatd_multiJ_nodal  # inputs
+        analysis_3d.py  # analysis
+        diags/diag1000040  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_3d_langmuir_multi_psatd_nodal  # name
+        3  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_3d_langmuir_multi_psatd_nodal  # inputs
+        analysis_3d.py  # analysis
+        diags/diag1000040  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_3d_langmuir_multi_psatd_vay_deposition  # name
+        3  # dims
+        1  # nprocs
+        OFF  # eb
+        inputs_test_3d_langmuir_multi_psatd_vay_deposition  # inputs
+        analysis_3d.py  # analysis
+        diags/diag1000040  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_3d_langmuir_multi_psatd_vay_deposition_nodal  # name
+        3  # dims
+        1  # nprocs
+        OFF  # eb
+        inputs_test_3d_langmuir_multi_psatd_vay_deposition_nodal  # inputs
+        analysis_3d.py  # analysis
+        diags/diag1000040  # output
+        OFF  # dependency
+    )
+endif()
+
+add_warpx_test(
+    test_rz_langmuir_multi  # name
+    RZ  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_rz_langmuir_multi  # inputs
+    analysis_rz.py  # analysis
+    diags/diag1000080  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_rz_langmuir_multi_picmi  # name
+    RZ  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_rz_langmuir_multi_picmi.py  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000040  # output
+    OFF  # dependency
+)
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_rz_langmuir_multi_psatd  # name
+        RZ  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_rz_langmuir_multi_psatd  # inputs
+        analysis_rz.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_rz_langmuir_multi_psatd_current_correction  # name
+        RZ  # dims
+        1  # nprocs
+        OFF  # eb
+        inputs_test_rz_langmuir_multi_psatd_current_correction  # inputs
+        analysis_rz.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
+
+if(WarpX_FFT)
+    add_warpx_test(
+        test_rz_langmuir_multi_psatd_multiJ  # name
+        RZ  # dims
+        2  # nprocs
+        OFF  # eb
+        inputs_test_rz_langmuir_multi_psatd_multiJ  # inputs
+        analysis_rz.py  # analysis
+        diags/diag1000080  # output
+        OFF  # dependency
+    )
+endif()
diff --git a/Examples/Tests/langmuir/README.rst b/Examples/Tests/langmuir/README
similarity index 100%
rename from Examples/Tests/langmuir/README.rst
rename to Examples/Tests/langmuir/README
diff --git a/Examples/Tests/langmuir/analysis_1d.py b/Examples/Tests/langmuir/analysis_1d.py
index 3ba21751671..d041ca03b36 100755
--- a/Examples/Tests/langmuir/analysis_1d.py
+++ b/Examples/Tests/langmuir/analysis_1d.py
@@ -29,14 +29,17 @@
 sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
 import checksumAPI
 
+# test name
+test_name = os.path.split(os.getcwd())[1]
+
 # this will be the name of the plot file
 fn = sys.argv[1]
 
 # Parse test name and check if current correction (psatd.current_correction=1) is applied
-current_correction = True if re.search("current_correction", fn) else False
+current_correction = True if re.search("current_correction", test_name) else False
 
 # Parse test name and check if Vay current deposition (algo.current_deposition=vay) is used
-vay_deposition = True if re.search("Vay_deposition", fn) else False
+vay_deposition = True if re.search("vay_deposition", test_name) else False
 
 # Parameters (these parameters must match the parameters in `inputs.multi.rt`)
 epsilon = 0.01
@@ -123,5 +126,4 @@ def get_theoretical_field(field, t):
 print("tolerance = {}".format(tolerance))
 assert error_rel < tolerance
 
-test_name = os.path.split(os.getcwd())[1]
 checksumAPI.evaluate_checksum(test_name, fn)
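All of these Langmuir checks reduce to the same relative L2-norm comparison between the simulated and theoretical fields before the checksum is evaluated. The metric in isolation (the arrays here are synthetic stand-ins for the field data read from the plotfile):

    import numpy as np

    # Synthetic stand-ins for the simulated and theoretical field arrays
    E_th = np.sin(np.linspace(0.0, 2.0 * np.pi, 128))
    E_sim = E_th + 1.0e-6 * np.random.default_rng(0).standard_normal(128)

    # Relative L2-norm error, as computed by the analysis scripts
    error_rel = np.sqrt(np.sum(np.square(E_sim - E_th)) / np.sum(np.square(E_th)))

    tolerance = 1.0e-4  # illustrative; the tests set per-case tolerances
    assert error_rel < tolerance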
diff --git a/Examples/Tests/langmuir/analysis_2d.py b/Examples/Tests/langmuir/analysis_2d.py
index 8914b8b426c..ac98354c73b 100755
--- a/Examples/Tests/langmuir/analysis_2d.py
+++ b/Examples/Tests/langmuir/analysis_2d.py
@@ -29,19 +29,23 @@
 sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
 import checksumAPI
 
+# test name
+test_name = os.path.split(os.getcwd())[1]
+
 # this will be the name of the plot file
 fn = sys.argv[1]
 
 # Parse test name and check if current correction (psatd.current_correction=1) is applied
-current_correction = True if re.search("current_correction", fn) else False
+current_correction = True if re.search("current_correction", test_name) else False
 
 # Parse test name and check if Vay current deposition (algo.current_deposition=vay) is used
-vay_deposition = True if re.search("Vay_deposition", fn) else False
+vay_deposition = True if re.search("vay_deposition", test_name) else False
 
 # Parse test name and check if particle_shape = 4 is used
-particle_shape_4 = True if re.search("particle_shape_4", fn) else False
+particle_shape_4 = True if re.search("particle_shape_4", test_name) else False
 
-# Parameters (these parameters must match the parameters in `inputs.multi.rt`)
+# Parameters (must match the parameters in the inputs)
+# FIXME read these parameters from warpx_used_inputs
 epsilon = 0.01
 n = 4.0e24
 n_osc_x = 2
@@ -159,5 +163,4 @@ def get_theoretical_field(field, t):
 print("tolerance = {}".format(tolerance))
 assert error_rel < tolerance
 
-test_name = os.path.split(os.getcwd())[1]
 checksumAPI.evaluate_checksum(test_name, fn)
diff --git a/Examples/Tests/langmuir/analysis_3d.py b/Examples/Tests/langmuir/analysis_3d.py
index 6fd58e62de4..9f4b2cc1f93 100755
--- a/Examples/Tests/langmuir/analysis_3d.py
+++ b/Examples/Tests/langmuir/analysis_3d.py
@@ -29,17 +29,20 @@
 sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
 import checksumAPI
 
+# test name
+test_name = os.path.split(os.getcwd())[1]
+
 # this will be the name of the plot file
 fn = sys.argv[1]
 
 # Parse test name and check if current correction (psatd.current_correction=1) is applied
-current_correction = True if re.search("current_correction", fn) else False
+current_correction = True if re.search("current_correction", test_name) else False
 
 # Parse test name and check if Vay current deposition (algo.current_deposition=vay) is used
-vay_deposition = True if re.search("Vay_deposition", fn) else False
+vay_deposition = True if re.search("vay_deposition", test_name) else False
 
 # Parse test name and check if div(E)/div(B) cleaning (warpx.do_div_cleaning=1) is used
-div_cleaning = True if re.search("div_cleaning", fn) else False
+div_cleaning = True if re.search("div_cleaning", test_name) else False
 
 # Parameters (these parameters must match the parameters in `inputs.multi.rt`)
 epsilon = 0.01
@@ -178,8 +181,8 @@ def get_theoretical_field(field, t):
     assert error_rel < tolerance
 
 if div_cleaning:
-    ds_old = yt.load("Langmuir_multi_psatd_div_cleaning_plt000038")
-    ds_mid = yt.load("Langmuir_multi_psatd_div_cleaning_plt000039")
+    ds_old = yt.load("diags/diag1000038")
+    ds_mid = yt.load("diags/diag1000039")
     ds_new = yt.load(fn)  # this is the last plotfile
 
     ad_old = ds_old.covering_grid(
@@ -209,9 +212,7 @@ def get_theoretical_field(field, t):
 print("tolerance = {}".format(tolerance))
 assert error_rel < tolerance
 
-test_name = os.path.split(os.getcwd())[1]
-
-if re.search("single_precision", fn):
+if re.search("single_precision", test_name):
     checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3)
 else:
     checksumAPI.evaluate_checksum(test_name, fn)
diff --git a/Examples/Tests/langmuir/analysis_default_regression.py b/Examples/Tests/langmuir/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/langmuir/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/langmuir/analysis_rz.py b/Examples/Tests/langmuir/analysis_rz.py
index 792394ea573..dd26fd29db7 100755
--- a/Examples/Tests/langmuir/analysis_rz.py
+++ b/Examples/Tests/langmuir/analysis_rz.py
@@ -35,10 +35,11 @@
 
 # this will be the name of the plot file
 fn = sys.argv[1]
+# test name
 test_name = os.path.split(os.getcwd())[1]
 
 # Parse test name and check if current correction (psatd.current_correction) is applied
-current_correction = True if re.search("current_correction", fn) else False
+current_correction = True if re.search("current_correction", test_name) else False
 
 # Parameters (these parameters must match the parameters in `inputs.multi.rz.rt`)
 epsilon = 0.01
diff --git a/Examples/Tests/langmuir/inputs_2d b/Examples/Tests/langmuir/inputs_base_2d
similarity index 100%
rename from Examples/Tests/langmuir/inputs_2d
rename to Examples/Tests/langmuir/inputs_base_2d
diff --git a/Examples/Tests/langmuir/inputs_3d b/Examples/Tests/langmuir/inputs_base_3d
similarity index 100%
rename from Examples/Tests/langmuir/inputs_3d
rename to Examples/Tests/langmuir/inputs_base_3d
diff --git a/Examples/Tests/langmuir/inputs_rz b/Examples/Tests/langmuir/inputs_base_rz
similarity index 100%
rename from Examples/Tests/langmuir/inputs_rz
rename to Examples/Tests/langmuir/inputs_base_rz
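After the physics checks, every script ends in the same checksum call; the rtol override for single-precision builds is the only variation seen here. A sketch of the call pattern, assuming checksumAPI from warpx/Regression/Checksum as imported by the scripts above:

    import os
    import sys

    sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
    import checksumAPI

    fn = sys.argv[1]
    test_name = os.path.split(os.getcwd())[1]

    if "single_precision" in test_name:
        # looser relative tolerance for single-precision builds
        checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3)
    else:
        checksumAPI.evaluate_checksum(test_name, fn)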
diff --git a/Examples/Tests/langmuir/inputs_1d b/Examples/Tests/langmuir/inputs_test_1d_langmuir_multi
similarity index 95%
rename from Examples/Tests/langmuir/inputs_1d
rename to Examples/Tests/langmuir/inputs_test_1d_langmuir_multi
index af1cf367553..e2fd1da4b94 100644
--- a/Examples/Tests/langmuir/inputs_1d
+++ b/Examples/Tests/langmuir/inputs_test_1d_langmuir_multi
@@ -27,6 +27,7 @@ warpx.verbose = 1
 
 # Algorithms
 algo.field_gathering = energy-conserving
+algo.current_deposition = esirkepov
 warpx.use_filter = 0
 
 # Order of particle shape factors
@@ -79,6 +80,8 @@ positrons.momentum_function_uz(x,y,z) = "-epsilon * k/kp * cos(k*x) * cos(k*y) *
 diagnostics.diags_names = diag1 openpmd
 diag1.intervals = 40
 diag1.diag_type = Full
+diag1.electrons.variables = z w ux uy uz
+diag1.positrons.variables = z w ux uy uz
 
 openpmd.intervals = 40
 openpmd.diag_type = Full
diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr
new file mode 100644
index 00000000000..8adf73023be
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr
@@ -0,0 +1,12 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.maxwell_solver = ckc
+amr.max_level = 1
+amr.ref_ratio = 4
+diag1.electrons.variables = x z w ux uy uz
+diag1.positrons.variables = x z w ux uy uz
+warpx.fine_tag_hi = 10.e-6 10.e-6
+warpx.fine_tag_lo = -10.e-6 -10.e-6
+warpx.use_filter = 1
diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr_anisotropic b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr_anisotropic
new file mode 100644
index 00000000000..047943373c0
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr_anisotropic
@@ -0,0 +1,12 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.maxwell_solver = ckc
+amr.max_level = 1
+amr.ref_ratio_vect = 4 2
+diag1.electrons.variables = x z w ux uy uz
+diag1.positrons.variables = x z w ux uy uz
+warpx.fine_tag_hi = 10.e-6 10.e-6
+warpx.fine_tag_lo = -10.e-6 -10.e-6
+warpx.use_filter = 1
diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr_momentum_conserving b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr_momentum_conserving
new file mode 100644
index 00000000000..201f19f32c2
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr_momentum_conserving
@@ -0,0 +1,13 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.field_gathering = momentum-conserving
+algo.maxwell_solver = ckc
+amr.max_level = 1
+amr.ref_ratio = 4
+diag1.electrons.variables = x z w ux uy uz
+diag1.positrons.variables = x z w ux uy uz
+warpx.use_filter = 1
+warpx.fine_tag_lo = -10.e-6 -10.e-6
+warpx.fine_tag_hi = 10.e-6 10.e-6
diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr_psatd b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr_psatd
new file mode 100644
index 00000000000..cf95a07e2fc
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_mr_psatd
@@ -0,0 +1,14 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.maxwell_solver = psatd
+amr.max_level = 1
+amr.ref_ratio = 4
+diag1.electrons.variables = x z w ux uy uz
+diag1.positrons.variables = x z w ux uy uz
+psatd.current_correction = 0
+warpx.abort_on_warning_threshold = medium
+warpx.fine_tag_hi = 10.e-6 10.e-6
+warpx.fine_tag_lo = -10.e-6 -10.e-6
+warpx.use_filter = 1
diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_nodal b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_nodal
new file mode 100644
index 00000000000..99d952d79d9
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_nodal
@@ -0,0 +1,8 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.current_deposition = direct
+diag1.electrons.variables = x z w ux uy uz
+diag1.positrons.variables = x z w ux uy uz
+warpx.grid_type = collocated
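The new test inputs are thin overlays: the FILE line pulls in the shared base parameters and the remaining lines override or extend them. The actual expansion is done by the test harness, but conceptually it amounts to splicing in the referenced file; a simplified Python sketch of that idea (expand_inputs is a hypothetical helper, not the harness code):

    from pathlib import Path

    def expand_inputs(path):
        """Conceptual expansion of a test inputs file: splice in the file
        named by a 'FILE = ...' line, keep everything else as-is."""
        lines = []
        for line in Path(path).read_text().splitlines():
            if line.startswith("FILE"):
                base = line.split("=", 1)[1].strip()
                lines.extend(Path(path).with_name(base).read_text().splitlines())
            else:
                lines.append(line)
        return "\n".join(lines)

    # e.g. expand_inputs("inputs_test_2d_langmuir_multi_nodal") would inline inputs_base_2d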
diff --git a/Examples/Tests/langmuir/PICMI_inputs_2d.py b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_picmi.py
similarity index 97%
rename from Examples/Tests/langmuir/PICMI_inputs_2d.py
rename to Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_picmi.py
index 11020ac34fb..dc7fa3a2ba7 100755
--- a/Examples/Tests/langmuir/PICMI_inputs_2d.py
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_picmi.py
@@ -73,8 +73,6 @@
     grid=grid,
     period=diagnostic_intervals,
     data_list=["Ex", "Jx"],
-    write_dir=".",
-    warpx_file_prefix="Python_Langmuir_2d_plt",
 )
 
 part_diag1 = picmi.ParticleDiagnostic(
diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd
new file mode 100644
index 00000000000..2386f9e462f
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd
@@ -0,0 +1,11 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.maxwell_solver = psatd
+diag1.electrons.variables = x z w ux uy uz
+diag1.positrons.variables = x z w ux uy uz
+diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell
+psatd.current_correction = 0
+warpx.abort_on_warning_threshold = medium
+warpx.cfl = 0.7071067811865475
diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_current_correction b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_current_correction
new file mode 100644
index 00000000000..c56572ac957
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_current_correction
@@ -0,0 +1,13 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.current_deposition = esirkepov
+algo.maxwell_solver = psatd
+amr.max_grid_size = 128
+diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE
+diag1.electrons.variables = x z w ux uy uz
+diag1.positrons.variables = x z w ux uy uz
+psatd.current_correction = 1
+psatd.periodic_single_box_fft = 1
+warpx.cfl = 0.7071067811865475
diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_current_correction_nodal b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_current_correction_nodal
new file mode 100644
index 00000000000..5359d8703f3
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_current_correction_nodal
@@ -0,0 +1,14 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.current_deposition = direct
+algo.maxwell_solver = psatd
+amr.max_grid_size = 128
+diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE
+diag1.electrons.variables = x z w ux uy uz
+diag1.positrons.variables = x z w ux uy uz
+psatd.current_correction = 1
+psatd.periodic_single_box_fft = 1
+warpx.cfl = 0.7071067811865475
+warpx.grid_type = collocated
diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_momentum_conserving b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_momentum_conserving
new file mode 100644
index 00000000000..694f65fe233
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_momentum_conserving
@@ -0,0 +1,12 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.field_gathering = momentum-conserving
+algo.maxwell_solver = psatd
+diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell
+diag1.electrons.variables = x z w ux uy uz
+diag1.positrons.variables = x z w ux uy uz
+psatd.current_correction = 0
+warpx.abort_on_warning_threshold = medium
+warpx.cfl = 0.7071067811865475
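The long CFL constants recurring in these files are not arbitrary: 0.7071067811865475 is 1/sqrt(2), and 0.5773502691896258 (used by the 3D variants below) is 1/sqrt(3), i.e. the Courant limits for square and cubic cells where dt = dx/(c*sqrt(d)) in d dimensions. A quick check:

    import math

    # Courant limits for square/cubic cells: dt = dx / (c * sqrt(d))
    print(f"{1 / math.sqrt(2):.16f}")  # 0.7071067811865475, matches the 2D inputs
    print(f"{1 / math.sqrt(3):.16f}")  # 0.5773502691896258, matches the 3D inputs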
diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_multiJ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_multiJ
new file mode 100644
index 00000000000..793f077b0f7
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_multiJ
@@ -0,0 +1,12 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.maxwell_solver = psatd
+psatd.J_in_time = linear
+psatd.solution_type = first-order
+psatd.update_with_rho = 1
+warpx.abort_on_warning_threshold = medium
+warpx.cfl = 0.7071067811865475
+warpx.do_multi_J = 1
+warpx.do_multi_J_n_depositions = 2
diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_multiJ_nodal b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_multiJ_nodal
new file mode 100644
index 00000000000..573337abb76
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_multiJ_nodal
@@ -0,0 +1,13 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.maxwell_solver = psatd
+psatd.J_in_time = linear
+psatd.solution_type = first-order
+psatd.update_with_rho = 1
+warpx.abort_on_warning_threshold = medium
+warpx.cfl = 0.7071067811865475
+warpx.do_multi_J = 1
+warpx.do_multi_J_n_depositions = 2
+warpx.grid_type = collocated
diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_nodal b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_nodal
new file mode 100644
index 00000000000..8f02d4f8aae
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_psatd_nodal
@@ -0,0 +1,13 @@
+# base input parameters
+FILE = inputs_base_2d
+
+# test input parameters
+algo.current_deposition = direct
+algo.maxwell_solver = psatd
+diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell
+diag1.electrons.variables = x z w ux uy uz
+diag1.positrons.variables = x z w ux uy uz
+psatd.current_correction = 0
+warpx.abort_on_warning_threshold = medium
+warpx.cfl = 0.7071067811865475
+warpx.grid_type = collocated
+algo.maxwell_solver = psatd +algo.particle_shape = 4 +amr.max_grid_size = 128 +diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE +diag1.electrons.variables = x z w ux uy uz +diag1.positrons.variables = x z w ux uy uz +warpx.cfl = 0.7071067811865475 diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi new file mode 100644 index 00000000000..7665a846eef --- /dev/null +++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi @@ -0,0 +1,2 @@ +# base input parameters +FILE = inputs_base_3d diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_nodal b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_nodal new file mode 100644 index 00000000000..9620cd97f33 --- /dev/null +++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_nodal @@ -0,0 +1,6 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +algo.current_deposition = direct +warpx.grid_type = collocated diff --git a/Examples/Tests/langmuir/PICMI_inputs_3d.py b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_picmi.py similarity index 97% rename from Examples/Tests/langmuir/PICMI_inputs_3d.py rename to Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_picmi.py index e5cef203b7e..11ea1843e27 100755 --- a/Examples/Tests/langmuir/PICMI_inputs_3d.py +++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_picmi.py @@ -75,8 +75,6 @@ grid=grid, period=diagnostic_interval, data_list=["Ex", "Jx"], - write_dir=".", - warpx_file_prefix="Python_Langmuir_plt", ) part_diag1 = picmi.ParticleDiagnostic( diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd new file mode 100644 index 00000000000..427de2993b1 --- /dev/null +++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd @@ -0,0 +1,6 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +algo.maxwell_solver = psatd +warpx.cfl = 0.5773502691896258 diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction new file mode 100644 index 00000000000..86f33d131ce --- /dev/null +++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction @@ -0,0 +1,10 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +algo.current_deposition = esirkepov +algo.maxwell_solver = psatd +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE +psatd.current_correction = 1 +psatd.periodic_single_box_fft = 1 +warpx.cfl = 0.5773502691896258 diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction_nodal b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction_nodal new file mode 100644 index 00000000000..7f67b9100b2 --- /dev/null +++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction_nodal @@ -0,0 +1,11 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +algo.current_deposition = direct +algo.maxwell_solver = psatd +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE +psatd.current_correction = 1 +psatd.periodic_single_box_fft = 1 +warpx.cfl = 0.5773502691896258 +warpx.grid_type = collocated diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_div_cleaning b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_div_cleaning new file mode 
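A note on the warpx.cfl values used throughout these new input files: 0.7071067811865475 and 0.5773502691896258 are 1/sqrt(2) and 1/sqrt(3), i.e. the Courant limits for equal cell sizes in 2D and 3D respectively. The short sketch below reproduces the constants; it is illustrative only and not part of the patch:

    # check_cfl.py - illustrative sketch, not part of this patch
    import math

    def courant_limit(ndim: int) -> float:
        # Courant factor c*dt/dx at the stability limit for equal
        # cell sizes in ndim spatial dimensions.
        return 1.0 / math.sqrt(ndim)

    assert math.isclose(courant_limit(2), 0.7071067811865475)
    assert math.isclose(courant_limit(3), 0.5773502691896258)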
diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi
new file mode 100644
index 00000000000..7665a846eef
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi
@@ -0,0 +1,2 @@
+# base input parameters
+FILE = inputs_base_3d
diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_nodal b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_nodal
new file mode 100644
index 00000000000..9620cd97f33
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_nodal
@@ -0,0 +1,6 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+algo.current_deposition = direct
+warpx.grid_type = collocated
diff --git a/Examples/Tests/langmuir/PICMI_inputs_3d.py b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_picmi.py
similarity index 97%
rename from Examples/Tests/langmuir/PICMI_inputs_3d.py
rename to Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_picmi.py
index e5cef203b7e..11ea1843e27 100755
--- a/Examples/Tests/langmuir/PICMI_inputs_3d.py
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_picmi.py
@@ -75,8 +75,6 @@
     grid=grid,
     period=diagnostic_interval,
     data_list=["Ex", "Jx"],
-    write_dir=".",
-    warpx_file_prefix="Python_Langmuir_plt",
 )

 part_diag1 = picmi.ParticleDiagnostic(
diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd
new file mode 100644
index 00000000000..427de2993b1
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd
@@ -0,0 +1,6 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+algo.maxwell_solver = psatd
+warpx.cfl = 0.5773502691896258
diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction
new file mode 100644
index 00000000000..86f33d131ce
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction
@@ -0,0 +1,10 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+algo.current_deposition = esirkepov
+algo.maxwell_solver = psatd
+diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE
+psatd.current_correction = 1
+psatd.periodic_single_box_fft = 1
+warpx.cfl = 0.5773502691896258
diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction_nodal b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction_nodal
new file mode 100644
index 00000000000..7f67b9100b2
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_current_correction_nodal
@@ -0,0 +1,11 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+algo.current_deposition = direct
+algo.maxwell_solver = psatd
+diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE
+psatd.current_correction = 1
+psatd.periodic_single_box_fft = 1
+warpx.cfl = 0.5773502691896258
+warpx.grid_type = collocated
diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_div_cleaning b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_div_cleaning
new file mode 100644
index 00000000000..d372b789336
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_div_cleaning
@@ -0,0 +1,13 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+algo.current_deposition = direct
+algo.maxwell_solver = psatd
+diag1.intervals = 0, 38:40:1
+diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE F
+psatd.update_with_rho = 1
+warpx.abort_on_warning_threshold = medium
+warpx.cfl = 0.5773502691896258
+warpx.do_dive_cleaning = 1
+warpx.do_divb_cleaning = 1
diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_momentum_conserving b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_momentum_conserving
new file mode 100644
index 00000000000..15a4c7d6985
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_momentum_conserving
@@ -0,0 +1,7 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+algo.field_gathering = momentum-conserving
+algo.maxwell_solver = psatd
+warpx.cfl = 0.5773502691896258
diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_multiJ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_multiJ
new file mode 100644
index 00000000000..e1cd25cd93d
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_multiJ
@@ -0,0 +1,13 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+algo.current_deposition = direct
+algo.maxwell_solver = psatd
+warpx.cfl = 0.5773502691896258
+warpx.do_multi_J = 1
+warpx.do_multi_J_n_depositions = 2
+psatd.J_in_time = linear
+psatd.solution_type = first-order
+psatd.update_with_rho = 1
+warpx.abort_on_warning_threshold = medium
diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_multiJ_nodal b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_multiJ_nodal
new file mode 100644
index 00000000000..4a828d2e8b5
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_multiJ_nodal
@@ -0,0 +1,14 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+algo.current_deposition = direct
+algo.maxwell_solver = psatd
+psatd.J_in_time = linear
+psatd.solution_type = first-order
+psatd.update_with_rho = 1
+warpx.abort_on_warning_threshold = medium
+warpx.cfl = 0.5773502691896258
+warpx.do_multi_J = 1
+warpx.do_multi_J_n_depositions = 2
+warpx.grid_type = collocated
diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_nodal b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_nodal
new file mode 100644
index 00000000000..fd03e00968a
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_nodal
@@ -0,0 +1,10 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+algo.current_deposition = direct
+algo.maxwell_solver = psatd
+psatd.current_correction = 0
+warpx.abort_on_warning_threshold = medium
+warpx.cfl = 0.5773502691896258
+warpx.grid_type = collocated
diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_vay_deposition b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_vay_deposition
new file mode 100644
index 00000000000..5e2ffa9d407
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_vay_deposition
@@ -0,0 +1,8 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+algo.current_deposition = vay
+algo.maxwell_solver = psatd
+diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE
+warpx.cfl = 0.5773502691896258
diff --git a/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_vay_deposition_nodal b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_vay_deposition_nodal
new file mode 100644
index 00000000000..df311b0fb3c
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_psatd_vay_deposition_nodal
@@ -0,0 +1,9 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# test input parameters
+algo.current_deposition = vay
+algo.maxwell_solver = psatd
+diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE
+warpx.cfl = 0.5773502691896258
+warpx.grid_type = collocated
diff --git a/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi b/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi
new file mode 100644
index 00000000000..45665b67266
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi
@@ -0,0 +1,7 @@
+# base input parameters
+FILE = inputs_base_rz
+
+# test input parameters
+diag1.dump_rz_modes = 0
+diag1.electrons.variables = x y z w ux uy uz
+diag1.ions.variables = x y z w ux uy uz
diff --git a/Examples/Tests/langmuir/PICMI_inputs_rz.py b/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_picmi.py
similarity index 99%
rename from Examples/Tests/langmuir/PICMI_inputs_rz.py
rename to Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_picmi.py
index e1becedd62d..24eedd703d2 100755
--- a/Examples/Tests/langmuir/PICMI_inputs_rz.py
+++ b/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_picmi.py
@@ -118,8 +118,6 @@
     grid=grid,
     period=diagnostic_intervals,
     data_list=["Er", "Ez", "Bt", "Jr", "Jz", "part_per_cell"],
-    write_dir=".",
-    warpx_file_prefix="Python_Langmuir_rz_multimode_plt",
 )

 part_diag1 = picmi.ParticleDiagnostic(
diff --git a/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_psatd b/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_psatd
new file mode 100644
index 00000000000..5537335629d
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_psatd
@@ -0,0 +1,15 @@
+# base input parameters
+FILE = inputs_base_rz
+
+# test input parameters
+algo.current_deposition = direct
+algo.maxwell_solver = psatd
+diag1.dump_rz_modes = 0
+diag1.electrons.variables = x y z w ux uy uz
+diag1.ions.variables = x y z w ux uy uz
+electrons.random_theta = 0
+ions.random_theta = 0
+psatd.current_correction = 0
+psatd.update_with_rho = 1
+warpx.abort_on_warning_threshold = medium
+warpx.do_dive_cleaning = 0
diff --git a/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_psatd_current_correction b/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_psatd_current_correction
new file mode 100644
index 00000000000..fac41cea4cd
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_psatd_current_correction
@@ -0,0 +1,16 @@
+# base input parameters
+FILE = inputs_base_rz
+
+# test input parameters
+algo.current_deposition = direct
+algo.maxwell_solver = psatd
+amr.max_grid_size = 128
+diag1.dump_rz_modes = 0
+diag1.electrons.variables = x y z w ux uy uz
+diag1.fields_to_plot = jr jz Er Ez Bt rho divE
+diag1.ions.variables = x y z w ux uy uz
+electrons.random_theta = 0
+ions.random_theta = 0
+psatd.current_correction = 1
+psatd.periodic_single_box_fft = 1
+warpx.do_dive_cleaning = 0
diff --git a/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_psatd_multiJ b/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_psatd_multiJ
new file mode 100644
index 00000000000..0ff617af8eb
--- /dev/null
+++ b/Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_psatd_multiJ
@@ -0,0 +1,22 @@
+# base input parameters
+FILE = inputs_base_rz
+
+# test input parameters
+algo.current_deposition = direct
+algo.maxwell_solver = psatd
+amr.max_grid_size = 32
+diag1.dump_rz_modes = 0
+diag1.electrons.variables = x y z w ux uy uz
+diag1.ions.variables = x y z w ux uy uz
+electrons.num_particles_per_cell_each_dim = 2 4 2
+electrons.random_theta = 0
+ions.num_particles_per_cell_each_dim = 2 4 2
+ions.random_theta = 0
+psatd.current_correction = 0
+psatd.update_with_rho = 1
+warpx.abort_on_warning_threshold = medium
+warpx.do_dive_cleaning = 0
+warpx.do_multi_J = 1
+warpx.do_multi_J_n_depositions = 4
+warpx.n_rz_azimuthal_modes = 2
+warpx.use_filter = 1
diff --git a/Examples/Tests/langmuir_fluids/CMakeLists.txt b/Examples/Tests/langmuir_fluids/CMakeLists.txt
new file mode 100644
index 00000000000..8f3ab3ebc78
--- /dev/null
+++ b/Examples/Tests/langmuir_fluids/CMakeLists.txt
@@ -0,0 +1,46 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_1d_langmuir_fluid  # name
+    1  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_1d_langmuir_fluid  # inputs
+    analysis_1d.py  # analysis
+    diags/diag1000080  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_langmuir_fluid  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_langmuir_fluid  # inputs
+    analysis_2d.py  # analysis
+    diags/diag1000080  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_langmuir_fluid  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_langmuir_fluid  # inputs
+    analysis_3d.py  # analysis
+    diags/diag1000040  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_rz_langmuir_fluid  # name
+    RZ  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_rz_langmuir_fluid  # inputs
+    analysis_rz.py  # analysis
+    diags/diag1000080  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/langmuir_fluids/inputs_1d b/Examples/Tests/langmuir_fluids/inputs_test_1d_langmuir_fluid
similarity index 100%
rename from Examples/Tests/langmuir_fluids/inputs_1d
rename to Examples/Tests/langmuir_fluids/inputs_test_1d_langmuir_fluid
diff --git a/Examples/Tests/langmuir_fluids/inputs_2d b/Examples/Tests/langmuir_fluids/inputs_test_2d_langmuir_fluid
similarity index 100%
rename from Examples/Tests/langmuir_fluids/inputs_2d
rename to Examples/Tests/langmuir_fluids/inputs_test_2d_langmuir_fluid
diff --git a/Examples/Tests/langmuir_fluids/inputs_3d b/Examples/Tests/langmuir_fluids/inputs_test_3d_langmuir_fluid
similarity index 100%
rename from Examples/Tests/langmuir_fluids/inputs_3d
rename to Examples/Tests/langmuir_fluids/inputs_test_3d_langmuir_fluid
diff --git a/Examples/Tests/langmuir_fluids/inputs_rz b/Examples/Tests/langmuir_fluids/inputs_test_rz_langmuir_fluid
similarity index 100%
rename from Examples/Tests/langmuir_fluids/inputs_rz
rename to Examples/Tests/langmuir_fluids/inputs_test_rz_langmuir_fluid
diff --git a/Examples/Tests/larmor/CMakeLists.txt b/Examples/Tests/larmor/CMakeLists.txt
new file mode 100644
index 00000000000..3ddcc394c98
--- /dev/null
+++ b/Examples/Tests/larmor/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_2d_larmor  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_larmor  # inputs
+    analysis_default_regression.py  # analysis
+    diags/diag1000010  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/larmor/analysis_default_regression.py b/Examples/Tests/larmor/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/larmor/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/larmor/inputs_2d_mr b/Examples/Tests/larmor/inputs_test_2d_larmor
similarity index 99%
rename from Examples/Tests/larmor/inputs_2d_mr
rename to Examples/Tests/larmor/inputs_test_2d_larmor
index 5d7af3d67a4..76e4f76ee22 100644
--- a/Examples/Tests/larmor/inputs_2d_mr
+++ b/Examples/Tests/larmor/inputs_test_2d_larmor
@@ -1,5 +1,5 @@
 # Maximum number of time steps
-max_step = 400
+max_step = 10

 # number of grid points
 amr.n_cell = 64 64
diff --git a/Examples/Tests/laser_injection/CMakeLists.txt b/Examples/Tests/laser_injection/CMakeLists.txt
new file mode 100644
index 00000000000..577b8bdcebc
--- /dev/null
+++ b/Examples/Tests/laser_injection/CMakeLists.txt
@@ -0,0 +1,35 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_1d_laser_injection  # name
+    1  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_1d_laser_injection  # inputs
+    analysis_1d.py  # analysis
+    diags/diag1000240  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_laser_injection  # name
+    2  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_2d_laser_injection  # inputs
+    analysis_2d.py  # analysis
+    diags/diag1000240  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_laser_injection  # name
+    3  # dims
+    2  # nprocs
+    OFF  # eb
+    inputs_test_3d_laser_injection  # inputs
+    analysis_3d.py  # analysis
+    diags/diag1000020  # output
+    OFF  # dependency
+)
diff --git a/Examples/Tests/laser_injection/analysis_laser.py b/Examples/Tests/laser_injection/analysis_3d.py
similarity index 100%
rename from Examples/Tests/laser_injection/analysis_laser.py
rename to Examples/Tests/laser_injection/analysis_3d.py
diff --git a/Examples/Tests/laser_injection/inputs_1d_rt b/Examples/Tests/laser_injection/inputs_test_1d_laser_injection
similarity index 100%
rename from Examples/Tests/laser_injection/inputs_1d_rt
rename to Examples/Tests/laser_injection/inputs_test_1d_laser_injection
diff --git a/Examples/Tests/laser_injection/inputs_2d_rt b/Examples/Tests/laser_injection/inputs_test_2d_laser_injection
similarity index 100%
rename from Examples/Tests/laser_injection/inputs_2d_rt
rename to Examples/Tests/laser_injection/inputs_test_2d_laser_injection
diff --git a/Examples/Tests/laser_injection/inputs_3d_rt b/Examples/Tests/laser_injection/inputs_test_3d_laser_injection
similarity index 99%
rename from Examples/Tests/laser_injection/inputs_3d_rt
rename to Examples/Tests/laser_injection/inputs_test_3d_laser_injection
index 72464f86aaf..250a0160881 100644
--- a/Examples/Tests/laser_injection/inputs_3d_rt
+++ b/Examples/Tests/laser_injection/inputs_test_3d_laser_injection
@@ -1,5 +1,5 @@
 # Maximum number of time steps
-max_step = 1000
+max_step = 20

 # number of grid points
 amr.n_cell = 32 32 240
diff --git a/Examples/Tests/laser_injection_from_file/CMakeLists.txt b/Examples/Tests/laser_injection_from_file/CMakeLists.txt
new file mode 100644
index 00000000000..a4f09f6895d
--- /dev/null
+++ b/Examples/Tests/laser_injection_from_file/CMakeLists.txt
@@ -0,0 +1,156 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_1d_laser_injection_from_lasy_file_prepare  # name
+    1  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_1d_laser_injection_from_lasy_file_prepare.py  # inputs
+    OFF  # analysis
+    OFF  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_1d_laser_injection_from_lasy_file  # name
+    1  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_1d_laser_injection_from_lasy_file  # inputs
+    analysis_1d.py  # analysis
+    diags/diag1000251  # output
+    test_1d_laser_injection_from_lasy_file_prepare  # dependency
+)
+
+add_warpx_test(
+    test_1d_laser_injection_from_lasy_file_boost_prepare  # name
+    1  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_1d_laser_injection_from_lasy_file_boost_prepare.py  # inputs
+    OFF  # analysis
+    OFF  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_1d_laser_injection_from_lasy_file_boost  # name
+    1  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_1d_laser_injection_from_lasy_file_boost  # inputs
+    analysis_1d_boost.py  # analysis
+    diags/diag1000001  # output
+    test_1d_laser_injection_from_lasy_file_boost_prepare  # dependency
+)
+
+add_warpx_test(
+    test_2d_laser_injection_from_binary_file_prepare  # name
+    2  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_2d_laser_injection_from_binary_file_prepare.py  # inputs
+    OFF  # analysis
+    OFF  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_laser_injection_from_binary_file  # name
+    2  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_2d_laser_injection_from_binary_file  # inputs
+    analysis_2d_binary.py  # analysis
+    diags/diag1000250  # output
+    test_2d_laser_injection_from_binary_file_prepare  # dependency
+)
+
+add_warpx_test(
+    test_2d_laser_injection_from_lasy_file_prepare  # name
+    2  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_2d_laser_injection_from_lasy_file_prepare.py  # inputs
+    OFF  # analysis
+    OFF  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_laser_injection_from_lasy_file  # name
+    2  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_2d_laser_injection_from_lasy_file  # inputs
+    analysis_2d.py  # analysis
+    diags/diag1000251  # output
+    test_2d_laser_injection_from_lasy_file_prepare  # dependency
+)
+
+add_warpx_test(
+    test_3d_laser_injection_from_lasy_file_prepare  # name
+    3  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_3d_laser_injection_from_lasy_file_prepare.py  # inputs
+    OFF  # analysis
+    OFF  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_3d_laser_injection_from_lasy_file  # name
+    3  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_3d_laser_injection_from_lasy_file  # inputs
+    analysis_3d.py  # analysis
+    diags/diag1000251  # output
+    test_3d_laser_injection_from_lasy_file_prepare  # dependency
+)
+
+add_warpx_test(
+    test_rz_laser_injection_from_lasy_file_prepare  # name
+    RZ  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_rz_laser_injection_from_lasy_file_prepare.py  # inputs
+    OFF  # analysis
+    OFF  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_rz_laser_injection_from_lasy_file  # name
+    RZ  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_rz_laser_injection_from_lasy_file  # inputs
+    analysis_rz.py  # analysis
+    diags/diag1000252  # output
+    test_rz_laser_injection_from_lasy_file_prepare  # dependency
+)
+
+add_warpx_test(
+    test_rz_laser_injection_from_RZ_lasy_file_prepare  # name
+    RZ  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_rz_laser_injection_from_RZ_lasy_file_prepare.py  # inputs
+    OFF  # analysis
+    OFF  # output
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_rz_laser_injection_from_RZ_lasy_file  # name
+    RZ  # dims
+    1  # nprocs
+    OFF  # eb
+    inputs_test_rz_laser_injection_from_RZ_lasy_file  # inputs
+    analysis_from_RZ_file.py  # analysis
+    diags/diag1000612  # output
+    test_rz_laser_injection_from_RZ_lasy_file_prepare  # dependency
+)
diff --git a/Examples/Tests/laser_injection_from_file/analysis_1d.py b/Examples/Tests/laser_injection_from_file/analysis_1d.py
index e9bab5e8783..1b5f209cb91 100755
--- a/Examples/Tests/laser_injection_from_file/analysis_1d.py
+++ b/Examples/Tests/laser_injection_from_file/analysis_1d.py
@@ -9,14 +9,10 @@


 # This file is part of the WarpX automated test suite. It is used to test the
-# injection of a laser pulse from an external lasy file.
-#
-# - Generate an input lasy file with a gaussian laser pulse.
-# - Run the WarpX simulation for time T, when the pulse is fully injected
+# injection of a laser pulse from an external lasy file:
 # - Compute the theory for laser envelope at time T
 # - Compare theory and simulation in 1D, for both envelope and central frequency

-import glob
 import os
 import sys

@@ -61,94 +57,61 @@ def gauss_env(T, Z):
     return E_max * np.real(np.exp(exp_arg))


-def do_analysis(fname, compname, steps):
-    ds = yt.load(fname)
-    dt = ds.current_time.to_value() / steps
-
-    z = np.linspace(
-        ds.domain_left_edge[0].v, ds.domain_right_edge[0].v, ds.domain_dimensions[0]
-    )
-
-    # Compute the theory for envelope
-    env_theory = gauss_env(+t_c - ds.current_time.to_value(), z) + gauss_env(
-        -t_c + ds.current_time.to_value(), z
-    )
-
-    # Read laser field in PIC simulation, and compute envelope
-    all_data_level_0 = ds.covering_grid(
-        level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions
-    )
-    F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze()
-    env = abs(hilbert(F_laser))
-
-    # Plot results
-    plt.figure(figsize=(8, 8))
-    plt.subplot(221)
-    plt.title("PIC field")
-    plt.plot(z, F_laser)
-    plt.subplot(222)
-    plt.title("PIC envelope")
-    plt.plot(z, env)
-    plt.subplot(223)
-    plt.title("Theory envelope")
-    plt.plot(z, env_theory)
-    plt.subplot(224)
-    plt.title("Difference")
-    plt.plot(z, env - env_theory)
-
-    plt.tight_layout()
-    plt.savefig(compname, bbox_inches="tight")
-
-    relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env))
-    print("Relative error envelope: ", relative_error_env)
-    assert relative_error_env < relative_error_threshold
-
-    fft_F_laser = np.fft.fftn(F_laser)
-
-    freq_z = np.fft.fftfreq(F_laser.shape[0], dt)
-
-    pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape)
-
-    freq = np.abs(freq_z[pos_max[0]])
-    exp_freq = c / wavelength
-    relative_error_freq = np.abs(freq - exp_freq) / exp_freq
-    print("Relative error frequency: ", relative_error_freq)
-    assert relative_error_freq < relative_error_threshold
-
-
-def launch_analysis(executable):
-    os.system(
-        "./" + executable + " inputs.1d_test diag1.file_prefix=diags/plotfiles/plt"
-    )
-    do_analysis("diags/plotfiles/plt000251/", "comp_unf.pdf", 251)
-
-
-def main():
-    from lasy.laser import Laser
-    from lasy.profiles import GaussianProfile
-
-    # Create a laser using lasy
-    pol = (1, 0)
-    profile = GaussianProfile(wavelength, pol, laser_energy, w0, tt, t_peak=0)
-    dim = "xyt"
-    lo = (-25e-6, -25e-6, -20e-15)
-    hi = (+25e-6, +25e-6, +20e-15)
-    npoints = (100, 100, 100)
-    laser = Laser(dim, lo, hi, npoints, profile)
-    laser.normalize(laser_energy, kind="energy")
-    laser.write_to_file("gaussianlaser3d")
-    executables = glob.glob("*.ex")
-    if len(executables) == 1:
-        launch_analysis(executables[0])
-    else:
-        assert False
-
-    # Do the checksum test
-    filename_end = "diags/plotfiles/plt000251/"
-    test_name = os.path.split(os.getcwd())[1]
-    checksumAPI.evaluate_checksum(test_name, filename_end)
-    print("Passed")
-
-
-if __name__ == "__main__":
-    main()
+filename = sys.argv[1]
+compname = "comp_unf.pdf"
+steps = 251
+ds = yt.load(filename)
+dt = ds.current_time.to_value() / steps
+
+z = np.linspace(
+    ds.domain_left_edge[0].v, ds.domain_right_edge[0].v, ds.domain_dimensions[0]
+)
+
+# Compute the theory for envelope
+env_theory = gauss_env(+t_c - ds.current_time.to_value(), z) + gauss_env(
+    -t_c + ds.current_time.to_value(), z
+)
+
+# Read laser field in PIC simulation, and compute envelope
+all_data_level_0 = ds.covering_grid(
+    level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions
+)
+F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze()
+env = abs(hilbert(F_laser))
+
+# Plot results
+plt.figure(figsize=(8, 8))
+plt.subplot(221)
+plt.title("PIC field")
+plt.plot(z, F_laser)
+plt.subplot(222)
+plt.title("PIC envelope")
+plt.plot(z, env)
+plt.subplot(223)
+plt.title("Theory envelope")
+plt.plot(z, env_theory)
+plt.subplot(224)
+plt.title("Difference")
+plt.plot(z, env - env_theory)
+
+plt.tight_layout()
+plt.savefig(compname, bbox_inches="tight")
+
+relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env))
+print("Relative error envelope: ", relative_error_env)
+assert relative_error_env < relative_error_threshold
+
+fft_F_laser = np.fft.fftn(F_laser)
+
+freq_z = np.fft.fftfreq(F_laser.shape[0], dt)
+
+pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape)
+
+freq = np.abs(freq_z[pos_max[0]])
+exp_freq = c / wavelength
+relative_error_freq = np.abs(freq - exp_freq) / exp_freq
+print("Relative error frequency: ", relative_error_freq)
+assert relative_error_freq < relative_error_threshold
+
+test_name = os.path.split(os.getcwd())[1]
+checksumAPI.evaluate_checksum(test_name, filename)
diff --git a/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py b/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py
index 279b29f14ce..89c0ea3c57c 100755
--- a/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py
+++ b/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py
@@ -9,14 +9,10 @@


 # This file is part of the WarpX automated test suite. It is used to test the
-# injection of a laser pulse from an external lasy file.
-#
-# - Generate an input lasy file with a gaussian laser pulse.
-# - Run the WarpX simulation for time T, when the pulse is fully injected
+# injection of a laser pulse from an external lasy file:
 # - Compute the theory for laser envelope at time T
 # - Compare theory and simulation in 1D, for both envelope and central frequency

-import glob
 import os
 import sys

@@ -61,99 +57,62 @@ def gauss_env(T, Z):
     return E_max * np.real(np.exp(exp_arg))


-def do_analysis(fname, compname):
-    ds = yt.load(fname)
-    dz = (ds.domain_right_edge[0].v - ds.domain_left_edge[0].v) / ds.domain_dimensions[
-        0
-    ]
-    dt = dz / c
-
-    z = np.linspace(
-        ds.domain_left_edge[0].v, ds.domain_right_edge[0].v, ds.domain_dimensions[0]
-    )
-
-    # Compute the theory for envelope
-    env_theory = gauss_env(+t_c - ds.current_time.to_value(), z) + gauss_env(
-        -t_c + ds.current_time.to_value(), z
-    )
-
-    # Read laser field in PIC simulation, and compute envelope
-    all_data_level_0 = ds.covering_grid(
-        level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions
-    )
-    F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze()
-    env = abs(hilbert(F_laser))
-
-    # Plot results
-    plt.figure(figsize=(8, 8))
-    plt.subplot(221)
-    plt.title("PIC field")
-    plt.plot(z, F_laser)
-    plt.subplot(222)
-    plt.title("PIC envelope")
-    plt.plot(z, env)
-    plt.subplot(223)
-    plt.title("Theory envelope")
-    plt.plot(z, env_theory)
-    plt.subplot(224)
-    plt.title("Difference")
-    plt.plot(z, env - env_theory)
-
-    plt.tight_layout()
-    plt.savefig(compname, bbox_inches="tight")
-
-    relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env))
-    print("Relative error envelope: ", relative_error_env)
-    assert relative_error_env < relative_error_threshold
-
-    fft_F_laser = np.fft.fftn(F_laser)
-
-    freq_z = np.fft.fftfreq(F_laser.shape[0], dt)
-
-    pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape)
-
-    freq = np.abs(freq_z[pos_max[0]])
-    exp_freq = c / wavelength
-    relative_error_freq = np.abs(freq - exp_freq) / exp_freq
-    print("Relative error frequency: ", relative_error_freq)
-    assert relative_error_freq < relative_error_threshold
-
-
-def launch_analysis(executable):
-    os.system(
-        "./"
-        + executable
-        + " inputs.1d_boost_test diag1.file_prefix=diags/plotfiles/plt"
-    )
-    do_analysis("diags/plotfiles/plt000001/", "comp_unf.pdf")
-
-
-def main():
-    from lasy.laser import Laser
-    from lasy.profiles import GaussianProfile
-
-    # Create a laser using lasy
-    pol = (1, 0)
-    profile = GaussianProfile(wavelength, pol, laser_energy, w0, tt, t_peak=0)
-    dim = "xyt"
-    lo = (-25e-6, -25e-6, -20e-15)
-    hi = (+25e-6, +25e-6, +20e-15)
-    npoints = (100, 100, 100)
-    laser = Laser(dim, lo, hi, npoints, profile)
-    laser.normalize(laser_energy, kind="energy")
-    laser.write_to_file("gaussianlaser3d")
-    executables = glob.glob("*.ex")
-    if len(executables) == 1:
-        launch_analysis(executables[0])
-    else:
-        assert False
-
-    # Do the checksum test
-    filename_end = "diags/plotfiles/plt000001/"
-    test_name = os.path.split(os.getcwd())[1]
-    checksumAPI.evaluate_checksum(test_name, filename_end)
-    print("Passed")
-
-
-if __name__ == "__main__":
-    main()
+filename = sys.argv[1]
+compname = "comp_unf.pdf"
+ds = yt.load(filename)
+dz = (ds.domain_right_edge[0].v - ds.domain_left_edge[0].v) / ds.domain_dimensions[0]
+dt = dz / c
+
+z = np.linspace(
+    ds.domain_left_edge[0].v, ds.domain_right_edge[0].v, ds.domain_dimensions[0]
+)
+
+# Compute the theory for envelope
+env_theory = gauss_env(+t_c - ds.current_time.to_value(), z) + gauss_env(
+    -t_c + ds.current_time.to_value(), z
+)
+
+# Read laser field in PIC simulation, and compute envelope
+all_data_level_0 = ds.covering_grid(
+    level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions
+)
+F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze()
+env = abs(hilbert(F_laser))
+
+# Plot results
+plt.figure(figsize=(8, 8))
+plt.subplot(221)
+plt.title("PIC field")
+plt.plot(z, F_laser)
+plt.subplot(222)
+plt.title("PIC envelope")
+plt.plot(z, env)
+plt.subplot(223)
+plt.title("Theory envelope")
+plt.plot(z, env_theory)
+plt.subplot(224)
+plt.title("Difference")
+plt.plot(z, env - env_theory)
+
+plt.tight_layout()
+plt.savefig(compname, bbox_inches="tight")
+
+relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env))
+print("Relative error envelope: ", relative_error_env)
+assert relative_error_env < relative_error_threshold
+
+fft_F_laser = np.fft.fftn(F_laser)
+
+freq_z = np.fft.fftfreq(F_laser.shape[0], dt)
+
+pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape)
+
+freq = np.abs(freq_z[pos_max[0]])
+exp_freq = c / wavelength
+relative_error_freq = np.abs(freq - exp_freq) / exp_freq
+print("Relative error frequency: ", relative_error_freq)
+assert relative_error_freq < relative_error_threshold
+
+# Do the checksum test
+test_name = os.path.split(os.getcwd())[1]
+checksumAPI.evaluate_checksum(test_name, filename)
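The refactored analysis scripts above drop the launch_analysis/main machinery: the test runner now passes the plotfile path as sys.argv[1], and the checksum is evaluated against that same path. The central-frequency check they all share locates the FFT peak of the raw field and compares it to c/wavelength. Below is a standalone sketch of that check on a synthetic signal; the sampling step and array size are made up for illustration and are not taken from the tests (which do use a 1 um wavelength):

    # fft_peak_check.py - standalone sketch of the frequency check, not part of this patch
    import numpy as np

    c = 299792458.0
    wavelength = 1.0e-6            # the tests use a 1 um wavelength
    exp_freq = c / wavelength      # expected central frequency

    dt = 1.0e-17                   # synthetic sampling step (illustrative)
    t = np.arange(4096) * dt
    # Gaussian-modulated carrier centered in the sampling window
    field = np.sin(2 * np.pi * exp_freq * t) * np.exp(
        -((t - t.mean()) ** 2) / (2.0e-15) ** 2
    )

    fft_field = np.fft.fftn(field)
    freqs = np.fft.fftfreq(field.shape[0], dt)
    pos_max = np.unravel_index(np.abs(fft_field).argmax(), fft_field.shape)
    freq = np.abs(freqs[pos_max[0]])

    relative_error_freq = np.abs(freq - exp_freq) / exp_freq
    print("Relative error frequency:", relative_error_freq)
    assert relative_error_freq < 0.065   # same threshold as the tests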
diff --git a/Examples/Tests/laser_injection_from_file/analysis_2d.py b/Examples/Tests/laser_injection_from_file/analysis_2d.py
index 18c178cea15..ab5649e968f 100755
--- a/Examples/Tests/laser_injection_from_file/analysis_2d.py
+++ b/Examples/Tests/laser_injection_from_file/analysis_2d.py
@@ -9,14 +9,10 @@


 # This file is part of the WarpX automated test suite. It is used to test the
-# injection of a laser pulse from an external lasy file.
-#
-# - Generate an input lasy file with a gaussian laser pulse.
-# - Run the WarpX simulation for time T, when the pulse is fully injected
+# injection of a laser pulse from an external lasy file:
 # - Compute the theory for laser envelope at time T
 # - Compare theory and simulation in 2D, for both envelope and central frequency

-import glob
 import os
 import sys

@@ -66,115 +62,82 @@ def gauss_env(T, X, Y, Z):
     return E_max * np.real(np.exp(exp_arg))


-def do_analysis(fname, compname, steps):
-    ds = yt.load(fname)
-    dt = ds.current_time.to_value() / steps
-
-    # Define 3D meshes
-    x = np.linspace(
-        ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0]
-    ).v
-    y = np.linspace(
-        ds.domain_left_edge[1], ds.domain_right_edge[1], ds.domain_dimensions[1]
-    ).v
-    z = np.linspace(
-        ds.domain_left_edge[ds.dimensionality - 1],
-        ds.domain_right_edge[ds.dimensionality - 1],
-        ds.domain_dimensions[ds.dimensionality - 1],
-    ).v
-    X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing="ij")
-
-    # Compute the theory for envelope
-    env_theory = gauss_env(+t_c - ds.current_time.to_value(), X, Y, Z) + gauss_env(
-        -t_c + ds.current_time.to_value(), X, Y, Z
-    )
+filename = sys.argv[1]
+compname = "comp_unf.pdf"
+steps = 251
+ds = yt.load(filename)
+dt = ds.current_time.to_value() / steps
+
+# Define 3D meshes
+x = np.linspace(
+    ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0]
+).v
+y = np.linspace(
+    ds.domain_left_edge[1], ds.domain_right_edge[1], ds.domain_dimensions[1]
+).v
+z = np.linspace(
+    ds.domain_left_edge[ds.dimensionality - 1],
+    ds.domain_right_edge[ds.dimensionality - 1],
+    ds.domain_dimensions[ds.dimensionality - 1],
+).v
+X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing="ij")
+
+# Compute the theory for envelope
+env_theory = gauss_env(+t_c - ds.current_time.to_value(), X, Y, Z) + gauss_env(
+    -t_c + ds.current_time.to_value(), X, Y, Z
+)

-    # Read laser field in PIC simulation, and compute envelope
-    all_data_level_0 = ds.covering_grid(
-        level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions
-    )
-    F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze()
-    env = abs(hilbert(F_laser))
-    extent = [
-        ds.domain_left_edge[ds.dimensionality - 1],
-        ds.domain_right_edge[ds.dimensionality - 1],
-        ds.domain_left_edge[0],
-        ds.domain_right_edge[0],
-    ]
-    env_theory_slice = env_theory[:, env_theory.shape[1] // 2, :]
-
-    # Plot results
-    plt.figure(figsize=(8, 6))
-    plt.subplot(221)
-    plt.title("PIC field")
-    plt.imshow(F_laser, extent=extent)
-    plt.colorbar()
-    plt.subplot(222)
-    plt.title("PIC envelope")
-    plt.imshow(env, extent=extent)
-    plt.colorbar()
-    plt.subplot(223)
-    plt.title("Theory envelope")
-    plt.imshow(env_theory_slice, extent=extent)
-    plt.colorbar()
-    plt.subplot(224)
-    plt.title("Difference")
-    plt.imshow(env - env_theory_slice, extent=extent)
-    plt.colorbar()
-    plt.tight_layout()
-    plt.savefig(compname, bbox_inches="tight")
-
-    relative_error_env = np.sum(np.abs(env - env_theory_slice)) / np.sum(np.abs(env))
-    print("Relative error envelope: ", relative_error_env)
-    assert relative_error_env < relative_error_threshold
-
-    fft_F_laser = np.fft.fftn(F_laser)
-
-    freq_x = np.fft.fftfreq(F_laser.shape[0], dt)
-    freq_z = np.fft.fftfreq(F_laser.shape[1], dt)
-
-    pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape)
-
-    freq = np.sqrt((freq_x[pos_max[0]]) ** 2 + (freq_z[pos_max[1]]) ** 2)
-    exp_freq = c / wavelength
-    relative_error_freq = np.abs(freq - exp_freq) / exp_freq
-    print("Relative error frequency: ", relative_error_freq)
-    assert relative_error_freq < relative_error_threshold
-
-
-def launch_analysis(executable):
-    os.system(
-        "./" + executable + " inputs.2d_test diag1.file_prefix=diags/plotfiles/plt"
-    )
-    do_analysis("diags/plotfiles/plt000251/", "comp_unf.pdf", 251)
-
-
-def main():
-    from lasy.laser import Laser
-    from lasy.profiles import GaussianProfile
-
-    # Create a laser using lasy
-    pol = (1, 0)
-    profile = GaussianProfile(wavelength, pol, laser_energy, w0, tt, t_peak=0)
-    dim = "xyt"
-    lo = (-25e-6, -25e-6, -20e-15)
-    hi = (+25e-6, +25e-6, +20e-15)
-    npoints = (100, 100, 100)
-    laser = Laser(dim, lo, hi, npoints, profile)
-    laser.normalize(laser_energy, kind="energy")
-    laser.write_to_file("gaussianlaser3d")
-    executables = glob.glob("*.ex")
-    if len(executables) == 1:
-        launch_analysis(executables[0])
-    else:
-        assert False
-
-    # Do the checksum test
-    filename_end = "diags/plotfiles/plt000251/"
-    test_name = os.path.split(os.getcwd())[1]
-    checksumAPI.evaluate_checksum(test_name, filename_end)
-    print("Passed")
-
-
-if __name__ == "__main__":
-    main()
+# Read laser field in PIC simulation, and compute envelope
+all_data_level_0 = ds.covering_grid(
+    level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions
+)
+F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze()
+env = abs(hilbert(F_laser))
+extent = [
+    ds.domain_left_edge[ds.dimensionality - 1],
+    ds.domain_right_edge[ds.dimensionality - 1],
+    ds.domain_left_edge[0],
+    ds.domain_right_edge[0],
+]
+env_theory_slice = env_theory[:, env_theory.shape[1] // 2, :]
+
+# Plot results
+plt.figure(figsize=(8, 6))
+plt.subplot(221)
+plt.title("PIC field")
+plt.imshow(F_laser, extent=extent)
+plt.colorbar()
+plt.subplot(222)
+plt.title("PIC envelope")
+plt.imshow(env, extent=extent)
+plt.colorbar()
+plt.subplot(223)
+plt.title("Theory envelope")
+plt.imshow(env_theory_slice, extent=extent)
+plt.colorbar()
+plt.subplot(224)
+plt.title("Difference")
+plt.imshow(env - env_theory_slice, extent=extent)
+plt.colorbar()
+plt.tight_layout()
+plt.savefig(compname, bbox_inches="tight")
+
+relative_error_env = np.sum(np.abs(env - env_theory_slice)) / np.sum(np.abs(env))
+print("Relative error envelope: ", relative_error_env)
+assert relative_error_env < relative_error_threshold
+
+fft_F_laser = np.fft.fftn(F_laser)
+
+freq_x = np.fft.fftfreq(F_laser.shape[0], dt)
+freq_z = np.fft.fftfreq(F_laser.shape[1], dt)
+
+pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape)
+
+freq = np.sqrt((freq_x[pos_max[0]]) ** 2 + (freq_z[pos_max[1]]) ** 2)
+exp_freq = c / wavelength
+relative_error_freq = np.abs(freq - exp_freq) / exp_freq
+print("Relative error frequency: ", relative_error_freq)
+assert relative_error_freq < relative_error_threshold
+
+test_name = os.path.split(os.getcwd())[1]
+checksumAPI.evaluate_checksum(test_name, filename)
diff --git a/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py b/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py
index 44030261732..bcb13bba410 100755
--- a/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py
+++ b/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py
@@ -9,14 +9,10 @@


 # This file is part of the WarpX automated test suite. It is used to test the
-# injection of a laser pulse from an external binary file.
-#
-# - Generate an input binary file with a gaussian laser pulse.
-# - Run the WarpX simulation for time T, when the pulse is fully injected
+# injection of a laser pulse from an external binary file:
 # - Compute the theory for laser envelope at time T
 # - Compare theory and simulation in 2D, for both envelope and central frequency

-import glob
 import os
 import sys

@@ -62,30 +58,6 @@
 xcoords = np.linspace(x_l, x_r, x_points)


-def gauss(T, X, Y, opt):
-    """Compute the electric field for a Gaussian laser pulse.
-    This is used to write the binary input file.
-    """
-
-    k0 = 2.0 * np.pi / wavelength
-    inv_tau2 = 1.0 / tt / tt
-    osc_phase = k0 * c * (T - t_c)
-
-    diff_factor = 1.0 + 1.0j * foc_dist * 2 / (k0 * w0 * w0)
-    inv_w_2 = 1.0 / (w0 * w0 * diff_factor)
-
-    pre_fact = np.exp(1.0j * osc_phase)
-
-    if opt == "3d":
-        pre_fact = pre_fact / diff_factor
-    else:
-        pre_fact = pre_fact / np.sqrt(diff_factor)
-
-    exp_arg = -(X * X + Y * Y) * inv_w_2 - inv_tau2 * (T - t_c) * (T - t_c)
-
-    return np.real(pre_fact * np.exp(exp_arg))
-
-
 # Function for the envelope
 def gauss_env(T, XX, ZZ):
     """Function to compute the theory for the envelope"""
@@ -99,134 +71,80 @@ def gauss_env(T, XX, ZZ):
     return E_max * np.real(np.exp(exp_arg))


-def write_file(fname, x, y, t, E):
-    """For a given filename fname, space coordinates x and y, time coordinate t
-    and field E, write a WarpX-compatible input binary file containing the
-    profile of the laser pulse. This function should be used in the case
-    of a uniform spatio-temporal mesh
-    """
-
-    with open(fname, "wb") as file:
-        flag_unif = 1
-        file.write(flag_unif.to_bytes(1, byteorder="little"))
-        file.write((len(t)).to_bytes(4, byteorder="little", signed=False))
-        file.write((len(x)).to_bytes(4, byteorder="little", signed=False))
-        file.write((len(y)).to_bytes(4, byteorder="little", signed=False))
-        file.write(t[0].tobytes())
-        file.write(t[-1].tobytes())
-        file.write(x[0].tobytes())
-        file.write(x[-1].tobytes())
-        if len(y) == 1:
-            file.write(y[0].tobytes())
-        else:
-            file.write(y[0].tobytes())
-            file.write(y[-1].tobytes())
-        file.write(E.tobytes())
-
-
-def create_gaussian_2d():
-    T, X, Y = np.meshgrid(tcoords, xcoords, np.array([0.0]), indexing="ij")
-    E_t = gauss(T, X, Y, "2d")
-    write_file("gauss_2d", xcoords, np.array([0.0]), tcoords, E_t)
-
-
-def do_analysis(fname, compname, steps):
-    ds = yt.load(fname)
-
-    dt = ds.current_time.to_value() / steps
-
-    # Define 2D meshes
-    x = np.linspace(
-        ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0]
-    ).v
-    z = np.linspace(
-        ds.domain_left_edge[ds.dimensionality - 1],
-        ds.domain_right_edge[ds.dimensionality - 1],
-        ds.domain_dimensions[ds.dimensionality - 1],
-    ).v
-    X, Z = np.meshgrid(x, z, sparse=False, indexing="ij")
-
-    # Compute the theory for envelope
-    env_theory = gauss_env(+t_c - ds.current_time.to_value(), X, Z) + gauss_env(
-        -t_c + ds.current_time.to_value(), X, Z
-    )
-
-    # Read laser field in PIC simulation, and compute envelope
-    all_data_level_0 = ds.covering_grid(
-        level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions
-    )
-    F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze()
-    env = abs(hilbert(F_laser))
-    extent = [
-        ds.domain_left_edge[ds.dimensionality - 1],
-        ds.domain_right_edge[ds.dimensionality - 1],
-        ds.domain_left_edge[0],
-        ds.domain_right_edge[0],
-    ]
-
-    # Plot results
-    plt.figure(figsize=(8, 6))
-    plt.subplot(221)
-    plt.title("PIC field")
-    plt.imshow(F_laser, extent=extent)
-    plt.colorbar()
-    plt.subplot(222)
-    plt.title("PIC envelope")
-    plt.imshow(env, extent=extent)
-    plt.colorbar()
-    plt.subplot(223)
-    plt.title("Theory envelope")
-    plt.imshow(env_theory, extent=extent)
-    plt.colorbar()
-    plt.subplot(224)
-    plt.title("Difference")
-    plt.imshow(env - env_theory, extent=extent)
-    plt.colorbar()
-    plt.tight_layout()
-    plt.savefig(compname, bbox_inches="tight")
-
-    relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env))
-    print("Relative error envelope: ", relative_error_env)
-    assert relative_error_env < relative_error_threshold
-
-    fft_F_laser = np.fft.fft2(F_laser)
-
-    freq_rows = np.fft.fftfreq(F_laser.shape[0], dt)
-    freq_cols = np.fft.fftfreq(F_laser.shape[1], dt)
-
-    pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape)
-
-    freq = np.sqrt((freq_rows[pos_max[0]]) ** 2 + (freq_cols[pos_max[1]] ** 2))
-    exp_freq = c / wavelength
-
-    relative_error_freq = np.abs(freq - exp_freq) / exp_freq
-    print("Relative error frequency: ", relative_error_freq)
-    assert relative_error_freq < relative_error_threshold
-
-
-def launch_analysis(executable):
-    create_gaussian_2d()
-    os.system(
-        "./"
-        + executable
-        + " inputs.2d_test_binary diag1.file_prefix=diags/plotfiles/plt"
-    )
-    do_analysis("diags/plotfiles/plt000250/", "comp_unf.pdf", 250)
-
-
-def main():
-    executables = glob.glob("*.ex")
-    if len(executables) == 1:
-        launch_analysis(executables[0])
-    else:
-        assert False
-
-    # Do the checksum test
-    filename_end = "diags/plotfiles/plt000250/"
-    test_name = "LaserInjectionFromBINARYFile"
-    checksumAPI.evaluate_checksum(test_name, filename_end)
-    print("Passed")
-
-
-if __name__ == "__main__":
-    main()
+filename = sys.argv[1]
+compname = "comp_unf.pdf"
+steps = 250
+ds = yt.load(filename)
+
+dt = ds.current_time.to_value() / steps
+
+# Define 2D meshes
+x = np.linspace(
+    ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0]
+).v
+z = np.linspace(
+    ds.domain_left_edge[ds.dimensionality - 1],
+    ds.domain_right_edge[ds.dimensionality - 1],
+    ds.domain_dimensions[ds.dimensionality - 1],
+).v
+X, Z = np.meshgrid(x, z, sparse=False, indexing="ij")
+
+# Compute the theory for envelope
+env_theory = gauss_env(+t_c - ds.current_time.to_value(), X, Z) + gauss_env(
+    -t_c + ds.current_time.to_value(), X, Z
+)
+
+# Read laser field in PIC simulation, and compute envelope
+all_data_level_0 = ds.covering_grid(
+    level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions
+)
+F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze()
+env = abs(hilbert(F_laser))
+extent = [
+    ds.domain_left_edge[ds.dimensionality - 1],
+    ds.domain_right_edge[ds.dimensionality - 1],
+    ds.domain_left_edge[0],
+    ds.domain_right_edge[0],
+]
+
+# Plot results
+plt.figure(figsize=(8, 6))
+plt.subplot(221)
+plt.title("PIC field")
+plt.imshow(F_laser, extent=extent)
+plt.colorbar()
+plt.subplot(222)
+plt.title("PIC envelope")
+plt.imshow(env, extent=extent)
+plt.colorbar()
+plt.subplot(223)
+plt.title("Theory envelope")
+plt.imshow(env_theory, extent=extent)
+plt.colorbar()
+plt.subplot(224)
+plt.title("Difference")
+plt.imshow(env - env_theory, extent=extent)
+plt.colorbar()
+plt.tight_layout()
+plt.savefig(compname, bbox_inches="tight")
+
+relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env))
+print("Relative error envelope: ", relative_error_env)
+assert relative_error_env < relative_error_threshold
+
+fft_F_laser = np.fft.fft2(F_laser)
+
+freq_rows = np.fft.fftfreq(F_laser.shape[0], dt)
+freq_cols = np.fft.fftfreq(F_laser.shape[1], dt)
+
+pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape)
+
+freq = np.sqrt((freq_rows[pos_max[0]]) ** 2 + (freq_cols[pos_max[1]] ** 2))
+exp_freq = c / wavelength
+
+relative_error_freq = np.abs(freq - exp_freq) / exp_freq
+print("Relative error frequency: ", relative_error_freq)
+assert relative_error_freq < relative_error_threshold
+
+test_name = os.path.split(os.getcwd())[1]
+checksumAPI.evaluate_checksum(test_name, filename)
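The gauss/write_file/create_gaussian_2d helpers removed above now belong to the corresponding *_prepare.py step (registered earlier with its own add_warpx_test entry and wired in through the dependency column). For reference, here is a minimal writer for the same uniform-mesh binary layout, reconstructed from the removed write_file; the file name and profile values are placeholders, and the actual generation happens in the prepare script:

    # make_binary_laser.py - illustrative sketch based on the removed helper
    import numpy as np

    def write_uniform_laser_file(fname, x, y, t, E):
        # Layout (from the removed write_file): a 1-byte "uniform mesh" flag,
        # the t/x/y sizes as 4-byte little-endian unsigned ints, the first and
        # last coordinate of each axis as raw doubles, then the field array.
        with open(fname, "wb") as file:
            flag_unif = 1
            file.write(flag_unif.to_bytes(1, byteorder="little"))
            file.write(len(t).to_bytes(4, byteorder="little", signed=False))
            file.write(len(x).to_bytes(4, byteorder="little", signed=False))
            file.write(len(y).to_bytes(4, byteorder="little", signed=False))
            file.write(t[0].tobytes())
            file.write(t[-1].tobytes())
            file.write(x[0].tobytes())
            file.write(x[-1].tobytes())
            file.write(y[0].tobytes())
            if len(y) > 1:
                file.write(y[-1].tobytes())
            file.write(E.tobytes())

    # Tiny placeholder profile on a (t, x, y) = (100, 50, 1) mesh.
    t = np.linspace(0.0, 4.0e-14, 100)
    x = np.linspace(-12.0e-6, 12.0e-6, 50)
    y = np.array([0.0])
    T, X, Y = np.meshgrid(t, x, y, indexing="ij")
    E = np.exp(-((X / 6.0e-6) ** 2) - ((T - 2.0e-14) / 1.0e-14) ** 2)
    write_uniform_laser_file("gauss_2d_sketch", x, y, t, E)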
diff --git a/Examples/Tests/laser_injection_from_file/analysis_3d.py b/Examples/Tests/laser_injection_from_file/analysis_3d.py
index 59fe2c6ce8a..7d30af28639 100755
--- a/Examples/Tests/laser_injection_from_file/analysis_3d.py
+++ b/Examples/Tests/laser_injection_from_file/analysis_3d.py
@@ -9,14 +9,10 @@


 # This file is part of the WarpX automated test suite. It is used to test the
-# injection of a laser pulse from an external lasy file.
-#
-# - Generate an input lasy file with a gaussian laser pulse.
-# - Run the WarpX simulation for time T, when the pulse is fully injected
+# injection of a laser pulse from an external lasy file:
 # - Compute the theory for laser envelope at time T
 # - Compare theory and simulation in 3D, for both envelope and central frequency

-import glob
 import os
 import sys

@@ -66,123 +62,88 @@ def gauss_env(T, X, Y, Z):
     return E_max * np.real(np.exp(exp_arg))


-def do_analysis(fname, compname, steps):
-    ds = yt.load(fname)
-    dt = ds.current_time.to_value() / steps
-
-    # Define 3D meshes
-    x = np.linspace(
-        ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0]
-    ).v
-    y = np.linspace(
-        ds.domain_left_edge[1], ds.domain_right_edge[1], ds.domain_dimensions[1]
-    ).v
-    z = np.linspace(
-        ds.domain_left_edge[ds.dimensionality - 1],
-        ds.domain_right_edge[ds.dimensionality - 1],
-        ds.domain_dimensions[ds.dimensionality - 1],
-    ).v
-    X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing="ij")
-
-    # Compute the theory for envelope
-    env_theory = gauss_env(+t_c - ds.current_time.to_value(), X, Y, Z) + gauss_env(
-        -t_c + ds.current_time.to_value(), X, Y, Z
-    )
-
-    # Read laser field in PIC simulation, and compute envelope
-    all_data_level_0 = ds.covering_grid(
-        level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions
-    )
-    F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze()
-    env = abs(hilbert(F_laser))
-    extent = [
-        ds.domain_left_edge[ds.dimensionality - 1],
-        ds.domain_right_edge[ds.dimensionality - 1],
-        ds.domain_left_edge[0],
-        ds.domain_right_edge[0],
-    ]
-
-    F_slice = F_laser[:, F_laser.shape[1] // 2, :]
-    env_slice = env[:, env.shape[1] // 2, :]
-    env_theory_slice = env_theory[:, env_theory.shape[1] // 2, :]
-
-    # Plot results
-    plt.figure(figsize=(8, 6))
-    plt.subplot(221)
-    plt.title("PIC field")
-    plt.imshow(F_slice, extent=extent)
-    plt.colorbar()
-    plt.subplot(222)
-    plt.title("PIC envelope")
-    plt.imshow(env_slice, extent=extent)
-    plt.colorbar()
-    plt.subplot(223)
-    plt.title("Theory envelope")
-    plt.imshow(env_theory_slice, extent=extent)
-    plt.colorbar()
-    plt.subplot(224)
-    plt.title("Difference")
-    plt.imshow(env_slice - env_theory_slice, extent=extent)
-    plt.colorbar()
-    plt.tight_layout()
-    plt.savefig(compname, bbox_inches="tight")
-
-    relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env))
-    print("Relative error envelope: ", relative_error_env)
-    assert relative_error_env < relative_error_threshold
-
-    fft_F_laser = np.fft.fftn(F_laser)
-
-    freq_x = np.fft.fftfreq(F_laser.shape[0], dt)
-    freq_y = np.fft.fftfreq(F_laser.shape[1], dt)
-    freq_z = np.fft.fftfreq(F_laser.shape[2], dt)
-
-    pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape)
-
-    freq = np.sqrt(
-        (freq_x[pos_max[0]]) ** 2
-        + (freq_y[pos_max[1]] ** 2)
-        + (freq_z[pos_max[2]]) ** 2
-    )
-    exp_freq = c / wavelength
-    relative_error_freq = np.abs(freq - exp_freq) / exp_freq
-    print("Relative error frequency: ", relative_error_freq)
-    assert relative_error_freq < relative_error_threshold
+filename = sys.argv[1]
+compname = "comp_unf.pdf"
+steps = 251
+ds = yt.load(filename)
+dt = ds.current_time.to_value() / steps
+
+# Define 3D meshes
+x = np.linspace(
+    ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0]
+).v
+y = np.linspace(
+    ds.domain_left_edge[1], ds.domain_right_edge[1], ds.domain_dimensions[1]
+).v
+z = np.linspace(
+    ds.domain_left_edge[ds.dimensionality - 1],
+    ds.domain_right_edge[ds.dimensionality - 1],
+    ds.domain_dimensions[ds.dimensionality - 1],
+).v
+X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing="ij")
+
+# Compute the theory for envelope
+env_theory = gauss_env(+t_c - ds.current_time.to_value(), X, Y, Z) + gauss_env(
+    -t_c + ds.current_time.to_value(), X, Y, Z
+)

+# Read laser field in PIC simulation, and compute envelope
+all_data_level_0 = ds.covering_grid(
+    level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions
+)
+F_laser = all_data_level_0["boxlib", "Ey"].v.squeeze()
+env = abs(hilbert(F_laser))
+extent = [
+    ds.domain_left_edge[ds.dimensionality - 1],
+    ds.domain_right_edge[ds.dimensionality - 1],
+    ds.domain_left_edge[0],
+    ds.domain_right_edge[0],
+]
+
+F_slice = F_laser[:, F_laser.shape[1] // 2, :]
+env_slice = env[:, env.shape[1] // 2, :]
+env_theory_slice = env_theory[:, env_theory.shape[1] // 2, :]
+
+# Plot results
+plt.figure(figsize=(8, 6))
+plt.subplot(221)
+plt.title("PIC field")
+plt.imshow(F_slice, extent=extent)
+plt.colorbar()
+plt.subplot(222)
+plt.title("PIC envelope")
+plt.imshow(env_slice, extent=extent)
+plt.colorbar()
+plt.subplot(223)
+plt.title("Theory envelope")
+plt.imshow(env_theory_slice, extent=extent)
+plt.colorbar()
+plt.subplot(224)
+plt.title("Difference")
+plt.imshow(env_slice - env_theory_slice, extent=extent)
+plt.colorbar()
+plt.tight_layout()
+plt.savefig(compname, bbox_inches="tight")
+
+relative_error_env = np.sum(np.abs(env - env_theory)) / np.sum(np.abs(env))
+print("Relative error envelope: ", relative_error_env)
+assert relative_error_env < relative_error_threshold
+
+fft_F_laser = np.fft.fftn(F_laser)
+
+freq_x = np.fft.fftfreq(F_laser.shape[0], dt)
+freq_y = np.fft.fftfreq(F_laser.shape[1], dt)
+freq_z = np.fft.fftfreq(F_laser.shape[2], dt)
+
+pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape)
+
+freq = np.sqrt(
+    (freq_x[pos_max[0]]) ** 2 + (freq_y[pos_max[1]] ** 2) + (freq_z[pos_max[2]]) ** 2
+)
+exp_freq = c / wavelength
+relative_error_freq = np.abs(freq - exp_freq) / exp_freq
+print("Relative error frequency: ", relative_error_freq)
+assert relative_error_freq < relative_error_threshold

-def launch_analysis(executable):
-    os.system(
-        "./" + executable + " inputs.3d_test diag1.file_prefix=diags/plotfiles/plt"
-    )
-    do_analysis("diags/plotfiles/plt000251/", "comp_unf.pdf", 251)
-
-
-def main():
-    from lasy.laser import Laser
-    from lasy.profiles import GaussianProfile
-
-    # Create a laser using lasy
-    pol = (1, 0)
-    profile = GaussianProfile(wavelength, pol, laser_energy, w0, tt, t_peak=0)
-    dim = "xyt"
-    lo = (-25e-6, -25e-6, -20e-15)
-    hi = (+25e-6, +25e-6, +20e-15)
-    npoints = (100, 100, 100)
-    laser = Laser(dim, lo, hi, npoints, profile)
-    laser.normalize(laser_energy, kind="energy")
-    laser.write_to_file("gaussianlaser3d")
-    executables = glob.glob("*.ex")
-    if len(executables) == 1:
-        launch_analysis(executables[0])
-    else:
-        assert False
-
-    # Do the checksum test
-    filename_end = "diags/plotfiles/plt000251/"
-    test_name = os.path.split(os.getcwd())[1]
-    checksumAPI.evaluate_checksum(test_name, filename_end)
-    print("Passed")
-
-
-if __name__ == "__main__":
-    main()
+test_name = os.path.split(os.getcwd())[1]
+checksumAPI.evaluate_checksum(test_name, filename)
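All of these analyses recover the pulse envelope the same way: take the analytic signal of the raw field with scipy.signal.hilbert and keep its magnitude. A self-contained check of that technique against a known envelope; the grid, carrier, and envelope width below are illustrative only:

    # hilbert_envelope_check.py - illustrative sketch, not part of this patch
    import numpy as np
    from scipy.signal import hilbert

    z = np.linspace(-60.0e-6, 60.0e-6, 2048)
    envelope = np.exp(-((z / 15.0e-6) ** 2))            # known Gaussian envelope
    field = envelope * np.sin(2 * np.pi * z / 1.0e-6)   # 1 um carrier

    env = np.abs(hilbert(field))                        # analytic-signal magnitude

    relative_error_env = np.sum(np.abs(env - envelope)) / np.sum(np.abs(env))
    print("Relative error envelope:", relative_error_env)
    assert relative_error_env < 0.065                   # same threshold as the tests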
env_theory[:, env_theory.shape[1] // 2, :] - - # Plot results - plt.figure(figsize=(8, 6)) - plt.subplot(221) - plt.title("PIC field") - plt.imshow(F_laser, extent=extent) - plt.colorbar() - plt.subplot(222) - plt.title("PIC envelope") - plt.imshow(env, extent=extent) - plt.colorbar() - plt.subplot(223) - plt.title("Theory envelope") - plt.imshow(env_theory_slice, extent=extent) - plt.colorbar() - plt.subplot(224) - plt.title("Difference") - plt.imshow(env - env_theory_slice, extent=extent) - plt.colorbar() - plt.tight_layout() - plt.savefig(compname, bbox_inches="tight") - - relative_error_env = np.sum(np.abs(env - env_theory_slice)) / np.sum(np.abs(env)) - print("Relative error envelope: ", relative_error_env) - assert relative_error_env < relative_error_threshold - - fft_F_laser = np.fft.fftn(F_laser) - - freq_x = np.fft.fftfreq(F_laser.shape[0], dt) - freq_z = np.fft.fftfreq(F_laser.shape[1], dt) - - pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape) - - freq = np.sqrt((freq_x[pos_max[0]]) ** 2 + (freq_z[pos_max[1]]) ** 2) - exp_freq = c / wavelength - relative_error_freq = np.abs(freq - exp_freq) / exp_freq - print("Relative error frequency: ", relative_error_freq) - assert relative_error_freq < relative_error_threshold - - -def launch_analysis(executable): - os.system( - "./" + executable + " inputs.RZ_test diag1.file_prefix=diags/plotfiles/plt" - ) - do_analysis("diags/plotfiles/plt000252/", "comp_unf.pdf", 252) - - -def main(): - from lasy.laser import Laser - from lasy.profiles import GaussianProfile - - # Create a laser using lasy - pol = (1, 0) - profile = GaussianProfile(wavelength, pol, laser_energy, w0, tt, t_peak=0) - dim = "xyt" - lo = (-25e-6, -25e-6, -20e-15) - hi = (+25e-6, +25e-6, +20e-15) - npoints = (100, 100, 100) - laser = Laser(dim, lo, hi, npoints, profile) - laser.normalize(laser_energy, kind="energy") - laser.write_to_file("gaussianlaser3d") - executables = glob.glob("*.ex") - if len(executables) == 1: - launch_analysis(executables[0]) - else: - assert False - - # Do the checksum test - filename_end = "diags/plotfiles/plt000252/" - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) - print("Passed") - - -if __name__ == "__main__": - main() diff --git a/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py b/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py index 8bc0daea481..72575da96b4 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py +++ b/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py @@ -9,14 +9,10 @@ # This file is part of the WarpX automated test suite. It is used to test the -# injection of a laser pulse from an external lasy file. -# -# - Generate an input lasy file with a Laguerre Gaussian laser pulse. 
-# - Run the WarpX simulation for time T, when the pulse is fully injected +# injection of a laser pulse from an external lasy file: # - Compute the theory for laser envelope at time T # - Compare theory and simulation in RZ, for both envelope and central frequency -import glob import os import sys @@ -73,126 +69,83 @@ def laguerre_env(T, X, Y, Z, p, m): return E_max * np.real(envelope) -def do_analysis(fname, compname, steps): - ds = yt.load(fname) - dt = ds.current_time.to_value() / steps - - # Define 3D meshes - x = np.linspace( - ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0] - ).v - y = np.linspace( - ds.domain_left_edge[1], ds.domain_right_edge[1], ds.domain_dimensions[1] - ).v - z = np.linspace( - ds.domain_left_edge[ds.dimensionality - 1], - ds.domain_right_edge[ds.dimensionality - 1], - ds.domain_dimensions[ds.dimensionality - 1], - ).v - X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing="ij") - - # Compute the theory for envelope - env_theory = laguerre_env( - +t_c - ds.current_time.to_value(), X, Y, Z, p=0, m=1 - ) + laguerre_env(-t_c + ds.current_time.to_value(), X, Y, Z, p=0, m=1) - - # Read laser field in PIC simulation, and compute envelope - all_data_level_0 = ds.covering_grid( - level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions - ) - F_laser = all_data_level_0["boxlib", "Et"].v.squeeze() - env = abs(hilbert(F_laser)) - extent = [ - ds.domain_left_edge[ds.dimensionality - 1], - ds.domain_right_edge[ds.dimensionality - 1], - ds.domain_left_edge[0], - ds.domain_right_edge[0], - ] - - env_theory_slice = env_theory[:, env_theory.shape[1] // 2, :] - - # Plot results - plt.figure(figsize=(8, 6)) - plt.subplot(221) - plt.title("PIC field") - plt.imshow(F_laser, extent=extent) - plt.colorbar() - plt.subplot(222) - plt.title("PIC envelope") - plt.imshow(env, extent=extent) - plt.colorbar() - plt.subplot(223) - plt.title("Theory envelope") - plt.imshow(env_theory_slice, extent=extent) - plt.colorbar() - plt.subplot(224) - plt.title("Difference") - plt.imshow(env - env_theory_slice, extent=extent) - plt.colorbar() - plt.tight_layout() - plt.savefig(compname, bbox_inches="tight") - - relative_error_env = np.sum(np.abs(env - env_theory_slice)) / np.sum(np.abs(env)) - print("Relative error envelope: ", relative_error_env) - assert relative_error_env < relative_error_threshold - - fft_F_laser = np.fft.fftn(F_laser) - - freq_x = np.fft.fftfreq(F_laser.shape[0], dt) - freq_z = np.fft.fftfreq(F_laser.shape[1], dt) - - pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape) - - freq = np.sqrt((freq_x[pos_max[0]]) ** 2 + (freq_z[pos_max[1]]) ** 2) - exp_freq = c / wavelength - relative_error_freq = np.abs(freq - exp_freq) / exp_freq - print("Relative error frequency: ", relative_error_freq) - assert relative_error_freq < relative_error_threshold - - -def launch_analysis(executable): - os.system( - "./" - + executable - + " inputs.from_RZ_file_test diag1.file_prefix=diags/plotfiles/plt" - ) - do_analysis("diags/plotfiles/plt000612/", "comp_unf.pdf", 612) - - -def main(): - from lasy.laser import Laser - from lasy.profiles import CombinedLongitudinalTransverseProfile - from lasy.profiles.longitudinal import GaussianLongitudinalProfile - from lasy.profiles.transverse import LaguerreGaussianTransverseProfile - - # Create a Laguerre Gaussian laser in RZ geometry using lasy - pol = (1, 0) - profile = CombinedLongitudinalTransverseProfile( - wavelength, - pol, - laser_energy, - GaussianLongitudinalProfile(wavelength, tt, t_peak=0), - 
LaguerreGaussianTransverseProfile(w0, p=0, m=1), - ) - dim = "rt" - lo = (0e-6, -20e-15) - hi = (+25e-6, +20e-15) - npoints = (100, 100) - laser = Laser(dim, lo, hi, npoints, profile, n_azimuthal_modes=2) - laser.normalize(laser_energy, kind="energy") - laser.write_to_file("laguerrelaserRZ") - executables = glob.glob("*.ex") - if len(executables) == 1: - launch_analysis(executables[0]) - else: - assert False - - # Do the checksum test - filename_end = "diags/plotfiles/plt000612/" - test_name = "LaserInjectionFromRZLASYFile" - checksumAPI.evaluate_checksum(test_name, filename_end) - print("Passed") - - -if __name__ == "__main__": - main() +filename = sys.argv[1] +compname = "comp_unf.pdf" +steps = 612 +ds = yt.load(filename) +dt = ds.current_time.to_value() / steps + +# Define 3D meshes +x = np.linspace( + ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0] +).v +y = np.linspace( + ds.domain_left_edge[1], ds.domain_right_edge[1], ds.domain_dimensions[1] +).v +z = np.linspace( + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_dimensions[ds.dimensionality - 1], +).v +X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing="ij") + +# Compute the theory for envelope +env_theory = laguerre_env( + +t_c - ds.current_time.to_value(), X, Y, Z, p=0, m=1 +) + laguerre_env(-t_c + ds.current_time.to_value(), X, Y, Z, p=0, m=1) + +# Read laser field in PIC simulation, and compute envelope +all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +F_laser = all_data_level_0["boxlib", "Et"].v.squeeze() +env = abs(hilbert(F_laser)) +extent = [ + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_left_edge[0], + ds.domain_right_edge[0], +] + +env_theory_slice = env_theory[:, env_theory.shape[1] // 2, :] + +# Plot results +plt.figure(figsize=(8, 6)) +plt.subplot(221) +plt.title("PIC field") +plt.imshow(F_laser, extent=extent) +plt.colorbar() +plt.subplot(222) +plt.title("PIC envelope") +plt.imshow(env, extent=extent) +plt.colorbar() +plt.subplot(223) +plt.title("Theory envelope") +plt.imshow(env_theory_slice, extent=extent) +plt.colorbar() +plt.subplot(224) +plt.title("Difference") +plt.imshow(env - env_theory_slice, extent=extent) +plt.colorbar() +plt.tight_layout() +plt.savefig(compname, bbox_inches="tight") + +relative_error_env = np.sum(np.abs(env - env_theory_slice)) / np.sum(np.abs(env)) +print("Relative error envelope: ", relative_error_env) +assert relative_error_env < relative_error_threshold + +fft_F_laser = np.fft.fftn(F_laser) + +freq_x = np.fft.fftfreq(F_laser.shape[0], dt) +freq_z = np.fft.fftfreq(F_laser.shape[1], dt) + +pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape) + +freq = np.sqrt((freq_x[pos_max[0]]) ** 2 + (freq_z[pos_max[1]]) ** 2) +exp_freq = c / wavelength +relative_error_freq = np.abs(freq - exp_freq) / exp_freq +print("Relative error frequency: ", relative_error_freq) +assert relative_error_freq < relative_error_threshold + +test_name = os.path.split(os.getcwd())[1] +checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/laser_injection_from_file/analysis_rz.py b/Examples/Tests/laser_injection_from_file/analysis_rz.py new file mode 100755 index 00000000000..90e392bcf25 --- /dev/null +++ b/Examples/Tests/laser_injection_from_file/analysis_rz.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Andrew Myers, Axel Huebl, Luca Fedeli +# Remi Lehe, 
Ilian Kara-Mostefa +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL + + +# This file is part of the WarpX automated test suite. It is used to test the +# injection of a laser pulse from an external lasy file: +# - Compute the theory for laser envelope at time T +# - Compare theory and simulation in RZ, for both envelope and central frequency + +import os +import sys + +import matplotlib + +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +import yt +from scipy.constants import c, epsilon_0 +from scipy.signal import hilbert + +yt.funcs.mylog.setLevel(50) + +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +import checksumAPI + +# Maximum acceptable error for this test +relative_error_threshold = 0.065 + +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 + +# Parameters of the gaussian beam +wavelength = 1.0 * um +w0 = 12.0 * um +tt = 10.0 * fs +t_c = 20.0 * fs + +laser_energy = 1.0 +E_max = np.sqrt( + 2 * (2 / np.pi) ** (3 / 2) * laser_energy / (epsilon_0 * w0**2 * c * tt) +) + + +# Function for the envelope +def gauss_env(T, X, Y, Z): + # Function to compute the theory for the envelope + inv_tau2 = 1.0 / tt / tt + inv_w_2 = 1.0 / (w0 * w0) + exp_arg = ( + -(X * X) * inv_w_2 + - (Y * Y) * inv_w_2 + - inv_tau2 / c / c * (Z - T * c) * (Z - T * c) + ) + return E_max * np.real(np.exp(exp_arg)) + + +filename = sys.argv[1] +compname = "comp_unf.pdf" +steps = 252 +ds = yt.load(filename) +dt = ds.current_time.to_value() / steps + +# Define 3D meshes +x = np.linspace( + ds.domain_left_edge[0], ds.domain_right_edge[0], ds.domain_dimensions[0] +).v +y = np.linspace( + ds.domain_left_edge[1], ds.domain_right_edge[1], ds.domain_dimensions[1] +).v +z = np.linspace( + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_dimensions[ds.dimensionality - 1], +).v +X, Y, Z = np.meshgrid(x, y, z, sparse=False, indexing="ij") + +# Compute the theory for envelope +env_theory = gauss_env(+t_c - ds.current_time.to_value(), X, Y, Z) + gauss_env( + -t_c + ds.current_time.to_value(), X, Y, Z +) + +# Read laser field in PIC simulation, and compute envelope +all_data_level_0 = ds.covering_grid( + level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions +) +F_laser = all_data_level_0["boxlib", "Et"].v.squeeze() +env = abs(hilbert(F_laser)) +extent = [ + ds.domain_left_edge[ds.dimensionality - 1], + ds.domain_right_edge[ds.dimensionality - 1], + ds.domain_left_edge[0], + ds.domain_right_edge[0], +] + +env_theory_slice = env_theory[:, env_theory.shape[1] // 2, :] + +# Plot results +plt.figure(figsize=(8, 6)) +plt.subplot(221) +plt.title("PIC field") +plt.imshow(F_laser, extent=extent) +plt.colorbar() +plt.subplot(222) +plt.title("PIC envelope") +plt.imshow(env, extent=extent) +plt.colorbar() +plt.subplot(223) +plt.title("Theory envelope") +plt.imshow(env_theory_slice, extent=extent) +plt.colorbar() +plt.subplot(224) +plt.title("Difference") +plt.imshow(env - env_theory_slice, extent=extent) +plt.colorbar() +plt.tight_layout() +plt.savefig(compname, bbox_inches="tight") + +relative_error_env = np.sum(np.abs(env - env_theory_slice)) / np.sum(np.abs(env)) +print("Relative error envelope: ", relative_error_env) +assert relative_error_env < relative_error_threshold + +fft_F_laser = np.fft.fftn(F_laser) + +freq_x = np.fft.fftfreq(F_laser.shape[0], dt) +freq_z = np.fft.fftfreq(F_laser.shape[1], dt) + +pos_max = np.unravel_index(np.abs(fft_F_laser).argmax(), fft_F_laser.shape) + +freq = np.sqrt((freq_x[pos_max[0]]) ** 
2 + (freq_z[pos_max[1]]) ** 2) +exp_freq = c / wavelength +relative_error_freq = np.abs(freq - exp_freq) / exp_freq +print("Relative error frequency: ", relative_error_freq) +assert relative_error_freq < relative_error_threshold + +test_name = os.path.split(os.getcwd())[1] +checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/laser_injection_from_file/inputs.1d_test b/Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file similarity index 93% rename from Examples/Tests/laser_injection_from_file/inputs.1d_test rename to Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file index 6c392883418..1510a8df4c3 100644 --- a/Examples/Tests/laser_injection_from_file/inputs.1d_test +++ b/Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file @@ -40,7 +40,7 @@ lasy_laser.e_max = 1.e14 # Maximum amplitude of the laser field (i lasy_laser.wavelength = 1.0e-6 # The wavelength of the laser (in meters) lasy_laser.profile = from_file lasy_laser.time_chunk_size = 50 -lasy_laser.lasy_file_name = "diags/gaussianlaser3d_00000.h5" +lasy_laser.lasy_file_name = "../test_1d_laser_injection_from_lasy_file_prepare/diags/gaussian_laser_3d_00000.h5" lasy_laser.delay = 0.0 # Diagnostics diff --git a/Examples/Tests/laser_injection_from_file/inputs.1d_boost_test b/Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file_boost similarity index 93% rename from Examples/Tests/laser_injection_from_file/inputs.1d_boost_test rename to Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file_boost index ffc1865ee0f..d118ce85ae5 100644 --- a/Examples/Tests/laser_injection_from_file/inputs.1d_boost_test +++ b/Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file_boost @@ -47,7 +47,7 @@ lasy_laser.e_max = 1.e14 # Maximum amplitude of the laser field (i lasy_laser.wavelength = 1.0e-6 # The wavelength of the laser (in meters) lasy_laser.profile = from_file lasy_laser.time_chunk_size = 50 -lasy_laser.lasy_file_name = "diags/gaussianlaser3d_00000.h5" +lasy_laser.lasy_file_name = "../test_1d_laser_injection_from_lasy_file_boost_prepare/diags/gaussian_laser_3d_00000.h5" lasy_laser.delay = 0.0 # Diagnostics diff --git a/Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file_boost_prepare.py b/Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file_boost_prepare.py new file mode 100755 index 00000000000..f71b87d5fc8 --- /dev/null +++ b/Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file_boost_prepare.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Andrew Myers, Axel Huebl, Luca Fedeli +# Remi Lehe, Ilian Kara-Mostefa +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL + + +# This file is part of the WarpX automated test suite. It is used to test the +# injection of a laser pulse from an external lasy file: +# - Generate an input lasy file with a gaussian laser pulse. 
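+# The pulse written below is saved as diags/gaussian_laser_3d_00000.h5 (lasy +# appends the iteration number to the prefix passed to write_to_file); that is +# the path the boosted-frame input deck references via lasy_laser.lasy_file_name.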
+ +from lasy.laser import Laser +from lasy.profiles import GaussianProfile + +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 + +# Parameters of the Gaussian beam +wavelength = 1.0 * um +pol = (1, 0) +laser_energy = 1.0 +w0 = 12.0 * um +tt = 10.0 * fs +t_c = 20.0 * fs + +# Create a laser using lasy +profile = GaussianProfile(wavelength, pol, laser_energy, w0, tt, t_peak=0) +dim = "xyt" +lo = (-25e-6, -25e-6, -20e-15) +hi = (+25e-6, +25e-6, +20e-15) +npoints = (100, 100, 100) +laser = Laser(dim, lo, hi, npoints, profile) +laser.normalize(laser_energy, kind="energy") +laser.write_to_file("gaussian_laser_3d") diff --git a/Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file_prepare.py b/Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file_prepare.py new file mode 100755 index 00000000000..902b0c47210 --- /dev/null +++ b/Examples/Tests/laser_injection_from_file/inputs_test_1d_laser_injection_from_lasy_file_prepare.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Andrew Myers, Axel Huebl, Luca Fedeli +# Remi Lehe, Ilian Kara-Mostefa +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL + + +# This file is part of the WarpX automated test suite. It is used to test the +# injection of a laser pulse from an external lasy file: +# - Generate an input lasy file with a gaussian laser pulse. + +from lasy.laser import Laser +from lasy.profiles import GaussianProfile + +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 + +# Parameters of the Gaussian beam +wavelength = 1.0 * um +pol = (1, 0) +laser_energy = 1.0 +w0 = 12.0 * um +tt = 10.0 * fs + +# Create a laser using lasy +profile = GaussianProfile(wavelength, pol, laser_energy, w0, tt, t_peak=0) +dim = "xyt" +lo = (-25e-6, -25e-6, -20e-15) +hi = (+25e-6, +25e-6, +20e-15) +npoints = (100, 100, 100) +laser = Laser(dim, lo, hi, npoints, profile) +laser.normalize(laser_energy, kind="energy") +laser.write_to_file("gaussian_laser_3d") diff --git a/Examples/Tests/laser_injection_from_file/inputs.2d_test_binary b/Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_binary_file similarity index 94% rename from Examples/Tests/laser_injection_from_file/inputs.2d_test_binary rename to Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_binary_file index fdb19f406ca..022da5b0e29 100644 --- a/Examples/Tests/laser_injection_from_file/inputs.2d_test_binary +++ b/Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_binary_file @@ -40,7 +40,7 @@ binary_laser.polarization = 0. 1. 0. 
# The main polarization vector binary_laser.e_max = 1.e12 # Maximum amplitude of the laser field (in V/m) binary_laser.wavelength = 1.0e-6 # The wavelength of the laser (in meters) binary_laser.profile = from_file -binary_laser.binary_file_name = "gauss_2d" +binary_laser.binary_file_name = "../test_2d_laser_injection_from_binary_file_prepare/gauss_2d" binary_laser.time_chunk_size = 50 binary_laser.delay = 0.0 diff --git a/Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_binary_file_prepare.py b/Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_binary_file_prepare.py new file mode 100755 index 00000000000..d8fe2236ae0 --- /dev/null +++ b/Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_binary_file_prepare.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Andrew Myers, Axel Huebl, Luca Fedeli +# Remi Lehe, Ilian Kara-Mostefa +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL + + +# This file is part of the WarpX automated test suite. It is used to test the +# injection of a laser pulse from an external binary file: +# - Generate an input binary file with a gaussian laser pulse. + +import sys + +import matplotlib + +matplotlib.use("Agg") +import numpy as np +import yt + +yt.funcs.mylog.setLevel(50) + +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") + +# Maximum acceptable error for this test +relative_error_threshold = 0.065 + +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 +c = 299792458 + +# Parameters of the gaussian beam +wavelength = 1.0 * um +w0 = 6.0 * um +tt = 10.0 * fs +x_c = 0.0 * um +t_c = 20.0 * fs +foc_dist = 10 * um +E_max = 1e12 +rot_angle = -np.pi / 4.0 + +# Parameters of the tx grid +x_l = -12.0 * um +x_r = 12.0 * um +x_points = 480 +t_l = 0.0 * fs +t_r = 40.0 * fs +t_points = 400 +tcoords = np.linspace(t_l, t_r, t_points) +xcoords = np.linspace(x_l, x_r, x_points) + + +def gauss(T, X, Y, opt): + """Compute the electric field for a Gaussian laser pulse. + This is used to write the binary input file. + """ + + k0 = 2.0 * np.pi / wavelength + inv_tau2 = 1.0 / tt / tt + osc_phase = k0 * c * (T - t_c) + + diff_factor = 1.0 + 1.0j * foc_dist * 2 / (k0 * w0 * w0) + inv_w_2 = 1.0 / (w0 * w0 * diff_factor) + + pre_fact = np.exp(1.0j * osc_phase) + + if opt == "3d": + pre_fact = pre_fact / diff_factor + else: + pre_fact = pre_fact / np.sqrt(diff_factor) + + exp_arg = -(X * X + Y * Y) * inv_w_2 - inv_tau2 * (T - t_c) * (T - t_c) + + return np.real(pre_fact * np.exp(exp_arg)) + + +def write_file(fname, x, y, t, E): + """For a given filename fname, space coordinates x and y, time coordinate t + and field E, write a WarpX-compatible input binary file containing the + profile of the laser pulse. 
This function assumes + a uniform spatio-temporal mesh. + """ + + with open(fname, "wb") as file: + # layout: 1-byte uniform-mesh flag, then the number of points along + # t, x and y (4-byte unsigned ints), then the first/last coordinate + # of each axis, and finally the raw field values + flag_unif = 1 + file.write(flag_unif.to_bytes(1, byteorder="little")) + file.write((len(t)).to_bytes(4, byteorder="little", signed=False)) + file.write((len(x)).to_bytes(4, byteorder="little", signed=False)) + file.write((len(y)).to_bytes(4, byteorder="little", signed=False)) + file.write(t[0].tobytes()) + file.write(t[-1].tobytes()) + file.write(x[0].tobytes()) + file.write(x[-1].tobytes()) + if len(y) == 1: + file.write(y[0].tobytes()) + else: + file.write(y[0].tobytes()) + file.write(y[-1].tobytes()) + file.write(E.tobytes()) + + +T, X, Y = np.meshgrid(tcoords, xcoords, np.array([0.0]), indexing="ij") +E_t = gauss(T, X, Y, "2d") +write_file("gauss_2d", xcoords, np.array([0.0]), tcoords, E_t) diff --git a/Examples/Tests/laser_injection_from_file/inputs.2d_test b/Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_lasy_file similarity index 93% rename from Examples/Tests/laser_injection_from_file/inputs.2d_test rename to Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_lasy_file index e5814471753..f6a0693a5bd 100644 --- a/Examples/Tests/laser_injection_from_file/inputs.2d_test +++ b/Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_lasy_file @@ -40,7 +40,7 @@ lasy_laser.e_max = 1.e14 # Maximum amplitude of the laser field (i lasy_laser.wavelength = 1.0e-6 # The wavelength of the laser (in meters) lasy_laser.profile = from_file lasy_laser.time_chunk_size = 50 -lasy_laser.lasy_file_name = "diags/gaussianlaser3d_00000.h5" +lasy_laser.lasy_file_name = "../test_2d_laser_injection_from_lasy_file_prepare/diags/gaussian_laser_3d_00000.h5" lasy_laser.delay = 0.0 # Diagnostics diff --git a/Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_lasy_file_prepare.py b/Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_lasy_file_prepare.py new file mode 100755 index 00000000000..f71b87d5fc8 --- /dev/null +++ b/Examples/Tests/laser_injection_from_file/inputs_test_2d_laser_injection_from_lasy_file_prepare.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Andrew Myers, Axel Huebl, Luca Fedeli +# Remi Lehe, Ilian Kara-Mostefa +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL + + +# This file is part of the WarpX automated test suite. It is used to test the +# injection of a laser pulse from an external lasy file: +# - Generate an input lasy file with a gaussian laser pulse.
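+# Optional sanity check (a sketch, not exercised by the test harness): assuming +# h5py is available, the HDF5 file written below can be inspected with +# import h5py +# with h5py.File("diags/gaussian_laser_3d_00000.h5", "r") as f: +# f.visit(print) # prints every group/dataset path in the file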
+ +from lasy.laser import Laser +from lasy.profiles import GaussianProfile + +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 + +# Parameters of the Gaussian beam +wavelength = 1.0 * um +pol = (1, 0) +laser_energy = 1.0 +w0 = 12.0 * um +tt = 10.0 * fs +t_c = 20.0 * fs + +# Create a laser using lasy +profile = GaussianProfile(wavelength, pol, laser_energy, w0, tt, t_peak=0) +dim = "xyt" +lo = (-25e-6, -25e-6, -20e-15) +hi = (+25e-6, +25e-6, +20e-15) +npoints = (100, 100, 100) +laser = Laser(dim, lo, hi, npoints, profile) +laser.normalize(laser_energy, kind="energy") +laser.write_to_file("gaussian_laser_3d") diff --git a/Examples/Tests/laser_injection_from_file/inputs.3d_test b/Examples/Tests/laser_injection_from_file/inputs_test_3d_laser_injection_from_lasy_file similarity index 93% rename from Examples/Tests/laser_injection_from_file/inputs.3d_test rename to Examples/Tests/laser_injection_from_file/inputs_test_3d_laser_injection_from_lasy_file index ad8159cb650..534ea729886 100644 --- a/Examples/Tests/laser_injection_from_file/inputs.3d_test +++ b/Examples/Tests/laser_injection_from_file/inputs_test_3d_laser_injection_from_lasy_file @@ -40,7 +40,7 @@ lasy_laser.e_max = 1.e14 # Maximum amplitude of the laser field (i lasy_laser.wavelength = 1.0e-6 # The wavelength of the laser (in meters) lasy_laser.profile = from_file lasy_laser.time_chunk_size = 50 -lasy_laser.lasy_file_name = "diags/gaussianlaser3d_00000.h5" +lasy_laser.lasy_file_name = "../test_3d_laser_injection_from_lasy_file_prepare/diags/gaussian_laser_3d_00000.h5" lasy_laser.delay = 0.0 # Diagnostics diff --git a/Examples/Tests/laser_injection_from_file/inputs_test_3d_laser_injection_from_lasy_file_prepare.py b/Examples/Tests/laser_injection_from_file/inputs_test_3d_laser_injection_from_lasy_file_prepare.py new file mode 100755 index 00000000000..410dcd2c36e --- /dev/null +++ b/Examples/Tests/laser_injection_from_file/inputs_test_3d_laser_injection_from_lasy_file_prepare.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Andrew Myers, Axel Huebl, Luca Fedeli +# Remi Lehe, Ilian Kara-Mostefa +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL + + +# This file is part of the WarpX automated test suite. It is used to test the +# injection of a laser pulse from an external lasy file: +# - Generate an input lasy file with a gaussian laser pulse. 
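+# The "xyt" box below spans +/-25 um transversely and +/-20 fs in time, with +# 100 points per axis, i.e. just the extent of the generated pulse; the 3d +# input deck then reads the file back through lasy_laser.profile = from_file.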
+ +from lasy.laser import Laser +from lasy.profiles import GaussianProfile + +# Maximum acceptable error for this test +relative_error_threshold = 0.065 + +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 + +# Parameters of the Gaussian beam +wavelength = 1.0 * um +pol = (1, 0) +laser_energy = 1.0 +w0 = 12.0 * um +tt = 10.0 * fs +t_c = 20.0 * fs + +# Create a laser using lasy +profile = GaussianProfile(wavelength, pol, laser_energy, w0, tt, t_peak=0) +dim = "xyt" +lo = (-25e-6, -25e-6, -20e-15) +hi = (+25e-6, +25e-6, +20e-15) +npoints = (100, 100, 100) +laser = Laser(dim, lo, hi, npoints, profile) +laser.normalize(laser_energy, kind="energy") +laser.write_to_file("gaussian_laser_3d") diff --git a/Examples/Tests/laser_injection_from_file/inputs.from_RZ_file_test b/Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_RZ_lasy_file similarity index 93% rename from Examples/Tests/laser_injection_from_file/inputs.from_RZ_file_test rename to Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_RZ_lasy_file index f92440188b7..a4c87d244fc 100644 --- a/Examples/Tests/laser_injection_from_file/inputs.from_RZ_file_test +++ b/Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_RZ_lasy_file @@ -40,7 +40,7 @@ lasy_RZ_laser.e_max = 1.e14 # Maximum amplitude of the laser field lasy_RZ_laser.wavelength = 1.0e-6 # The wavelength of the laser (in meters) lasy_RZ_laser.profile = from_file lasy_RZ_laser.time_chunk_size = 50 -lasy_RZ_laser.lasy_file_name = "diags/laguerrelaserRZ_00000.h5" +lasy_RZ_laser.lasy_file_name = "../test_rz_laser_injection_from_RZ_lasy_file_prepare/diags/laguerre_laser_RZ_00000.h5" lasy_RZ_laser.delay = 0.0 # Diagnostics diff --git a/Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_RZ_lasy_file_prepare.py b/Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_RZ_lasy_file_prepare.py new file mode 100755 index 00000000000..1a1dfabe86e --- /dev/null +++ b/Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_RZ_lasy_file_prepare.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Andrew Myers, Axel Huebl, Luca Fedeli +# Remi Lehe, Ilian Kara-Mostefa +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL + + +# This file is part of the WarpX automated test suite. It is used to test the +# injection of a laser pulse from an external lasy file: +# - Generate an input lasy file with a Laguerre Gaussian laser pulse. 
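+# Note: the transverse profile carries azimuthal mode m=1; the laser object is +# therefore built with n_azimuthal_modes=2, presumably so that the m=1 content +# can be represented in the "rt" geometry.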
+ +from lasy.laser import Laser +from lasy.profiles import CombinedLongitudinalTransverseProfile +from lasy.profiles.longitudinal import GaussianLongitudinalProfile +from lasy.profiles.transverse import LaguerreGaussianTransverseProfile + +# Maximum acceptable error for this test +relative_error_threshold = 0.065 + +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 + +# Parameters of the Laguerre Gaussian beam +wavelength = 1.0 * um +pol = (1, 0) +laser_energy = 1.0 +w0 = 12.0 * um +tt = 10.0 * fs +t_c = 20.0 * fs + +# Create a Laguerre Gaussian laser in RZ geometry using lasy +profile = CombinedLongitudinalTransverseProfile( + wavelength, + pol, + laser_energy, + GaussianLongitudinalProfile(wavelength, tt, t_peak=0), + LaguerreGaussianTransverseProfile(w0, p=0, m=1), +) +dim = "rt" +lo = (0e-6, -20e-15) +hi = (+25e-6, +20e-15) +npoints = (100, 100) +laser = Laser(dim, lo, hi, npoints, profile, n_azimuthal_modes=2) +laser.normalize(laser_energy, kind="energy") +laser.write_to_file("laguerre_laser_RZ") diff --git a/Examples/Tests/laser_injection_from_file/inputs.RZ_test b/Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_lasy_file similarity index 93% rename from Examples/Tests/laser_injection_from_file/inputs.RZ_test rename to Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_lasy_file index 2a539883fec..cc7100afb9d 100644 --- a/Examples/Tests/laser_injection_from_file/inputs.RZ_test +++ b/Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_lasy_file @@ -41,7 +41,7 @@ lasy_laser.e_max = 1.e14 # Maximum amplitude of the laser field (i lasy_laser.wavelength = 1.0e-6 # The wavelength of the laser (in meters) lasy_laser.profile = from_file lasy_laser.time_chunk_size = 50 -lasy_laser.lasy_file_name = "diags/gaussianlaser3d_00000.h5" +lasy_laser.lasy_file_name = "../test_rz_laser_injection_from_lasy_file_prepare/diags/gaussian_laser_3d_00000.h5" lasy_laser.delay = 0.0 # Diagnostics diff --git a/Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_lasy_file_prepare.py b/Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_lasy_file_prepare.py new file mode 100755 index 00000000000..410dcd2c36e --- /dev/null +++ b/Examples/Tests/laser_injection_from_file/inputs_test_rz_laser_injection_from_lasy_file_prepare.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Andrew Myers, Axel Huebl, Luca Fedeli +# Remi Lehe, Ilian Kara-Mostefa +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL + + +# This file is part of the WarpX automated test suite. It is used to test the +# injection of a laser pulse from an external lasy file: +# - Generate an input lasy file with a gaussian laser pulse. 
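+# Although the consuming test runs in RZ geometry, the input pulse is generated +# on a 3D Cartesian "xyt" grid; the RZ deck points at the resulting +# diags/gaussian_laser_3d_00000.h5 via lasy_laser.lasy_file_name.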
+ +from lasy.laser import Laser +from lasy.profiles import GaussianProfile + +# Maximum acceptable error for this test +relative_error_threshold = 0.065 + +# Physical parameters +um = 1.0e-6 +fs = 1.0e-15 + +# Parameters of the Gaussian beam +wavelength = 1.0 * um +pol = (1, 0) +laser_energy = 1.0 +w0 = 12.0 * um +tt = 10.0 * fs +t_c = 20.0 * fs + +# Create a laser using lasy +profile = GaussianProfile(wavelength, pol, laser_energy, w0, tt, t_peak=0) +dim = "xyt" +lo = (-25e-6, -25e-6, -20e-15) +hi = (+25e-6, +25e-6, +20e-15) +npoints = (100, 100, 100) +laser = Laser(dim, lo, hi, npoints, profile) +laser.normalize(laser_energy, kind="energy") +laser.write_to_file("gaussian_laser_3d") diff --git a/Examples/Tests/laser_on_fine/CMakeLists.txt b/Examples/Tests/laser_on_fine/CMakeLists.txt new file mode 100644 index 00000000000..794d5e68c66 --- /dev/null +++ b/Examples/Tests/laser_on_fine/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_laser_on_fine # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_laser_on_fine # inputs + analysis_default_regression.py # analysis + diags/diag1000050 # output + OFF # dependency +) diff --git a/Examples/Tests/laser_on_fine/analysis_default_regression.py b/Examples/Tests/laser_on_fine/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/laser_on_fine/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/laser_on_fine/inputs_2d b/Examples/Tests/laser_on_fine/inputs_test_2d_laser_on_fine similarity index 99% rename from Examples/Tests/laser_on_fine/inputs_2d rename to Examples/Tests/laser_on_fine/inputs_test_2d_laser_on_fine index 1ffbfcb7e09..61c10e81f3e 100644 --- a/Examples/Tests/laser_on_fine/inputs_2d +++ b/Examples/Tests/laser_on_fine/inputs_test_2d_laser_on_fine @@ -1,5 +1,5 @@ # Maximum number of time steps -max_step = 500 +max_step = 50 # number of grid points amr.n_cell = 64 64 diff --git a/Examples/Tests/load_external_field/CMakeLists.txt b/Examples/Tests/load_external_field/CMakeLists.txt new file mode 100644 index 00000000000..93b0a1436be --- /dev/null +++ b/Examples/Tests/load_external_field/CMakeLists.txt @@ -0,0 +1,68 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_load_external_field_grid_picmi # name + 3 # dims + 1 # nprocs + OFF # eb + inputs_test_3d_load_external_field_grid_picmi.py # inputs + analysis_3d.py # analysis + diags/diag1000300 # output + OFF # dependency +) + +add_warpx_test( + test_3d_load_external_field_particle_picmi # name + 3 # dims + 1 # nprocs + OFF # eb + inputs_test_3d_load_external_field_particle_picmi.py # inputs + analysis_3d.py # analysis + diags/diag1000300 # output + OFF # dependency +) + +add_warpx_test( + test_rz_load_external_field_grid # name + RZ # dims + 1 # nprocs + OFF # eb + inputs_test_rz_load_external_field_grid # inputs + analysis_rz.py # analysis + diags/diag1000300 # output + OFF # dependency +) + +add_warpx_test( + test_rz_load_external_field_grid_restart # name + RZ # dims + 1 # nprocs + OFF # eb + inputs_test_rz_load_external_field_grid_restart # inputs + analysis_default_restart.py # analysis + diags/diag1000300 # output + test_rz_load_external_field_grid # dependency +) + +add_warpx_test( + test_rz_load_external_field_particles # name + RZ # dims + 1 # nprocs + OFF # eb + 
inputs_test_rz_load_external_field_particles # inputs + analysis_rz.py # analysis + diags/diag1000300 # output + OFF # dependency +) + +add_warpx_test( + test_rz_load_external_field_particles_restart # name + RZ # dims + 1 # nprocs + OFF # eb + inputs_test_rz_load_external_field_particles_restart # inputs + analysis_default_restart.py # analysis + diags/diag1000300 # output + test_rz_load_external_field_particles # dependency +) diff --git a/Examples/Tests/LoadExternalField/analysis_3d.py b/Examples/Tests/load_external_field/analysis_3d.py similarity index 100% rename from Examples/Tests/LoadExternalField/analysis_3d.py rename to Examples/Tests/load_external_field/analysis_3d.py diff --git a/Examples/Tests/load_external_field/analysis_default_restart.py b/Examples/Tests/load_external_field/analysis_default_restart.py new file mode 120000 index 00000000000..0459986eebc --- /dev/null +++ b/Examples/Tests/load_external_field/analysis_default_restart.py @@ -0,0 +1 @@ +../../analysis_default_restart.py \ No newline at end of file diff --git a/Examples/Tests/LoadExternalField/analysis_rz.py b/Examples/Tests/load_external_field/analysis_rz.py similarity index 100% rename from Examples/Tests/LoadExternalField/analysis_rz.py rename to Examples/Tests/load_external_field/analysis_rz.py diff --git a/Examples/Tests/LoadExternalField/PICMI_inputs_3d_grid_fields.py b/Examples/Tests/load_external_field/inputs_test_3d_load_external_field_grid_picmi.py similarity index 95% rename from Examples/Tests/LoadExternalField/PICMI_inputs_3d_grid_fields.py rename to Examples/Tests/load_external_field/inputs_test_3d_load_external_field_grid_picmi.py index f71bb171ee3..231552d088e 100644 --- a/Examples/Tests/LoadExternalField/PICMI_inputs_3d_grid_fields.py +++ b/Examples/Tests/load_external_field/inputs_test_3d_load_external_field_grid_picmi.py @@ -94,16 +94,12 @@ period=300, species=[ions], data_list=["ux", "uy", "uz", "x", "y", "z", "weighting"], - write_dir=".", - warpx_file_prefix="Python_LoadExternalGridField3D_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, period=300, data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], - write_dir=".", - warpx_file_prefix="Python_LoadExternalGridField3D_plt", ) ################################# diff --git a/Examples/Tests/LoadExternalField/PICMI_inputs_3d_particle_fields.py b/Examples/Tests/load_external_field/inputs_test_3d_load_external_field_particle_picmi.py similarity index 95% rename from Examples/Tests/LoadExternalField/PICMI_inputs_3d_particle_fields.py rename to Examples/Tests/load_external_field/inputs_test_3d_load_external_field_particle_picmi.py index 90b1ac78474..c2ec6c1a5b7 100644 --- a/Examples/Tests/LoadExternalField/PICMI_inputs_3d_particle_fields.py +++ b/Examples/Tests/load_external_field/inputs_test_3d_load_external_field_particle_picmi.py @@ -94,16 +94,12 @@ period=300, species=[ions], data_list=["ux", "uy", "uz", "x", "y", "z", "weighting"], - write_dir=".", - warpx_file_prefix="Python_LoadExternalParticleField3D_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, period=300, data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], - write_dir=".", - warpx_file_prefix="Python_LoadExternalParticleField3D_plt", ) ################################# diff --git a/Examples/Tests/LoadExternalField/inputs_rz_grid_fields b/Examples/Tests/load_external_field/inputs_test_rz_load_external_field_grid similarity index 97% rename from Examples/Tests/LoadExternalField/inputs_rz_grid_fields rename to 
Examples/Tests/load_external_field/inputs_test_rz_load_external_field_grid index 2e22ca299ea..f986add7bf5 100644 --- a/Examples/Tests/LoadExternalField/inputs_rz_grid_fields +++ b/Examples/Tests/load_external_field/inputs_test_rz_load_external_field_grid @@ -1,3 +1,4 @@ +warpx.abort_on_warning_threshold = medium warpx.serialize_initial_conditions = 0 warpx.do_dynamic_scheduling = 0 particles.do_tiling = 0 diff --git a/Examples/Tests/load_external_field/inputs_test_rz_load_external_field_grid_restart b/Examples/Tests/load_external_field/inputs_test_rz_load_external_field_grid_restart new file mode 100644 index 00000000000..ed31e697e25 --- /dev/null +++ b/Examples/Tests/load_external_field/inputs_test_rz_load_external_field_grid_restart @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_test_rz_load_external_field_grid + +# test input parameters +amr.restart = "../test_rz_load_external_field_grid/diags/chk000150" diff --git a/Examples/Tests/LoadExternalField/inputs_rz_particle_fields b/Examples/Tests/load_external_field/inputs_test_rz_load_external_field_particles similarity index 97% rename from Examples/Tests/LoadExternalField/inputs_rz_particle_fields rename to Examples/Tests/load_external_field/inputs_test_rz_load_external_field_particles index b76d4cb7efc..e725ed588b0 100644 --- a/Examples/Tests/LoadExternalField/inputs_rz_particle_fields +++ b/Examples/Tests/load_external_field/inputs_test_rz_load_external_field_particles @@ -1,3 +1,4 @@ +warpx.abort_on_warning_threshold = medium warpx.serialize_initial_conditions = 0 warpx.do_dynamic_scheduling = 0 particles.do_tiling = 0 diff --git a/Examples/Tests/load_external_field/inputs_test_rz_load_external_field_particles_restart b/Examples/Tests/load_external_field/inputs_test_rz_load_external_field_particles_restart new file mode 100644 index 00000000000..7e20f87d6d2 --- /dev/null +++ b/Examples/Tests/load_external_field/inputs_test_rz_load_external_field_particles_restart @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_test_rz_load_external_field_particles + +# test input parameters +amr.restart = "../test_rz_load_external_field_particles/diags/chk000150" diff --git a/Examples/Tests/magnetostatic_eb/CMakeLists.txt b/Examples/Tests/magnetostatic_eb/CMakeLists.txt new file mode 100644 index 00000000000..db97a6e11c2 --- /dev/null +++ b/Examples/Tests/magnetostatic_eb/CMakeLists.txt @@ -0,0 +1,41 @@ +# Add tests (alphabetical order) ############################################## +# + +if(WarpX_EB) + add_warpx_test( + test_3d_magnetostatic_eb # name + 3 # dims + 1 # nprocs + ON # eb + inputs_test_3d_magnetostatic_eb # inputs + analysis_default_regression.py # analysis + diags/diag1000001 # output + OFF # dependency + ) +endif() + +if(WarpX_EB) + add_warpx_test( + test_3d_magnetostatic_eb_picmi # name + 3 # dims + 1 # nprocs + ON # eb + inputs_test_3d_magnetostatic_eb_picmi.py # inputs + analysis_default_regression.py # analysis + diags/diag1000001 # output + OFF # dependency + ) +endif() + +if(WarpX_EB) + add_warpx_test( + test_rz_magnetostatic_eb_picmi # name + RZ # dims + 1 # nprocs + ON # eb + inputs_test_rz_magnetostatic_eb_picmi.py # inputs + analysis_rz.py # analysis + diags/diag1000001 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/magnetostatic_eb/analysis_default_regression.py b/Examples/Tests/magnetostatic_eb/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/magnetostatic_eb/analysis_default_regression.py @@ -0,0 +1 @@ 
+../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/magnetostatic_eb/inputs_3d b/Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb similarity index 96% rename from Examples/Tests/magnetostatic_eb/inputs_3d rename to Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb index 60617f7d392..73ff14dc8a4 100644 --- a/Examples/Tests/magnetostatic_eb/inputs_3d +++ b/Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb @@ -48,4 +48,3 @@ diag1.diag_type = Full diag1.format = plotfile diag1.intervals = 1 diag1.fields_to_plot = Az Bx By Ex Ey jz phi rho -diag1.file_prefix = ./magnetostatic_eb_3d_mr_plt diff --git a/Examples/Tests/magnetostatic_eb/PICMI_inputs_3d.py b/Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb_picmi.py similarity index 97% rename from Examples/Tests/magnetostatic_eb/PICMI_inputs_3d.py rename to Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb_picmi.py index 6a3fe9e2988..d3c35daf261 100755 --- a/Examples/Tests/magnetostatic_eb/PICMI_inputs_3d.py +++ b/Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb_picmi.py @@ -122,8 +122,6 @@ particle_diag = picmi.ParticleDiagnostic( name="diag1", period=1, - write_dir=".", - warpx_file_prefix="Python_magnetostatic_eb_3d_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", @@ -145,8 +143,6 @@ "phi", "rho", ], - write_dir=".", - warpx_file_prefix="Python_magnetostatic_eb_3d_plt", ) ########################## diff --git a/Examples/Tests/magnetostatic_eb/PICMI_inputs_rz.py b/Examples/Tests/magnetostatic_eb/inputs_test_rz_magnetostatic_eb_picmi.py similarity index 97% rename from Examples/Tests/magnetostatic_eb/PICMI_inputs_rz.py rename to Examples/Tests/magnetostatic_eb/inputs_test_rz_magnetostatic_eb_picmi.py index 0ccf4460dfe..d0f1787a5a2 100755 --- a/Examples/Tests/magnetostatic_eb/PICMI_inputs_rz.py +++ b/Examples/Tests/magnetostatic_eb/inputs_test_rz_magnetostatic_eb_picmi.py @@ -122,16 +122,12 @@ particle_diag = picmi.ParticleDiagnostic( name="diag1", period=1, - write_dir=".", - warpx_file_prefix="Python_magnetostatic_eb_rz_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, period=1, data_list=["Er", "Bt", "Az", "Jz", "phi", "rho"], - write_dir=".", - warpx_file_prefix="Python_magnetostatic_eb_rz_plt", ) ########################## diff --git a/Examples/Tests/maxwell_hybrid_qed/CMakeLists.txt b/Examples/Tests/maxwell_hybrid_qed/CMakeLists.txt new file mode 100644 index 00000000000..9e315b7536d --- /dev/null +++ b/Examples/Tests/maxwell_hybrid_qed/CMakeLists.txt @@ -0,0 +1,15 @@ +# Add tests (alphabetical order) ############################################## +# + +if(WarpX_FFT) + add_warpx_test( + test_2d_maxwell_hybrid_qed_solver # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_maxwell_hybrid_qed_solver # inputs + analysis.py # analysis + diags/diag1000300 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/maxwell_hybrid_qed/analysis_Maxwell_QED_Hybrid.py b/Examples/Tests/maxwell_hybrid_qed/analysis.py similarity index 100% rename from Examples/Tests/maxwell_hybrid_qed/analysis_Maxwell_QED_Hybrid.py rename to Examples/Tests/maxwell_hybrid_qed/analysis.py diff --git a/Examples/Tests/maxwell_hybrid_qed/inputs_2d b/Examples/Tests/maxwell_hybrid_qed/inputs_test_2d_maxwell_hybrid_qed_solver similarity index 98% rename from Examples/Tests/maxwell_hybrid_qed/inputs_2d rename to Examples/Tests/maxwell_hybrid_qed/inputs_test_2d_maxwell_hybrid_qed_solver index 
2baa72c0990..8e1091c91e2 100644 --- a/Examples/Tests/maxwell_hybrid_qed/inputs_2d +++ b/Examples/Tests/maxwell_hybrid_qed/inputs_test_2d_maxwell_hybrid_qed_solver @@ -24,7 +24,7 @@ boundary.field_hi = periodic periodic algo.maxwell_solver = psatd warpx.verbose = 0 warpx.use_filter = 1 -warpx.cfl = 1. +warpx.cfl = 0.7071067811865475 warpx.use_hybrid_QED = 1 ################################# diff --git a/Examples/Tests/nci_fdtd_stability/CMakeLists.txt b/Examples/Tests/nci_fdtd_stability/CMakeLists.txt new file mode 100644 index 00000000000..73d0f38beec --- /dev/null +++ b/Examples/Tests/nci_fdtd_stability/CMakeLists.txt @@ -0,0 +1,24 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_nci_corrector # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_nci_corrector # inputs + analysis_ncicorr.py # analysis + diags/diag1000600 # output + OFF # dependency +) + +add_warpx_test( + test_2d_nci_corrector_mr # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_nci_corrector_mr # inputs + analysis_ncicorr.py # analysis + diags/diag1000600 # output + OFF # dependency +) diff --git a/Examples/Tests/nci_fdtd_stability/inputs_2d b/Examples/Tests/nci_fdtd_stability/inputs_base_2d similarity index 100% rename from Examples/Tests/nci_fdtd_stability/inputs_2d rename to Examples/Tests/nci_fdtd_stability/inputs_base_2d diff --git a/Examples/Tests/nci_fdtd_stability/inputs_test_2d_nci_corrector b/Examples/Tests/nci_fdtd_stability/inputs_test_2d_nci_corrector new file mode 100644 index 00000000000..83d537fd856 --- /dev/null +++ b/Examples/Tests/nci_fdtd_stability/inputs_test_2d_nci_corrector @@ -0,0 +1,6 @@ +# base input parameters +FILE = inputs_base_2d + +# test input parameters +amr.max_level = 0 +particles.use_fdtd_nci_corr = 1 diff --git a/Examples/Tests/nci_fdtd_stability/inputs_test_2d_nci_corrector_mr b/Examples/Tests/nci_fdtd_stability/inputs_test_2d_nci_corrector_mr new file mode 100644 index 00000000000..0f53af0443a --- /dev/null +++ b/Examples/Tests/nci_fdtd_stability/inputs_test_2d_nci_corrector_mr @@ -0,0 +1,9 @@ +# base input parameters +FILE = inputs_base_2d + +# test input parameters +amr.max_level = 1 +amr.n_cell = 64 64 +particles.use_fdtd_nci_corr = 1 +warpx.fine_tag_hi = 20.e-6 20.e-6 +warpx.fine_tag_lo = -20.e-6 -20.e-6 diff --git a/Examples/Tests/nci_psatd_stability/CMakeLists.txt b/Examples/Tests/nci_psatd_stability/CMakeLists.txt new file mode 100644 index 00000000000..6a27abdc783 --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/CMakeLists.txt @@ -0,0 +1,223 @@ +# Add tests (alphabetical order) ############################################## +# + +if(WarpX_FFT) + add_warpx_test( + test_2d_averaged_galilean_psatd # name + 2 # dims + 1 # nprocs + OFF # eb + inputs_test_2d_averaged_galilean_psatd # inputs + analysis_galilean.py # analysis + diags/diag1000400 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_2d_averaged_galilean_psatd_hybrid # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_averaged_galilean_psatd_hybrid # inputs + analysis_galilean.py # analysis + diags/diag1000400 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_2d_comoving_psatd_hybrid # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_comoving_psatd_hybrid # inputs + analysis_default_regression.py # analysis + diags/diag1000400 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_2d_galilean_psatd # name + 2 # dims + 1 # nprocs 
+ OFF # eb + inputs_test_2d_galilean_psatd # inputs + analysis_galilean.py # analysis + diags/diag1000400 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_2d_galilean_psatd_current_correction # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_galilean_psatd_current_correction # inputs + analysis_galilean.py # analysis + diags/diag1000400 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_2d_galilean_psatd_current_correction_psb # name + 2 # dims + 1 # nprocs + OFF # eb + inputs_test_2d_galilean_psatd_current_correction_psb # inputs + analysis_galilean.py # analysis + diags/diag1000400 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_2d_galilean_psatd_hybrid # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_galilean_psatd_hybrid # inputs + analysis_default_regression.py # analysis + diags/diag1000400 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_3d_averaged_galilean_psatd # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_averaged_galilean_psatd # inputs + analysis_galilean.py # analysis + diags/diag1000160 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_3d_averaged_galilean_psatd_hybrid # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_averaged_galilean_psatd_hybrid # inputs + analysis_galilean.py # analysis + diags/diag1000160 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_3d_galilean_psatd # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_galilean_psatd # inputs + analysis_galilean.py # analysis + diags/diag1000300 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_3d_galilean_psatd_current_correction # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_galilean_psatd_current_correction # inputs + analysis_galilean.py # analysis + diags/diag1000300 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_3d_galilean_psatd_current_correction_psb # name + 3 # dims + 1 # nprocs + OFF # eb + inputs_test_3d_galilean_psatd_current_correction_psb # inputs + analysis_galilean.py # analysis + diags/diag1000300 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_3d_uniform_plasma_multiJ # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_uniform_plasma_multiJ # inputs + analysis_multiJ.py # analysis + diags/diag1000300 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_rz_galilean_psatd # name + RZ # dims + 1 # nprocs + OFF # eb + inputs_test_rz_galilean_psatd # inputs + analysis_galilean.py # analysis + diags/diag1000400 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_rz_galilean_psatd_current_correction # name + RZ # dims + 2 # nprocs + OFF # eb + inputs_test_rz_galilean_psatd_current_correction # inputs + analysis_galilean.py # analysis + diags/diag1000400 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_rz_galilean_psatd_current_correction_psb # name + RZ # dims + 1 # nprocs + OFF # eb + inputs_test_rz_galilean_psatd_current_correction_psb # inputs + analysis_galilean.py # analysis + diags/diag1000400 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_rz_multiJ_psatd # name + RZ # dims + 2 # nprocs + OFF # eb + inputs_test_rz_multiJ_psatd # inputs + analysis_default_regression.py # analysis + diags/diag1000050 # 
output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/nci_psatd_stability/analysis_default_regression.py b/Examples/Tests/nci_psatd_stability/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/nci_psatd_stability/inputs_2d b/Examples/Tests/nci_psatd_stability/inputs_base_2d similarity index 100% rename from Examples/Tests/nci_psatd_stability/inputs_2d rename to Examples/Tests/nci_psatd_stability/inputs_base_2d diff --git a/Examples/Tests/nci_psatd_stability/inputs_avg_2d b/Examples/Tests/nci_psatd_stability/inputs_base_2d_averaged similarity index 100% rename from Examples/Tests/nci_psatd_stability/inputs_avg_2d rename to Examples/Tests/nci_psatd_stability/inputs_base_2d_averaged diff --git a/Examples/Tests/nci_psatd_stability/inputs_3d b/Examples/Tests/nci_psatd_stability/inputs_base_3d similarity index 100% rename from Examples/Tests/nci_psatd_stability/inputs_3d rename to Examples/Tests/nci_psatd_stability/inputs_base_3d diff --git a/Examples/Tests/nci_psatd_stability/inputs_avg_3d b/Examples/Tests/nci_psatd_stability/inputs_base_3d_averaged similarity index 100% rename from Examples/Tests/nci_psatd_stability/inputs_avg_3d rename to Examples/Tests/nci_psatd_stability/inputs_base_3d_averaged diff --git a/Examples/Tests/nci_psatd_stability/inputs_rz b/Examples/Tests/nci_psatd_stability/inputs_base_rz similarity index 100% rename from Examples/Tests/nci_psatd_stability/inputs_rz rename to Examples/Tests/nci_psatd_stability/inputs_base_rz diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_2d_averaged_galilean_psatd b/Examples/Tests/nci_psatd_stability/inputs_test_2d_averaged_galilean_psatd new file mode 100644 index 00000000000..62f93dbd473 --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_2d_averaged_galilean_psatd @@ -0,0 +1,6 @@ +# base input parameters +FILE = inputs_base_2d_averaged + +# test input parameters +psatd.current_correction = 0 +warpx.abort_on_warning_threshold = medium diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_2d_averaged_galilean_psatd_hybrid b/Examples/Tests/nci_psatd_stability/inputs_test_2d_averaged_galilean_psatd_hybrid new file mode 100644 index 00000000000..0ef3668b103 --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_2d_averaged_galilean_psatd_hybrid @@ -0,0 +1,9 @@ +# base input parameters +FILE = inputs_base_2d_averaged + +# test input parameters +amr.max_grid_size_x = 128 +amr.max_grid_size_y = 64 +psatd.current_correction = 0 +warpx.abort_on_warning_threshold = medium +warpx.grid_type = hybrid diff --git a/Examples/Tests/comoving/inputs_2d_hybrid b/Examples/Tests/nci_psatd_stability/inputs_test_2d_comoving_psatd_hybrid similarity index 97% rename from Examples/Tests/comoving/inputs_2d_hybrid rename to Examples/Tests/nci_psatd_stability/inputs_test_2d_comoving_psatd_hybrid index 393e18d2077..32b155cf0b6 100644 --- a/Examples/Tests/comoving/inputs_2d_hybrid +++ b/Examples/Tests/nci_psatd_stability/inputs_test_2d_comoving_psatd_hybrid @@ -23,6 +23,7 @@ algo.particle_pusher = vay algo.particle_shape = 3 psatd.use_default_v_comoving = 1 +psatd.current_correction = 0 warpx.cfl = 1. 
@@ -40,6 +41,7 @@ warpx.use_filter = 1 warpx.serialize_initial_conditions = 1 warpx.verbose = 1 +warpx.abort_on_warning_threshold = medium particles.species_names = electrons ions beam particles.use_fdtd_nci_corr = 0 diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd b/Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd new file mode 100644 index 00000000000..caebf987434 --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd @@ -0,0 +1,8 @@ +# base input parameters +FILE = inputs_base_2d + +# test input parameters +algo.current_deposition = direct +psatd.current_correction = 0 +warpx.grid_type = collocated +warpx.abort_on_warning_threshold = medium diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd_current_correction b/Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd_current_correction new file mode 100644 index 00000000000..177cf7bcd0c --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd_current_correction @@ -0,0 +1,10 @@ +# base input parameters +FILE = inputs_base_2d + +# test input parameters +amr.blocking_factor = 64 +amr.max_grid_size = 64 +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho divE +psatd.current_correction = 1 +psatd.periodic_single_box_fft = 0 +psatd.update_with_rho = 0 diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd_current_correction_psb b/Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd_current_correction_psb new file mode 100644 index 00000000000..437059d6bd8 --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd_current_correction_psb @@ -0,0 +1,8 @@ +# base input parameters +FILE = inputs_base_2d + +# test input parameters +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho divE +psatd.current_correction = 1 +psatd.periodic_single_box_fft = 1 +psatd.update_with_rho = 0 diff --git a/Examples/Tests/nci_psatd_stability/inputs_2d_hybrid b/Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd_hybrid similarity index 97% rename from Examples/Tests/nci_psatd_stability/inputs_2d_hybrid rename to Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd_hybrid index 90dfd58c4ae..501c964353f 100644 --- a/Examples/Tests/nci_psatd_stability/inputs_2d_hybrid +++ b/Examples/Tests/nci_psatd_stability/inputs_test_2d_galilean_psatd_hybrid @@ -21,6 +21,7 @@ algo.particle_pusher = vay algo.particle_shape = 3 psatd.use_default_v_galilean = 1 +psatd.current_correction = 0 warpx.cfl = 1.
@@ -38,6 +39,7 @@ warpx.use_filter = 1 warpx.serialize_initial_conditions = 1 warpx.verbose = 1 +warpx.abort_on_warning_threshold = medium particles.species_names = electrons ions beam particles.use_fdtd_nci_corr = 0 diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_3d_averaged_galilean_psatd b/Examples/Tests/nci_psatd_stability/inputs_test_3d_averaged_galilean_psatd new file mode 100644 index 00000000000..7c978874145 --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_3d_averaged_galilean_psatd @@ -0,0 +1,6 @@ +# base input parameters +FILE = inputs_base_3d_averaged + +# test input parameters +psatd.current_correction = 0 +warpx.abort_on_warning_threshold = medium diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_3d_averaged_galilean_psatd_hybrid b/Examples/Tests/nci_psatd_stability/inputs_test_3d_averaged_galilean_psatd_hybrid new file mode 100644 index 00000000000..4996f476854 --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_3d_averaged_galilean_psatd_hybrid @@ -0,0 +1,7 @@ +# base input parameters +FILE = inputs_base_3d_averaged + +# test input parameters +psatd.current_correction = 0 +warpx.abort_on_warning_threshold = medium +warpx.grid_type = hybrid diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_3d_galilean_psatd b/Examples/Tests/nci_psatd_stability/inputs_test_3d_galilean_psatd new file mode 100644 index 00000000000..3ec82981aea --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_3d_galilean_psatd @@ -0,0 +1,7 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +psatd.current_correction = 0 +psatd.v_galilean = 0. 0. 0.99498743710662 +warpx.abort_on_warning_threshold = medium diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_3d_galilean_psatd_current_correction b/Examples/Tests/nci_psatd_stability/inputs_test_3d_galilean_psatd_current_correction new file mode 100644 index 00000000000..8b596c9a633 --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_3d_galilean_psatd_current_correction @@ -0,0 +1,10 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho divE +psatd.current_correction = 1 +psatd.periodic_single_box_fft = 0 +psatd.update_with_rho = 0 +psatd.v_galilean = 0. 0. 0.99498743710662 +warpx.numprocs = 1 1 2 diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_3d_galilean_psatd_current_correction_psb b/Examples/Tests/nci_psatd_stability/inputs_test_3d_galilean_psatd_current_correction_psb new file mode 100644 index 00000000000..87ce1b7ed92 --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_3d_galilean_psatd_current_correction_psb @@ -0,0 +1,10 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho divE +psatd.current_correction = 1 +psatd.periodic_single_box_fft = 1 +psatd.v_galilean = 0. 0. 
0.99498743710662 +psatd.update_with_rho = 0 +warpx.numprocs = 1 1 1 diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_3d_uniform_plasma_multiJ b/Examples/Tests/nci_psatd_stability/inputs_test_3d_uniform_plasma_multiJ new file mode 100644 index 00000000000..70e9c5e992c --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_3d_uniform_plasma_multiJ @@ -0,0 +1,13 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +diag1.fields_to_plot = Bx By Bz divE Ex Ey Ez F G jx jy jz rho +psatd.J_in_time = constant +psatd.rho_in_time = constant +psatd.solution_type = first-order +warpx.abort_on_warning_threshold = medium +warpx.do_divb_cleaning = 1 +warpx.do_dive_cleaning = 1 +warpx.do_multi_J = 1 +warpx.do_multi_J_n_depositions = 1 diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_rz_galilean_psatd b/Examples/Tests/nci_psatd_stability/inputs_test_rz_galilean_psatd new file mode 100644 index 00000000000..30bcfc160cf --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_rz_galilean_psatd @@ -0,0 +1,8 @@ +# base input parameters +FILE = inputs_base_rz + +# test input parameters +electrons.random_theta = 0 +ions.random_theta = 0 +psatd.current_correction = 0 +warpx.abort_on_warning_threshold = medium diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_rz_galilean_psatd_current_correction b/Examples/Tests/nci_psatd_stability/inputs_test_rz_galilean_psatd_current_correction new file mode 100644 index 00000000000..378535e12bc --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_rz_galilean_psatd_current_correction @@ -0,0 +1,10 @@ +# base input parameters +FILE = inputs_base_rz + +# test input parameters +amr.blocking_factor = 32 +amr.max_grid_size = 32 +electrons.random_theta = 0 +ions.random_theta = 0 +psatd.current_correction = 1 +psatd.periodic_single_box_fft = 0 diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_rz_galilean_psatd_current_correction_psb b/Examples/Tests/nci_psatd_stability/inputs_test_rz_galilean_psatd_current_correction_psb new file mode 100644 index 00000000000..6eb35754a90 --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/inputs_test_rz_galilean_psatd_current_correction_psb @@ -0,0 +1,8 @@ +# base input parameters +FILE = inputs_base_rz + +# test input parameters +electrons.random_theta = 0 +ions.random_theta = 0 +psatd.current_correction = 1 +psatd.periodic_single_box_fft = 1 diff --git a/Examples/Tests/multi_j/inputs_rz b/Examples/Tests/nci_psatd_stability/inputs_test_rz_multiJ_psatd similarity index 97% rename from Examples/Tests/multi_j/inputs_rz rename to Examples/Tests/nci_psatd_stability/inputs_test_rz_multiJ_psatd index dd440d60667..5e263856256 100644 --- a/Examples/Tests/multi_j/inputs_rz +++ b/Examples/Tests/nci_psatd_stability/inputs_test_rz_multiJ_psatd @@ -28,6 +28,7 @@ warpx.moving_window_v = 1. warpx.n_rz_azimuthal_modes = 1 warpx.use_filter = 1 warpx.verbose = 1 +warpx.abort_on_warning_threshold = medium warpx.cfl = 1. #warpx.gamma_boost = 1. @@ -42,6 +43,7 @@ psatd.do_time_averaging = 1 # PSATD psatd.update_with_rho = 1 #psatd.v_galilean = 0. 0.
-0.9373391857121336 +psatd.J_in_time = linear # Particles diff --git a/Examples/Tests/nodal_electrostatic/CMakeLists.txt b/Examples/Tests/nodal_electrostatic/CMakeLists.txt new file mode 100644 index 00000000000..62627eb576a --- /dev/null +++ b/Examples/Tests/nodal_electrostatic/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_nodal_electrostatic_solver # name + 3 # dims + 1 # nprocs + OFF # eb + inputs_test_3d_nodal_electrostatic_solver # inputs + analysis.py # analysis + diags/diag1000010 # output + OFF # dependency +) diff --git a/Examples/Tests/nodal_electrostatic/analysis_3d.py b/Examples/Tests/nodal_electrostatic/analysis.py similarity index 100% rename from Examples/Tests/nodal_electrostatic/analysis_3d.py rename to Examples/Tests/nodal_electrostatic/analysis.py diff --git a/Examples/Tests/nodal_electrostatic/inputs_3d b/Examples/Tests/nodal_electrostatic/inputs_test_3d_nodal_electrostatic_solver similarity index 98% rename from Examples/Tests/nodal_electrostatic/inputs_3d rename to Examples/Tests/nodal_electrostatic/inputs_test_3d_nodal_electrostatic_solver index 91732a2c8ff..f1fd206eee3 100644 --- a/Examples/Tests/nodal_electrostatic/inputs_3d +++ b/Examples/Tests/nodal_electrostatic/inputs_test_3d_nodal_electrostatic_solver @@ -35,6 +35,7 @@ boundary.field_hi = PEC PEC PEC warpx.do_electrostatic = relativistic warpx.const_dt = dt warpx.grid_type = collocated +warpx.abort_on_warning_threshold = high algo.particle_pusher = vay algo.particle_shape = 3 diff --git a/Examples/Tests/nuclear_fusion/CMakeLists.txt b/Examples/Tests/nuclear_fusion/CMakeLists.txt new file mode 100644 index 00000000000..4ed47607c8d --- /dev/null +++ b/Examples/Tests/nuclear_fusion/CMakeLists.txt @@ -0,0 +1,68 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_proton_boron_fusion # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_proton_boron_fusion # inputs + analysis_proton_boron_fusion.py # analysis + diags/diag1000001 # output + OFF # dependency +) + +add_warpx_test( + test_3d_deuterium_deuterium_fusion # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_deuterium_deuterium_fusion # inputs + analysis_two_product_fusion.py # analysis + diags/diag1000001 # output + OFF # dependency +) + +add_warpx_test( + test_3d_deuterium_deuterium_fusion_intraspecies # name + 3 # dims + 1 # nprocs + OFF # eb + inputs_test_3d_deuterium_deuterium_fusion_intraspecies # inputs + analysis_deuterium_deuterium_3d_intraspecies.py # analysis + diags/diag1000010 # output + OFF # dependency +) + +add_warpx_test( + test_3d_deuterium_tritium_fusion # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_deuterium_tritium_fusion # inputs + analysis_two_product_fusion.py # analysis + diags/diag1000001 # output + OFF # dependency +) + +add_warpx_test( + test_3d_proton_boron_fusion # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_proton_boron_fusion # inputs + analysis_proton_boron_fusion.py # analysis + diags/diag1000001 # output + OFF # dependency +) + +add_warpx_test( + test_rz_deuterium_tritium_fusion # name + RZ # dims + 2 # nprocs + OFF # eb + inputs_test_rz_deuterium_tritium_fusion # inputs + analysis_two_product_fusion.py # analysis + diags/diag1000001 # output + OFF # dependency +) diff --git a/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py b/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py index 
b4f77bb9caa..25e898c05be 100755 --- a/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py +++ b/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py @@ -6,6 +6,7 @@ # License: BSD-3-Clause-LBNL import os +import re import sys import yt @@ -82,7 +83,8 @@ E_fusion_total = E_fusion + E_decay # Energy released during p + B -> 3*alpha ## Checks whether this is the 2D or the 3D test -is_2D = "2D" in sys.argv[1] +with open("./warpx_used_inputs") as warpx_used_inputs: + is_2D = re.search(r"geometry\.dims\s*=\s*2", warpx_used_inputs.read()) ## Some numerical parameters for this test size_x = 8 diff --git a/Examples/Tests/nuclear_fusion/inputs_proton_boron_2d b/Examples/Tests/nuclear_fusion/inputs_test_2d_proton_boron_fusion similarity index 100% rename from Examples/Tests/nuclear_fusion/inputs_proton_boron_2d rename to Examples/Tests/nuclear_fusion/inputs_test_2d_proton_boron_fusion diff --git a/Examples/Tests/nuclear_fusion/inputs_deuterium_deuterium_3d b/Examples/Tests/nuclear_fusion/inputs_test_3d_deuterium_deuterium_fusion similarity index 100% rename from Examples/Tests/nuclear_fusion/inputs_deuterium_deuterium_3d rename to Examples/Tests/nuclear_fusion/inputs_test_3d_deuterium_deuterium_fusion diff --git a/Examples/Tests/nuclear_fusion/inputs_deuterium_deuterium_3d_intraspecies b/Examples/Tests/nuclear_fusion/inputs_test_3d_deuterium_deuterium_fusion_intraspecies similarity index 100% rename from Examples/Tests/nuclear_fusion/inputs_deuterium_deuterium_3d_intraspecies rename to Examples/Tests/nuclear_fusion/inputs_test_3d_deuterium_deuterium_fusion_intraspecies diff --git a/Examples/Tests/nuclear_fusion/inputs_deuterium_tritium_3d b/Examples/Tests/nuclear_fusion/inputs_test_3d_deuterium_tritium_fusion similarity index 100% rename from Examples/Tests/nuclear_fusion/inputs_deuterium_tritium_3d rename to Examples/Tests/nuclear_fusion/inputs_test_3d_deuterium_tritium_fusion diff --git a/Examples/Tests/nuclear_fusion/inputs_proton_boron_3d b/Examples/Tests/nuclear_fusion/inputs_test_3d_proton_boron_fusion similarity index 100% rename from Examples/Tests/nuclear_fusion/inputs_proton_boron_3d rename to Examples/Tests/nuclear_fusion/inputs_test_3d_proton_boron_fusion diff --git a/Examples/Tests/nuclear_fusion/inputs_deuterium_tritium_rz b/Examples/Tests/nuclear_fusion/inputs_test_rz_deuterium_tritium_fusion similarity index 100% rename from Examples/Tests/nuclear_fusion/inputs_deuterium_tritium_rz rename to Examples/Tests/nuclear_fusion/inputs_test_rz_deuterium_tritium_fusion diff --git a/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt b/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt new file mode 100644 index 00000000000..ce5bed2c587 --- /dev/null +++ b/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt @@ -0,0 +1,24 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_1d_ohm_solver_em_modes_picmi # name + 1 # dims + 2 # nprocs + OFF # eb + "inputs_test_1d_ohm_solver_em_modes_picmi.py --test --dim 1 --bdir z" # inputs + analysis.py # analysis + diags/field_diag000250 # output + OFF # dependency +) + +add_warpx_test( + test_rz_ohm_solver_em_modes_picmi # name + RZ # dims + 2 # nprocs + OFF # eb + "inputs_test_rz_ohm_solver_em_modes_picmi.py --test" # inputs + analysis_rz.py # analysis + diags/diag1000100 # output + OFF # dependency +) diff --git a/Examples/Tests/ohm_solver_EM_modes/README.rst b/Examples/Tests/ohm_solver_em_modes/README similarity index 100% rename from
Examples/Tests/ohm_solver_EM_modes/README.rst rename to Examples/Tests/ohm_solver_em_modes/README diff --git a/Examples/Tests/ohm_solver_EM_modes/analysis.py b/Examples/Tests/ohm_solver_em_modes/analysis.py similarity index 100% rename from Examples/Tests/ohm_solver_EM_modes/analysis.py rename to Examples/Tests/ohm_solver_em_modes/analysis.py diff --git a/Examples/Tests/ohm_solver_EM_modes/analysis_rz.py b/Examples/Tests/ohm_solver_em_modes/analysis_rz.py similarity index 100% rename from Examples/Tests/ohm_solver_EM_modes/analysis_rz.py rename to Examples/Tests/ohm_solver_em_modes/analysis_rz.py diff --git a/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py b/Examples/Tests/ohm_solver_em_modes/inputs_test_1d_ohm_solver_em_modes_picmi.py similarity index 98% rename from Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py rename to Examples/Tests/ohm_solver_em_modes/inputs_test_1d_ohm_solver_em_modes_picmi.py index 11394029062..ac0c2369c0e 100644 --- a/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py +++ b/Examples/Tests/ohm_solver_em_modes/inputs_test_1d_ohm_solver_em_modes_picmi.py @@ -257,8 +257,6 @@ def setup_run(self): particle_diag = picmi.ParticleDiagnostic( name="field_diag", period=self.total_steps, - write_dir=".", - warpx_file_prefix="Python_ohms_law_solver_EM_modes_1d_plt", # warpx_format = 'openpmd', # warpx_openpmd_backend = 'h5' ) @@ -268,8 +266,6 @@ def setup_run(self): grid=self.grid, period=self.total_steps, data_list=["B", "E", "J_displacement"], - write_dir=".", - warpx_file_prefix="Python_ohms_law_solver_EM_modes_1d_plt", # warpx_format = 'openpmd', # warpx_openpmd_backend = 'h5' ) diff --git a/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py b/Examples/Tests/ohm_solver_em_modes/inputs_test_rz_ohm_solver_em_modes_picmi.py similarity index 98% rename from Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py rename to Examples/Tests/ohm_solver_em_modes/inputs_test_rz_ohm_solver_em_modes_picmi.py index ace91bad4d5..ba922dbdc9f 100644 --- a/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py +++ b/Examples/Tests/ohm_solver_em_modes/inputs_test_rz_ohm_solver_em_modes_picmi.py @@ -224,8 +224,6 @@ def setup_run(self): period=self.total_steps, species=[self.ions], data_list=["ux", "uy", "uz", "weighting"], - write_dir=".", - warpx_file_prefix="Python_ohms_law_solver_EM_modes_rz_plt", ) simulation.add_diagnostic(part_diag) diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt b/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt new file mode 100644 index 00000000000..e5017318f19 --- /dev/null +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_ohm_solver_landau_damping_picmi # name + 2 # dims + 2 # nprocs + OFF # eb + "inputs_test_2d_ohm_solver_landau_damping_picmi.py --test --dim 2 --temp_ratio 0.1" # inputs + analysis.py # analysis + diags/diag1000100 # output + OFF # dependency +) diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/README.rst b/Examples/Tests/ohm_solver_ion_Landau_damping/README similarity index 100% rename from Examples/Tests/ohm_solver_ion_Landau_damping/README.rst rename to Examples/Tests/ohm_solver_ion_Landau_damping/README diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/PICMI_inputs.py b/Examples/Tests/ohm_solver_ion_Landau_damping/inputs_test_2d_ohm_solver_landau_damping_picmi.py similarity index 98% rename from 
Examples/Tests/ohm_solver_ion_Landau_damping/PICMI_inputs.py rename to Examples/Tests/ohm_solver_ion_Landau_damping/inputs_test_2d_ohm_solver_landau_damping_picmi.py index 4f7c26bb403..7c1709d059f 100644 --- a/Examples/Tests/ohm_solver_ion_Landau_damping/PICMI_inputs.py +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/inputs_test_2d_ohm_solver_landau_damping_picmi.py @@ -219,19 +219,15 @@ def setup_run(self): particle_diag = picmi.ParticleDiagnostic( name="diag1", period=100, - write_dir=".", species=[self.ions], data_list=["ux", "uy", "uz", "x", "z", "weighting"], - warpx_file_prefix=f"Python_ohms_law_solver_landau_damping_{self.dim}d_plt", ) simulation.add_diagnostic(particle_diag) field_diag = picmi.FieldDiagnostic( name="diag1", grid=self.grid, period=100, - write_dir=".", data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], - warpx_file_prefix=f"Python_ohms_law_solver_landau_damping_{self.dim}d_plt", ) simulation.add_diagnostic(field_diag) diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt b/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt new file mode 100644 index 00000000000..a6c978ba3ef --- /dev/null +++ b/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_1d_ohm_solver_ion_beam_picmi # name + 1 # dims + 2 # nprocs + OFF # eb + "inputs_test_1d_ohm_solver_ion_beam_picmi.py --test --dim 1 --resonant" # inputs + analysis.py # analysis + diags/diag1002500 # output + OFF # dependency +) diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/README.rst b/Examples/Tests/ohm_solver_ion_beam_instability/README similarity index 100% rename from Examples/Tests/ohm_solver_ion_beam_instability/README.rst rename to Examples/Tests/ohm_solver_ion_beam_instability/README diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/PICMI_inputs.py b/Examples/Tests/ohm_solver_ion_beam_instability/inputs_test_1d_ohm_solver_ion_beam_picmi.py similarity index 98% rename from Examples/Tests/ohm_solver_ion_beam_instability/PICMI_inputs.py rename to Examples/Tests/ohm_solver_ion_beam_instability/inputs_test_1d_ohm_solver_ion_beam_picmi.py index 2558d70b4b8..19569a04e5b 100644 --- a/Examples/Tests/ohm_solver_ion_beam_instability/PICMI_inputs.py +++ b/Examples/Tests/ohm_solver_ion_beam_instability/inputs_test_1d_ohm_solver_ion_beam_picmi.py @@ -259,8 +259,6 @@ def setup_run(self): period=1250, species=[self.ions, self.beam_ions], data_list=["ux", "uy", "uz", "z", "weighting"], - write_dir=".", - warpx_file_prefix="Python_ohms_law_solver_ion_beam_1d_plt", ) simulation.add_diagnostic(part_diag) field_diag = picmi.FieldDiagnostic( @@ -268,8 +266,6 @@ def setup_run(self): grid=self.grid, period=1250, data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], - write_dir=".", - warpx_file_prefix="Python_ohms_law_solver_ion_beam_1d_plt", ) simulation.add_diagnostic(field_diag) diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/CMakeLists.txt b/Examples/Tests/ohm_solver_magnetic_reconnection/CMakeLists.txt new file mode 100644 index 00000000000..849d4c3b2a3 --- /dev/null +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_ohm_solver_magnetic_reconnection_picmi # name + 2 # dims + 2 # nprocs + OFF # eb + "inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py 
--test" # inputs + analysis.py # analysis + diags/diag1000020 # output + OFF # dependency +) diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/README.rst b/Examples/Tests/ohm_solver_magnetic_reconnection/README similarity index 100% rename from Examples/Tests/ohm_solver_magnetic_reconnection/README.rst rename to Examples/Tests/ohm_solver_magnetic_reconnection/README diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py b/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py similarity index 98% rename from Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py rename to Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py index b776c48f5ab..4f13c76e208 100644 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py @@ -247,10 +247,8 @@ def setup_run(self): particle_diag = picmi.ParticleDiagnostic( name="diag1", period=self.total_steps, - write_dir=".", species=[self.ions], data_list=["ux", "uy", "uz", "x", "z", "weighting"], - warpx_file_prefix="Python_ohms_law_solver_magnetic_reconnection_2d_plt", # warpx_format='openpmd', # warpx_openpmd_backend='h5', ) @@ -260,8 +258,6 @@ def setup_run(self): grid=self.grid, period=self.total_steps, data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez"], - write_dir=".", - warpx_file_prefix="Python_ohms_law_solver_magnetic_reconnection_2d_plt", # warpx_format='openpmd', # warpx_openpmd_backend='h5', ) diff --git a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt new file mode 100644 index 00000000000..1f921ae98b2 --- /dev/null +++ b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt @@ -0,0 +1,15 @@ +# Add tests (alphabetical order) ############################################## +# + +if(WarpX_FFT) + add_warpx_test( + test_3d_open_bc_poisson_solver # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_open_bc_poisson_solver # inputs + analysis.py # analysis + diags/diag1000001 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/openbc_poisson_solver/analysis.py b/Examples/Tests/open_bc_poisson_solver/analysis.py similarity index 100% rename from Examples/Tests/openbc_poisson_solver/analysis.py rename to Examples/Tests/open_bc_poisson_solver/analysis.py diff --git a/Examples/Tests/openbc_poisson_solver/inputs_3d b/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver similarity index 100% rename from Examples/Tests/openbc_poisson_solver/inputs_3d rename to Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver diff --git a/Examples/Tests/particle_boundary_interaction/CMakeLists.txt b/Examples/Tests/particle_boundary_interaction/CMakeLists.txt new file mode 100644 index 00000000000..b7517ef9bc4 --- /dev/null +++ b/Examples/Tests/particle_boundary_interaction/CMakeLists.txt @@ -0,0 +1,15 @@ +# Add tests (alphabetical order) ############################################## +# + +if(WarpX_EB) + add_warpx_test( + test_rz_particle_boundary_interaction_picmi # name + RZ # dims + 2 # nprocs + ON # eb + inputs_test_rz_particle_boundary_interaction_picmi.py # inputs + analysis.py # analysis + diags/diag1/ # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/particle_boundary_interaction/analysis.py b/Examples/Tests/particle_boundary_interaction/analysis.py index 
9d8baf774b7..b80cf4b52aa 100755 --- a/Examples/Tests/particle_boundary_interaction/analysis.py +++ b/Examples/Tests/particle_boundary_interaction/analysis.py @@ -24,7 +24,7 @@ test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") -ts = OpenPMDTimeSeries("./particle_boundary_interaction_plt") +ts = OpenPMDTimeSeries(filename) it = ts.iterations x, y, z = ts.get_particle(["x", "y", "z"], species="electrons", iteration=it[-1]) diff --git a/Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py b/Examples/Tests/particle_boundary_interaction/inputs_test_rz_particle_boundary_interaction_picmi.py similarity index 97% rename from Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py rename to Examples/Tests/particle_boundary_interaction/inputs_test_rz_particle_boundary_interaction_picmi.py index 7d33de6b5dd..4b491ac6873 100644 --- a/Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py +++ b/Examples/Tests/particle_boundary_interaction/inputs_test_rz_particle_boundary_interaction_picmi.py @@ -78,8 +78,6 @@ period=diagnostic_interval, data_list=["Er", "Ez", "phi", "rho", "rho_electrons"], warpx_format="openpmd", - write_dir=".", - warpx_file_prefix="particle_boundary_interaction_plt", ) part_diag = picmi.ParticleDiagnostic( @@ -87,8 +85,6 @@ period=diagnostic_interval, species=[electrons], warpx_format="openpmd", - write_dir=".", - warpx_file_prefix="particle_boundary_interaction_plt", ) ########################## diff --git a/Examples/Tests/particle_boundary_process/CMakeLists.txt b/Examples/Tests/particle_boundary_process/CMakeLists.txt new file mode 100644 index 00000000000..a674c72abe3 --- /dev/null +++ b/Examples/Tests/particle_boundary_process/CMakeLists.txt @@ -0,0 +1,26 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_particle_reflection_picmi # name + 2 # dims + 1 # nprocs + OFF # eb + inputs_test_2d_particle_reflection_picmi.py # inputs + analysis_reflection.py # analysis + diags/diag1000010 # output + OFF # dependency +) + +if(WarpX_EB) + add_warpx_test( + test_3d_particle_absorption # name + 3 # dims + 2 # nprocs + ON # eb + inputs_test_3d_particle_absorption # inputs + analysis_absorption.py # analysis + diags/diag1000060 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/particle_boundary_process/analysis_absorption.py b/Examples/Tests/particle_boundary_process/analysis_absorption.py index 47ef02937a7..fdde2622684 100755 --- a/Examples/Tests/particle_boundary_process/analysis_absorption.py +++ b/Examples/Tests/particle_boundary_process/analysis_absorption.py @@ -1,5 +1,7 @@ #!/usr/bin/env python3 +import sys + import yt # This test shoots a beam of electrons at cubic embedded boundary geometry @@ -9,11 +11,12 @@ # the problem domain yet. 
# all particles are still there -ds40 = yt.load("particle_absorption_plt000040") +ds40 = yt.load("diags/diag1000040") np40 = ds40.index.particle_headers["electrons"].num_particles assert np40 == 612 # all particles have been removed -ds60 = yt.load("particle_absorption_plt000060") +filename = sys.argv[1] +ds60 = yt.load(filename) np60 = ds60.index.particle_headers["electrons"].num_particles assert np60 == 0 diff --git a/Examples/Tests/particle_boundary_process/PICMI_inputs_reflection.py b/Examples/Tests/particle_boundary_process/inputs_test_2d_particle_reflection_picmi.py similarity index 100% rename from Examples/Tests/particle_boundary_process/PICMI_inputs_reflection.py rename to Examples/Tests/particle_boundary_process/inputs_test_2d_particle_reflection_picmi.py diff --git a/Examples/Tests/particle_boundary_process/inputs_absorption b/Examples/Tests/particle_boundary_process/inputs_test_3d_particle_absorption similarity index 100% rename from Examples/Tests/particle_boundary_process/inputs_absorption rename to Examples/Tests/particle_boundary_process/inputs_test_3d_particle_absorption diff --git a/Examples/Tests/particle_boundary_scrape/CMakeLists.txt b/Examples/Tests/particle_boundary_scrape/CMakeLists.txt new file mode 100644 index 00000000000..361f99bfb09 --- /dev/null +++ b/Examples/Tests/particle_boundary_scrape/CMakeLists.txt @@ -0,0 +1,28 @@ +# Add tests (alphabetical order) ############################################## +# + +if(WarpX_EB) + add_warpx_test( + test_3d_particle_scrape # name + 3 # dims + 2 # nprocs + ON # eb + inputs_test_3d_particle_scrape # inputs + analysis_scrape.py # analysis + diags/diag1000060 # output + OFF # dependency + ) +endif() + +if(WarpX_EB) + add_warpx_test( + test_3d_particle_scrape_picmi # name + 3 # dims + 2 # nprocs + ON # eb + inputs_test_3d_particle_scrape_picmi.py # inputs + analysis_scrape.py # analysis + diags/diag1000060 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/particle_boundary_scrape/analysis_scrape.py b/Examples/Tests/particle_boundary_scrape/analysis_scrape.py index cb737ebd5d6..1b3a97da228 100755 --- a/Examples/Tests/particle_boundary_scrape/analysis_scrape.py +++ b/Examples/Tests/particle_boundary_scrape/analysis_scrape.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 -from pathlib import Path + +import sys import yt @@ -11,19 +12,13 @@ # the problem domain yet. 
# all particles are still there -if Path("particle_scrape_plt000040").is_dir(): - filename = "particle_scrape_plt000040" -else: - filename = "Python_particle_scrape_plt000040" +filename = "diags/diag1000040" ds40 = yt.load(filename) np40 = ds40.index.particle_headers["electrons"].num_particles assert np40 == 612 # all particles have been removed -if Path("particle_scrape_plt000060").is_dir(): - filename = "particle_scrape_plt000060" -else: - filename = "Python_particle_scrape_plt000060" +filename = sys.argv[1] ds60 = yt.load(filename) np60 = ds60.index.particle_headers["electrons"].num_particles assert np60 == 0 diff --git a/Examples/Tests/particle_boundary_scrape/inputs_scrape b/Examples/Tests/particle_boundary_scrape/inputs_test_3d_particle_scrape similarity index 100% rename from Examples/Tests/particle_boundary_scrape/inputs_scrape rename to Examples/Tests/particle_boundary_scrape/inputs_test_3d_particle_scrape diff --git a/Examples/Tests/particle_boundary_scrape/PICMI_inputs_scrape.py b/Examples/Tests/particle_boundary_scrape/inputs_test_3d_particle_scrape_picmi.py similarity index 96% rename from Examples/Tests/particle_boundary_scrape/PICMI_inputs_scrape.py rename to Examples/Tests/particle_boundary_scrape/inputs_test_3d_particle_scrape_picmi.py index 02c22a4723d..1be71d4c397 100755 --- a/Examples/Tests/particle_boundary_scrape/PICMI_inputs_scrape.py +++ b/Examples/Tests/particle_boundary_scrape/inputs_test_3d_particle_scrape_picmi.py @@ -80,16 +80,12 @@ particle_diag = picmi.ParticleDiagnostic( name="diag1", period=diagnostic_intervals, - write_dir=".", - warpx_file_prefix="Python_particle_scrape_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, period=diagnostic_intervals, data_list=["Ex", "Ey", "Ez", "Bx", "By", "Bz"], - write_dir=".", - warpx_file_prefix="Python_particle_scrape_plt", ) ########################## diff --git a/Examples/Tests/particle_data_python/CMakeLists.txt b/Examples/Tests/particle_data_python/CMakeLists.txt new file mode 100644 index 00000000000..45bed4e9cf6 --- /dev/null +++ b/Examples/Tests/particle_data_python/CMakeLists.txt @@ -0,0 +1,35 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_particle_attr_access_picmi # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_particle_attr_access_picmi.py # inputs + analysis.py # analysis + diags/diag1000010 # output + OFF # dependency +) + +add_warpx_test( + test_2d_particle_attr_access_unique_picmi # name + 2 # dims + 2 # nprocs + OFF # eb + "inputs_test_2d_particle_attr_access_picmi.py --unique" # inputs + analysis.py # analysis + diags/diag1000010 # output + OFF # dependency +) + +add_warpx_test( + test_2d_prev_positions_picmi # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_prev_positions_picmi.py # inputs + analysis_default_regression.py # analysis + diags/diag1000010 # output + OFF # dependency +) diff --git a/Examples/Tests/particle_data_python/analysis_default_regression.py b/Examples/Tests/particle_data_python/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/particle_data_python/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/particle_data_python/PICMI_inputs_2d.py b/Examples/Tests/particle_data_python/inputs_test_2d_particle_attr_access_picmi.py similarity index 94% rename from Examples/Tests/particle_data_python/PICMI_inputs_2d.py rename to 
Examples/Tests/particle_data_python/inputs_test_2d_particle_attr_access_picmi.py index 4ef9e9b40ed..dbd29a43bc7 100755 --- a/Examples/Tests/particle_data_python/PICMI_inputs_2d.py +++ b/Examples/Tests/particle_data_python/inputs_test_2d_particle_attr_access_picmi.py @@ -75,16 +75,12 @@ particle_diag = picmi.ParticleDiagnostic( name="diag1", period=10, - write_dir=".", - warpx_file_prefix=f"Python_particle_attr_access_{'unique_' if args.unique else ''}plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, period=10, data_list=["phi"], - write_dir=".", - warpx_file_prefix=f"Python_particle_attr_access_{'unique_' if args.unique else ''}plt", ) ########################## diff --git a/Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py b/Examples/Tests/particle_data_python/inputs_test_2d_prev_positions_picmi.py similarity index 95% rename from Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py rename to Examples/Tests/particle_data_python/inputs_test_2d_prev_positions_picmi.py index 97a4619e314..2ad86ecea95 100755 --- a/Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py +++ b/Examples/Tests/particle_data_python/inputs_test_2d_prev_positions_picmi.py @@ -78,16 +78,12 @@ name="diag1", period=10, species=[electrons], - write_dir=".", - warpx_file_prefix="Python_prev_positions_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], period=10, grid=grid, - write_dir=".", - warpx_file_prefix="Python_prev_positions_plt", ) ########################## # simulation setup diff --git a/Examples/Tests/particle_fields_diags/CMakeLists.txt b/Examples/Tests/particle_fields_diags/CMakeLists.txt new file mode 100644 index 00000000000..b35ffe46713 --- /dev/null +++ b/Examples/Tests/particle_fields_diags/CMakeLists.txt @@ -0,0 +1,25 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_particle_fields_diags # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_particle_fields_diags # inputs + analysis_particle_diags.py # analysis + diags/diag1000200 # output + OFF # dependency +) + +# FIXME +#add_warpx_test( +# test_3d_particle_fields_diags_single_precision # name +# 3 # dims +# 2 # nprocs +# OFF # eb +# inputs_test_3d_particle_fields_diags # inputs +# analysis_particle_diags_single.py # analysis +# diags/diag1000200 # output +# OFF # dependency +#) diff --git a/Examples/Tests/particle_fields_diags/inputs b/Examples/Tests/particle_fields_diags/inputs_test_3d_particle_fields_diags similarity index 100% rename from Examples/Tests/particle_fields_diags/inputs rename to Examples/Tests/particle_fields_diags/inputs_test_3d_particle_fields_diags diff --git a/Examples/Tests/particle_pusher/CMakeLists.txt b/Examples/Tests/particle_pusher/CMakeLists.txt new file mode 100644 index 00000000000..583106014a5 --- /dev/null +++ b/Examples/Tests/particle_pusher/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_particle_pusher # name + 3 # dims + 1 # nprocs + OFF # eb + inputs_test_3d_particle_pusher # inputs + analysis.py # analysis + diags/diag1010000 # output + OFF # dependency +) diff --git a/Examples/Tests/particle_pusher/analysis_pusher.py b/Examples/Tests/particle_pusher/analysis.py similarity index 100% rename from Examples/Tests/particle_pusher/analysis_pusher.py rename to Examples/Tests/particle_pusher/analysis.py diff --git 
a/Examples/Tests/particle_pusher/inputs_3d b/Examples/Tests/particle_pusher/inputs_test_3d_particle_pusher similarity index 100% rename from Examples/Tests/particle_pusher/inputs_3d rename to Examples/Tests/particle_pusher/inputs_test_3d_particle_pusher diff --git a/Examples/Tests/particle_thermal_boundary/CMakeLists.txt b/Examples/Tests/particle_thermal_boundary/CMakeLists.txt new file mode 100644 index 00000000000..26478b59c07 --- /dev/null +++ b/Examples/Tests/particle_thermal_boundary/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_particle_thermal_boundary # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_particle_thermal_boundary # inputs + analysis.py # analysis + diags/diag1002000 # output + OFF # dependency +) diff --git a/Examples/Tests/particle_thermal_boundary/analysis_2d.py b/Examples/Tests/particle_thermal_boundary/analysis.py similarity index 100% rename from Examples/Tests/particle_thermal_boundary/analysis_2d.py rename to Examples/Tests/particle_thermal_boundary/analysis.py diff --git a/Examples/Tests/particle_thermal_boundary/inputs_2d b/Examples/Tests/particle_thermal_boundary/inputs_test_2d_particle_thermal_boundary similarity index 100% rename from Examples/Tests/particle_thermal_boundary/inputs_2d rename to Examples/Tests/particle_thermal_boundary/inputs_test_2d_particle_thermal_boundary diff --git a/Examples/Tests/particles_in_pml/CMakeLists.txt b/Examples/Tests/particles_in_pml/CMakeLists.txt new file mode 100644 index 00000000000..e8f1a13601d --- /dev/null +++ b/Examples/Tests/particles_in_pml/CMakeLists.txt @@ -0,0 +1,46 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_particles_in_pml # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_particles_in_pml # inputs + analysis_particles_in_pml.py # analysis + diags/diag1000180 # output + OFF # dependency +) + +add_warpx_test( + test_2d_particles_in_pml_mr # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_particles_in_pml_mr # inputs + analysis_particles_in_pml.py # analysis + diags/diag1000300 # output + OFF # dependency +) + +add_warpx_test( + test_3d_particles_in_pml # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_particles_in_pml # inputs + analysis_particles_in_pml.py # analysis + diags/diag1000120 # output + OFF # dependency +) + +add_warpx_test( + test_3d_particles_in_pml_mr # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_particles_in_pml_mr # inputs + analysis_particles_in_pml.py # analysis + diags/diag1000200 # output + OFF # dependency +) diff --git a/Examples/Tests/particles_in_pml/inputs_2d b/Examples/Tests/particles_in_pml/inputs_test_2d_particles_in_pml similarity index 100% rename from Examples/Tests/particles_in_pml/inputs_2d rename to Examples/Tests/particles_in_pml/inputs_test_2d_particles_in_pml diff --git a/Examples/Tests/particles_in_pml/inputs_mr_2d b/Examples/Tests/particles_in_pml/inputs_test_2d_particles_in_pml_mr similarity index 100% rename from Examples/Tests/particles_in_pml/inputs_mr_2d rename to Examples/Tests/particles_in_pml/inputs_test_2d_particles_in_pml_mr diff --git a/Examples/Tests/particles_in_pml/inputs_3d b/Examples/Tests/particles_in_pml/inputs_test_3d_particles_in_pml similarity index 100% rename from Examples/Tests/particles_in_pml/inputs_3d rename to Examples/Tests/particles_in_pml/inputs_test_3d_particles_in_pml diff --git 
a/Examples/Tests/particles_in_pml/inputs_mr_3d b/Examples/Tests/particles_in_pml/inputs_test_3d_particles_in_pml_mr similarity index 100% rename from Examples/Tests/particles_in_pml/inputs_mr_3d rename to Examples/Tests/particles_in_pml/inputs_test_3d_particles_in_pml_mr diff --git a/Examples/Tests/pass_mpi_communicator/CMakeLists.txt b/Examples/Tests/pass_mpi_communicator/CMakeLists.txt new file mode 100644 index 00000000000..f68986d363a --- /dev/null +++ b/Examples/Tests/pass_mpi_communicator/CMakeLists.txt @@ -0,0 +1,17 @@ +# Add tests (alphabetical order) ############################################## +# + +# TODO +# - Enable in pyAMReX (https://github.com/AMReX-Codes/pyamrex/issues/163) +# - Enable related lines in input script +# - Enable analysis script +add_warpx_test( + test_2d_pass_mpi_comm_picmi # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_pass_mpi_comm_picmi.py # inputs + OFF #analysis.py # analysis + OFF # output + OFF # dependency +) diff --git a/Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py b/Examples/Tests/pass_mpi_communicator/inputs_test_2d_pass_mpi_comm_picmi.py similarity index 99% rename from Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py rename to Examples/Tests/pass_mpi_communicator/inputs_test_2d_pass_mpi_comm_picmi.py index 55ebf64d8e6..200cea7be0f 100755 --- a/Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py +++ b/Examples/Tests/pass_mpi_communicator/inputs_test_2d_pass_mpi_comm_picmi.py @@ -91,7 +91,6 @@ grid=grid, period=diagnostic_intervals, data_list=["Ex", "Jx"], - write_dir=".", warpx_file_prefix=f"Python_pass_mpi_comm_plt{color + 1}_", ) diff --git a/Examples/Tests/pec/CMakeLists.txt b/Examples/Tests/pec/CMakeLists.txt new file mode 100644 index 00000000000..69c68ec5329 --- /dev/null +++ b/Examples/Tests/pec/CMakeLists.txt @@ -0,0 +1,35 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_pec_field # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_pec_field # inputs + analysis_pec.py # analysis + diags/diag1000125 # output + OFF # dependency +) + +add_warpx_test( + test_3d_pec_field_mr # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_pec_field_mr # inputs + analysis_pec_mr.py # analysis + diags/diag1000125 # output + OFF # dependency +) + +add_warpx_test( + test_3d_pec_particle # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_pec_particle # inputs + analysis_default_regression.py # analysis + diags/diag1000020 # output + OFF # dependency +) diff --git a/Examples/Tests/pec/analysis_default_regression.py b/Examples/Tests/pec/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/pec/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/pec/inputs_field_PEC_3d b/Examples/Tests/pec/inputs_test_3d_pec_field similarity index 100% rename from Examples/Tests/pec/inputs_field_PEC_3d rename to Examples/Tests/pec/inputs_test_3d_pec_field diff --git a/Examples/Tests/pec/inputs_field_PEC_mr_3d b/Examples/Tests/pec/inputs_test_3d_pec_field_mr similarity index 100% rename from Examples/Tests/pec/inputs_field_PEC_mr_3d rename to Examples/Tests/pec/inputs_test_3d_pec_field_mr diff --git a/Examples/Tests/pec/inputs_particle_PEC_3d b/Examples/Tests/pec/inputs_test_3d_pec_particle similarity index 100% rename from Examples/Tests/pec/inputs_particle_PEC_3d rename to 
Examples/Tests/pec/inputs_test_3d_pec_particle diff --git a/Examples/Tests/photon_pusher/CMakeLists.txt b/Examples/Tests/photon_pusher/CMakeLists.txt new file mode 100644 index 00000000000..491906e0466 --- /dev/null +++ b/Examples/Tests/photon_pusher/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_photon_pusher # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_photon_pusher # inputs + analysis.py # analysis + diags/diag1000050 # output + OFF # dependency +) diff --git a/Examples/Tests/photon_pusher/analysis_photon_pusher.py b/Examples/Tests/photon_pusher/analysis.py similarity index 100% rename from Examples/Tests/photon_pusher/analysis_photon_pusher.py rename to Examples/Tests/photon_pusher/analysis.py diff --git a/Examples/Tests/photon_pusher/inputs_3d b/Examples/Tests/photon_pusher/inputs_test_3d_photon_pusher similarity index 100% rename from Examples/Tests/photon_pusher/inputs_3d rename to Examples/Tests/photon_pusher/inputs_test_3d_photon_pusher diff --git a/Examples/Tests/plasma_lens/CMakeLists.txt b/Examples/Tests/plasma_lens/CMakeLists.txt new file mode 100644 index 00000000000..cdba552db9e --- /dev/null +++ b/Examples/Tests/plasma_lens/CMakeLists.txt @@ -0,0 +1,57 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_plasma_lens # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_plasma_lens # inputs + analysis.py # analysis + diags/diag1000084 # output + OFF # dependency +) + +add_warpx_test( + test_3d_plasma_lens_boosted # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_plasma_lens_boosted # inputs + analysis.py # analysis + diags/diag1000084 # output + OFF # dependency +) + +add_warpx_test( + test_3d_plasma_lens_hard_edged # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_plasma_lens_hard_edged # inputs + analysis.py # analysis + diags/diag1000084 # output + OFF # dependency +) + +add_warpx_test( + test_3d_plasma_lens_picmi # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_plasma_lens_picmi.py # inputs + analysis.py # analysis + diags/diag1000084 # output + OFF # dependency +) + +add_warpx_test( + test_3d_plasma_lens_short # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_plasma_lens_short # inputs + analysis.py # analysis + diags/diag1000084 # output + OFF # dependency +) diff --git a/Examples/Tests/plasma_lens/analysis.py b/Examples/Tests/plasma_lens/analysis.py index 8cbbe86c927..11e2a084ac5 100755 --- a/Examples/Tests/plasma_lens/analysis.py +++ b/Examples/Tests/plasma_lens/analysis.py @@ -16,6 +16,7 @@ """ import os +import re import sys import numpy as np @@ -194,10 +195,8 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): "error in y particle velocity" ) +# The PICMI and native input versions run the same test, so +# their results are compared to the same benchmark file test_name = os.path.split(os.getcwd())[1] -# The PICMI and native input versions of `inputs_3d` run the same test, so -# their results are compared to the same benchmark file. 
-if test_name == "Python_plasma_lens": - test_name = "Plasma_lens" - +test_name = re.sub("_picmi", "", test_name) checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/plasma_lens/inputs_3d b/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens similarity index 100% rename from Examples/Tests/plasma_lens/inputs_3d rename to Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens diff --git a/Examples/Tests/plasma_lens/inputs_boosted_3d b/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted similarity index 100% rename from Examples/Tests/plasma_lens/inputs_boosted_3d rename to Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted diff --git a/Examples/Tests/plasma_lens/inputs_lattice_3d b/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_hard_edged similarity index 100% rename from Examples/Tests/plasma_lens/inputs_lattice_3d rename to Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_hard_edged diff --git a/Examples/Tests/plasma_lens/PICMI_inputs_3d.py b/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_picmi.py similarity index 94% rename from Examples/Tests/plasma_lens/PICMI_inputs_3d.py rename to Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_picmi.py index 32b2ab3abf7..b9672e17e1a 100644 --- a/Examples/Tests/plasma_lens/PICMI_inputs_3d.py +++ b/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_picmi.py @@ -67,8 +67,6 @@ period=max_steps, species=[electrons], data_list=["ux", "uy", "uz", "x", "y", "z"], - write_dir=".", - warpx_file_prefix="Python_plasma_lens_plt", ) field_diag1 = picmi.FieldDiagnostic( @@ -76,8 +74,6 @@ grid=grid, period=max_steps, data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], - write_dir=".", - warpx_file_prefix="Python_plasma_lens_plt", ) # Set up simulation sim = picmi.Simulation( diff --git a/Examples/Tests/plasma_lens/inputs_short_3d b/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_short similarity index 100% rename from Examples/Tests/plasma_lens/inputs_short_3d rename to Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_short diff --git a/Examples/Tests/pml/CMakeLists.txt b/Examples/Tests/pml/CMakeLists.txt new file mode 100644 index 00000000000..92847dfff24 --- /dev/null +++ b/Examples/Tests/pml/CMakeLists.txt @@ -0,0 +1,100 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_pml_x_ckc # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_pml_x_ckc # inputs + analysis_pml_ckc.py # analysis + diags/diag1000300 # output + OFF # dependency +) + +if(WarpX_FFT) + add_warpx_test( + test_2d_pml_x_galilean # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_pml_x_galilean # inputs + analysis_pml_psatd.py # analysis + diags/diag1000300 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_2d_pml_x_psatd # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_pml_x_psatd # inputs + analysis_pml_psatd.py # analysis + diags/diag1000300 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_2d_pml_x_psatd_restart # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_pml_x_psatd_restart # inputs + analysis_default_restart.py # analysis + diags/diag1000300 # output + test_2d_pml_x_psatd # dependency + ) +endif() + +add_warpx_test( + test_2d_pml_x_yee # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_pml_x_yee # inputs + analysis_pml_yee.py # analysis + diags/diag1000300 # output + OFF # dependency +) + +add_warpx_test( + 
test_2d_pml_x_yee_restart # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_pml_x_yee_restart # inputs + analysis_default_restart.py # analysis + diags/diag1000300 # output + test_2d_pml_x_yee # dependency +) + +if(WarpX_FFT) + add_warpx_test( + test_3d_pml_psatd_dive_divb_cleaning # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_pml_psatd_dive_divb_cleaning # inputs + analysis_default_regression.py # analysis + diags/diag1000100 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_rz_pml_psatd # name + RZ # dims + 2 # nprocs + OFF # eb + inputs_test_rz_pml_psatd # inputs + analysis_pml_psatd_rz.py # analysis + diags/diag1000500 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/pml/analysis_default_regression.py b/Examples/Tests/pml/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/pml/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/pml/analysis_default_restart.py b/Examples/Tests/pml/analysis_default_restart.py new file mode 120000 index 00000000000..0459986eebc --- /dev/null +++ b/Examples/Tests/pml/analysis_default_restart.py @@ -0,0 +1 @@ +../../analysis_default_restart.py \ No newline at end of file diff --git a/Examples/Tests/pml/analysis_pml_psatd.py b/Examples/Tests/pml/analysis_pml_psatd.py index de2f48810e4..00b867857f9 100755 --- a/Examples/Tests/pml/analysis_pml_psatd.py +++ b/Examples/Tests/pml/analysis_pml_psatd.py @@ -21,14 +21,14 @@ filename = sys.argv[1] -galilean = True if re.search("galilean", filename) else False +cwd = os.getcwd() +filename_init = os.path.join(cwd, "diags/diag1000050") +galilean = True if re.search("galilean", cwd) else False # Initial laser energy (at iteration 50) if galilean: - filename_init = "pml_x_galilean_plt000050" energy_start = 4.439376199524034e-08 else: - filename_init = "pml_x_psatd_plt000050" energy_start = 7.282940107273505e-08 # Check consistency of field energy diagnostics with initial energy above @@ -75,12 +75,5 @@ assert reflectivity < reflectivity_max -# Check restart data v. original data -sys.path.insert(0, "../../../../warpx/Examples/") -from analysis_default_restart import check_restart - -if not galilean: - check_restart(filename) - test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/pml/analysis_pml_yee.py b/Examples/Tests/pml/analysis_pml_yee.py index 962036bad0e..a24854af095 100755 --- a/Examples/Tests/pml/analysis_pml_yee.py +++ b/Examples/Tests/pml/analysis_pml_yee.py @@ -57,11 +57,5 @@ assert error_rel < tolerance_rel -# Check restart data v. 
original data -sys.path.insert(0, "../../../../warpx/Examples/") -from analysis_default_restart import check_restart - -check_restart(filename) - test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/pml/inputs_2d b/Examples/Tests/pml/inputs_base_2d similarity index 100% rename from Examples/Tests/pml/inputs_2d rename to Examples/Tests/pml/inputs_base_2d diff --git a/Examples/Tests/pml/inputs_test_2d_pml_x_ckc b/Examples/Tests/pml/inputs_test_2d_pml_x_ckc new file mode 100644 index 00000000000..f686674ae14 --- /dev/null +++ b/Examples/Tests/pml/inputs_test_2d_pml_x_ckc @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_base_2d + +# test input parameters +algo.maxwell_solver = ckc diff --git a/Examples/Tests/pml/inputs_test_2d_pml_x_galilean b/Examples/Tests/pml/inputs_test_2d_pml_x_galilean new file mode 100644 index 00000000000..34a9081a181 --- /dev/null +++ b/Examples/Tests/pml/inputs_test_2d_pml_x_galilean @@ -0,0 +1,14 @@ +# base input parameters +FILE = inputs_base_2d + +# test input parameters +algo.maxwell_solver = psatd +diag1.fields_to_plot = Ex Ey Ez Bx By Bz rho divE +psatd.current_correction = 0 +psatd.update_with_rho = 1 +psatd.v_galilean = 0. 0. 0.99 +warpx.abort_on_warning_threshold = medium +warpx.cfl = 0.7071067811865475 +warpx.do_pml_divb_cleaning = 1 +warpx.do_pml_dive_cleaning = 1 +warpx.grid_type = collocated diff --git a/Examples/Tests/pml/inputs_test_2d_pml_x_psatd b/Examples/Tests/pml/inputs_test_2d_pml_x_psatd new file mode 100644 index 00000000000..191d5774530 --- /dev/null +++ b/Examples/Tests/pml/inputs_test_2d_pml_x_psatd @@ -0,0 +1,12 @@ +# base input parameters +FILE = inputs_base_2d + +# test input parameters +algo.maxwell_solver = psatd +diag1.fields_to_plot = Ex Ey Ez Bx By Bz rho divE +warpx.abort_on_warning_threshold = medium +warpx.cfl = 0.7071067811865475 +warpx.do_pml_divb_cleaning = 0 +warpx.do_pml_dive_cleaning = 0 +psatd.current_correction = 0 +psatd.update_with_rho = 1 diff --git a/Examples/Tests/pml/inputs_test_2d_pml_x_psatd_restart b/Examples/Tests/pml/inputs_test_2d_pml_x_psatd_restart new file mode 100644 index 00000000000..44b9be4494a --- /dev/null +++ b/Examples/Tests/pml/inputs_test_2d_pml_x_psatd_restart @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_test_2d_pml_x_psatd + +# test input parameters +amr.restart = "../test_2d_pml_x_psatd/diags/chk000150" diff --git a/Examples/Tests/pml/inputs_test_2d_pml_x_yee b/Examples/Tests/pml/inputs_test_2d_pml_x_yee new file mode 100644 index 00000000000..390cf079c16 --- /dev/null +++ b/Examples/Tests/pml/inputs_test_2d_pml_x_yee @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_base_2d + +# test input parameters +algo.maxwell_solver = yee diff --git a/Examples/Tests/pml/inputs_test_2d_pml_x_yee_restart b/Examples/Tests/pml/inputs_test_2d_pml_x_yee_restart new file mode 100644 index 00000000000..b626e3aa662 --- /dev/null +++ b/Examples/Tests/pml/inputs_test_2d_pml_x_yee_restart @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_test_2d_pml_x_yee + +# test input parameters +amr.restart = "../test_2d_pml_x_yee/diags/chk000150" diff --git a/Examples/Tests/pml/inputs_3d b/Examples/Tests/pml/inputs_test_3d_pml_psatd_dive_divb_cleaning similarity index 94% rename from Examples/Tests/pml/inputs_3d rename to Examples/Tests/pml/inputs_test_3d_pml_psatd_dive_divb_cleaning index e152afc7cc7..4e8c3c78329 100644 --- a/Examples/Tests/pml/inputs_3d +++ b/Examples/Tests/pml/inputs_test_3d_pml_psatd_dive_divb_cleaning @@ -14,12 
+14,15 @@ boundary.field_lo = pml pml pml boundary.field_hi = pml pml pml # Numerical parameters +ablastr.fillboundary_always_sync = 1 +warpx.abort_on_warning_threshold = medium warpx.cfl = 1.0 warpx.grid_type = staggered warpx.do_dive_cleaning = 1 warpx.do_divb_cleaning = 1 warpx.do_pml_dive_cleaning = 1 warpx.do_pml_divb_cleaning = 1 +warpx.do_similar_dm_pml = 0 warpx.use_filter = 1 warpx.verbose = 1 diff --git a/Examples/Tests/pml/inputs_rz b/Examples/Tests/pml/inputs_test_rz_pml_psatd similarity index 93% rename from Examples/Tests/pml/inputs_rz rename to Examples/Tests/pml/inputs_test_rz_pml_psatd index f5e23fe0399..87b4d7a5b3f 100644 --- a/Examples/Tests/pml/inputs_rz +++ b/Examples/Tests/pml/inputs_test_rz_pml_psatd @@ -25,8 +25,11 @@ warpx.do_pml_in_domain = 0 ############ NUMERICS ########### ################################# algo.maxwell_solver = psatd +warpx.abort_on_warning_threshold = medium +warpx.cfl = 0.7 warpx.use_filter = 0 algo.particle_shape = 1 +psatd.current_correction = 0 ################################# ############ PARTICLE ########### diff --git a/Examples/Tests/point_of_contact_eb/CMakeLists.txt b/Examples/Tests/point_of_contact_eb/CMakeLists.txt new file mode 100644 index 00000000000..25bf4b977de --- /dev/null +++ b/Examples/Tests/point_of_contact_eb/CMakeLists.txt @@ -0,0 +1,28 @@ +# Add tests (alphabetical order) ############################################## +# + +if(WarpX_EB) + add_warpx_test( + test_3d_point_of_contact_eb # name + 3 # dims + 2 # nprocs + ON # eb + inputs_test_3d_point_of_contact_eb # inputs + analysis.py # analysis + diags/diag1/ # output + OFF # dependency + ) +endif() + +if(WarpX_EB) + add_warpx_test( + test_rz_point_of_contact_eb # name + RZ # dims + 2 # nprocs + ON # eb + inputs_test_rz_point_of_contact_eb # inputs + analysis.py # analysis + diags/diag1/ # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/point_of_contact_EB/analysis.py b/Examples/Tests/point_of_contact_eb/analysis.py similarity index 100% rename from Examples/Tests/point_of_contact_EB/analysis.py rename to Examples/Tests/point_of_contact_eb/analysis.py diff --git a/Examples/Tests/point_of_contact_EB/inputs_3d b/Examples/Tests/point_of_contact_eb/inputs_test_3d_point_of_contact_eb similarity index 100% rename from Examples/Tests/point_of_contact_EB/inputs_3d rename to Examples/Tests/point_of_contact_eb/inputs_test_3d_point_of_contact_eb diff --git a/Examples/Tests/point_of_contact_EB/inputs_rz b/Examples/Tests/point_of_contact_eb/inputs_test_rz_point_of_contact_eb similarity index 100% rename from Examples/Tests/point_of_contact_EB/inputs_rz rename to Examples/Tests/point_of_contact_eb/inputs_test_rz_point_of_contact_eb diff --git a/Examples/Tests/projection_divb_cleaner/CMakeLists.txt b/Examples/Tests/projection_divb_cleaner/CMakeLists.txt new file mode 100644 index 00000000000..91dd6bdc592 --- /dev/null +++ b/Examples/Tests/projection_divb_cleaner/CMakeLists.txt @@ -0,0 +1,35 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_projection_divb_cleaner_callback_picmi # name + 3 # dims + 1 # nprocs + OFF # eb + inputs_test_3d_projection_divb_cleaner_callback_picmi.py # inputs + analysis_default_regression.py # analysis + diags/diag1000001 # output + OFF # dependency +) + +add_warpx_test( + test_3d_projection_divb_cleaner_picmi # name + 3 # dims + 1 # nprocs + OFF # eb + inputs_test_3d_projection_divb_cleaner_picmi.py # inputs + analysis_default_regression.py # analysis + diags/diag1000001 # 
output + OFF # dependency +) + +add_warpx_test( + test_rz_projection_divb_cleaner # name + RZ # dims + 1 # nprocs + OFF # eb + inputs_test_rz_projection_divb_cleaner # inputs + analysis.py # analysis + diags/diag1000001 # output + OFF # dependency +) diff --git a/Examples/Tests/projection_divb_cleaner/analysis_rz.py b/Examples/Tests/projection_divb_cleaner/analysis.py similarity index 100% rename from Examples/Tests/projection_divb_cleaner/analysis_rz.py rename to Examples/Tests/projection_divb_cleaner/analysis.py diff --git a/Examples/Tests/projection_divb_cleaner/analysis_default_regression.py b/Examples/Tests/projection_divb_cleaner/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/projection_divb_cleaner/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/projection_divb_cleaner/PICMI_inputs_3D_pyload.py b/Examples/Tests/projection_divb_cleaner/inputs_test_3d_projection_divb_cleaner_callback_picmi.py similarity index 98% rename from Examples/Tests/projection_divb_cleaner/PICMI_inputs_3D_pyload.py rename to Examples/Tests/projection_divb_cleaner/inputs_test_3d_projection_divb_cleaner_callback_picmi.py index 2e8474af01d..e6eb7ecf904 100644 --- a/Examples/Tests/projection_divb_cleaner/PICMI_inputs_3D_pyload.py +++ b/Examples/Tests/projection_divb_cleaner/inputs_test_3d_projection_divb_cleaner_callback_picmi.py @@ -178,12 +178,10 @@ def __init__(self): ####################################################################### field_diag = picmi.FieldDiagnostic( - name="field_diag", + name="diag1", grid=self.grid, period=self.diag_steps, data_list=["B"], - write_dir=".", - warpx_file_prefix="Python_projection_divb_cleaner_callback_3d_plt", warpx_format="plotfile", ) simulation.add_diagnostic(field_diag) diff --git a/Examples/Tests/projection_divb_cleaner/PICMI_inputs_3d.py b/Examples/Tests/projection_divb_cleaner/inputs_test_3d_projection_divb_cleaner_picmi.py similarity index 95% rename from Examples/Tests/projection_divb_cleaner/PICMI_inputs_3d.py rename to Examples/Tests/projection_divb_cleaner/inputs_test_3d_projection_divb_cleaner_picmi.py index 8769a74dde3..f4347f30e56 100644 --- a/Examples/Tests/projection_divb_cleaner/PICMI_inputs_3d.py +++ b/Examples/Tests/projection_divb_cleaner/inputs_test_3d_projection_divb_cleaner_picmi.py @@ -69,8 +69,6 @@ grid=grid, period=1, data_list=["Bx", "By", "Bz"], - write_dir=".", - warpx_file_prefix="Python_projection_divb_cleaner_3d_plt", warpx_plot_raw_fields=True, warpx_plot_raw_fields_guards=True, ) @@ -107,7 +105,7 @@ ##### SIMULATION ANALYSIS ###### ################################# -filename = "Python_projection_divb_cleaner_3d_plt000001" +filename = "diags/diag1000001" ds = yt.load(filename) grid0 = ds.index.grids[0] diff --git a/Examples/Tests/projection_divb_cleaner/inputs_rz b/Examples/Tests/projection_divb_cleaner/inputs_test_rz_projection_divb_cleaner similarity index 96% rename from Examples/Tests/projection_divb_cleaner/inputs_rz rename to Examples/Tests/projection_divb_cleaner/inputs_test_rz_projection_divb_cleaner index 86e12cd39d1..3e8f69ee411 100644 --- a/Examples/Tests/projection_divb_cleaner/inputs_rz +++ b/Examples/Tests/projection_divb_cleaner/inputs_test_rz_projection_divb_cleaner @@ -45,7 +45,6 @@ diagnostics.diags_names = diag1 diag1.intervals = 1 diag1.diag_type = Full diag1.fields_to_plot = Br Bt Bz -diag1.file_prefix= projection_divb_cleaner_rz_plt diag1.plot_raw_fields = 
true diag1.plot_raw_fields_guards = true diag1.format = plotfile diff --git a/Examples/Tests/python_wrappers/CMakeLists.txt b/Examples/Tests/python_wrappers/CMakeLists.txt new file mode 100644 index 00000000000..83fc6e16f7d --- /dev/null +++ b/Examples/Tests/python_wrappers/CMakeLists.txt @@ -0,0 +1,15 @@ +# Add tests (alphabetical order) ############################################## +# + +if(WarpX_FFT) + add_warpx_test( + test_2d_python_wrappers_picmi # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_python_wrappers_picmi.py # inputs + analysis_default_regression.py # analysis + diags/diag1000100 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/python_wrappers/analysis_default_regression.py b/Examples/Tests/python_wrappers/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/python_wrappers/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/python_wrappers/PICMI_inputs_2d.py b/Examples/Tests/python_wrappers/inputs_test_2d_python_wrappers_picmi.py similarity index 99% rename from Examples/Tests/python_wrappers/PICMI_inputs_2d.py rename to Examples/Tests/python_wrappers/inputs_test_2d_python_wrappers_picmi.py index c3aa9eac8b0..66917b4146b 100755 --- a/Examples/Tests/python_wrappers/PICMI_inputs_2d.py +++ b/Examples/Tests/python_wrappers/inputs_test_2d_python_wrappers_picmi.py @@ -71,16 +71,12 @@ particle_diag = picmi.ParticleDiagnostic( name="diag1", period=10, - write_dir=".", - warpx_file_prefix="Python_wrappers_plt", data_list=diag_field_list, ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, period=10, - write_dir=".", - warpx_file_prefix="Python_wrappers_plt", data_list=diag_field_list, ) diff --git a/Examples/Tests/qed/CMakeLists.txt b/Examples/Tests/qed/CMakeLists.txt new file mode 100644 index 00000000000..77690642f07 --- /dev/null +++ b/Examples/Tests/qed/CMakeLists.txt @@ -0,0 +1,112 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_qed_breit_wheeler # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_qed_breit_wheeler # inputs + analysis_breit_wheeler_yt.py # analysis + diags/diag1000002 # output + OFF # dependency +) + +add_warpx_test( + test_2d_qed_breit_wheeler_opmd # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_qed_breit_wheeler_opmd # inputs + analysis_breit_wheeler_opmd.py # analysis + diags/diag1/ # output + OFF # dependency +) + +add_warpx_test( + test_2d_qed_quantum_sync # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_qed_quantum_sync # inputs + analysis_quantum_sync.py # analysis + diags/diag1000002 # output + OFF # dependency +) + +add_warpx_test( + test_3d_qed_breit_wheeler # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_qed_breit_wheeler # inputs + analysis_breit_wheeler_yt.py # analysis + diags/diag1000002 # output + OFF # dependency +) + +add_warpx_test( + test_3d_qed_breit_wheeler_opmd # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_qed_breit_wheeler_opmd # inputs + analysis_breit_wheeler_opmd.py # analysis + diags/diag1/ # output + OFF # dependency +) + +add_warpx_test( + test_3d_qed_quantum_sync # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_qed_quantum_sync # inputs + analysis_quantum_sync.py # analysis + diags/diag1000002 # output + OFF # dependency +) + +add_warpx_test( + test_3d_qed_schwinger_1 # name + 3 # dims + 2 # nprocs + OFF # eb + 
inputs_test_3d_qed_schwinger_1 # inputs + analysis_schwinger.py # analysis + diags/diag1000001 # output + OFF # dependency +) + +add_warpx_test( + test_3d_qed_schwinger_2 # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_qed_schwinger_2 # inputs + analysis_schwinger.py # analysis + diags/diag1000001 # output + OFF # dependency +) + +add_warpx_test( + test_3d_qed_schwinger_3 # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_qed_schwinger_3 # inputs + analysis_schwinger.py # analysis + diags/diag1000001 # output + OFF # dependency +) + +add_warpx_test( + test_3d_qed_schwinger_4 # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_qed_schwinger_4 # inputs + analysis_schwinger.py # analysis + diags/diag1000001 # output + OFF # dependency +) diff --git a/Examples/Tests/qed/breit_wheeler/analysis_core.py b/Examples/Tests/qed/analysis_breit_wheeler_core.py similarity index 100% rename from Examples/Tests/qed/breit_wheeler/analysis_core.py rename to Examples/Tests/qed/analysis_breit_wheeler_core.py diff --git a/Examples/Tests/qed/breit_wheeler/analysis_opmd.py b/Examples/Tests/qed/analysis_breit_wheeler_opmd.py similarity index 95% rename from Examples/Tests/qed/breit_wheeler/analysis_opmd.py rename to Examples/Tests/qed/analysis_breit_wheeler_opmd.py index 21b1024a665..25547eda438 100755 --- a/Examples/Tests/qed/breit_wheeler/analysis_opmd.py +++ b/Examples/Tests/qed/analysis_breit_wheeler_opmd.py @@ -9,7 +9,7 @@ import sys -import analysis_core as ac +import analysis_breit_wheeler_core as ac import openpmd_api as io # sys.path.insert(1, '../../../../warpx/Regression/Checksum/') @@ -17,7 +17,7 @@ # This script is a frontend for the analysis routines -# in analysis_core.py (please refer to this file for +# in analysis_breit_wheeler_core.py (please refer to this file for # a full description). It reads output files in openPMD # format and extracts the data needed for # the analysis routines. diff --git a/Examples/Tests/qed/breit_wheeler/analysis_yt.py b/Examples/Tests/qed/analysis_breit_wheeler_yt.py similarity index 94% rename from Examples/Tests/qed/breit_wheeler/analysis_yt.py rename to Examples/Tests/qed/analysis_breit_wheeler_yt.py index dbba6bfb56a..9836e3e8894 100755 --- a/Examples/Tests/qed/breit_wheeler/analysis_yt.py +++ b/Examples/Tests/qed/analysis_breit_wheeler_yt.py @@ -13,11 +13,11 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import analysis_core as ac +import analysis_breit_wheeler_core as ac import checksumAPI # This script is a frontend for the analysis routines -# in analysis_core.py (please refer to this file for +# in analysis_breit_wheeler_core.py (please refer to this file for # a full description). It reads output files in yt # format and extracts the data needed for # the analysis routines. 
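The hunks that follow rename several per-test analysis scripts and switch them from hard-coded benchmark names to names derived from the test's working directory. A minimal sketch of that convention, assuming each test executes inside a directory named after itself (the helper function and its name are hypothetical, not part of this patch; the two idioms it combines appear verbatim in the renamed scripts below):

import os
import re

def benchmark_name_from_cwd():
    # The last path component of the working directory is the test name,
    # e.g. ".../test_3d_qed_schwinger_1" -> "test_3d_qed_schwinger_1".
    test_name = os.path.split(os.getcwd())[1]
    # PICMI variants are compared against the benchmark of their
    # native-input counterpart, so the "_picmi" suffix is stripped.
    return re.sub("_picmi", "", test_name)
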
diff --git a/Examples/Tests/qed/quantum_synchrotron/analysis.py b/Examples/Tests/qed/analysis_quantum_sync.py similarity index 100% rename from Examples/Tests/qed/quantum_synchrotron/analysis.py rename to Examples/Tests/qed/analysis_quantum_sync.py diff --git a/Examples/Tests/qed/schwinger/analysis_schwinger.py b/Examples/Tests/qed/analysis_schwinger.py similarity index 97% rename from Examples/Tests/qed/schwinger/analysis_schwinger.py rename to Examples/Tests/qed/analysis_schwinger.py index 4b320cc267a..30a25e6a956 100755 --- a/Examples/Tests/qed/schwinger/analysis_schwinger.py +++ b/Examples/Tests/qed/analysis_schwinger.py @@ -41,7 +41,8 @@ Bz_test = 0.0 # Find which test we are doing -test_number = re.search("qed_schwinger([1234])", filename).group(1) +test_name = os.path.split(os.getcwd())[1] +test_number = re.search("qed_schwinger_([1234])", test_name).group(1) if test_number == "1": # First Schwinger test with "weak" EM field. No pair should be created. Ex_test = 1.0e16 diff --git a/Examples/Tests/qed/breit_wheeler/inputs_2d b/Examples/Tests/qed/inputs_base_2d_breit_wheeler similarity index 99% rename from Examples/Tests/qed/breit_wheeler/inputs_2d rename to Examples/Tests/qed/inputs_base_2d_breit_wheeler index 857b3243ac6..201520966c2 100644 --- a/Examples/Tests/qed/breit_wheeler/inputs_2d +++ b/Examples/Tests/qed/inputs_base_2d_breit_wheeler @@ -28,6 +28,7 @@ warpx.do_dive_cleaning = 0 warpx.use_filter = 1 warpx.cfl = 1. # if 1., the time step is set to its CFL limit warpx.serialize_initial_conditions = 1 +warpx.abort_on_warning_threshold = high # Order of particle shape factors algo.particle_shape = 3 diff --git a/Examples/Tests/qed/breit_wheeler/inputs_3d b/Examples/Tests/qed/inputs_base_3d_breit_wheeler similarity index 100% rename from Examples/Tests/qed/breit_wheeler/inputs_3d rename to Examples/Tests/qed/inputs_base_3d_breit_wheeler diff --git a/Examples/Tests/qed/schwinger/inputs_3d_schwinger b/Examples/Tests/qed/inputs_base_3d_schwinger similarity index 100% rename from Examples/Tests/qed/schwinger/inputs_3d_schwinger rename to Examples/Tests/qed/inputs_base_3d_schwinger diff --git a/Examples/Tests/qed/inputs_test_2d_qed_breit_wheeler b/Examples/Tests/qed/inputs_test_2d_qed_breit_wheeler new file mode 100644 index 00000000000..53d3cf9e97c --- /dev/null +++ b/Examples/Tests/qed/inputs_test_2d_qed_breit_wheeler @@ -0,0 +1,2 @@ +# base input parameters +FILE = inputs_base_2d_breit_wheeler diff --git a/Examples/Tests/qed/inputs_test_2d_qed_breit_wheeler_opmd b/Examples/Tests/qed/inputs_test_2d_qed_breit_wheeler_opmd new file mode 100644 index 00000000000..7edecbcd0a3 --- /dev/null +++ b/Examples/Tests/qed/inputs_test_2d_qed_breit_wheeler_opmd @@ -0,0 +1,6 @@ +# base input parameters +FILE = inputs_base_2d_breit_wheeler + +# test input parameters +diag1.format = openpmd +diag1.openpmd_backend = h5 diff --git a/Examples/Tests/qed/quantum_synchrotron/inputs_2d b/Examples/Tests/qed/inputs_test_2d_qed_quantum_sync similarity index 99% rename from Examples/Tests/qed/quantum_synchrotron/inputs_2d rename to Examples/Tests/qed/inputs_test_2d_qed_quantum_sync index 2ac2c782ccd..83d0cee16aa 100644 --- a/Examples/Tests/qed/quantum_synchrotron/inputs_2d +++ b/Examples/Tests/qed/inputs_test_2d_qed_quantum_sync @@ -28,6 +28,7 @@ warpx.do_dive_cleaning = 0 warpx.use_filter = 1 warpx.cfl = 1. 
# if 1., the time step is set to its CFL limit warpx.serialize_initial_conditions = 1 +warpx.abort_on_warning_threshold = high # Order of particle shape factors algo.particle_shape = 3 diff --git a/Examples/Tests/qed/inputs_test_3d_qed_breit_wheeler b/Examples/Tests/qed/inputs_test_3d_qed_breit_wheeler new file mode 100644 index 00000000000..2058dccb493 --- /dev/null +++ b/Examples/Tests/qed/inputs_test_3d_qed_breit_wheeler @@ -0,0 +1,2 @@ +# base input parameters +FILE = inputs_base_3d_breit_wheeler diff --git a/Examples/Tests/qed/inputs_test_3d_qed_breit_wheeler_opmd b/Examples/Tests/qed/inputs_test_3d_qed_breit_wheeler_opmd new file mode 100644 index 00000000000..78847d0a0d4 --- /dev/null +++ b/Examples/Tests/qed/inputs_test_3d_qed_breit_wheeler_opmd @@ -0,0 +1,6 @@ +# base input parameters +FILE = inputs_base_3d_breit_wheeler + +# test input parameters +diag1.format = openpmd +diag1.openpmd_backend = h5 diff --git a/Examples/Tests/qed/quantum_synchrotron/inputs_3d b/Examples/Tests/qed/inputs_test_3d_qed_quantum_sync similarity index 99% rename from Examples/Tests/qed/quantum_synchrotron/inputs_3d rename to Examples/Tests/qed/inputs_test_3d_qed_quantum_sync index 429666ef938..87ffd746ec8 100644 --- a/Examples/Tests/qed/quantum_synchrotron/inputs_3d +++ b/Examples/Tests/qed/inputs_test_3d_qed_quantum_sync @@ -28,6 +28,7 @@ warpx.do_dive_cleaning = 0 warpx.use_filter = 1 warpx.cfl = 1. # if 1., the time step is set to its CFL limit warpx.serialize_initial_conditions = 1 +warpx.abort_on_warning_threshold = high # Order of particle shape factors algo.particle_shape = 3 diff --git a/Examples/Tests/qed/inputs_test_3d_qed_schwinger_1 b/Examples/Tests/qed/inputs_test_3d_qed_schwinger_1 new file mode 100644 index 00000000000..cfa0ca80845 --- /dev/null +++ b/Examples/Tests/qed/inputs_test_3d_qed_schwinger_1 @@ -0,0 +1,6 @@ +# base input parameters +FILE = inputs_base_3d_schwinger + +# test input parameters +warpx.B_external_grid = 16792888.570516706 5256650.141557486 18363530.799561853 +warpx.E_external_grid = 1.e16 0 0 diff --git a/Examples/Tests/qed/inputs_test_3d_qed_schwinger_2 b/Examples/Tests/qed/inputs_test_3d_qed_schwinger_2 new file mode 100644 index 00000000000..420e6bce31f --- /dev/null +++ b/Examples/Tests/qed/inputs_test_3d_qed_schwinger_2 @@ -0,0 +1,8 @@ +# base input parameters +FILE = inputs_base_3d_schwinger + +# test input parameters +warpx.B_external_grid = 1679288857.0516706 525665014.1557486 1836353079.9561853 +warpx.E_external_grid = 1.e18 0 0 +qed_schwinger.xmin = -2.5e-7 +qed_schwinger.xmax = 2.49e-7 diff --git a/Examples/Tests/qed/inputs_test_3d_qed_schwinger_3 b/Examples/Tests/qed/inputs_test_3d_qed_schwinger_3 new file mode 100644 index 00000000000..e77ce567f32 --- /dev/null +++ b/Examples/Tests/qed/inputs_test_3d_qed_schwinger_3 @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_base_3d_schwinger + +# test input parameters +warpx.E_external_grid = 0 1.090934525450495e+17 0 diff --git a/Examples/Tests/qed/inputs_test_3d_qed_schwinger_4 b/Examples/Tests/qed/inputs_test_3d_qed_schwinger_4 new file mode 100644 index 00000000000..78454e8bb75 --- /dev/null +++ b/Examples/Tests/qed/inputs_test_3d_qed_schwinger_4 @@ -0,0 +1,8 @@ +# base input parameters +FILE = inputs_base_3d_schwinger + +# test input parameters +warpx.B_external_grid = 0 833910140000. 
0 +warpx.E_external_grid = 0 0 2.5e+20 +qed_schwinger.ymin = -2.5e-7 +qed_schwinger.zmax = 2.49e-7 diff --git a/Examples/Tests/radiation_reaction/CMakeLists.txt b/Examples/Tests/radiation_reaction/CMakeLists.txt new file mode 100644 index 00000000000..63814f30f29 --- /dev/null +++ b/Examples/Tests/radiation_reaction/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_radiation_reaction # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_radiation_reaction # inputs + analysis.py # analysis + diags/diag1000064 # output + OFF # dependency +) diff --git a/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py b/Examples/Tests/radiation_reaction/analysis.py similarity index 100% rename from Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py rename to Examples/Tests/radiation_reaction/analysis.py diff --git a/Examples/Tests/radiation_reaction/test_const_B_analytical/inputs_3d b/Examples/Tests/radiation_reaction/inputs_test_3d_radiation_reaction similarity index 100% rename from Examples/Tests/radiation_reaction/test_const_B_analytical/inputs_3d rename to Examples/Tests/radiation_reaction/inputs_test_3d_radiation_reaction diff --git a/Examples/Tests/reduced_diags/CMakeLists.txt b/Examples/Tests/reduced_diags/CMakeLists.txt new file mode 100644 index 00000000000..a09d5403270 --- /dev/null +++ b/Examples/Tests/reduced_diags/CMakeLists.txt @@ -0,0 +1,59 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_reduced_diags # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_reduced_diags # inputs + analysis_reduced_diags.py # analysis + diags/diag1000200 # output + OFF # dependency +) + +add_warpx_test( + test_3d_reduced_diags_load_balance_costs_heuristic # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_reduced_diags_load_balance_costs_heuristic # inputs + analysis_reduced_diags_load_balance_costs.py # analysis + diags/diag1000003 # output + OFF # dependency +) + +add_warpx_test( + test_3d_reduced_diags_load_balance_costs_timers # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_reduced_diags_load_balance_costs_timers # inputs + analysis_reduced_diags_load_balance_costs.py # analysis + diags/diag1000003 # output + OFF # dependency +) + +add_warpx_test( + test_3d_reduced_diags_load_balance_costs_timers_picmi # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_reduced_diags_load_balance_costs_timers_picmi.py # inputs + analysis_reduced_diags_load_balance_costs.py # analysis + diags/diag1000003 # output + OFF # dependency +) + +if(WarpX_FFT) + add_warpx_test( + test_3d_reduced_diags_load_balance_costs_timers_psatd # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_reduced_diags_load_balance_costs_timers_psatd # inputs + analysis_reduced_diags_load_balance_costs.py # analysis + diags/diag1000003 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py b/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py similarity index 92% rename from Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py rename to Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py index 0494b84b0d8..05f696e2fe6 100755 --- a/Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py +++ 
b/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py @@ -17,6 +17,8 @@ # Possible running time: ~ 1 s +import os +import re import sys import numpy as np @@ -75,5 +77,8 @@ def get_efficiency(i): # than non-load balanced case assert efficiency_before < efficiency_after -test_name = "reduced_diags_loadbalancecosts_timers" +# The PICMI and native input versions run the same test, so +# their results are compared to the same benchmark file +test_name = os.path.split(os.getcwd())[1] +test_name = re.sub("_picmi", "", test_name) checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/reduced_diags/inputs_loadbalancecosts b/Examples/Tests/reduced_diags/inputs_base_3d similarity index 100% rename from Examples/Tests/reduced_diags/inputs_loadbalancecosts rename to Examples/Tests/reduced_diags/inputs_base_3d diff --git a/Examples/Tests/reduced_diags/inputs b/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags similarity index 100% rename from Examples/Tests/reduced_diags/inputs rename to Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags diff --git a/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_heuristic b/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_heuristic new file mode 100644 index 00000000000..18777d5a1fa --- /dev/null +++ b/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_heuristic @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +algo.load_balance_costs_update = Heuristic diff --git a/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_timers b/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_timers new file mode 100644 index 00000000000..7d8586cd913 --- /dev/null +++ b/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_timers @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +algo.load_balance_costs_update = Timers diff --git a/Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py b/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_timers_picmi.py similarity index 93% rename from Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py rename to Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_timers_picmi.py index 73050b910d8..d1dc6935bb7 100644 --- a/Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py +++ b/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_timers_picmi.py @@ -77,16 +77,12 @@ period=3, species=[electrons], data_list=["ux", "uy", "uz", "x", "y", "z", "weighting"], - write_dir=".", - warpx_file_prefix="Python_reduced_diags_loadbalancecosts_timers_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, period=3, data_list=["Bx", "By", "Bz", "Ex", "Ey", "Ez", "Jx", "Jy", "Jz"], - write_dir=".", - warpx_file_prefix="Python_reduced_diags_loadbalancecosts_timers_plt", ) # Set up simulation @@ -98,6 +94,7 @@ warpx_current_deposition_algo="esirkepov", warpx_field_gathering_algo="energy-conserving", warpx_load_balance_intervals=2, + warpx_load_balance_costs_update="timers", ) # Add species diff --git a/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_timers_psatd b/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_timers_psatd new file mode 100644 index 00000000000..7d8586cd913 --- /dev/null +++ 
b/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags_load_balance_costs_timers_psatd @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +algo.load_balance_costs_update = Timers diff --git a/Examples/Tests/relativistic_space_charge_initialization/CMakeLists.txt b/Examples/Tests/relativistic_space_charge_initialization/CMakeLists.txt new file mode 100644 index 00000000000..9ee2a63d2d2 --- /dev/null +++ b/Examples/Tests/relativistic_space_charge_initialization/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_relativistic_space_charge_initialization # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_relativistic_space_charge_initialization # inputs + analysis.py # analysis + diags/diag1000001 # output + OFF # dependency +) diff --git a/Examples/Tests/relativistic_space_charge_initialization/inputs_3d b/Examples/Tests/relativistic_space_charge_initialization/inputs_test_3d_relativistic_space_charge_initialization similarity index 100% rename from Examples/Tests/relativistic_space_charge_initialization/inputs_3d rename to Examples/Tests/relativistic_space_charge_initialization/inputs_test_3d_relativistic_space_charge_initialization diff --git a/Examples/Tests/repelling_particles/CMakeLists.txt b/Examples/Tests/repelling_particles/CMakeLists.txt new file mode 100644 index 00000000000..ed662b67332 --- /dev/null +++ b/Examples/Tests/repelling_particles/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_repelling_particles # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_repelling_particles # inputs + analysis.py # analysis + diags/diag1000200 # output + OFF # dependency +) diff --git a/Examples/Tests/repelling_particles/analysis_repelling.py b/Examples/Tests/repelling_particles/analysis.py similarity index 100% rename from Examples/Tests/repelling_particles/analysis_repelling.py rename to Examples/Tests/repelling_particles/analysis.py diff --git a/Examples/Tests/repelling_particles/inputs_2d b/Examples/Tests/repelling_particles/inputs_test_2d_repelling_particles similarity index 100% rename from Examples/Tests/repelling_particles/inputs_2d rename to Examples/Tests/repelling_particles/inputs_test_2d_repelling_particles diff --git a/Examples/Tests/resampling/CMakeLists.txt b/Examples/Tests/resampling/CMakeLists.txt new file mode 100644 index 00000000000..10d51e0ea47 --- /dev/null +++ b/Examples/Tests/resampling/CMakeLists.txt @@ -0,0 +1,35 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_1d_resample_velocity_coincidence_thinning # name + 1 # dims + 2 # nprocs + OFF # eb + inputs_test_1d_resample_velocity_coincidence_thinning # inputs + analysis_default_regression.py # analysis + diags/diag1000004 # output + OFF # dependency +) + +add_warpx_test( + test_1d_resample_velocity_coincidence_thinning_cartesian # name + 1 # dims + 2 # nprocs + OFF # eb + inputs_test_1d_resample_velocity_coincidence_thinning_cartesian # inputs + analysis_default_regression.py # analysis + diags/diag1000004 # output + OFF # dependency +) + +add_warpx_test( + test_2d_leveling_thinning # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_leveling_thinning # inputs + analysis.py # analysis + diags/diag1000008 # output + OFF # dependency +) diff --git a/Examples/Tests/resampling/analysis_leveling_thinning.py 
b/Examples/Tests/resampling/analysis.py similarity index 100% rename from Examples/Tests/resampling/analysis_leveling_thinning.py rename to Examples/Tests/resampling/analysis.py diff --git a/Examples/Tests/resampling/analysis_default_regression.py b/Examples/Tests/resampling/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/resampling/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/resampling/inputs_1d_velocity_coincidence_thinning b/Examples/Tests/resampling/inputs_test_1d_resample_velocity_coincidence_thinning similarity index 100% rename from Examples/Tests/resampling/inputs_1d_velocity_coincidence_thinning rename to Examples/Tests/resampling/inputs_test_1d_resample_velocity_coincidence_thinning diff --git a/Examples/Tests/resampling/inputs_1d_velocity_coincidence_thinning_cartesian b/Examples/Tests/resampling/inputs_test_1d_resample_velocity_coincidence_thinning_cartesian similarity index 100% rename from Examples/Tests/resampling/inputs_1d_velocity_coincidence_thinning_cartesian rename to Examples/Tests/resampling/inputs_test_1d_resample_velocity_coincidence_thinning_cartesian diff --git a/Examples/Tests/resampling/inputs_leveling_thinning b/Examples/Tests/resampling/inputs_test_2d_leveling_thinning similarity index 100% rename from Examples/Tests/resampling/inputs_leveling_thinning rename to Examples/Tests/resampling/inputs_test_2d_leveling_thinning diff --git a/Examples/Tests/restart/CMakeLists.txt b/Examples/Tests/restart/CMakeLists.txt new file mode 100644 index 00000000000..33770495dc6 --- /dev/null +++ b/Examples/Tests/restart/CMakeLists.txt @@ -0,0 +1,115 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_id_cpu_read_picmi # name + 2 # dims + 1 # nprocs + OFF # eb + inputs_test_2d_id_cpu_read_picmi.py # inputs + analysis_default_regression.py # analysis + diags/diag1000010 # output + OFF # dependency +) + +# TODO +# - Add checksums file +# - Enable analysis +add_warpx_test( + test_2d_runtime_components_picmi # name + 2 # dims + 1 # nprocs + OFF # eb + inputs_test_2d_runtime_components_picmi.py # inputs + OFF #analysis_default_regression.py # analysis + OFF #diags/diag1000010 # output + OFF # dependency +) + +# TODO +# - Add checksums file +# - Enable analysis +add_warpx_test( + test_2d_runtime_components_picmi_restart # name + 2 # dims + 1 # nprocs + OFF # eb + "inputs_test_2d_runtime_components_picmi.py amr.restart='../test_2d_runtime_components_picmi/diags/chk000005'" # inputs + OFF #analysis_default_restart.py # analysis + OFF #diags/diag1000010 # output + test_2d_runtime_components_picmi # dependency +) + +add_warpx_test( + test_3d_acceleration # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_acceleration # inputs + analysis_default_regression.py # analysis + diags/diag1000010 # output + OFF # dependency +) + +add_warpx_test( + test_3d_acceleration_restart # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_acceleration_restart # inputs + analysis_default_restart.py # analysis + diags/diag1000010 # output + test_3d_acceleration # dependency +) + +if(WarpX_FFT) + add_warpx_test( + test_3d_acceleration_psatd # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_acceleration_psatd # inputs + analysis_default_regression.py # analysis + diags/diag1000010 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + 
test_3d_acceleration_psatd_restart # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_acceleration_psatd_restart # inputs + analysis_default_restart.py # analysis + diags/diag1000010 # output + test_3d_acceleration_psatd # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_3d_acceleration_psatd_time_avg # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_acceleration_psatd_time_avg # inputs + analysis_default_regression.py # analysis + diags/diag1000010 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_3d_acceleration_psatd_time_avg_restart # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_acceleration_psatd_time_avg_restart # inputs + analysis_default_restart.py # analysis + diags/diag1000010 # output + test_3d_acceleration_psatd_time_avg # dependency + ) +endif() diff --git a/Examples/Tests/restart/analysis_default_regression.py b/Examples/Tests/restart/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/restart/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/restart/analysis_default_restart.py b/Examples/Tests/restart/analysis_default_restart.py new file mode 120000 index 00000000000..0459986eebc --- /dev/null +++ b/Examples/Tests/restart/analysis_default_restart.py @@ -0,0 +1 @@ +../../analysis_default_restart.py \ No newline at end of file diff --git a/Examples/Tests/restart/inputs b/Examples/Tests/restart/inputs_base_3d similarity index 99% rename from Examples/Tests/restart/inputs rename to Examples/Tests/restart/inputs_base_3d index f6aef120466..6724bae64b3 100644 --- a/Examples/Tests/restart/inputs +++ b/Examples/Tests/restart/inputs_base_3d @@ -1,7 +1,6 @@ ################################# ####### GENERAL PARAMETERS ###### ################################# -#amr.restart = diags/chk00005/ max_step = 10 amr.n_cell = 32 32 256 amr.max_grid_size = 64 diff --git a/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py b/Examples/Tests/restart/inputs_test_2d_id_cpu_read_picmi.py similarity index 95% rename from Examples/Tests/restart/PICMI_inputs_id_cpu_read.py rename to Examples/Tests/restart/inputs_test_2d_id_cpu_read_picmi.py index 8c2be7b8750..be6e621653f 100755 --- a/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py +++ b/Examples/Tests/restart/inputs_test_2d_id_cpu_read_picmi.py @@ -65,16 +65,12 @@ particle_diag = picmi.ParticleDiagnostic( name="diag1", period=10, - write_dir=".", - warpx_file_prefix="Python_restart_runtime_components_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, period=10, data_list=["phi"], - write_dir=".", - warpx_file_prefix="Python_restart_runtime_components_plt", ) checkpoint = picmi.Checkpoint( diff --git a/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py b/Examples/Tests/restart/inputs_test_2d_runtime_components_picmi.py similarity index 91% rename from Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py rename to Examples/Tests/restart/inputs_test_2d_runtime_components_picmi.py index 3061a3c1ff6..e90bfd266a7 100755 --- a/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py +++ b/Examples/Tests/restart/inputs_test_2d_runtime_components_picmi.py @@ -66,25 +66,15 @@ particle_diag = picmi.ParticleDiagnostic( name="diag1", period=10, - write_dir=".", - warpx_file_prefix="Python_restart_runtime_components_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, 
period=10, data_list=["phi"], - write_dir=".", - warpx_file_prefix="Python_restart_runtime_components_plt", ) -checkpoint = picmi.Checkpoint( - name="chkpoint", - period=5, - write_dir=".", - warpx_file_min_digits=5, - warpx_file_prefix="Python_restart_runtime_components_chk", -) +checkpoint = picmi.Checkpoint(name="chk", period=5) ########################## # simulation setup diff --git a/Examples/Tests/restart/inputs_test_3d_acceleration b/Examples/Tests/restart/inputs_test_3d_acceleration new file mode 100644 index 00000000000..7665a846eef --- /dev/null +++ b/Examples/Tests/restart/inputs_test_3d_acceleration @@ -0,0 +1,2 @@ +# base input parameters +FILE = inputs_base_3d diff --git a/Examples/Tests/restart/inputs_test_3d_acceleration_psatd b/Examples/Tests/restart/inputs_test_3d_acceleration_psatd new file mode 100644 index 00000000000..1f4e258b964 --- /dev/null +++ b/Examples/Tests/restart/inputs_test_3d_acceleration_psatd @@ -0,0 +1,11 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +algo.maxwell_solver = psatd +boundary.field_hi = periodic periodic damped +boundary.field_lo = periodic periodic damped +particles.use_fdtd_nci_corr = 0 +psatd.current_correction = 0 +psatd.use_default_v_galilean = 1 +warpx.abort_on_warning_threshold = medium diff --git a/Examples/Tests/restart/inputs_test_3d_acceleration_psatd_restart b/Examples/Tests/restart/inputs_test_3d_acceleration_psatd_restart new file mode 100644 index 00000000000..ac2a354dcb9 --- /dev/null +++ b/Examples/Tests/restart/inputs_test_3d_acceleration_psatd_restart @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_test_3d_acceleration_psatd + +# test input parameters +amr.restart = "../test_3d_acceleration_psatd/diags/chk000005" diff --git a/Examples/Tests/restart/inputs_test_3d_acceleration_psatd_time_avg b/Examples/Tests/restart/inputs_test_3d_acceleration_psatd_time_avg new file mode 100644 index 00000000000..d9625a7f058 --- /dev/null +++ b/Examples/Tests/restart/inputs_test_3d_acceleration_psatd_time_avg @@ -0,0 +1,12 @@ +# base input parameters +FILE = inputs_base_3d + +# test input parameters +algo.maxwell_solver = psatd +boundary.field_hi = periodic periodic damped +boundary.field_lo = periodic periodic damped +particles.use_fdtd_nci_corr = 0 +psatd.current_correction = 0 +psatd.do_time_averaging = 1 +psatd.use_default_v_galilean = 1 +warpx.abort_on_warning_threshold = medium diff --git a/Examples/Tests/restart/inputs_test_3d_acceleration_psatd_time_avg_restart b/Examples/Tests/restart/inputs_test_3d_acceleration_psatd_time_avg_restart new file mode 100644 index 00000000000..44956c2259b --- /dev/null +++ b/Examples/Tests/restart/inputs_test_3d_acceleration_psatd_time_avg_restart @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_test_3d_acceleration_psatd_time_avg + +# test input parameters +amr.restart = "../test_3d_acceleration_psatd_time_avg/diags/chk000005" diff --git a/Examples/Tests/restart/inputs_test_3d_acceleration_restart b/Examples/Tests/restart/inputs_test_3d_acceleration_restart new file mode 100644 index 00000000000..320224f6c16 --- /dev/null +++ b/Examples/Tests/restart/inputs_test_3d_acceleration_restart @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_test_3d_acceleration + +# test input parameters +amr.restart = "../test_3d_acceleration/diags/chk000005" diff --git a/Examples/Tests/restart_eb/CMakeLists.txt b/Examples/Tests/restart_eb/CMakeLists.txt new file mode 100644 index 00000000000..54d1d3ea574 --- /dev/null +++ b/Examples/Tests/restart_eb/CMakeLists.txt @@ 
-0,0 +1,29 @@ +# Add tests (alphabetical order) ############################################## +# + +if(WarpX_EB) + add_warpx_test( + test_3d_eb_picmi # name + 3 # dims + 1 # nprocs + ON # eb + inputs_test_3d_eb_picmi.py # inputs + analysis_default_regression.py # analysis + diags/diag1000060 # output + OFF # dependency + ) +endif() + +# FIXME +#if(WarpX_EB) +# add_warpx_test( +# test_3d_eb_picmi_restart # name +# 3 # dims +# 1 # nprocs +# ON # eb +# "inputs_test_3d_eb_picmi.py amr.restart='../test_3d_eb_picmi/diags/chk000030'" # inputs +# analysis_default_restart.py # analysis +# diags/diag1000060 # output +# test_3d_eb_picmi # dependency +# ) +#endif() diff --git a/Examples/Tests/restart_eb/analysis_default_regression.py b/Examples/Tests/restart_eb/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/restart_eb/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/restart_eb/analysis_default_restart.py b/Examples/Tests/restart_eb/analysis_default_restart.py new file mode 120000 index 00000000000..0459986eebc --- /dev/null +++ b/Examples/Tests/restart_eb/analysis_default_restart.py @@ -0,0 +1 @@ +../../analysis_default_restart.py \ No newline at end of file diff --git a/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py b/Examples/Tests/restart_eb/inputs_test_3d_eb_picmi.py similarity index 92% rename from Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py rename to Examples/Tests/restart_eb/inputs_test_3d_eb_picmi.py index 0cfd0bcff5f..0f701ba999b 100755 --- a/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py +++ b/Examples/Tests/restart_eb/inputs_test_3d_eb_picmi.py @@ -80,24 +80,17 @@ particle_diag = picmi.ParticleDiagnostic( name="diag1", period=diagnostic_intervals, - write_dir=".", - warpx_file_prefix="Python_restart_eb_plt", ) field_diag = picmi.FieldDiagnostic( name="diag1", grid=grid, period=diagnostic_intervals, data_list=["Ex", "Ey", "Ez", "Bx", "By", "Bz"], - write_dir=".", - warpx_file_prefix="Python_restart_eb_plt", ) checkpoint = picmi.Checkpoint( - name="chkpoint", + name="chk", period=diagnostic_intervals, - write_dir=".", - warpx_file_min_digits=5, - warpx_file_prefix="Python_restart_eb_chk", ) ########################## diff --git a/Examples/Tests/rigid_injection/CMakeLists.txt b/Examples/Tests/rigid_injection/CMakeLists.txt new file mode 100644 index 00000000000..210cc86418f --- /dev/null +++ b/Examples/Tests/rigid_injection/CMakeLists.txt @@ -0,0 +1,24 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_rigid_injection_btd # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_rigid_injection_btd # inputs + analysis_rigid_injection_btd.py # analysis + diags/diag1000001 # output + OFF # dependency +) + +add_warpx_test( + test_2d_rigid_injection_lab # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_rigid_injection_lab # inputs + analysis_rigid_injection_lab.py # analysis + diags/diag1000289 # output + OFF # dependency +) diff --git a/Examples/Tests/rigid_injection/analysis_rigid_injection_BoostedFrame.py b/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py similarity index 100% rename from Examples/Tests/rigid_injection/analysis_rigid_injection_BoostedFrame.py rename to Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py diff --git a/Examples/Tests/rigid_injection/analysis_rigid_injection_LabFrame.py 
b/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py similarity index 100% rename from Examples/Tests/rigid_injection/analysis_rigid_injection_LabFrame.py rename to Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py diff --git a/Examples/Tests/rigid_injection/inputs_2d_BoostedFrame b/Examples/Tests/rigid_injection/inputs_test_2d_rigid_injection_btd similarity index 100% rename from Examples/Tests/rigid_injection/inputs_2d_BoostedFrame rename to Examples/Tests/rigid_injection/inputs_test_2d_rigid_injection_btd diff --git a/Examples/Tests/rigid_injection/inputs_2d_LabFrame b/Examples/Tests/rigid_injection/inputs_test_2d_rigid_injection_lab similarity index 100% rename from Examples/Tests/rigid_injection/inputs_2d_LabFrame rename to Examples/Tests/rigid_injection/inputs_test_2d_rigid_injection_lab diff --git a/Examples/Tests/scraping/CMakeLists.txt b/Examples/Tests/scraping/CMakeLists.txt new file mode 100644 index 00000000000..94ec04e35d7 --- /dev/null +++ b/Examples/Tests/scraping/CMakeLists.txt @@ -0,0 +1,28 @@ +# Add tests (alphabetical order) ############################################## +# + +if(WarpX_EB) + add_warpx_test( + test_rz_scraping # name + RZ # dims + 2 # nprocs + ON # eb + inputs_test_rz_scraping # inputs + analysis_rz.py # analysis + diags/diag1000037 # output + OFF # dependency + ) +endif() + +if(WarpX_EB) + add_warpx_test( + test_rz_scraping_filter # name + RZ # dims + 2 # nprocs + ON # eb + inputs_test_rz_scraping_filter # inputs + analysis_rz_filter.py # analysis + diags/diag1000037 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/scraping/inputs_rz b/Examples/Tests/scraping/inputs_test_rz_scraping similarity index 97% rename from Examples/Tests/scraping/inputs_rz rename to Examples/Tests/scraping/inputs_test_rz_scraping index 0dab9ebedd2..b332de2229a 100644 --- a/Examples/Tests/scraping/inputs_rz +++ b/Examples/Tests/scraping/inputs_test_rz_scraping @@ -21,6 +21,7 @@ boundary.potential_hi_y = 0 boundary.potential_lo_z = 0 boundary.potential_hi_z = 0 +warpx.abort_on_warning_threshold = medium warpx.const_dt = 1.216119097e-11 warpx.eb_implicit_function = "-(x**2-0.1**2)" diff --git a/Examples/Tests/scraping/inputs_rz_filter b/Examples/Tests/scraping/inputs_test_rz_scraping_filter similarity index 97% rename from Examples/Tests/scraping/inputs_rz_filter rename to Examples/Tests/scraping/inputs_test_rz_scraping_filter index 0d67fb96f6c..3a3ab78a226 100644 --- a/Examples/Tests/scraping/inputs_rz_filter +++ b/Examples/Tests/scraping/inputs_test_rz_scraping_filter @@ -21,6 +21,7 @@ boundary.potential_hi_y = 0 boundary.potential_lo_z = 0 boundary.potential_hi_z = 0 +warpx.abort_on_warning_threshold = medium warpx.const_dt = 1.216119097e-11 warpx.eb_implicit_function = "-(x**2-0.1**2)" diff --git a/Examples/Tests/silver_mueller/CMakeLists.txt b/Examples/Tests/silver_mueller/CMakeLists.txt new file mode 100644 index 00000000000..5b9cd278ef0 --- /dev/null +++ b/Examples/Tests/silver_mueller/CMakeLists.txt @@ -0,0 +1,46 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_1d_silver_mueller # name + 1 # dims + 2 # nprocs + OFF # eb + inputs_test_1d_silver_mueller # inputs + analysis.py # analysis + diags/diag1000500 # output + OFF # dependency +) + +add_warpx_test( + test_2d_silver_mueller_x # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_silver_mueller_x # inputs + analysis.py # analysis + diags/diag1000500 # output + OFF # dependency +) + +add_warpx_test( + 
test_2d_silver_mueller_z # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_silver_mueller_z # inputs + analysis.py # analysis + diags/diag1000500 # output + OFF # dependency +) + +add_warpx_test( + test_rz_silver_mueller_z # name + RZ # dims + 2 # nprocs + OFF # eb + inputs_test_rz_silver_mueller_z # inputs + analysis.py # analysis + diags/diag1000500 # output + OFF # dependency +) diff --git a/Examples/Tests/silver_mueller/analysis_silver_mueller.py b/Examples/Tests/silver_mueller/analysis.py similarity index 100% rename from Examples/Tests/silver_mueller/analysis_silver_mueller.py rename to Examples/Tests/silver_mueller/analysis.py diff --git a/Examples/Tests/silver_mueller/inputs_1d b/Examples/Tests/silver_mueller/inputs_test_1d_silver_mueller similarity index 100% rename from Examples/Tests/silver_mueller/inputs_1d rename to Examples/Tests/silver_mueller/inputs_test_1d_silver_mueller diff --git a/Examples/Tests/silver_mueller/inputs_2d_x b/Examples/Tests/silver_mueller/inputs_test_2d_silver_mueller_x similarity index 100% rename from Examples/Tests/silver_mueller/inputs_2d_x rename to Examples/Tests/silver_mueller/inputs_test_2d_silver_mueller_x diff --git a/Examples/Tests/silver_mueller/inputs_2d_z b/Examples/Tests/silver_mueller/inputs_test_2d_silver_mueller_z similarity index 100% rename from Examples/Tests/silver_mueller/inputs_2d_z rename to Examples/Tests/silver_mueller/inputs_test_2d_silver_mueller_z diff --git a/Examples/Tests/silver_mueller/inputs_rz_z b/Examples/Tests/silver_mueller/inputs_test_rz_silver_mueller_z similarity index 100% rename from Examples/Tests/silver_mueller/inputs_rz_z rename to Examples/Tests/silver_mueller/inputs_test_rz_silver_mueller_z diff --git a/Examples/Tests/single_particle/CMakeLists.txt b/Examples/Tests/single_particle/CMakeLists.txt new file mode 100644 index 00000000000..b2719bee681 --- /dev/null +++ b/Examples/Tests/single_particle/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_bilinear_filter # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_bilinear_filter # inputs + analysis.py # analysis + diags/diag1000001 # output + OFF # dependency +) diff --git a/Examples/Tests/single_particle/analysis_bilinear_filter.py b/Examples/Tests/single_particle/analysis.py similarity index 100% rename from Examples/Tests/single_particle/analysis_bilinear_filter.py rename to Examples/Tests/single_particle/analysis.py diff --git a/Examples/Tests/single_particle/inputs_2d b/Examples/Tests/single_particle/inputs_test_2d_bilinear_filter similarity index 93% rename from Examples/Tests/single_particle/inputs_2d rename to Examples/Tests/single_particle/inputs_test_2d_bilinear_filter index 71ca2101c22..6f8eb6fdcea 100644 --- a/Examples/Tests/single_particle/inputs_2d +++ b/Examples/Tests/single_particle/inputs_test_2d_bilinear_filter @@ -14,6 +14,8 @@ boundary.field_hi = pec pec algo.charge_deposition = standard algo.field_gathering = energy-conserving warpx.cfl = 1.0 +warpx.use_filter = 1 +warpx.filter_npass_each_dir = 1 5 # Order of particle shape factors algo.particle_shape = 1 diff --git a/Examples/Tests/space_charge_initialization/CMakeLists.txt b/Examples/Tests/space_charge_initialization/CMakeLists.txt new file mode 100644 index 00000000000..af07d677775 --- /dev/null +++ b/Examples/Tests/space_charge_initialization/CMakeLists.txt @@ -0,0 +1,24 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( 
+ test_2d_space_charge_initialization # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_space_charge_initialization # inputs + analysis.py # analysis + diags/diag1000001 # output + OFF # dependency +) + +add_warpx_test( + test_3d_space_charge_initialization # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_space_charge_initialization # inputs + analysis.py # analysis + diags/diag1000001 # output + OFF # dependency +) diff --git a/Examples/Tests/space_charge_initialization/inputs_test_2d_space_charge_initialization b/Examples/Tests/space_charge_initialization/inputs_test_2d_space_charge_initialization new file mode 100644 index 00000000000..4445217225c --- /dev/null +++ b/Examples/Tests/space_charge_initialization/inputs_test_2d_space_charge_initialization @@ -0,0 +1,36 @@ +max_step = 1 +amr.n_cell = 128 128 +amr.max_grid_size = 32 +amr.max_level = 0 + +boundary.field_lo = pec pec +boundary.field_hi = pec pec +geometry.prob_lo = -50.e-6 -50.e-6 +geometry.prob_hi = 50.e-6 50.e-6 +geometry.dims = 2 + +warpx.cfl = 1.e-3 + +# Order of particle shape factors +algo.particle_shape = 1 + +particles.species_names = beam +beam.charge = -q_e +beam.mass = m_e +beam.injection_style = "gaussian_beam" +beam.initialize_self_fields = 1 +beam.x_rms = 2.e-6 +beam.y_rms = 2.e-6 +beam.z_rms = 2.e-6 +beam.x_m = 0. +beam.y_m = 0. +beam.z_m = 0.e-6 +beam.npart = 20000 +beam.q_tot = -1.e-20 +beam.momentum_distribution_type = "at_rest" + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 1 +diag1.diag_type = Full +diag1.fields_to_plot = Ex Ey Ez jx jy jz diff --git a/Examples/Tests/space_charge_initialization/inputs_3d b/Examples/Tests/space_charge_initialization/inputs_test_3d_space_charge_initialization similarity index 97% rename from Examples/Tests/space_charge_initialization/inputs_3d rename to Examples/Tests/space_charge_initialization/inputs_test_3d_space_charge_initialization index c8058fac519..d7a9f42fa70 100644 --- a/Examples/Tests/space_charge_initialization/inputs_3d +++ b/Examples/Tests/space_charge_initialization/inputs_test_3d_space_charge_initialization @@ -8,6 +8,7 @@ boundary.field_lo = pec pec pec boundary.field_hi = pec pec pec geometry.prob_lo = -50.e-6 -50.e-6 -50.e-6 geometry.prob_hi = 50.e-6 50.e-6 50.e-6 +geometry.dims = 3 warpx.cfl = 1.e-3 diff --git a/Examples/Tests/subcycling/CMakeLists.txt b/Examples/Tests/subcycling/CMakeLists.txt new file mode 100644 index 00000000000..ccea031f5a4 --- /dev/null +++ b/Examples/Tests/subcycling/CMakeLists.txt @@ -0,0 +1,13 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_2d_subcycling_mr # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_subcycling_mr # inputs + analysis_default_regression.py # analysis + diags/diag1000250 # output + OFF # dependency +) diff --git a/Examples/Tests/subcycling/analysis_default_regression.py b/Examples/Tests/subcycling/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/subcycling/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/subcycling/inputs_2d b/Examples/Tests/subcycling/inputs_test_2d_subcycling_mr similarity index 100% rename from Examples/Tests/subcycling/inputs_2d rename to Examples/Tests/subcycling/inputs_test_2d_subcycling_mr diff --git a/Examples/Tests/vay_deposition/CMakeLists.txt b/Examples/Tests/vay_deposition/CMakeLists.txt new
file mode 100644 index 00000000000..9ebe4ec0dba --- /dev/null +++ b/Examples/Tests/vay_deposition/CMakeLists.txt @@ -0,0 +1,28 @@ +# Add tests (alphabetical order) ############################################## +# + +if(WarpX_FFT) + add_warpx_test( + test_2d_vay_deposition # name + 2 # dims + 2 # nprocs + OFF # eb + inputs_test_2d_vay_deposition # inputs + analysis.py # analysis + diags/diag1000050 # output + OFF # dependency + ) +endif() + +if(WarpX_FFT) + add_warpx_test( + test_3d_vay_deposition # name + 3 # dims + 2 # nprocs + OFF # eb + inputs_test_3d_vay_deposition # inputs + analysis.py # analysis + diags/diag1000025 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/vay_deposition/inputs_2d b/Examples/Tests/vay_deposition/inputs_test_2d_vay_deposition similarity index 100% rename from Examples/Tests/vay_deposition/inputs_2d rename to Examples/Tests/vay_deposition/inputs_test_2d_vay_deposition diff --git a/Examples/Tests/vay_deposition/inputs_3d b/Examples/Tests/vay_deposition/inputs_test_3d_vay_deposition similarity index 100% rename from Examples/Tests/vay_deposition/inputs_3d rename to Examples/Tests/vay_deposition/inputs_test_3d_vay_deposition diff --git a/Examples/analysis_default_regression.py b/Examples/analysis_default_regression.py index 5e1e88ee28b..519bbeeea64 100755 --- a/Examples/analysis_default_regression.py +++ b/Examples/analysis_default_regression.py @@ -17,4 +17,5 @@ if re.search("single_precision", fn): checksumAPI.evaluate_checksum(test_name, fn, rtol=2.0e-6) else: + # using default relative tolerance checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/analysis_default_restart.py b/Examples/analysis_default_restart.py index 30491ad59e9..55bab253dbc 100755 --- a/Examples/analysis_default_restart.py +++ b/Examples/analysis_default_restart.py @@ -1,8 +1,14 @@ #!/usr/bin/env python3 +import os +import sys + import numpy as np import yt +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +import checksumAPI + def check_restart(filename, tolerance=1e-12): """ @@ -31,7 +37,7 @@ def check_restart(filename, tolerance=1e-12): ) # Load output data generated from initial run - benchmark = "orig_" + filename + benchmark = os.path.join(os.getcwd().replace("_restart", ""), filename) ds_benchmark = yt.load(benchmark) # yt 4.0+ has rounding issues with our domain data: @@ -48,7 +54,7 @@ def check_restart(filename, tolerance=1e-12): # Loop over all fields (all particle species, all particle attributes, all grid fields) # and compare output data generated from initial run with output data generated after restart - print("\ntolerance = {:g}".format(tolerance)) + print(f"\ntolerance = {tolerance}") print() for field in ds_benchmark.field_list: dr = ad_restart[field].squeeze().v @@ -56,6 +62,17 @@ def check_restart(filename, tolerance=1e-12): error = np.amax(np.abs(dr - db)) if np.amax(np.abs(db)) != 0.0: error /= np.amax(np.abs(db)) - print("field: {}; error = {:g}".format(field, error)) + print(f"field: {field}; error = {error}") assert error < tolerance print() + + +filename = sys.argv[1] + +# compare restart results against original results +check_restart(filename) + +# compare restart checksums against original checksums +testname = os.path.split(os.getcwd())[1] +testname = testname.replace("_restart", "") +checksumAPI.evaluate_checksum(testname, filename, rtol=1e-12) diff --git a/Regression/Checksum/benchmarks_json/LaserIonAcc3d.json b/Regression/Checksum/benchmarks_json/LaserIonAcc3d.json deleted file mode 100644 index 
de5472105d9..00000000000
--- a/Regression/Checksum/benchmarks_json/LaserIonAcc3d.json
+++ /dev/null
@@ -1,32 +0,0 @@
-{
-  "electrons": {
-    "particle_momentum_x": 1.6966182372218133e-16,
-    "particle_momentum_y": 2.6850066145197374e-17,
-    "particle_momentum_z": 2.0052710316284176e-16,
-    "particle_position_x": 0.3393352015355679,
-    "particle_position_y": 1.1078675395554147,
-    "particle_position_z": 0.3419438867441836,
-    "particle_weight": 26433181926540.81
-  },
-  "hydrogen": {
-    "particle_momentum_x": 1.7161831722699107e-16,
-    "particle_momentum_y": 4.9196233343263506e-17,
-    "particle_momentum_z": 2.1370961936359413e-16,
-    "particle_position_x": 0.3375134789944616,
-    "particle_position_y": 1.1080021730384098,
-    "particle_position_z": 0.33939049172256086,
-    "particle_weight": 26441597005520.95
-  },
-  "lev=0": {
-    "Bx": 41555976.87146437,
-    "By": 175750876.1712573,
-    "Bz": 35156983.723599546,
-    "Ex": 3.872657491899755e+17,
-    "Ey": 3.3815796095277564e+16,
-    "Ez": 3.937276394651024e+17,
-    "jx": 3.5072653955241413e+21,
-    "jy": 4.011484251839508e+20,
-    "jz": 3.787151010057889e+21,
-    "rho": 7429502184315.598
-  }
-}
\ No newline at end of file
diff --git a/Regression/Checksum/benchmarks_json/TwoParticle_electrostatic.json b/Regression/Checksum/benchmarks_json/TwoParticle_electrostatic.json
deleted file mode 100644
index aaf04f8a74c..00000000000
--- a/Regression/Checksum/benchmarks_json/TwoParticle_electrostatic.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "electron1": {
-    "particle_momentum_x": 3.346088201352191e-29,
-    "particle_momentum_y": 3.346088199932699e-29,
-    "particle_momentum_z": 3.3460881978884573e-29,
-    "particle_position_x": 0.1545496421786394,
-    "particle_position_y": 0.15454964213891717,
-    "particle_position_z": 0.15454964208395047,
-    "particle_weight": 1.0
-  },
-  "electron2": {
-    "particle_momentum_x": 3.346088199424244e-29,
-    "particle_momentum_y": 3.346088202432085e-29,
-    "particle_momentum_z": 3.346088202108714e-29,
-    "particle_position_x": 0.15454964215048347,
-    "particle_position_y": 0.15454964222866666,
-    "particle_position_z": 0.15454964222208387,
-    "particle_weight": 1.0
-  },
-  "lev=0": {
-    "Ex": 7.101527209952963e-05,
-    "Ey": 7.10152721046017e-05,
-    "Ez": 7.101527211163835e-05,
-    "rho": 1.3125030985727997e-15
-  }
-}
\ No newline at end of file
diff --git a/Regression/Checksum/benchmarks_json/collisionZ.json b/Regression/Checksum/benchmarks_json/test_1d_collision_z.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/collisionZ.json
rename to Regression/Checksum/benchmarks_json/test_1d_collision_z.json
diff --git a/Regression/Checksum/benchmarks_json/Python_dsmc_1d.json b/Regression/Checksum/benchmarks_json/test_1d_dsmc_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_dsmc_1d.json
rename to Regression/Checksum/benchmarks_json/test_1d_dsmc_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_fluid_1D.json b/Regression/Checksum/benchmarks_json/test_1d_langmuir_fluid.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_fluid_1D.json
rename to Regression/Checksum/benchmarks_json/test_1d_langmuir_fluid.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_1d.json b/Regression/Checksum/benchmarks_json/test_1d_langmuir_multi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_1d.json
rename to Regression/Checksum/benchmarks_json/test_1d_langmuir_multi.json
diff --git a/Regression/Checksum/benchmarks_json/LaserAcceleration_1d.json b/Regression/Checksum/benchmarks_json/test_1d_laser_acceleration.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserAcceleration_1d.json
rename to Regression/Checksum/benchmarks_json/test_1d_laser_acceleration.json
diff --git a/Regression/Checksum/benchmarks_json/LaserAcceleration_1d_fluid.json b/Regression/Checksum/benchmarks_json/test_1d_laser_acceleration_fluid.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserAcceleration_1d_fluid.json
rename to Regression/Checksum/benchmarks_json/test_1d_laser_acceleration_fluid.json
diff --git a/Regression/Checksum/benchmarks_json/LaserAcceleration_1d_fluid_boosted.json b/Regression/Checksum/benchmarks_json/test_1d_laser_acceleration_fluid_boosted.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserAcceleration_1d_fluid_boosted.json
rename to Regression/Checksum/benchmarks_json/test_1d_laser_acceleration_fluid_boosted.json
diff --git a/Regression/Checksum/benchmarks_json/Python_LaserAcceleration_1d.json b/Regression/Checksum/benchmarks_json/test_1d_laser_acceleration_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_LaserAcceleration_1d.json
rename to Regression/Checksum/benchmarks_json/test_1d_laser_acceleration_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/LaserInjection_1d.json b/Regression/Checksum/benchmarks_json/test_1d_laser_injection.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserInjection_1d.json
rename to Regression/Checksum/benchmarks_json/test_1d_laser_injection.json
diff --git a/Regression/Checksum/benchmarks_json/LaserInjectionFromLASYFile_1d.json b/Regression/Checksum/benchmarks_json/test_1d_laser_injection_from_lasy_file.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserInjectionFromLASYFile_1d.json
rename to Regression/Checksum/benchmarks_json/test_1d_laser_injection_from_lasy_file.json
diff --git a/Regression/Checksum/benchmarks_json/LaserInjectionFromLASYFile_1d_boost.json b/Regression/Checksum/benchmarks_json/test_1d_laser_injection_from_lasy_file_boost.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserInjectionFromLASYFile_1d_boost.json
rename to Regression/Checksum/benchmarks_json/test_1d_laser_injection_from_lasy_file_boost.json
diff --git a/Regression/Checksum/benchmarks_json/Python_ohms_law_solver_EM_modes_1d.json b/Regression/Checksum/benchmarks_json/test_1d_ohm_solver_em_modes_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_ohms_law_solver_EM_modes_1d.json
rename to Regression/Checksum/benchmarks_json/test_1d_ohm_solver_em_modes_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Python_ohms_law_solver_ion_beam_1d.json b/Regression/Checksum/benchmarks_json/test_1d_ohm_solver_ion_beam_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_ohms_law_solver_ion_beam_1d.json
rename to Regression/Checksum/benchmarks_json/test_1d_ohm_solver_ion_beam_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Python_PlasmaAcceleration1d.json b/Regression/Checksum/benchmarks_json/test_1d_plasma_acceleration_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_PlasmaAcceleration1d.json
rename to Regression/Checksum/benchmarks_json/test_1d_plasma_acceleration_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/resample_velocity_coincidence_thinning.json b/Regression/Checksum/benchmarks_json/test_1d_resample_velocity_coincidence_thinning.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/resample_velocity_coincidence_thinning.json
rename to Regression/Checksum/benchmarks_json/test_1d_resample_velocity_coincidence_thinning.json
diff --git a/Regression/Checksum/benchmarks_json/resample_velocity_coincidence_thinning_cartesian.json b/Regression/Checksum/benchmarks_json/test_1d_resample_velocity_coincidence_thinning_cartesian.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/resample_velocity_coincidence_thinning_cartesian.json
rename to Regression/Checksum/benchmarks_json/test_1d_resample_velocity_coincidence_thinning_cartesian.json
diff --git a/Regression/Checksum/benchmarks_json/SemiImplicitPicard_1d.json b/Regression/Checksum/benchmarks_json/test_1d_semi_implicit_picard.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/SemiImplicitPicard_1d.json
rename to Regression/Checksum/benchmarks_json/test_1d_semi_implicit_picard.json
diff --git a/Regression/Checksum/benchmarks_json/silver_mueller_1d.json b/Regression/Checksum/benchmarks_json/test_1d_silver_mueller.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/silver_mueller_1d.json
rename to Regression/Checksum/benchmarks_json/test_1d_silver_mueller.json
diff --git a/Regression/Checksum/benchmarks_json/ThetaImplicitPicard_1d.json b/Regression/Checksum/benchmarks_json/test_1d_theta_implicit_picard.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/ThetaImplicitPicard_1d.json
rename to Regression/Checksum/benchmarks_json/test_1d_theta_implicit_picard.json
diff --git a/Regression/Checksum/benchmarks_json/averaged_galilean_2d_psatd.json b/Regression/Checksum/benchmarks_json/test_2d_averaged_galilean_psatd.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/averaged_galilean_2d_psatd.json
rename to Regression/Checksum/benchmarks_json/test_2d_averaged_galilean_psatd.json
diff --git a/Regression/Checksum/benchmarks_json/averaged_galilean_2d_psatd_hybrid.json b/Regression/Checksum/benchmarks_json/test_2d_averaged_galilean_psatd_hybrid.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/averaged_galilean_2d_psatd_hybrid.json
rename to Regression/Checksum/benchmarks_json/test_2d_averaged_galilean_psatd_hybrid.json
diff --git a/Regression/Checksum/benchmarks_json/background_mcc.json b/Regression/Checksum/benchmarks_json/test_2d_background_mcc.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/background_mcc.json
rename to Regression/Checksum/benchmarks_json/test_2d_background_mcc.json
diff --git a/Regression/Checksum/benchmarks_json/background_mcc_dp_psp.json b/Regression/Checksum/benchmarks_json/test_2d_background_mcc_dp_psp.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/background_mcc_dp_psp.json
rename to Regression/Checksum/benchmarks_json/test_2d_background_mcc_dp_psp.json
diff --git a/Regression/Checksum/benchmarks_json/bilinear_filter.json b/Regression/Checksum/benchmarks_json/test_2d_bilinear_filter.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/bilinear_filter.json
rename to Regression/Checksum/benchmarks_json/test_2d_bilinear_filter.json
diff --git a/Regression/Checksum/benchmarks_json/collisionXZ.json b/Regression/Checksum/benchmarks_json/test_2d_collision_xz.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/collisionXZ.json
rename to Regression/Checksum/benchmarks_json/test_2d_collision_xz.json
diff --git a/Regression/Checksum/benchmarks_json/comoving_2d_psatd_hybrid.json b/Regression/Checksum/benchmarks_json/test_2d_comoving_psatd_hybrid.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/comoving_2d_psatd_hybrid.json
rename to Regression/Checksum/benchmarks_json/test_2d_comoving_psatd_hybrid.json
diff --git a/Regression/Checksum/benchmarks_json/dive_cleaning_2d.json b/Regression/Checksum/benchmarks_json/test_2d_dive_cleaning.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/dive_cleaning_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_dive_cleaning.json
diff --git a/Regression/Checksum/benchmarks_json/embedded_boundary_cube_2d.json b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_cube.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/embedded_boundary_cube_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_cube.json
diff --git a/Regression/Checksum/benchmarks_json/embedded_boundary_rotated_cube_2d.json b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_rotated_cube.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/embedded_boundary_rotated_cube_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_rotated_cube.json
diff --git a/Regression/Checksum/benchmarks_json/embedded_circle.json b/Regression/Checksum/benchmarks_json/test_2d_embedded_circle.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/embedded_circle.json
rename to Regression/Checksum/benchmarks_json/test_2d_embedded_circle.json
diff --git a/Regression/Checksum/benchmarks_json/EnergyConservingThermalPlasma.json b/Regression/Checksum/benchmarks_json/test_2d_energy_conserving_thermal_plasma.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/EnergyConservingThermalPlasma.json
rename to Regression/Checksum/benchmarks_json/test_2d_energy_conserving_thermal_plasma.json
diff --git a/Regression/Checksum/benchmarks_json/galilean_2d_psatd.json b/Regression/Checksum/benchmarks_json/test_2d_galilean_psatd.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/galilean_2d_psatd.json
rename to Regression/Checksum/benchmarks_json/test_2d_galilean_psatd.json
diff --git a/Regression/Checksum/benchmarks_json/galilean_2d_psatd_current_correction.json b/Regression/Checksum/benchmarks_json/test_2d_galilean_psatd_current_correction.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/galilean_2d_psatd_current_correction.json
rename to Regression/Checksum/benchmarks_json/test_2d_galilean_psatd_current_correction.json
diff --git a/Regression/Checksum/benchmarks_json/galilean_2d_psatd_current_correction_psb.json b/Regression/Checksum/benchmarks_json/test_2d_galilean_psatd_current_correction_psb.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/galilean_2d_psatd_current_correction_psb.json
rename to Regression/Checksum/benchmarks_json/test_2d_galilean_psatd_current_correction_psb.json
diff --git a/Regression/Checksum/benchmarks_json/galilean_2d_psatd_hybrid.json b/Regression/Checksum/benchmarks_json/test_2d_galilean_psatd_hybrid.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/galilean_2d_psatd_hybrid.json
rename to Regression/Checksum/benchmarks_json/test_2d_galilean_psatd_hybrid.json
diff --git a/Regression/Checksum/benchmarks_json/test_2d_id_cpu_read_picmi.json b/Regression/Checksum/benchmarks_json/test_2d_id_cpu_read_picmi.json
new file mode 100644
index 00000000000..a59b780ba38
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_2d_id_cpu_read_picmi.json
@@ -0,0 +1,14 @@
+{
+  "lev=0": {
+    "phi": 0.001516261625969309
+  },
+  "electrons": {
+    "particle_momentum_x": 7.751654441658017e-26,
+    "particle_momentum_y": 6.938526597814195e-26,
+    "particle_momentum_z": 6.572520038890184e-26,
+    "particle_newPid": 500.0,
+    "particle_position_x": 1.4999588764815643,
+    "particle_position_y": 1.4999551809411737,
+    "particle_weight": 200.0
+  }
+}
diff --git a/Regression/Checksum/benchmarks_json/ionization_boost.json b/Regression/Checksum/benchmarks_json/test_2d_ionization_boost.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/ionization_boost.json
rename to Regression/Checksum/benchmarks_json/test_2d_ionization_boost.json
diff --git a/Regression/Checksum/benchmarks_json/ionization_lab.json b/Regression/Checksum/benchmarks_json/test_2d_ionization_lab.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/ionization_lab.json
rename to Regression/Checksum/benchmarks_json/test_2d_ionization_lab.json
diff --git a/Regression/Checksum/benchmarks_json/Python_ionization.json b/Regression/Checksum/benchmarks_json/test_2d_ionization_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_ionization.json
rename to Regression/Checksum/benchmarks_json/test_2d_ionization_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_fluid_2D.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_fluid.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_fluid_2D.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_fluid.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_mr.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_mr.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR_anisotropic.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_mr_anisotropic.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR_anisotropic.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_mr_anisotropic.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR_momentum_conserving.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_mr_momentum_conserving.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR_momentum_conserving.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_mr_momentum_conserving.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR_psatd.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_mr_psatd.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR_psatd.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_mr_psatd.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_nodal.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_nodal.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_nodal.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_nodal.json
diff --git a/Regression/Checksum/benchmarks_json/Python_Langmuir_2d.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_Langmuir_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_current_correction.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_current_correction.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_current_correction.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_current_correction.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_current_correction_nodal.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_current_correction_nodal.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_current_correction_nodal.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_current_correction_nodal.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_momentum_conserving.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_momentum_conserving.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_momentum_conserving.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_momentum_conserving.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_multiJ.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_multiJ.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ_nodal.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_multiJ_nodal.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ_nodal.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_multiJ_nodal.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_nodal.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_nodal.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_nodal.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_nodal.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_Vay_deposition.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_vay_deposition.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_Vay_deposition.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_vay_deposition.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_Vay_deposition_nodal.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_vay_deposition_nodal.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_Vay_deposition_nodal.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_vay_deposition_nodal.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_Vay_deposition_particle_shape_4.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_vay_deposition_particle_shape_4.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_Vay_deposition_particle_shape_4.json
rename to Regression/Checksum/benchmarks_json/test_2d_langmuir_multi_psatd_vay_deposition_particle_shape_4.json
diff --git a/Regression/Checksum/benchmarks_json/Larmor.json b/Regression/Checksum/benchmarks_json/test_2d_larmor.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Larmor.json
rename to Regression/Checksum/benchmarks_json/test_2d_larmor.json
diff --git a/Regression/Checksum/benchmarks_json/LaserAccelerationBoost.json b/Regression/Checksum/benchmarks_json/test_2d_laser_acceleration_boosted.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserAccelerationBoost.json
rename to Regression/Checksum/benchmarks_json/test_2d_laser_acceleration_boosted.json
diff --git a/Regression/Checksum/benchmarks_json/LaserAccelerationMR.json b/Regression/Checksum/benchmarks_json/test_2d_laser_acceleration_mr.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserAccelerationMR.json
rename to Regression/Checksum/benchmarks_json/test_2d_laser_acceleration_mr.json
diff --git a/Regression/Checksum/benchmarks_json/Python_LaserAccelerationMR.json b/Regression/Checksum/benchmarks_json/test_2d_laser_acceleration_mr_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_LaserAccelerationMR.json
rename to Regression/Checksum/benchmarks_json/test_2d_laser_acceleration_mr_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/LaserInjection_2d.json b/Regression/Checksum/benchmarks_json/test_2d_laser_injection.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserInjection_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_laser_injection.json
diff --git a/Regression/Checksum/benchmarks_json/LaserInjectionFromBINARYFile.json b/Regression/Checksum/benchmarks_json/test_2d_laser_injection_from_binary_file.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserInjectionFromBINARYFile.json
rename to Regression/Checksum/benchmarks_json/test_2d_laser_injection_from_binary_file.json
diff --git a/Regression/Checksum/benchmarks_json/LaserInjectionFromLASYFile_2d.json b/Regression/Checksum/benchmarks_json/test_2d_laser_injection_from_lasy_file.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserInjectionFromLASYFile_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_laser_injection_from_lasy_file.json
diff --git a/Regression/Checksum/benchmarks_json/LaserIonAcc2d.json b/Regression/Checksum/benchmarks_json/test_2d_laser_ion_acc.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserIonAcc2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_laser_ion_acc.json
diff --git a/Regression/Checksum/benchmarks_json/LaserIonAcc2d_no_field_diag.json b/Regression/Checksum/benchmarks_json/test_2d_laser_ion_acc_no_field_diag.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserIonAcc2d_no_field_diag.json
rename to Regression/Checksum/benchmarks_json/test_2d_laser_ion_acc_no_field_diag.json
diff --git a/Regression/Checksum/benchmarks_json/Python_LaserIonAcc2d.json b/Regression/Checksum/benchmarks_json/test_2d_laser_ion_acc_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_LaserIonAcc2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_laser_ion_acc_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/LaserOnFine.json b/Regression/Checksum/benchmarks_json/test_2d_laser_on_fine.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserOnFine.json
rename to Regression/Checksum/benchmarks_json/test_2d_laser_on_fine.json
diff --git a/Regression/Checksum/benchmarks_json/leveling_thinning.json b/Regression/Checksum/benchmarks_json/test_2d_leveling_thinning.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/leveling_thinning.json
rename to Regression/Checksum/benchmarks_json/test_2d_leveling_thinning.json
diff --git a/Regression/Checksum/benchmarks_json/Maxwell_Hybrid_QED_solver.json b/Regression/Checksum/benchmarks_json/test_2d_maxwell_hybrid_qed_solver.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Maxwell_Hybrid_QED_solver.json
rename to Regression/Checksum/benchmarks_json/test_2d_maxwell_hybrid_qed_solver.json
diff --git a/Regression/Checksum/benchmarks_json/nci_corrector.json b/Regression/Checksum/benchmarks_json/test_2d_nci_corrector.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/nci_corrector.json
rename to Regression/Checksum/benchmarks_json/test_2d_nci_corrector.json
diff --git a/Regression/Checksum/benchmarks_json/nci_correctorMR.json b/Regression/Checksum/benchmarks_json/test_2d_nci_corrector_mr.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/nci_correctorMR.json
rename to Regression/Checksum/benchmarks_json/test_2d_nci_corrector_mr.json
diff --git a/Regression/Checksum/benchmarks_json/Python_ohms_law_solver_landau_damping_2d.json b/Regression/Checksum/benchmarks_json/test_2d_ohm_solver_landau_damping_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_ohms_law_solver_landau_damping_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_ohm_solver_landau_damping_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Python_ohms_law_solver_magnetic_reconnection_2d.json b/Regression/Checksum/benchmarks_json/test_2d_ohm_solver_magnetic_reconnection_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_ohms_law_solver_magnetic_reconnection_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_ohm_solver_magnetic_reconnection_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/parabolic_channel_initialization_2d_single_precision.json b/Regression/Checksum/benchmarks_json/test_2d_parabolic_channel_initialization.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/parabolic_channel_initialization_2d_single_precision.json
rename to Regression/Checksum/benchmarks_json/test_2d_parabolic_channel_initialization.json
diff --git a/Regression/Checksum/benchmarks_json/particle_thermal_boundary.json b/Regression/Checksum/benchmarks_json/test_2d_particle_thermal_boundary.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/particle_thermal_boundary.json
rename to Regression/Checksum/benchmarks_json/test_2d_particle_thermal_boundary.json
diff --git a/Regression/Checksum/benchmarks_json/particles_in_pml_2d.json b/Regression/Checksum/benchmarks_json/test_2d_particles_in_pml.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/particles_in_pml_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_particles_in_pml.json
diff --git a/Regression/Checksum/benchmarks_json/particles_in_pml_2d_MR.json b/Regression/Checksum/benchmarks_json/test_2d_particles_in_pml_mr.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/particles_in_pml_2d_MR.json
rename to Regression/Checksum/benchmarks_json/test_2d_particles_in_pml_mr.json
diff --git a/Regression/Checksum/benchmarks_json/PlasmaAccelerationBoost2d.json b/Regression/Checksum/benchmarks_json/test_2d_plasma_acceleration_boosted.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/PlasmaAccelerationBoost2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_plasma_acceleration_boosted.json
diff --git a/Regression/Checksum/benchmarks_json/PlasmaAccelerationMR.json b/Regression/Checksum/benchmarks_json/test_2d_plasma_acceleration_mr.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/PlasmaAccelerationMR.json
rename to Regression/Checksum/benchmarks_json/test_2d_plasma_acceleration_mr.json
diff --git a/Regression/Checksum/benchmarks_json/momentum-conserving-gather.json b/Regression/Checksum/benchmarks_json/test_2d_plasma_acceleration_mr_momentum_conserving.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/momentum-conserving-gather.json
rename to Regression/Checksum/benchmarks_json/test_2d_plasma_acceleration_mr_momentum_conserving.json
diff --git a/Regression/Checksum/benchmarks_json/PlasmaMirror.json b/Regression/Checksum/benchmarks_json/test_2d_plasma_mirror.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/PlasmaMirror.json
rename to Regression/Checksum/benchmarks_json/test_2d_plasma_mirror.json
diff --git a/Regression/Checksum/benchmarks_json/pml_x_ckc.json b/Regression/Checksum/benchmarks_json/test_2d_pml_x_ckc.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/pml_x_ckc.json
rename to Regression/Checksum/benchmarks_json/test_2d_pml_x_ckc.json
diff --git a/Regression/Checksum/benchmarks_json/pml_x_galilean.json b/Regression/Checksum/benchmarks_json/test_2d_pml_x_galilean.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/pml_x_galilean.json
rename to Regression/Checksum/benchmarks_json/test_2d_pml_x_galilean.json
diff --git a/Regression/Checksum/benchmarks_json/pml_x_psatd.json b/Regression/Checksum/benchmarks_json/test_2d_pml_x_psatd.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/pml_x_psatd.json
rename to Regression/Checksum/benchmarks_json/test_2d_pml_x_psatd.json
diff --git a/Regression/Checksum/benchmarks_json/pml_x_yee.json b/Regression/Checksum/benchmarks_json/test_2d_pml_x_yee.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/pml_x_yee.json
rename to Regression/Checksum/benchmarks_json/test_2d_pml_x_yee.json
diff --git a/Regression/Checksum/benchmarks_json/pml_x_yee_eb.json b/Regression/Checksum/benchmarks_json/test_2d_pml_x_yee_eb.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/pml_x_yee_eb.json
rename to Regression/Checksum/benchmarks_json/test_2d_pml_x_yee_eb.json
diff --git a/Regression/Checksum/benchmarks_json/Python_prev_positions.json b/Regression/Checksum/benchmarks_json/test_2d_prev_positions_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_prev_positions.json
rename to Regression/Checksum/benchmarks_json/test_2d_prev_positions_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Proton_Boron_Fusion_2D.json b/Regression/Checksum/benchmarks_json/test_2d_proton_boron_fusion.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Proton_Boron_Fusion_2D.json
rename to Regression/Checksum/benchmarks_json/test_2d_proton_boron_fusion.json
diff --git a/Regression/Checksum/benchmarks_json/Python_wrappers.json b/Regression/Checksum/benchmarks_json/test_2d_python_wrappers_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_wrappers.json
rename to Regression/Checksum/benchmarks_json/test_2d_python_wrappers_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/qed_breit_wheeler_2d.json b/Regression/Checksum/benchmarks_json/test_2d_qed_breit_wheeler.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/qed_breit_wheeler_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_qed_breit_wheeler.json
diff --git a/Regression/Checksum/benchmarks_json/qed_quantum_sync_2d.json b/Regression/Checksum/benchmarks_json/test_2d_qed_quantum_sync.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/qed_quantum_sync_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_qed_quantum_sync.json
diff --git a/Regression/Checksum/benchmarks_json/RefinedInjection.json b/Regression/Checksum/benchmarks_json/test_2d_refined_injection.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/RefinedInjection.json
rename to Regression/Checksum/benchmarks_json/test_2d_refined_injection.json
diff --git a/Regression/Checksum/benchmarks_json/RepellingParticles.json b/Regression/Checksum/benchmarks_json/test_2d_repelling_particles.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/RepellingParticles.json
rename to Regression/Checksum/benchmarks_json/test_2d_repelling_particles.json
diff --git a/Regression/Checksum/benchmarks_json/RigidInjection_BTD.json b/Regression/Checksum/benchmarks_json/test_2d_rigid_injection_btd.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/RigidInjection_BTD.json
rename to Regression/Checksum/benchmarks_json/test_2d_rigid_injection_btd.json
diff --git a/Regression/Checksum/benchmarks_json/RigidInjection_lab.json b/Regression/Checksum/benchmarks_json/test_2d_rigid_injection_lab.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/RigidInjection_lab.json
rename to Regression/Checksum/benchmarks_json/test_2d_rigid_injection_lab.json
diff --git a/Regression/Checksum/benchmarks_json/silver_mueller_2d_x.json b/Regression/Checksum/benchmarks_json/test_2d_silver_mueller_x.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/silver_mueller_2d_x.json
rename to Regression/Checksum/benchmarks_json/test_2d_silver_mueller_x.json
diff --git a/Regression/Checksum/benchmarks_json/silver_mueller_2d_z.json b/Regression/Checksum/benchmarks_json/test_2d_silver_mueller_z.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/silver_mueller_2d_z.json
rename to Regression/Checksum/benchmarks_json/test_2d_silver_mueller_z.json
diff --git a/Regression/Checksum/benchmarks_json/space_charge_initialization_2d.json b/Regression/Checksum/benchmarks_json/test_2d_space_charge_initialization.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/space_charge_initialization_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_space_charge_initialization.json
diff --git a/Regression/Checksum/benchmarks_json/subcyclingMR.json b/Regression/Checksum/benchmarks_json/test_2d_subcycling_mr.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/subcyclingMR.json
rename to Regression/Checksum/benchmarks_json/test_2d_subcycling_mr.json
diff --git a/Regression/Checksum/benchmarks_json/ThetaImplicitJFNK_VandB_2d.json b/Regression/Checksum/benchmarks_json/test_2d_theta_implicit_jfnk_vandb.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/ThetaImplicitJFNK_VandB_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_theta_implicit_jfnk_vandb.json
diff --git a/Regression/Checksum/benchmarks_json/ThetaImplicitJFNK_VandB_2d_PICMI.json b/Regression/Checksum/benchmarks_json/test_2d_theta_implicit_jfnk_vandb_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/ThetaImplicitJFNK_VandB_2d_PICMI.json
rename to Regression/Checksum/benchmarks_json/test_2d_theta_implicit_jfnk_vandb_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Uniform_2d.json b/Regression/Checksum/benchmarks_json/test_2d_uniform_plasma.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Uniform_2d.json
rename to Regression/Checksum/benchmarks_json/test_2d_uniform_plasma.json
diff --git a/Regression/Checksum/benchmarks_json/VayDeposition2D.json b/Regression/Checksum/benchmarks_json/test_2d_vay_deposition.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/VayDeposition2D.json
rename to Regression/Checksum/benchmarks_json/test_2d_vay_deposition.json
diff --git a/Regression/Checksum/benchmarks_json/restart.json b/Regression/Checksum/benchmarks_json/test_3d_acceleration.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/restart.json
rename to Regression/Checksum/benchmarks_json/test_3d_acceleration.json
diff --git a/Regression/Checksum/benchmarks_json/restart_psatd.json b/Regression/Checksum/benchmarks_json/test_3d_acceleration_psatd.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/restart_psatd.json
rename to Regression/Checksum/benchmarks_json/test_3d_acceleration_psatd.json
diff --git a/Regression/Checksum/benchmarks_json/restart_psatd_time_avg.json b/Regression/Checksum/benchmarks_json/test_3d_acceleration_psatd_time_avg.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/restart_psatd_time_avg.json
rename to Regression/Checksum/benchmarks_json/test_3d_acceleration_psatd_time_avg.json
diff --git a/Regression/Checksum/benchmarks_json/averaged_galilean_3d_psatd.json b/Regression/Checksum/benchmarks_json/test_3d_averaged_galilean_psatd.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/averaged_galilean_3d_psatd.json
rename to Regression/Checksum/benchmarks_json/test_3d_averaged_galilean_psatd.json
diff --git a/Regression/Checksum/benchmarks_json/averaged_galilean_3d_psatd_hybrid.json b/Regression/Checksum/benchmarks_json/test_3d_averaged_galilean_psatd_hybrid.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/averaged_galilean_3d_psatd_hybrid.json
rename to Regression/Checksum/benchmarks_json/test_3d_averaged_galilean_psatd_hybrid.json
diff --git a/Regression/Checksum/benchmarks_json/BeamBeamCollision.json b/Regression/Checksum/benchmarks_json/test_3d_beam_beam_collision.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/BeamBeamCollision.json
rename to Regression/Checksum/benchmarks_json/test_3d_beam_beam_collision.json
diff --git a/Regression/Checksum/benchmarks_json/collider_diagnostics.json b/Regression/Checksum/benchmarks_json/test_3d_collider_diagnostics.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/collider_diagnostics.json
rename to Regression/Checksum/benchmarks_json/test_3d_collider_diagnostics.json
diff --git a/Regression/Checksum/benchmarks_json/collisionISO.json b/Regression/Checksum/benchmarks_json/test_3d_collision_iso.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/collisionISO.json
rename to Regression/Checksum/benchmarks_json/test_3d_collision_iso.json
diff --git a/Regression/Checksum/benchmarks_json/collisionXYZ.json b/Regression/Checksum/benchmarks_json/test_3d_collision_xyz.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/collisionXYZ.json
rename to Regression/Checksum/benchmarks_json/test_3d_collision_xyz.json
diff --git a/Regression/Checksum/benchmarks_json/Deuterium_Deuterium_Fusion_3D.json b/Regression/Checksum/benchmarks_json/test_3d_deuterium_deuterium_fusion.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Deuterium_Deuterium_Fusion_3D.json
rename to Regression/Checksum/benchmarks_json/test_3d_deuterium_deuterium_fusion.json
diff --git a/Regression/Checksum/benchmarks_json/Deuterium_Deuterium_Fusion_3D_intraspecies.json b/Regression/Checksum/benchmarks_json/test_3d_deuterium_deuterium_fusion_intraspecies.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Deuterium_Deuterium_Fusion_3D_intraspecies.json
rename to Regression/Checksum/benchmarks_json/test_3d_deuterium_deuterium_fusion_intraspecies.json
diff --git a/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_3D.json b/Regression/Checksum/benchmarks_json/test_3d_deuterium_tritium_fusion.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_3D.json
rename to Regression/Checksum/benchmarks_json/test_3d_deuterium_tritium_fusion.json
diff --git a/Regression/Checksum/benchmarks_json/diff_lumi_diag.json b/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/diff_lumi_diag.json
rename to Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag.json
diff --git a/Regression/Checksum/benchmarks_json/divb_cleaning_3d.json b/Regression/Checksum/benchmarks_json/test_3d_divb_cleaning.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/divb_cleaning_3d.json
rename to Regression/Checksum/benchmarks_json/test_3d_divb_cleaning.json
diff --git a/Regression/Checksum/benchmarks_json/dive_cleaning_3d.json b/Regression/Checksum/benchmarks_json/test_3d_dive_cleaning.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/dive_cleaning_3d.json
rename to Regression/Checksum/benchmarks_json/test_3d_dive_cleaning.json
diff --git a/Regression/Checksum/benchmarks_json/Python_restart_eb.json b/Regression/Checksum/benchmarks_json/test_3d_eb_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_restart_eb.json
rename to Regression/Checksum/benchmarks_json/test_3d_eb_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphere.json b/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/ElectrostaticSphere.json
rename to Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere.json
diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB.json b/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_eb.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/ElectrostaticSphereEB.json
rename to Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_eb.json
diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_mixedBCs.json b/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_eb_mixed_bc.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_mixedBCs.json
rename to Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_eb_mixed_bc.json
diff --git a/Regression/Checksum/benchmarks_json/Python_ElectrostaticSphereEB.json b/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_eb_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_ElectrostaticSphereEB.json
rename to Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_eb_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphereLabFrame.json b/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_lab_frame.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/ElectrostaticSphereLabFrame.json
rename to Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_lab_frame.json
diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphereLabFrame_MR_emass_10.json b/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_lab_frame_mr_emass_10.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/ElectrostaticSphereLabFrame_MR_emass_10.json
rename to Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_lab_frame_mr_emass_10.json
diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphereRelNodal.json b/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_rel_nodal.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/ElectrostaticSphereRelNodal.json
rename to Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_rel_nodal.json
diff --git a/Regression/Checksum/benchmarks_json/embedded_boundary_cube.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_cube.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/embedded_boundary_cube.json
rename to Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_cube.json
diff --git a/Regression/Checksum/benchmarks_json/embedded_boundary_cube_macroscopic.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_cube_macroscopic.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/embedded_boundary_cube_macroscopic.json
rename to Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_cube_macroscopic.json
diff --git a/Regression/Checksum/benchmarks_json/embedded_boundary_rotated_cube.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_rotated_cube.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/embedded_boundary_rotated_cube.json
rename to Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_rotated_cube.json
diff --git a/Regression/Checksum/benchmarks_json/FluxInjection3D.json b/Regression/Checksum/benchmarks_json/test_3d_flux_injection.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/FluxInjection3D.json
rename to Regression/Checksum/benchmarks_json/test_3d_flux_injection.json
diff --git a/Regression/Checksum/benchmarks_json/focusing_gaussian_beam.json b/Regression/Checksum/benchmarks_json/test_3d_focusing_gaussian_beam.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/focusing_gaussian_beam.json
rename to Regression/Checksum/benchmarks_json/test_3d_focusing_gaussian_beam.json
diff --git a/Regression/Checksum/benchmarks_json/galilean_3d_psatd.json b/Regression/Checksum/benchmarks_json/test_3d_galilean_psatd.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/galilean_3d_psatd.json
rename to Regression/Checksum/benchmarks_json/test_3d_galilean_psatd.json
diff --git a/Regression/Checksum/benchmarks_json/galilean_3d_psatd_current_correction.json b/Regression/Checksum/benchmarks_json/test_3d_galilean_psatd_current_correction.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/galilean_3d_psatd_current_correction.json
rename to Regression/Checksum/benchmarks_json/test_3d_galilean_psatd_current_correction.json
diff --git a/Regression/Checksum/benchmarks_json/galilean_3d_psatd_current_correction_psb.json b/Regression/Checksum/benchmarks_json/test_3d_galilean_psatd_current_correction_psb.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/galilean_3d_psatd_current_correction_psb.json
rename to Regression/Checksum/benchmarks_json/test_3d_galilean_psatd_current_correction_psb.json
diff --git a/Regression/Checksum/benchmarks_json/Python_gaussian_beam.json b/Regression/Checksum/benchmarks_json/test_3d_gaussian_beam_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_gaussian_beam.json
rename to Regression/Checksum/benchmarks_json/test_3d_gaussian_beam_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/hard_edged_quadrupoles.json b/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/hard_edged_quadrupoles.json
rename to Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles.json
diff --git a/Regression/Checksum/benchmarks_json/hard_edged_quadrupoles_boosted.json b/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/hard_edged_quadrupoles_boosted.json
rename to Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json
diff --git a/Regression/Checksum/benchmarks_json/hard_edged_quadrupoles_moving.json b/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_moving.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/hard_edged_quadrupoles_moving.json
rename to Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_moving.json
diff --git a/Regression/Checksum/benchmarks_json/initial_distribution.json b/Regression/Checksum/benchmarks_json/test_3d_initial_distribution.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/initial_distribution.json
rename to Regression/Checksum/benchmarks_json/test_3d_initial_distribution.json
diff --git a/Regression/Checksum/benchmarks_json/ion_stopping.json b/Regression/Checksum/benchmarks_json/test_3d_ion_stopping.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/ion_stopping.json
rename to Regression/Checksum/benchmarks_json/test_3d_ion_stopping.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_fluid_multi.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_fluid.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_fluid_multi.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_fluid.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_nodal.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_nodal.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_nodal.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_nodal.json
diff --git a/Regression/Checksum/benchmarks_json/Python_Langmuir.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_Langmuir.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_psatd.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_current_correction.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_current_correction.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_current_correction.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_current_correction.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_current_correction_nodal.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_current_correction_nodal.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_current_correction_nodal.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_current_correction_nodal.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_div_cleaning.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_div_cleaning.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_div_cleaning.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_div_cleaning.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_momentum_conserving.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_momentum_conserving.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_momentum_conserving.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_momentum_conserving.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_multiJ.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_multiJ.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ_nodal.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_multiJ_nodal.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ_nodal.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_multiJ_nodal.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_nodal.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_nodal.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_nodal.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_nodal.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_single_precision.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_single_precision.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_single_precision.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_single_precision.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_Vay_deposition.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_vay_deposition.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_Vay_deposition.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_vay_deposition.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_Vay_deposition_nodal.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_vay_deposition_nodal.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_Vay_deposition_nodal.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_psatd_vay_deposition_nodal.json
diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_single_precision.json b/Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_single_precision.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Langmuir_multi_single_precision.json
rename to Regression/Checksum/benchmarks_json/test_3d_langmuir_multi_single_precision.json
diff --git a/Regression/Checksum/benchmarks_json/LaserAcceleration.json b/Regression/Checksum/benchmarks_json/test_3d_laser_acceleration.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserAcceleration.json
rename to Regression/Checksum/benchmarks_json/test_3d_laser_acceleration.json
diff --git a/Regression/Checksum/benchmarks_json/LaserAcceleration_BTD.json b/Regression/Checksum/benchmarks_json/test_3d_laser_acceleration_btd.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserAcceleration_BTD.json
rename to Regression/Checksum/benchmarks_json/test_3d_laser_acceleration_btd.json
diff --git a/Regression/Checksum/benchmarks_json/Python_LaserAcceleration.json b/Regression/Checksum/benchmarks_json/test_3d_laser_acceleration_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_LaserAcceleration.json
rename to Regression/Checksum/benchmarks_json/test_3d_laser_acceleration_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/LaserAcceleration_single_precision_comms.json b/Regression/Checksum/benchmarks_json/test_3d_laser_acceleration_single_precision_comms.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserAcceleration_single_precision_comms.json
rename to Regression/Checksum/benchmarks_json/test_3d_laser_acceleration_single_precision_comms.json
diff --git a/Regression/Checksum/benchmarks_json/LaserInjection.json b/Regression/Checksum/benchmarks_json/test_3d_laser_injection.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserInjection.json
rename to Regression/Checksum/benchmarks_json/test_3d_laser_injection.json
diff --git a/Regression/Checksum/benchmarks_json/LaserInjectionFromLASYFile.json b/Regression/Checksum/benchmarks_json/test_3d_laser_injection_from_lasy_file.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/LaserInjectionFromLASYFile.json
rename to Regression/Checksum/benchmarks_json/test_3d_laser_injection_from_lasy_file.json
diff --git a/Regression/Checksum/benchmarks_json/Python_LoadExternalGridField3D.json b/Regression/Checksum/benchmarks_json/test_3d_load_external_field_grid_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_LoadExternalGridField3D.json
rename to Regression/Checksum/benchmarks_json/test_3d_load_external_field_grid_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Python_LoadExternalParticleField3D.json b/Regression/Checksum/benchmarks_json/test_3d_load_external_field_particle_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_LoadExternalParticleField3D.json
rename to Regression/Checksum/benchmarks_json/test_3d_load_external_field_particle_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/magnetostatic_eb_3d.json b/Regression/Checksum/benchmarks_json/test_3d_magnetostatic_eb.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/magnetostatic_eb_3d.json
rename to Regression/Checksum/benchmarks_json/test_3d_magnetostatic_eb.json
diff --git a/Regression/Checksum/benchmarks_json/Python_magnetostatic_eb_3d.json b/Regression/Checksum/benchmarks_json/test_3d_magnetostatic_eb_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_magnetostatic_eb_3d.json
rename to Regression/Checksum/benchmarks_json/test_3d_magnetostatic_eb_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/NodalElectrostaticSolver.json b/Regression/Checksum/benchmarks_json/test_3d_nodal_electrostatic_solver.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/NodalElectrostaticSolver.json
rename to Regression/Checksum/benchmarks_json/test_3d_nodal_electrostatic_solver.json
diff --git a/Regression/Checksum/benchmarks_json/openbc_poisson_solver.json b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/openbc_poisson_solver.json
rename to Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json
diff --git a/Regression/Checksum/benchmarks_json/particle_boundaries_3d.json b/Regression/Checksum/benchmarks_json/test_3d_particle_boundaries.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/particle_boundaries_3d.json
rename to Regression/Checksum/benchmarks_json/test_3d_particle_boundaries.json
diff --git a/Regression/Checksum/benchmarks_json/particle_fields_diags.json b/Regression/Checksum/benchmarks_json/test_3d_particle_fields_diags.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/particle_fields_diags.json
rename to Regression/Checksum/benchmarks_json/test_3d_particle_fields_diags.json
diff --git a/Regression/Checksum/benchmarks_json/particle_fields_diags_single_precision.json b/Regression/Checksum/benchmarks_json/test_3d_particle_fields_diags_single_precision.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/particle_fields_diags_single_precision.json
rename to Regression/Checksum/benchmarks_json/test_3d_particle_fields_diags_single_precision.json
diff --git a/Regression/Checksum/benchmarks_json/particle_pusher.json b/Regression/Checksum/benchmarks_json/test_3d_particle_pusher.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/particle_pusher.json
rename to Regression/Checksum/benchmarks_json/test_3d_particle_pusher.json
diff --git a/Regression/Checksum/benchmarks_json/particles_in_pml.json b/Regression/Checksum/benchmarks_json/test_3d_particles_in_pml.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/particles_in_pml.json
rename to Regression/Checksum/benchmarks_json/test_3d_particles_in_pml.json
diff --git a/Regression/Checksum/benchmarks_json/particles_in_pml_3d_MR.json b/Regression/Checksum/benchmarks_json/test_3d_particles_in_pml_mr.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/particles_in_pml_3d_MR.json
rename to Regression/Checksum/benchmarks_json/test_3d_particles_in_pml_mr.json
diff --git a/Regression/Checksum/benchmarks_json/PEC_field.json b/Regression/Checksum/benchmarks_json/test_3d_pec_field.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/PEC_field.json
rename to Regression/Checksum/benchmarks_json/test_3d_pec_field.json
diff --git a/Regression/Checksum/benchmarks_json/PEC_field_mr.json b/Regression/Checksum/benchmarks_json/test_3d_pec_field_mr.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/PEC_field_mr.json
rename to Regression/Checksum/benchmarks_json/test_3d_pec_field_mr.json
diff --git a/Regression/Checksum/benchmarks_json/PEC_particle.json b/Regression/Checksum/benchmarks_json/test_3d_pec_particle.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/PEC_particle.json
rename to Regression/Checksum/benchmarks_json/test_3d_pec_particle.json
diff --git a/Regression/Checksum/benchmarks_json/photon_pusher.json b/Regression/Checksum/benchmarks_json/test_3d_photon_pusher.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/photon_pusher.json
rename to Regression/Checksum/benchmarks_json/test_3d_photon_pusher.json
diff --git a/Regression/Checksum/benchmarks_json/PlasmaAccelerationBoost3d.json b/Regression/Checksum/benchmarks_json/test_3d_plasma_acceleration_boosted.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/PlasmaAccelerationBoost3d.json
rename to Regression/Checksum/benchmarks_json/test_3d_plasma_acceleration_boosted.json
diff --git a/Regression/Checksum/benchmarks_json/PlasmaAccelerationBoost3d_hybrid.json b/Regression/Checksum/benchmarks_json/test_3d_plasma_acceleration_boosted_hybrid.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/PlasmaAccelerationBoost3d_hybrid.json
rename to Regression/Checksum/benchmarks_json/test_3d_plasma_acceleration_boosted_hybrid.json
diff --git a/Regression/Checksum/benchmarks_json/Python_PlasmaAccelerationMR.json b/Regression/Checksum/benchmarks_json/test_3d_plasma_acceleration_mr_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_PlasmaAccelerationMR.json
rename to Regression/Checksum/benchmarks_json/test_3d_plasma_acceleration_mr_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Python_PlasmaAcceleration.json b/Regression/Checksum/benchmarks_json/test_3d_plasma_acceleration_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_PlasmaAcceleration.json
rename to Regression/Checksum/benchmarks_json/test_3d_plasma_acceleration_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Plasma_lens.json b/Regression/Checksum/benchmarks_json/test_3d_plasma_lens.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Plasma_lens.json
rename to Regression/Checksum/benchmarks_json/test_3d_plasma_lens.json
diff --git a/Regression/Checksum/benchmarks_json/Plasma_lens_boosted.json b/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Plasma_lens_boosted.json
rename to Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json
diff --git a/Regression/Checksum/benchmarks_json/hard_edged_plasma_lens.json b/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_hard_edged.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/hard_edged_plasma_lens.json
rename to Regression/Checksum/benchmarks_json/test_3d_plasma_lens_hard_edged.json
diff --git a/Regression/Checksum/benchmarks_json/Plasma_lens_short.json b/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_short.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Plasma_lens_short.json
rename to Regression/Checksum/benchmarks_json/test_3d_plasma_lens_short.json
diff --git a/Regression/Checksum/benchmarks_json/pml_psatd_dive_divb_cleaning.json b/Regression/Checksum/benchmarks_json/test_3d_pml_psatd_dive_divb_cleaning.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/pml_psatd_dive_divb_cleaning.json
rename to Regression/Checksum/benchmarks_json/test_3d_pml_psatd_dive_divb_cleaning.json
diff --git a/Regression/Checksum/benchmarks_json/Point_of_contact_EB_3d.json b/Regression/Checksum/benchmarks_json/test_3d_point_of_contact_eb.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Point_of_contact_EB_3d.json
rename to Regression/Checksum/benchmarks_json/test_3d_point_of_contact_eb.json
diff --git a/Regression/Checksum/benchmarks_json/Python_projection_divb_cleaner_callback_3d.json b/Regression/Checksum/benchmarks_json/test_3d_projection_divb_cleaner_callback_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_projection_divb_cleaner_callback_3d.json
rename to Regression/Checksum/benchmarks_json/test_3d_projection_divb_cleaner_callback_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Python_projection_divb_cleaner_3d.json b/Regression/Checksum/benchmarks_json/test_3d_projection_divb_cleaner_picmi.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Python_projection_divb_cleaner_3d.json
rename to Regression/Checksum/benchmarks_json/test_3d_projection_divb_cleaner_picmi.json
diff --git a/Regression/Checksum/benchmarks_json/Proton_Boron_Fusion_3D.json b/Regression/Checksum/benchmarks_json/test_3d_proton_boron_fusion.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/Proton_Boron_Fusion_3D.json
rename to Regression/Checksum/benchmarks_json/test_3d_proton_boron_fusion.json
diff --git a/Regression/Checksum/benchmarks_json/qed_breit_wheeler_3d.json b/Regression/Checksum/benchmarks_json/test_3d_qed_breit_wheeler.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/qed_breit_wheeler_3d.json
rename to Regression/Checksum/benchmarks_json/test_3d_qed_breit_wheeler.json
diff --git a/Regression/Checksum/benchmarks_json/qed_quantum_sync_3d.json b/Regression/Checksum/benchmarks_json/test_3d_qed_quantum_sync.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/qed_quantum_sync_3d.json
rename to Regression/Checksum/benchmarks_json/test_3d_qed_quantum_sync.json
diff --git a/Regression/Checksum/benchmarks_json/qed_schwinger1.json b/Regression/Checksum/benchmarks_json/test_3d_qed_schwinger_1.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/qed_schwinger1.json
rename to Regression/Checksum/benchmarks_json/test_3d_qed_schwinger_1.json
diff --git a/Regression/Checksum/benchmarks_json/qed_schwinger2.json b/Regression/Checksum/benchmarks_json/test_3d_qed_schwinger_2.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/qed_schwinger2.json
rename to Regression/Checksum/benchmarks_json/test_3d_qed_schwinger_2.json
diff --git a/Regression/Checksum/benchmarks_json/qed_schwinger3.json b/Regression/Checksum/benchmarks_json/test_3d_qed_schwinger_3.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/qed_schwinger3.json
rename to Regression/Checksum/benchmarks_json/test_3d_qed_schwinger_3.json
diff --git a/Regression/Checksum/benchmarks_json/qed_schwinger4.json b/Regression/Checksum/benchmarks_json/test_3d_qed_schwinger_4.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/qed_schwinger4.json
rename to Regression/Checksum/benchmarks_json/test_3d_qed_schwinger_4.json
diff --git a/Regression/Checksum/benchmarks_json/radiation_reaction.json b/Regression/Checksum/benchmarks_json/test_3d_radiation_reaction.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/radiation_reaction.json
rename to Regression/Checksum/benchmarks_json/test_3d_radiation_reaction.json
diff --git a/Regression/Checksum/benchmarks_json/reduced_diags.json b/Regression/Checksum/benchmarks_json/test_3d_reduced_diags.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/reduced_diags.json
rename to Regression/Checksum/benchmarks_json/test_3d_reduced_diags.json
diff --git a/Regression/Checksum/benchmarks_json/reduced_diags_loadbalancecosts_heuristic.json b/Regression/Checksum/benchmarks_json/test_3d_reduced_diags_load_balance_costs_heuristic.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/reduced_diags_loadbalancecosts_heuristic.json
rename to Regression/Checksum/benchmarks_json/test_3d_reduced_diags_load_balance_costs_heuristic.json
diff --git a/Regression/Checksum/benchmarks_json/reduced_diags_loadbalancecosts_timers.json b/Regression/Checksum/benchmarks_json/test_3d_reduced_diags_load_balance_costs_timers.json
similarity index 100%
rename from Regression/Checksum/benchmarks_json/reduced_diags_loadbalancecosts_timers.json
rename to Regression/Checksum/benchmarks_json/test_3d_reduced_diags_load_balance_costs_timers.json
diff --git a/Regression/Checksum/benchmarks_json/reduced_diags_loadbalancecosts_timers_psatd.json
b/Regression/Checksum/benchmarks_json/test_3d_reduced_diags_load_balance_costs_timers_psatd.json similarity index 100% rename from Regression/Checksum/benchmarks_json/reduced_diags_loadbalancecosts_timers_psatd.json rename to Regression/Checksum/benchmarks_json/test_3d_reduced_diags_load_balance_costs_timers_psatd.json diff --git a/Regression/Checksum/benchmarks_json/reduced_diags_single_precision.json b/Regression/Checksum/benchmarks_json/test_3d_reduced_diags_single_precision.json similarity index 100% rename from Regression/Checksum/benchmarks_json/reduced_diags_single_precision.json rename to Regression/Checksum/benchmarks_json/test_3d_reduced_diags_single_precision.json diff --git a/Regression/Checksum/benchmarks_json/relativistic_space_charge_initialization.json b/Regression/Checksum/benchmarks_json/test_3d_relativistic_space_charge_initialization.json similarity index 100% rename from Regression/Checksum/benchmarks_json/relativistic_space_charge_initialization.json rename to Regression/Checksum/benchmarks_json/test_3d_relativistic_space_charge_initialization.json diff --git a/Regression/Checksum/benchmarks_json/space_charge_initialization.json b/Regression/Checksum/benchmarks_json/test_3d_space_charge_initialization.json similarity index 100% rename from Regression/Checksum/benchmarks_json/space_charge_initialization.json rename to Regression/Checksum/benchmarks_json/test_3d_space_charge_initialization.json diff --git a/Regression/Checksum/benchmarks_json/uniform_plasma_restart.json b/Regression/Checksum/benchmarks_json/test_3d_uniform_plasma.json similarity index 100% rename from Regression/Checksum/benchmarks_json/uniform_plasma_restart.json rename to Regression/Checksum/benchmarks_json/test_3d_uniform_plasma.json diff --git a/Regression/Checksum/benchmarks_json/uniform_plasma_multiJ.json b/Regression/Checksum/benchmarks_json/test_3d_uniform_plasma_multiJ.json similarity index 100% rename from Regression/Checksum/benchmarks_json/uniform_plasma_multiJ.json rename to Regression/Checksum/benchmarks_json/test_3d_uniform_plasma_multiJ.json diff --git a/Regression/Checksum/benchmarks_json/VayDeposition3D.json b/Regression/Checksum/benchmarks_json/test_3d_vay_deposition.json similarity index 100% rename from Regression/Checksum/benchmarks_json/VayDeposition3D.json rename to Regression/Checksum/benchmarks_json/test_3d_vay_deposition.json diff --git a/Regression/Checksum/benchmarks_json/BTD_rz.json b/Regression/Checksum/benchmarks_json/test_rz_btd.json similarity index 100% rename from Regression/Checksum/benchmarks_json/BTD_rz.json rename to Regression/Checksum/benchmarks_json/test_rz_btd.json diff --git a/Regression/Checksum/benchmarks_json/collisionRZ.json b/Regression/Checksum/benchmarks_json/test_rz_collision.json similarity index 100% rename from Regression/Checksum/benchmarks_json/collisionRZ.json rename to Regression/Checksum/benchmarks_json/test_rz_collision.json diff --git a/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_RZ.json b/Regression/Checksum/benchmarks_json/test_rz_deuterium_tritium_fusion.json similarity index 100% rename from Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_RZ.json rename to Regression/Checksum/benchmarks_json/test_rz_deuterium_tritium_fusion.json diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphereRZ.json b/Regression/Checksum/benchmarks_json/test_rz_electrostatic_sphere.json similarity index 100% rename from Regression/Checksum/benchmarks_json/ElectrostaticSphereRZ.json rename to 
Regression/Checksum/benchmarks_json/test_rz_electrostatic_sphere.json diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_RZ.json b/Regression/Checksum/benchmarks_json/test_rz_electrostatic_sphere_eb.json similarity index 100% rename from Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_RZ.json rename to Regression/Checksum/benchmarks_json/test_rz_electrostatic_sphere_eb.json diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_RZ_MR.json b/Regression/Checksum/benchmarks_json/test_rz_electrostatic_sphere_eb_mr.json similarity index 100% rename from Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_RZ_MR.json rename to Regression/Checksum/benchmarks_json/test_rz_electrostatic_sphere_eb_mr.json diff --git a/Regression/Checksum/benchmarks_json/EmbeddedBoundaryDiffraction.json b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_diffraction.json similarity index 100% rename from Regression/Checksum/benchmarks_json/EmbeddedBoundaryDiffraction.json rename to Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_diffraction.json diff --git a/Regression/Checksum/benchmarks_json/FluxInjection.json b/Regression/Checksum/benchmarks_json/test_rz_flux_injection.json similarity index 100% rename from Regression/Checksum/benchmarks_json/FluxInjection.json rename to Regression/Checksum/benchmarks_json/test_rz_flux_injection.json diff --git a/Regression/Checksum/benchmarks_json/galilean_rz_psatd.json b/Regression/Checksum/benchmarks_json/test_rz_galilean_psatd.json similarity index 100% rename from Regression/Checksum/benchmarks_json/galilean_rz_psatd.json rename to Regression/Checksum/benchmarks_json/test_rz_galilean_psatd.json diff --git a/Regression/Checksum/benchmarks_json/galilean_rz_psatd_current_correction.json b/Regression/Checksum/benchmarks_json/test_rz_galilean_psatd_current_correction.json similarity index 100% rename from Regression/Checksum/benchmarks_json/galilean_rz_psatd_current_correction.json rename to Regression/Checksum/benchmarks_json/test_rz_galilean_psatd_current_correction.json diff --git a/Regression/Checksum/benchmarks_json/galilean_rz_psatd_current_correction_psb.json b/Regression/Checksum/benchmarks_json/test_rz_galilean_psatd_current_correction_psb.json similarity index 100% rename from Regression/Checksum/benchmarks_json/galilean_rz_psatd_current_correction_psb.json rename to Regression/Checksum/benchmarks_json/test_rz_galilean_psatd_current_correction_psb.json diff --git a/Regression/Checksum/benchmarks_json/Langmuir_fluid_RZ.json b/Regression/Checksum/benchmarks_json/test_rz_langmuir_fluid.json similarity index 100% rename from Regression/Checksum/benchmarks_json/Langmuir_fluid_RZ.json rename to Regression/Checksum/benchmarks_json/test_rz_langmuir_fluid.json diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_rz.json b/Regression/Checksum/benchmarks_json/test_rz_langmuir_multi.json similarity index 100% rename from Regression/Checksum/benchmarks_json/Langmuir_multi_rz.json rename to Regression/Checksum/benchmarks_json/test_rz_langmuir_multi.json diff --git a/Regression/Checksum/benchmarks_json/Python_Langmuir_rz_multimode.json b/Regression/Checksum/benchmarks_json/test_rz_langmuir_multi_picmi.json similarity index 100% rename from Regression/Checksum/benchmarks_json/Python_Langmuir_rz_multimode.json rename to Regression/Checksum/benchmarks_json/test_rz_langmuir_multi_picmi.json diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_rz_psatd.json 
b/Regression/Checksum/benchmarks_json/test_rz_langmuir_multi_psatd.json similarity index 100% rename from Regression/Checksum/benchmarks_json/Langmuir_multi_rz_psatd.json rename to Regression/Checksum/benchmarks_json/test_rz_langmuir_multi_psatd.json diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_rz_psatd_current_correction.json b/Regression/Checksum/benchmarks_json/test_rz_langmuir_multi_psatd_current_correction.json similarity index 100% rename from Regression/Checksum/benchmarks_json/Langmuir_multi_rz_psatd_current_correction.json rename to Regression/Checksum/benchmarks_json/test_rz_langmuir_multi_psatd_current_correction.json diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_rz_psatd_multiJ.json b/Regression/Checksum/benchmarks_json/test_rz_langmuir_multi_psatd_multiJ.json similarity index 100% rename from Regression/Checksum/benchmarks_json/Langmuir_multi_rz_psatd_multiJ.json rename to Regression/Checksum/benchmarks_json/test_rz_langmuir_multi_psatd_multiJ.json diff --git a/Regression/Checksum/benchmarks_json/LaserAccelerationRZ.json b/Regression/Checksum/benchmarks_json/test_rz_laser_acceleration.json similarity index 100% rename from Regression/Checksum/benchmarks_json/LaserAccelerationRZ.json rename to Regression/Checksum/benchmarks_json/test_rz_laser_acceleration.json diff --git a/Regression/Checksum/benchmarks_json/Python_LaserAccelerationRZ.json b/Regression/Checksum/benchmarks_json/test_rz_laser_acceleration_picmi.json similarity index 100% rename from Regression/Checksum/benchmarks_json/Python_LaserAccelerationRZ.json rename to Regression/Checksum/benchmarks_json/test_rz_laser_acceleration_picmi.json diff --git a/Regression/Checksum/benchmarks_json/LaserInjectionFromRZLASYFile.json b/Regression/Checksum/benchmarks_json/test_rz_laser_injection_from_RZ_lasy_file.json similarity index 100% rename from Regression/Checksum/benchmarks_json/LaserInjectionFromRZLASYFile.json rename to Regression/Checksum/benchmarks_json/test_rz_laser_injection_from_RZ_lasy_file.json diff --git a/Regression/Checksum/benchmarks_json/LaserInjectionFromLASYFile_RZ.json b/Regression/Checksum/benchmarks_json/test_rz_laser_injection_from_lasy_file.json similarity index 100% rename from Regression/Checksum/benchmarks_json/LaserInjectionFromLASYFile_RZ.json rename to Regression/Checksum/benchmarks_json/test_rz_laser_injection_from_lasy_file.json diff --git a/Regression/Checksum/benchmarks_json/LoadExternalFieldRZGrid.json b/Regression/Checksum/benchmarks_json/test_rz_load_external_field_grid.json similarity index 100% rename from Regression/Checksum/benchmarks_json/LoadExternalFieldRZGrid.json rename to Regression/Checksum/benchmarks_json/test_rz_load_external_field_grid.json diff --git a/Regression/Checksum/benchmarks_json/LoadExternalFieldRZParticles.json b/Regression/Checksum/benchmarks_json/test_rz_load_external_field_particles.json similarity index 100% rename from Regression/Checksum/benchmarks_json/LoadExternalFieldRZParticles.json rename to Regression/Checksum/benchmarks_json/test_rz_load_external_field_particles.json diff --git a/Regression/Checksum/benchmarks_json/Python_magnetostatic_eb_rz.json b/Regression/Checksum/benchmarks_json/test_rz_magnetostatic_eb_picmi.json similarity index 100% rename from Regression/Checksum/benchmarks_json/Python_magnetostatic_eb_rz.json rename to Regression/Checksum/benchmarks_json/test_rz_magnetostatic_eb_picmi.json diff --git a/Regression/Checksum/benchmarks_json/multi_J_rz_psatd.json 
b/Regression/Checksum/benchmarks_json/test_rz_multiJ_psatd.json similarity index 100% rename from Regression/Checksum/benchmarks_json/multi_J_rz_psatd.json rename to Regression/Checksum/benchmarks_json/test_rz_multiJ_psatd.json diff --git a/Regression/Checksum/benchmarks_json/Python_ohms_law_solver_EM_modes_rz.json b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_em_modes_picmi.json similarity index 100% rename from Regression/Checksum/benchmarks_json/Python_ohms_law_solver_EM_modes_rz.json rename to Regression/Checksum/benchmarks_json/test_rz_ohm_solver_em_modes_picmi.json diff --git a/Regression/Checksum/benchmarks_json/particle_boundary_interaction.json b/Regression/Checksum/benchmarks_json/test_rz_particle_boundary_interaction_picmi.json similarity index 100% rename from Regression/Checksum/benchmarks_json/particle_boundary_interaction.json rename to Regression/Checksum/benchmarks_json/test_rz_particle_boundary_interaction_picmi.json diff --git a/Regression/Checksum/benchmarks_json/pml_psatd_rz.json b/Regression/Checksum/benchmarks_json/test_rz_pml_psatd.json similarity index 100% rename from Regression/Checksum/benchmarks_json/pml_psatd_rz.json rename to Regression/Checksum/benchmarks_json/test_rz_pml_psatd.json diff --git a/Regression/Checksum/benchmarks_json/Point_of_contact_EB_rz.json b/Regression/Checksum/benchmarks_json/test_rz_point_of_contact_eb.json similarity index 100% rename from Regression/Checksum/benchmarks_json/Point_of_contact_EB_rz.json rename to Regression/Checksum/benchmarks_json/test_rz_point_of_contact_eb.json diff --git a/Regression/Checksum/benchmarks_json/projection_divb_cleaner_rz.json b/Regression/Checksum/benchmarks_json/test_rz_projection_divb_cleaner.json similarity index 100% rename from Regression/Checksum/benchmarks_json/projection_divb_cleaner_rz.json rename to Regression/Checksum/benchmarks_json/test_rz_projection_divb_cleaner.json diff --git a/Regression/Checksum/benchmarks_json/scraping.json b/Regression/Checksum/benchmarks_json/test_rz_scraping.json similarity index 100% rename from Regression/Checksum/benchmarks_json/scraping.json rename to Regression/Checksum/benchmarks_json/test_rz_scraping.json diff --git a/Regression/Checksum/benchmarks_json/silver_mueller_rz_z.json b/Regression/Checksum/benchmarks_json/test_rz_silver_mueller_z.json similarity index 100% rename from Regression/Checksum/benchmarks_json/silver_mueller_rz_z.json rename to Regression/Checksum/benchmarks_json/test_rz_silver_mueller_z.json diff --git a/Regression/Checksum/benchmarks_json/spacecraft_charging.json b/Regression/Checksum/benchmarks_json/test_rz_spacecraft_charging_picmi.json similarity index 100% rename from Regression/Checksum/benchmarks_json/spacecraft_charging.json rename to Regression/Checksum/benchmarks_json/test_rz_spacecraft_charging_picmi.json diff --git a/Regression/PostProcessingUtils/__init__.py b/Regression/PostProcessingUtils/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/Regression/requirements.txt b/Regression/requirements.txt index 5bdd04ba106..509123899ba 100644 --- a/Regression/requirements.txt +++ b/Regression/requirements.txt @@ -1,10 +1,12 @@ +-r ../requirements.txt dill -lasy +lasy>=0.5.0 matplotlib mpi4py numpy openpmd-api openpmd-viewer pandas +periodictable scipy yt From c12c2f5880f37ba5741b477da6be21a3927b8e1a Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 6 Sep 2024 01:57:18 -0700 Subject: [PATCH 18/91] CTest follow-up: remove obsolete files/scripts (#5220) 
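
This commit deletes the legacy regression-test infrastructure made obsolete
by the new CTest-based workflow: the CI-matrix consistency check
(ci_matrix.py and test_ci_matrix.sh), the monolithic test lists
(WarpX-tests.ini and WarpX-GPU-tests.ini), prepare_file_ci.py, and
run_test.sh.

For context, the deleted ci_matrix.py (reproduced in full in the diff below)
checked that the union of test names across all CI matrix elements matched
the complete test list generated by prepare_file_ci.py. The following is a
minimal, self-contained sketch of that check, for illustration only; it is
not part of this patch, and the file names follow the deleted script:

    #!/usr/bin/env python3
    # Sketch of the consistency check performed by the deleted ci_matrix.py:
    # compare the test names collected from the CI matrix against the full
    # test list, ignoring section headers that are not test names.

    NON_TESTS = {"[main]", "[AMReX]", "[source]", "[extra-PICSAR]"}

    def read_test_names(path):
        # One test name per line; keep duplicates so that a test which is
        # accidentally run twice in the matrix is still detected.
        with open(path) as f:
            names = [line.strip() for line in f if line.strip()]
        return sorted(n for n in names if n not in NON_TESTS)

    matrix_elements = read_test_names("ci_matrix_elements.txt")
    all_tests = read_test_names("ci_all_tests.txt")

    # Report both set differences before asserting, so that a failing run
    # shows which tests are missing from, or duplicated in, the matrix.
    print("In matrix but not in full list:", sorted(set(matrix_elements) - set(all_tests)))
    print("In full list but not in matrix:", sorted(set(all_tests) - set(matrix_elements)))
    assert matrix_elements == all_tests

Under CTest, each test is registered in a single place, which presumably
removes the need for this kind of cross-check between generated test lists.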
---
 .github/workflows/source.yml               |    2 -
 .github/workflows/source/ci_matrix.py      |   30 -
 .github/workflows/source/test_ci_matrix.sh |   41 -
 Regression/WarpX-GPU-tests.ini             |  678 ----
 Regression/WarpX-tests.ini                 | 3929 --------
 Regression/prepare_file_ci.py              |  177 -
 run_test.sh                                |  115 -
 7 files changed, 4972 deletions(-)
 delete mode 100644 .github/workflows/source/ci_matrix.py
 delete mode 100755 .github/workflows/source/test_ci_matrix.sh
 delete mode 100644 Regression/WarpX-GPU-tests.ini
 delete mode 100644 Regression/WarpX-tests.ini
 delete mode 100644 Regression/prepare_file_ci.py
 delete mode 100755 run_test.sh

diff --git a/.github/workflows/source.yml b/.github/workflows/source.yml
index 143be1971fb..7a2086cfdff 100644
--- a/.github/workflows/source.yml
+++ b/.github/workflows/source.yml
@@ -27,8 +27,6 @@ jobs:
       run: .github/workflows/source/hasEOLwhiteSpace
     - name: Check test input files
       run: .github/workflows/source/check_inputs.py
-    - name: Check that the test matrix for CI includes all tests
-      run: .github/workflows/source/test_ci_matrix.sh
     - name: Doxygen
       run: |
        sudo apt-get install -y --no-install-recommends doxygen
diff --git a/.github/workflows/source/ci_matrix.py b/.github/workflows/source/ci_matrix.py
deleted file mode 100644
index ff9ea295013..00000000000
--- a/.github/workflows/source/ci_matrix.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-
-# Concatenation of tests in each of the 6 elements in CI matrix
-f = open("./ci_matrix_elements.txt")
-matrix_elements = f.readlines()
-f.close()
-# All tests read by prepare_file_ci.py
-f = open("./ci_all_tests.txt")
-all_tests = f.readlines()
-f.close()
-
-# Now let's make sure these two are equal
-
-# Remove these elements from both lists, as they are are not test names
-elements_to_remove = ["[main]\n", "[AMReX]\n", "[source]\n", "[extra-PICSAR]\n"]
-for element in elements_to_remove:
-    for x in range(matrix_elements.count(element)):
-        matrix_elements.remove(element)
-    for x in range(all_tests.count(element)):
-        all_tests.remove(element)
-
-# Sort lists, and make sure they are equal
-matrix_elements.sort()
-all_tests.sort()
-print("Tests in matrix, but not in initial list (typically if a test is done twice):")
-print(list(set(matrix_elements) - set(all_tests)))
-print("Tests in initial list but not in the matrix:")
-print(list(set(all_tests) - set(matrix_elements)))
-
-assert matrix_elements == all_tests
diff --git a/.github/workflows/source/test_ci_matrix.sh b/.github/workflows/source/test_ci_matrix.sh
deleted file mode 100755
index 62903b66e45..00000000000
--- a/.github/workflows/source/test_ci_matrix.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-
-set -eu -o pipefail
-
-cp .github/workflows/source/ci_matrix.py Regression/
-cd Regression/
-
-# Put the name of all CI tests into a text file
-python prepare_file_ci.py
-grep "^\[" ci-tests.ini > ci_all_tests.txt
-
-
-export WARPX_CI_PSATD=TRUE
-
-# Concatenate the names of all elements in CI matrix into another test file
-WARPX_CI_REGULAR_CARTESIAN_1D=TRUE python prepare_file_ci.py
-grep "^\[" ci-tests.ini > ci_matrix_elements.txt
-WARPX_CI_REGULAR_CARTESIAN_2D=TRUE python prepare_file_ci.py
-grep "^\[" ci-tests.ini >> ci_matrix_elements.txt
-WARPX_CI_REGULAR_CARTESIAN_3D=TRUE python prepare_file_ci.py
-grep "^\[" ci-tests.ini >> ci_matrix_elements.txt
-WARPX_CI_SINGLE_PRECISION=TRUE python prepare_file_ci.py
-grep "^\[" ci-tests.ini >> ci_matrix_elements.txt
-WARPX_CI_RZ_OR_NOMPI=TRUE python prepare_file_ci.py
-grep "^\[" ci-tests.ini >> ci_matrix_elements.txt
-WARPX_CI_QED=TRUE python prepare_file_ci.py
-grep "^\[" ci-tests.ini >> ci_matrix_elements.txt
-WARPX_CI_EB=TRUE python prepare_file_ci.py
-grep "^\[" ci-tests.ini >> ci_matrix_elements.txt
-
-# Check that the resulting lists are equal
-{
-    python ci_matrix.py &&
-    rm ci_all_tests.txt ci_matrix_elements.txt ci_matrix.py &&
-    echo "test passed" &&
-    exit 0
-} || {
-    rm ci_all_tests.txt ci_matrix_elements.txt ci_matrix.py &&
-    echo "tests failed" &&
-    exit 1
-}
diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini
deleted file mode 100644
index fade8193140..00000000000
--- a/Regression/WarpX-GPU-tests.ini
+++ /dev/null
@@ -1,678 +0,0 @@
-# This file is used both for the nightly regression tests
-# on the garunda server, and for CI tests.
-# In the case of CI, some of the parameters entered
-# below are overwritten, see prepare_file_ci.py
-[main]
-# repeat captured errors to stderr, e.g., for CI runs
-verbose = 1
-
-testTopDir = /home/regtester/RegTesting/rt-WarpX/
-webTopDir = /home/regtester/RegTesting/rt-WarpX/web
-
-sourceTree = C_Src
-
-# suiteName is the name prepended to all output directories
-suiteName = WarpX-GPU
-
-COMP = g++
-add_to_c_make_command = TEST=TRUE USE_ASSERTION=TRUE WarpxBinDir=
-
-archive_output = 0
-purge_output = 1
-
-MAKE = make
-numMakeJobs = 8
-
-# We build by default a few tools for output comparison.
-# The build time for those can be skipped if they are not needed.
-ftools =
-
-# Control the build of the particle_compare tool.
-# Needed for test particle_tolerance option.
-use_ctools = 0
-
-# MPIcommand should use the placeholders:
-# @host@ to indicate where to put the hostname to run on
-# @nprocs@ to indicate where to put the number of processors
-# @command@ to indicate where to put the command to run
-#
-# only tests with useMPI = 1 will run in parallel
-# nprocs is problem dependent and specified in the individual problem
-# sections.
-
-#MPIcommand = mpiexec -host @host@ -n @nprocs@ @command@
-MPIcommand = mpiexec -n @nprocs@ @command@
-MPIhost =
-
-reportActiveTestsOnly = 1
-
-# Add "GO UP" link at the top of the web page?
-goUpLink = 1
-
-# string queried to change plotfiles and checkpoint files
-plot_file_name = diag1.file_prefix
-check_file_name = none
-
-# email
-sendEmailWhenFail = 1
-emailTo = weiqunzhang@lbl.gov, jlvay@lbl.gov, rlehe@lbl.gov, atmyers@lbl.gov, mthevenet@lbl.gov, oshapoval@lbl.gov, ldianaamorim@lbl.gov, rjambunathan@lbl.gov, axelhuebl@lbl.gov, ezoni@lbl.gov
-emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more details.
-
-[AMReX]
-dir = /home/regtester/git/amrex/
-branch = 216ce6f37de4b65be57fc1006b3457b4fc318e03
-
-[source]
-dir = /home/regtester/git/WarpX
-branch = development
-
-[extra-PICSAR]
-dir = /home/regtester/git/picsar/
-branch = 7b5449f92a4b30a095cc4a67f0a8b1fc69680e15
-
-# individual problems follow
-
-[pml_x_yee]
-buildDir = .
-inputFile = Examples/Tests/pml/inputs_2d
-runtime_params = warpx.do_dynamic_scheduling=0 algo.maxwell_solver=yee
-dim = 2
-addToCompileString = USE_GPU=TRUE
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 0
-numthreads = 1
-compileTest = 0
-doVis = 0
-analysisRoutine = Examples/Tests/pml/analysis_pml_yee.py
-
-[pml_x_ckc]
-buildDir = .
-inputFile = Examples/Tests/pml/inputs_2d -runtime_params = warpx.do_dynamic_scheduling=0 algo.maxwell_solver=ckc -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -analysisRoutine = Examples/Tests/pml/analysis_pml_ckc.py - -#[pml_x_psatd] -#buildDir = . -#inputFile = Examples/Tests/pml/inputs_2d -#runtime_params = algo.maxwell_solver=psatd warpx.do_dynamic_scheduling=0 -#dim = 2 -#addToCompileString = USE_FFT=TRUE USE_GPU=TRUE -#restartTest = 0 -#useMPI = 1 -#numprocs = 2 -#useOMP = 0 -#numthreads = 1 -#compileTest = 0 -#doVis = 0 -#analysisRoutine = Examples/Tests/pml/analysis_pml_psatd.py -# -[RigidInjection_lab] -buildDir = . -inputFile = Examples/Tests/RigidInjection/inputs_2d_LabFrame -runtime_params = -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -analysisRoutine = Examples/Tests/RigidInjection/analysis_rigid_injection_LabFrame.py - -[RigidInjection_boost_backtransformed] -buildDir = . -inputFile = Examples/Tests/RigidInjection/inputs_2d_BoostedFrame -runtime_params = -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -doComparison = 0 -aux1File = Tools/PostProcessing/read_raw_data.py -analysisRoutine = Examples/Tests/RigidInjection/analysis_rigid_injection_BoostedFrame.py - -[nci_corrector] -buildDir = . -inputFile = Examples/Tests/nci_fdtd_stability/inputs_2d -runtime_params = amr.max_level=0 particles.use_fdtd_nci_corr=1 -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -doComparison = 0 -analysisRoutine = Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py - -# [nci_correctorMR] -# buildDir = . -# inputFile = Examples/Tests/nci_fdtd_stability/inputs_2d -# runtime_params = amr.max_level=1 particles.use_fdtd_nci_corr=1 -# dim = 2 -# addToCompileString = USE_GPU=TRUE -# restartTest = 0 -# useMPI = 1 -# numprocs = 2 -# useOMP = 0 -# numthreads = 1 -# compileTest = 0 -# doVis = 0 -# doComparison = 0 -# analysisRoutine = Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py -# -# [ionization_lab] -# buildDir = . -# inputFile = Examples/Tests/ionization/inputs_2d_rt -# runtime_params = -# dim = 2 -# addToCompileString = USE_GPU=TRUE -# restartTest = 0 -# useMPI = 1 -# numprocs = 2 -# useOMP = 0 -# numthreads = 1 -# compileTest = 0 -# doVis = 0 -# analysisRoutine = Examples/Tests/ionization/analysis_ionization.py -# -# [ionization_boost] -# buildDir = . -# inputFile = Examples/Tests/ionization/inputs_2d_bf_rt -# runtime_params = -# dim = 2 -# addToCompileString = USE_GPU=TRUE -# restartTest = 0 -# useMPI = 1 -# numprocs = 2 -# useOMP = 0 -# numthreads = 1 -# compileTest = 0 -# doVis = 0 -# analysisRoutine = Examples/Tests/ionization/analysis_ionization.py -# -[bilinear_filter] -buildDir = . -inputFile = Examples/Tests/single_particle/inputs_2d -runtime_params = warpx.use_filter=1 warpx.filter_npass_each_dir=1 5 -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -analysisRoutine = Examples/Tests/single_particle/analysis_bilinear_filter.py - -[Langmuir_2d] -buildDir = . 
-inputFile = Examples/Tests/langmuir/inputs_3d_rt -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = electrons -runtime_params = electrons.ux=0.01 electrons.xmax=0.e-6 diag1.fields_to_plot=Ex jx diag1.electrons.variables=w ux -analysisRoutine = Examples/Tests/langmuir/analysis_langmuir2d.py -analysisOutputImage = langmuir2d_analysis.png - -[Langmuir_2d_single_precision] -buildDir = . -inputFile = Examples/Tests/langmuir/inputs_3d_rt -runtime_params = electrons.ux=0.01 electrons.xmax=0.e-6 diag1.fields_to_plot=Ex jx diag1.electrons.variables=w ux -dim = 2 -addToCompileString = USE_GPU=TRUE PRECISION=FLOAT USE_SINGLE_PRECISION_PARTICLES=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = electrons -analysisRoutine = Examples/Tests/langmuir/analysis_langmuir2d.py -analysisOutputImage = langmuir2d_analysis.png - -[Langmuir_2d_nompi] -buildDir = . -inputFile = Examples/Tests/langmuir/inputs_3d_rt -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 0 -numprocs = 1 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = electrons -runtime_params = electrons.ux=0.01 electrons.xmax=0.e-6 diag1.fields_to_plot=Ex jx diag1.electrons.variables=w ux -analysisRoutine = Examples/Tests/langmuir/analysis_langmuir2d.py -analysisOutputImage = langmuir2d_analysis.png - -[Langmuir_x] -buildDir = . -inputFile = Examples/Tests/langmuir/inputs_3d_rt -dim = 3 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 4 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = electrons -runtime_params = electrons.ux=0.01 electrons.xmax=0.e-6 warpx.do_dynamic_scheduling=0 diag1.fields_to_plot = Ex jx diag1.electrons.variables=w ux -analysisRoutine = Examples/Tests/langmuir/analysis_langmuir.py -analysisOutputImage = langmuir_x_analysis.png - -[Langmuir_y] -buildDir = . -inputFile = Examples/Tests/langmuir/inputs_3d_rt -dim = 3 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 4 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = electrons -runtime_params = electrons.uy=0.01 electrons.ymax=0.e-6 warpx.do_dynamic_scheduling=0 diag1.fields_to_plot = Ey jy diag1.electrons.variables=w uy -analysisRoutine = Examples/Tests/langmuir/analysis_langmuir.py -analysisOutputImage = langmuir_y_analysis.png - -[Langmuir_z] -buildDir = . -inputFile = Examples/Tests/langmuir/inputs_3d_rt -dim = 3 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 4 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = electrons -runtime_params = electrons.uz=0.01 electrons.zmax=0.e-6 warpx.do_dynamic_scheduling=0 diag1.fields_to_plot = Ez jz diag1.electrons.variables=w uz -analysisRoutine = Examples/Tests/langmuir/analysis_langmuir.py -analysisOutputImage = langmuir_z_analysis.png - -[Langmuir_multi] -buildDir = . 
-inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt -dim = 3 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 4 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -runtime_params = warpx.do_dynamic_scheduling=0 -particleTypes = electrons positrons -analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py -analysisOutputImage = langmuir_multi_analysis.png - -[Langmuir_multi_nodal] -buildDir = . -inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt -dim = 3 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 4 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -runtime_params = warpx.do_dynamic_scheduling=0 warpx.grid_type=collocated algo.current_deposition=direct -particleTypes = electrons positrons -analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py -analysisOutputImage = langmuir_multi_analysis.png - -[Langmuir_multi_psatd] -buildDir = . -inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt -runtime_params = algo.maxwell_solver=psatd warpx.cfl = 0.5773502691896258 -dim = 3 -addToCompileString = USE_FFT=TRUE USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = electrons positrons -analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py -analysisOutputImage = langmuir_multi_analysis.png - -[Langmuir_multi_psatd_nodal] -buildDir = . -inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt -runtime_params = algo.maxwell_solver=psatd warpx.do_dynamic_scheduling=0 warpx.grid_type=collocated algo.current_deposition=direct warpx.cfl = 0.5773502691896258 -dim = 3 -addToCompileString = USE_FFT=TRUE USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = electrons positrons -analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py -analysisOutputImage = langmuir_multi_analysis.png - -[Langmuir_multi_2d_nodal] -buildDir = . -inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 4 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -runtime_params = warpx.grid_type=collocated algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz -particleTypes = electrons positrons -analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py -analysisOutputImage = langmuir_multi_2d_analysis.png - -[Langmuir_multi_2d_psatd] -buildDir = . -inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt -runtime_params = algo.maxwell_solver=psatd diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell warpx.cfl = 0.7071067811865475 -dim = 2 -addToCompileString = USE_FFT=TRUE USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = electrons positrons -analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py -analysisOutputImage = langmuir_multi_2d_analysis.png - -# [Langmuir_multi_2d_psatd_nodal] -# buildDir = . 
-# inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt -# runtime_params = algo.maxwell_solver=psatd warpx.grid_type=collocated algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell -# dim = 2 -# addToCompileString = USE_FFT=TRUE USE_GPU=TRUE -# restartTest = 0 -# useMPI = 1 -# numprocs = 4 -# useOMP = 0 -# numthreads = 1 -# compileTest = 0 -# doVis = 0 -# compareParticles = 0 -# particleTypes = electrons positrons -# analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py -# analysisOutputImage = langmuir_multi_2d_analysis.png -# -# [Langmuir_multi_rz] -# buildDir = . -# inputFile = Examples/Tests/langmuir/inputs_2d_multi_rz_rt -# dim = 2 -# addToCompileString = USE_RZ=TRUE USE_GPU=TRUE -# restartTest = 0 -# useMPI = 1 -# numprocs = 4 -# useOMP = 0 -# numthreads = 1 -# compileTest = 0 -# doVis = 0 -# runtime_params = diag1.electrons.variables=w ux uy uz diag1.ions.variables=w ux uy uz -# compareParticles = 0 -# particleTypes = electrons ions -# analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_rz.py -# analysisOutputImage = langmuir_multi_rz_analysis.png -# -# [Langmuir_rz_multimode] -# buildDir = . -# inputFile = Examples/Tests/langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py -# customRunCmd = python PICMI_inputs_langmuir_rz_multimode_analyze.py -# runtime_params = -# dim = 2 -# addToCompileString = USE_PYTHON_MAIN=TRUE USE_RZ=TRUE USE_GPU=TRUE PYINSTALLOPTIONS="--user --prefix=" -# restartTest = 0 -# useMPI = 1 -# numprocs = 4 -# useOMP = 0 -# numthreads = 1 -# compileTest = 0 -# doVis = 0 -# compareParticles = 0 -# particleTypes = electrons protons -# outputFile = diags/plotfiles/plt00040 -# -[LaserInjection] -buildDir = . -inputFile = Examples/Tests/laser_injection/inputs_3d_rt -dim = 3 -runtime_params = max_step=20 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 4 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -analysisRoutine = Examples/Tests/laser_injection/analysis_laser.py -analysisOutputImage = laser_analysis.png - -[LaserInjection_2d] -buildDir = . -inputFile = Examples/Tests/laser_injection/inputs_2d_rt -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 -compareParticles = 0 - -#xxxxx -#[LaserAcceleration] -#buildDir = . -#inputFile = Examples/Physics_applications/laser_acceleration/inputs_3d -#runtime_params = warpx.do_dynamic_scheduling=0 amr.n_cell=32 32 256 max_step=100 electrons.zmin=0.e-6 warpx.serialize_initial_conditions=1 -#dim = 3 -#addToCompileString = USE_GPU=TRUE -#restartTest = 0 -#useMPI = 1 -#numprocs = 2 -#useOMP = 0 -#numthreads = 1 -#compileTest = 0 -#doVis = 0 -#compareParticles = 0 -#particleTypes = electrons -# -[subcyclingMR] -buildDir = . -inputFile = Examples/Tests/subcycling/inputs_2d -runtime_params = warpx.serialize_initial_conditions=1 warpx.do_dynamic_scheduling=0 -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 - -[LaserAccelerationMR] -buildDir = . 
-inputFile = Examples/Physics_applications/laser_acceleration/inputs_2d -runtime_params = amr.max_level=1 max_step=200 warpx.serialize_initial_conditions=1 warpx.fine_tag_lo=-5.e-6 -35.e-6 warpx.fine_tag_hi=5.e-6 -25.e-6 -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = electrons beam - -[PlasmaAccelerationMR] -buildDir = . -inputFile = Examples/Physics_applications/plasma_acceleration/inputs_2d -runtime_params = amr.max_level=1 amr.n_cell=32 512 max_step=400 warpx.serialize_initial_conditions=1 warpx.do_dynamic_scheduling=0 -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = beam driver plasma_e - -[Python_Langmuir] -buildDir = . -inputFile = Examples/Tests/langmuir/PICMI_inputs_langmuir_rt.py -customRunCmd = python PICMI_inputs_langmuir_rt.py -runtime_params = -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_GPU=TRUE PYINSTALLOPTIONS="--user --prefix=" -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = electrons -outputFile = diags/diag200040 - -[uniform_plasma_restart] -buildDir = . -inputFile = Examples/Physics_applications/uniform_plasma/inputs_3d -runtime_params = chk.file_prefix=uniform_plasma_restart_chk -dim = 3 -addToCompileString = USE_GPU=TRUE -restartTest = 1 -restartFileNum = 6 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -particleTypes = electrons - -[particles_in_pml_2d] -buildDir = . -inputFile = Examples/Tests/particles_in_pml/inputs_2d -runtime_params = -dim = 2 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -analysisRoutine = Examples/Tests/particles_in_pml/analysis_particles_in_pml.py - -[particles_in_pml] -buildDir = . -inputFile = Examples/Tests/particles_in_pml/inputs_3d -runtime_params = -dim = 3 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -analysisRoutine = Examples/Tests/particles_in_pml/analysis_particles_in_pml.py - -[photon_pusher] -buildDir = . -inputFile = Examples/Tests/photon_pusher/inputs_3d -runtime_params = -dim = 3 -addToCompileString = USE_GPU=TRUE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 0 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 0 -analysisRoutine = Examples/Tests/photon_pusher/analysis_photon_pusher.py diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini deleted file mode 100644 index 5fb937a0b94..00000000000 --- a/Regression/WarpX-tests.ini +++ /dev/null @@ -1,3929 +0,0 @@ -# This file is used both for the nightly regression tests -# on the battra server, and for CI tests. 
-# In the case of CI, some of the parameters entered -# below are overwritten, see prepare_file_ci.py -[main] -# repeat captured errors to stderr, e.g., for CI runs -verbose = 1 - -testTopDir = /home/regtester/AMReX_RegTesting/rt-WarpX/ -webTopDir = /home/regtester/AMReX_RegTesting/rt-WarpX/web - -sourceTree = C_Src - -# suiteName is the name prepended to all output directories -suiteName = WarpX - -archive_output = 0 -purge_output = 1 - -useCmake = 1 -isSuperbuild = 1 -MAKE = make -numMakeJobs = 8 - -# We build by default a few tools for output comparison. -# The build time for those can be skipped if they are not needed. -ftools = - -# Control the build of the particle_compare tool. -# Needed for test particle_tolerance option. -use_ctools = 0 - -# MPIcommand should use the placeholders: -# @host@ to indicate where to put the hostname to run on -# @nprocs@ to indicate where to put the number of processors -# @command@ to indicate where to put the command to run -# -# only tests with useMPI = 1 will run in parallel -# nprocs is problem dependent and specified in the individual problem -# sections. - -#MPIcommand = mpiexec -host @host@ -n @nprocs@ @command@ -MPIcommand = mpiexec -n @nprocs@ @command@ -MPIhost = - -reportActiveTestsOnly = 1 - -# Add "GO UP" link at the top of the web page? -goUpLink = 1 - -# string queried to change plotfiles and checkpoint files -plot_file_name = diag1.file_prefix -check_file_name = none - -# email -sendEmailWhenFail = 1 -emailTo = weiqunzhang@lbl.gov, jlvay@lbl.gov, rlehe@lbl.gov, atmyers@lbl.gov, oshapoval@lbl.gov, henri.vincenti@cea.fr, rjambunathan@lbl.gov, yinjianzhao@lbl.gov -emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more details. - -[AMReX] -dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 216ce6f37de4b65be57fc1006b3457b4fc318e03 - -[source] -dir = /home/regtester/AMReX_RegTesting/warpx -branch = development -cmakeSetupOpts = -DAMReX_ASSERTIONS=ON -DAMReX_TESTING=ON -DWarpX_PYTHON_IPO=OFF -DpyAMReX_IPO=OFF -# -DPY_PIP_INSTALL_OPTIONS="--disable-pip-version-check" - -# individual problems follow - -[averaged_galilean_2d_psatd] -buildDir = . -inputFile = Examples/Tests/nci_psatd_stability/inputs_avg_2d -runtime_params = psatd.current_correction=0 warpx.abort_on_warning_threshold=medium -dim = 2 -addToCompileString = USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py - -[averaged_galilean_2d_psatd_hybrid] -buildDir = . -inputFile = Examples/Tests/nci_psatd_stability/inputs_avg_2d -runtime_params = amr.max_grid_size_x=128 amr.max_grid_size_y=64 warpx.grid_type=hybrid psatd.current_correction=0 warpx.abort_on_warning_threshold=medium -dim = 2 -addToCompileString = USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py - -[averaged_galilean_3d_psatd] -buildDir = . -inputFile = Examples/Tests/nci_psatd_stability/inputs_avg_3d -runtime_params = psatd.current_correction=0 warpx.abort_on_warning_threshold=medium -dim = 3 -addToCompileString = USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py - -[averaged_galilean_3d_psatd_hybrid] -buildDir = . 
-inputFile = Examples/Tests/nci_psatd_stability/inputs_avg_3d -runtime_params = warpx.grid_type=hybrid psatd.current_correction=0 warpx.abort_on_warning_threshold=medium -dim = 3 -addToCompileString = USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py - -[background_mcc] -buildDir = . -inputFile = Examples/Physics_applications/capacitive_discharge/inputs_2d -runtime_params = warpx.abort_on_warning_threshold = high -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[background_mcc_dp_psp] -buildDir = . -inputFile = Examples/Physics_applications/capacitive_discharge/inputs_2d -runtime_params = warpx.abort_on_warning_threshold = high -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PRECISION=DOUBLE -DWarpX_PARTICLE_PRECISION=SINGLE -DWarpX_QED=OFF -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[bilinear_filter] -buildDir = . -inputFile = Examples/Tests/single_particle/inputs_2d -runtime_params = warpx.use_filter=1 warpx.filter_npass_each_dir=1 5 -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/single_particle/analysis_bilinear_filter.py - -[BTD_rz] -buildDir = . -inputFile = Examples/Tests/btd_rz/inputs_rz_z_boosted_BTD -runtime_params = -dim = 2 -addToCompileString = USE_RZ=TRUE -cmakeSetupOpts = -DWarpX_DIMS=RZ -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py - -[collider_diagnostics] -buildDir = . -inputFile = Examples/Tests/collider_relevant_diags/inputs_3d_multiple_particles -runtime_params = warpx.abort_on_warning_threshold=high -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/collider_relevant_diags/analysis_multiple_particles.py - -[collisionZ] -buildDir = . -inputFile = Examples/Tests/collision/inputs_1d -runtime_params = -dim = 1 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=1 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/collision/analysis_collision_1d.py - -[collisionISO] -buildDir = . -inputFile = Examples/Tests/collision/inputs_3d_isotropization -runtime_params = -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/collision/analysis_collision_3d_isotropization.py -aux1File = Regression/PostProcessingUtils/post_processing_utils.py - -[collisionRZ] -buildDir = . -inputFile = Examples/Tests/collision/inputs_rz -runtime_params = -dim = 2 -addToCompileString = USE_RZ=TRUE USE_FFT=FALSE -cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_FFT=OFF -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/collision/analysis_collision_rz.py -aux1File = Regression/PostProcessingUtils/post_processing_utils.py - -[collisionXYZ] -buildDir = . 
-inputFile = Examples/Tests/collision/inputs_3d -runtime_params = -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/collision/analysis_collision_3d.py -aux1File = Regression/PostProcessingUtils/post_processing_utils.py - -[collisionXZ] -buildDir = . -inputFile = Examples/Tests/collision/inputs_2d -runtime_params = -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/collision/analysis_collision_2d.py -aux1File = Regression/PostProcessingUtils/post_processing_utils.py - -[comoving_2d_psatd_hybrid] -buildDir = . -inputFile = Examples/Tests/comoving/inputs_2d_hybrid -runtime_params = psatd.current_correction=0 warpx.abort_on_warning_threshold=medium -dim = 2 -addToCompileString = USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Deuterium_Deuterium_Fusion_3D] -buildDir = . -inputFile = Examples/Tests/nuclear_fusion/inputs_deuterium_deuterium_3d -runtime_params = -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py - -[Deuterium_Deuterium_Fusion_3D_intraspecies] -buildDir = . -inputFile = Examples/Tests/nuclear_fusion/inputs_deuterium_deuterium_3d_intraspecies -runtime_params = -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py - -[Deuterium_Tritium_Fusion_3D] -buildDir = . -inputFile = Examples/Tests/nuclear_fusion/inputs_deuterium_tritium_3d -runtime_params = -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py - -[Deuterium_Tritium_Fusion_RZ] -buildDir = . -inputFile = Examples/Tests/nuclear_fusion/inputs_deuterium_tritium_rz -runtime_params = warpx.abort_on_warning_threshold=high -dim = 2 -addToCompileString = USE_RZ=TRUE -cmakeSetupOpts = -DWarpX_DIMS=RZ -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py - -[dirichletbc] -buildDir = . -inputFile = Examples/Tests/electrostatic_dirichlet_bc/inputs_2d -runtime_params = warpx.abort_on_warning_threshold = medium -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/electrostatic_dirichlet_bc/analysis.py - -[divb_cleaning_3d] -buildDir = . -inputFile = Examples/Tests/divb_cleaning/inputs_3d -runtime_params = -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/divb_cleaning/analysis.py - -[dive_cleaning_2d] -buildDir = . 
-inputFile = Examples/Tests/dive_cleaning/inputs_3d -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -runtime_params = geometry.dims=2 -analysisRoutine = Examples/Tests/dive_cleaning/analysis.py - -[dive_cleaning_3d] -buildDir = . -inputFile = Examples/Tests/dive_cleaning/inputs_3d -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -runtime_params = -analysisRoutine = Examples/Tests/dive_cleaning/analysis.py - -[ElectrostaticSphere] -buildDir = . -inputFile = Examples/Tests/electrostatic_sphere/inputs_3d -runtime_params = warpx.abort_on_warning_threshold=medium -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py - -[ElectrostaticSphereLabFrame_MR_emass_10] -buildDir = . -inputFile = Examples/Tests/electrostatic_sphere/inputs_3d -runtime_params = warpx.do_electrostatic=labframe diag2.electron.variables=x y z ux uy uz w warpx.abort_on_warning_threshold=medium electron.mass = 10 amr.max_level = 1 amr.ref_ratio_vect = 2 2 2 warpx.fine_tag_lo = -0.5 -0.5 -0.5 warpx.fine_tag_hi = 0.5 0.5 0.5 max_step = 2 -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py - -[ElectrostaticSphereEB] -buildDir = . -inputFile = Examples/Tests/electrostatic_sphere_eb/inputs_3d -runtime_params = warpx.abort_on_warning_threshold = medium -dim = 3 -addToCompileString = USE_EB=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/electrostatic_sphere_eb/analysis.py - -[ElectrostaticSphereEB_mixedBCs] -buildDir = . -inputFile = Examples/Tests/electrostatic_sphere_eb/inputs_3d_mixed_BCs -runtime_params = warpx.abort_on_warning_threshold = medium -dim = 3 -addToCompileString = USE_EB=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[EmbeddedBoundaryDiffraction] -buildDir = . -inputFile = Examples/Tests/embedded_boundary_diffraction/inputs_rz -runtime_params = -dim = 2 -addToCompileString = USE_EB=TRUE USE_RZ=TRUE -cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_EB=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -outputFile = EmbeddedBoundaryDiffraction_plt -analysisRoutine = Examples/Tests/embedded_boundary_diffraction/analysis_fields.py - -[ElectrostaticSphereEB_RZ] -buildDir = . -inputFile = Examples/Tests/electrostatic_sphere_eb/inputs_rz -runtime_params = warpx.abort_on_warning_threshold = medium -dim = 2 -addToCompileString = USE_EB=TRUE USE_RZ=TRUE -cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_EB=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/electrostatic_sphere_eb/analysis_rz.py - -[ElectrostaticSphereEB_RZ_MR] -buildDir = . 
-inputFile = Examples/Tests/electrostatic_sphere_eb/inputs_rz_mr
-runtime_params = warpx.abort_on_warning_threshold = medium amr.ref_ratio_vect = 2 2 2
-dim = 2
-addToCompileString = USE_EB=TRUE USE_RZ=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_EB=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-outputFile = ElectrostaticSphereEB_RZ_MR_plt
-analysisRoutine = Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py
-
-[ElectrostaticSphereLabFrame]
-buildDir = .
-inputFile = Examples/Tests/electrostatic_sphere/inputs_3d
-runtime_params = warpx.do_electrostatic=labframe diag2.electron.variables=x y z ux uy uz w phi
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py
-
-[ElectrostaticSphereRZ]
-buildDir = .
-inputFile = Examples/Tests/electrostatic_sphere/inputs_rz
-runtime_params = warpx.abort_on_warning_threshold = medium
-dim = 2
-addToCompileString = USE_RZ=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py
-
-[ElectrostaticSphereRelNodal]
-buildDir = .
-inputFile = Examples/Tests/electrostatic_sphere/inputs_3d
-runtime_params = warpx.abort_on_warning_threshold = medium warpx.grid_type = collocated
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py
-
-[embedded_boundary_cube]
-buildDir = .
-inputFile = Examples/Tests/embedded_boundary_cube/inputs_3d
-runtime_params = warpx.abort_on_warning_threshold = medium
-dim = 3
-addToCompileString = USE_EB=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/embedded_boundary_cube/analysis_fields.py
-
-[embedded_boundary_cube_2d]
-buildDir = .
-inputFile = Examples/Tests/embedded_boundary_cube/inputs_2d
-runtime_params = warpx.abort_on_warning_threshold = medium
-dim = 2
-addToCompileString = USE_EB=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_EB=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py
-
-[embedded_boundary_cube_macroscopic]
-buildDir = .
-inputFile = Examples/Tests/embedded_boundary_cube/inputs_3d
-runtime_params = algo.em_solver_medium=macroscopic macroscopic.epsilon=1.5*8.8541878128e-12 macroscopic.sigma=0 macroscopic.mu=1.25663706212e-06 warpx.abort_on_warning_threshold=medium
-dim = 3
-addToCompileString = USE_EB=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/embedded_boundary_cube/analysis_fields.py
-
-[embedded_boundary_python_API]
-buildDir = .
-inputFile = Examples/Tests/embedded_boundary_python_api/PICMI_inputs_EB_API.py
-runtime_params =
-customRunCmd = python PICMI_inputs_EB_API.py
-dim = 3
-addToCompileString = USE_EB=TRUE USE_PYTHON_MAIN=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON
-target = pip_install
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/embedded_boundary_python_api/analysis.py
-
-[embedded_boundary_rotated_cube]
-buildDir = .
-inputFile = Examples/Tests/embedded_boundary_rotated_cube/inputs_3d
-runtime_params = warpx.abort_on_warning_threshold=medium
-dim = 3
-addToCompileString = USE_EB=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/embedded_boundary_rotated_cube/analysis_fields.py
-
-[embedded_boundary_rotated_cube_2d]
-buildDir = .
-inputFile = Examples/Tests/embedded_boundary_rotated_cube/inputs_2d
-runtime_params = warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_EB=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_EB=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py
-
-[embedded_circle]
-buildDir = .
-inputFile = Examples/Tests/embedded_circle/inputs_2d
-runtime_params = warpx.abort_on_warning_threshold = high
-dim = 2
-addToCompileString = USE_EB=TRUE USE_OPENPMD=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_EB=ON -DWarpX_OPENPMD=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/embedded_circle/analysis.py
-
-[FieldProbe]
-buildDir = .
-inputFile = Examples/Tests/field_probe/inputs_2d
-runtime_params =
-dim = 2
-addToCompileString = USE_EB=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_EB=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/field_probe/analysis_field_probe.py
-
-[FluxInjection]
-buildDir = .
-inputFile = Examples/Tests/flux_injection/inputs_rz
-runtime_params = warpx.abort_on_warning_threshold = high
-dim = 2
-addToCompileString = USE_RZ=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/flux_injection/analysis_flux_injection_rz.py
-
-[FluxInjection3D]
-buildDir = .
-inputFile = Examples/Tests/flux_injection/inputs_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts =
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/flux_injection/analysis_flux_injection_3d.py
-
-[galilean_2d_psatd]
-buildDir = .
-inputFile = Examples/Tests/nci_psatd_stability/inputs_2d
-runtime_params = warpx.grid_type=collocated algo.current_deposition=direct psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py
-
-[galilean_2d_psatd_current_correction]
-buildDir = .
-inputFile = Examples/Tests/nci_psatd_stability/inputs_2d
-runtime_params = psatd.periodic_single_box_fft=0 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE amr.max_grid_size=64 amr.blocking_factor=64
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py
-
-[galilean_2d_psatd_current_correction_psb]
-buildDir = .
-inputFile = Examples/Tests/nci_psatd_stability/inputs_2d
-runtime_params = psatd.periodic_single_box_fft=1 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py
-
-[galilean_2d_psatd_hybrid]
-buildDir = .
-inputFile = Examples/Tests/nci_psatd_stability/inputs_2d_hybrid
-runtime_params = psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[galilean_3d_psatd]
-buildDir = .
-inputFile = Examples/Tests/nci_psatd_stability/inputs_3d
-runtime_params = psatd.v_galilean=0. 0. 0.99498743710662 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py
-
-[galilean_3d_psatd_current_correction]
-buildDir = .
-inputFile = Examples/Tests/nci_psatd_stability/inputs_3d
-runtime_params = psatd.v_galilean=0. 0. 0.99498743710662 warpx.numprocs=1 1 2 psatd.periodic_single_box_fft=0 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py
-
-[galilean_3d_psatd_current_correction_psb]
-buildDir = .
-inputFile = Examples/Tests/nci_psatd_stability/inputs_3d
-runtime_params = psatd.v_galilean=0. 0. 0.99498743710662 warpx.numprocs=1 1 1 psatd.periodic_single_box_fft=1 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py
-
-[galilean_rz_psatd]
-buildDir = .
-inputFile = Examples/Tests/nci_psatd_stability/inputs_rz
-runtime_params = electrons.random_theta=0 ions.random_theta=0 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_RZ=TRUE USE_FFT=TRUE BLAS_LIB=-lblas LAPACK_LIB=-llapack
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py
-
-[galilean_rz_psatd_current_correction_psb]
-buildDir = .
-inputFile = Examples/Tests/nci_psatd_stability/inputs_rz
-runtime_params = psatd.periodic_single_box_fft=1 psatd.current_correction=1 electrons.random_theta=0 ions.random_theta=0
-dim = 2
-addToCompileString = USE_RZ=TRUE USE_FFT=TRUE BLAS_LIB=-lblas LAPACK_LIB=-llapack
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py
-
-[galilean_rz_psatd_current_correction]
-buildDir = .
-inputFile = Examples/Tests/nci_psatd_stability/inputs_rz
-runtime_params = psatd.periodic_single_box_fft=0 psatd.current_correction=1 electrons.random_theta=0 ions.random_theta=0 amr.max_grid_size=32 amr.blocking_factor=32
-dim = 2
-addToCompileString = USE_RZ=TRUE USE_FFT=TRUE BLAS_LIB=-lblas LAPACK_LIB=-llapack
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py
-
-[hard_edged_plasma_lens]
-buildDir = .
-inputFile = Examples/Tests/plasma_lens/inputs_lattice_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/plasma_lens/analysis.py
-
-[hard_edged_quadrupoles]
-buildDir = .
-inputFile = Examples/Tests/AcceleratorLattice/inputs_quad_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/AcceleratorLattice/analysis.py
-
-[hard_edged_quadrupoles_boosted]
-buildDir = .
-inputFile = Examples/Tests/AcceleratorLattice/inputs_quad_boosted_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/AcceleratorLattice/analysis.py
-
-[hard_edged_quadrupoles_moving]
-buildDir = .
-inputFile = Examples/Tests/AcceleratorLattice/inputs_quad_moving_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/AcceleratorLattice/analysis.py
-
-[initial_distribution]
-buildDir = .
-inputFile = Examples/Tests/initial_distribution/inputs
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/initial_distribution/analysis_distribution.py
-aux1File = Tools/PostProcessing/read_raw_data.py
-
-[ionization_boost]
-buildDir = .
-inputFile = Examples/Tests/ionization/inputs_2d_bf_rt
-runtime_params =
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/ionization/analysis_ionization.py
-
-[ionization_lab]
-buildDir = .
-inputFile = Examples/Tests/ionization/inputs_2d_rt
-runtime_params =
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/ionization/analysis_ionization.py
-
-[ion_stopping]
-buildDir = .
-inputFile = Examples/Tests/ion_stopping/inputs_3d
-runtime_params = warpx.cfl=0.7
-dim = 3
-addToCompileString =
-cmakeSetupOpts =
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/ion_stopping/analysis_ion_stopping.py
-
-[Langmuir_multi]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_fluid_1D]
-buildDir = .
-inputFile = Examples/Tests/langmuir_fluids/inputs_1d
-runtime_params =
-dim = 1
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=1
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir_fluids/analysis_1d.py
-
-[Langmuir_fluid_RZ]
-buildDir = .
-inputFile = Examples/Tests/langmuir_fluids/inputs_rz
-runtime_params =
-dim = 2
-addToCompileString = USE_RZ=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir_fluids/analysis_rz.py
-
-[Langmuir_fluid_2D]
-buildDir = .
-inputFile = Examples/Tests/langmuir_fluids/inputs_2d
-runtime_params =
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir_fluids/analysis_2d.py
-
-[Langmuir_fluid_multi]
-buildDir = .
-inputFile = Examples/Tests/langmuir_fluids/inputs_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir_fluids/analysis_3d.py
-
-[Langmuir_multi_1d]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_1d
-runtime_params = algo.current_deposition=esirkepov diag1.electrons.variables=z w ux uy uz diag1.positrons.variables=z w ux uy uz
-dim = 1
-addToCompileString = USE_OPENPMD=TRUE QED=FALSE
-cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_OPENPMD=ON -DWarpX_QED=OFF
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_1d.py
-
-[Langmuir_multi_2d_MR]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver = ckc warpx.use_filter = 1 amr.max_level = 1 amr.ref_ratio = 4 warpx.fine_tag_lo = -10.e-6 -10.e-6 warpx.fine_tag_hi = 10.e-6 10.e-6 diag1.electrons.variables = x z w ux uy uz diag1.positrons.variables = x z w ux uy uz
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_MR_anisotropic]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver = ckc warpx.use_filter = 1 amr.max_level = 1 amr.ref_ratio_vect = 4 2 warpx.fine_tag_lo = -10.e-6 -10.e-6 warpx.fine_tag_hi = 10.e-6 10.e-6 diag1.electrons.variables = x z w ux uy uz diag1.positrons.variables = x z w ux uy uz
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_MR_momentum_conserving]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver=ckc warpx.use_filter=1 amr.max_level=1 amr.ref_ratio=4 warpx.fine_tag_lo=-10.e-6 -10.e-6 warpx.fine_tag_hi=10.e-6 10.e-6 algo.field_gathering=momentum-conserving diag1.electrons.variables=x z w ux uy uz diag1.positrons.variables=x z w ux uy uz
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_MR_psatd]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver = psatd warpx.use_filter = 1 amr.max_level = 1 amr.ref_ratio = 4 warpx.fine_tag_lo = -10.e-6 -10.e-6 warpx.fine_tag_hi = 10.e-6 10.e-6 diag1.electrons.variables = x z w ux uy uz diag1.positrons.variables = x z w ux uy uz psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_nodal]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = warpx.grid_type=collocated algo.current_deposition=direct diag1.electrons.variables=x z w ux uy uz diag1.positrons.variables=x z w ux uy uz
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_psatd]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver=psatd diag1.electrons.variables=x z w ux uy uz diag1.positrons.variables=x z w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell warpx.cfl = 0.7071067811865475 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_psatd_current_correction]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 algo.current_deposition=esirkepov psatd.periodic_single_box_fft=1 psatd.current_correction=1 diag1.electrons.variables=x z w ux uy uz diag1.positrons.variables=x z w ux uy uz diag1.fields_to_plot =Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_psatd_current_correction_nodal]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 algo.current_deposition=direct psatd.periodic_single_box_fft=1 psatd.current_correction=1 warpx.grid_type=collocated diag1.electrons.variables=x z w ux uy uz diag1.positrons.variables=x z w ux uy uz diag1.fields_to_plot =Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_psatd_momentum_conserving]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver=psatd algo.field_gathering=momentum-conserving diag1.electrons.variables=x z w ux uy uz diag1.positrons.variables=x z w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell warpx.cfl = 0.7071067811865475 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_psatd_multiJ]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.solution_type=first-order psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_psatd_multiJ_nodal]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.solution_type=first-order psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.grid_type=collocated
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_psatd_nodal]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver=psatd warpx.grid_type=collocated algo.current_deposition=direct diag1.electrons.variables=x z w ux uy uz diag1.positrons.variables=x z w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell warpx.cfl = 0.7071067811865475 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_psatd_Vay_deposition]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 algo.current_deposition=vay diag1.electrons.variables=x z w ux uy uz diag1.positrons.variables=x z w ux uy uz diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_psatd_Vay_deposition_particle_shape_4]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 algo.current_deposition=vay diag1.electrons.variables=x z w ux uy uz diag1.positrons.variables=x z w ux uy uz diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475 algo.particle_shape=4
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_2d_psatd_Vay_deposition_nodal]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_2d
-runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 warpx.grid_type=collocated algo.current_deposition=vay diag1.electrons.variables=x z w ux uy uz diag1.positrons.variables=x z w ux uy uz diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_2d.py
-
-[Langmuir_multi_nodal]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params = warpx.grid_type=collocated algo.current_deposition=direct
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_multi_psatd]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params = algo.maxwell_solver=psatd warpx.cfl = 0.5773502691896258
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_multi_psatd_current_correction]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params = algo.maxwell_solver=psatd algo.current_deposition=esirkepov psatd.periodic_single_box_fft=1 psatd.current_correction=1 diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE warpx.cfl = 0.5773502691896258
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_multi_psatd_current_correction_nodal]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params = algo.maxwell_solver=psatd algo.current_deposition=direct psatd.periodic_single_box_fft=1 psatd.current_correction=1 warpx.grid_type=collocated diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE warpx.cfl = 0.5773502691896258
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_multi_psatd_div_cleaning]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params = algo.maxwell_solver=psatd warpx.cfl = 0.5773502691896258 psatd.update_with_rho = 1 algo.current_deposition = direct warpx.do_dive_cleaning = 1 warpx.do_divb_cleaning = 1 diag1.intervals = 0, 38:40:1 diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE F warpx.abort_on_warning_threshold=medium
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_multi_psatd_momentum_conserving]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params = algo.maxwell_solver=psatd algo.field_gathering=momentum-conserving warpx.cfl = 0.5773502691896258
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_multi_psatd_multiJ]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.solution_type=first-order psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_multi_psatd_multiJ_nodal]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.solution_type=first-order psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.grid_type=collocated
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_multi_psatd_nodal]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params = algo.maxwell_solver=psatd warpx.grid_type=collocated algo.current_deposition=direct warpx.cfl = 0.5773502691896258 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_multi_psatd_single_precision]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params = algo.maxwell_solver=psatd warpx.cfl = 0.5773502691896258
-dim = 3
-addToCompileString = USE_FFT=TRUE PRECISION=FLOAT USE_SINGLE_PRECISION_PARTICLES=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON -DWarpX_PRECISION=SINGLE
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_multi_psatd_Vay_deposition]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params = algo.maxwell_solver=psatd algo.current_deposition=vay diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.5773502691896258
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_multi_psatd_Vay_deposition_nodal]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params = algo.maxwell_solver=psatd warpx.grid_type=collocated algo.current_deposition=vay diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.5773502691896258
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Langmuir_multi_rz]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_rz
-runtime_params = diag1.electrons.variables=x y z w ux uy uz diag1.ions.variables=x y z w ux uy uz diag1.dump_rz_modes=0
-dim = 2
-addToCompileString = USE_RZ=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_rz.py
-aux1File = Regression/PostProcessingUtils/post_processing_utils.py
-
-[Langmuir_multi_rz_psatd]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_rz
-runtime_params = algo.maxwell_solver=psatd diag1.electrons.variables=x y z w ux uy uz diag1.ions.variables=x y z w ux uy uz diag1.dump_rz_modes=0 algo.current_deposition=direct warpx.do_dive_cleaning=0 psatd.update_with_rho=1 electrons.random_theta=0 ions.random_theta=0 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_RZ=TRUE USE_FFT=TRUE BLAS_LIB=-lblas LAPACK_LIB=-llapack
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_rz.py
-aux1File = Regression/PostProcessingUtils/post_processing_utils.py
-
-[Langmuir_multi_rz_psatd_current_correction]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_rz
-runtime_params = algo.maxwell_solver=psatd diag1.electrons.variables=x y z w ux uy uz diag1.ions.variables=x y z w ux uy uz diag1.dump_rz_modes=0 algo.current_deposition=direct warpx.do_dive_cleaning=0 amr.max_grid_size=128 psatd.periodic_single_box_fft=1 psatd.current_correction=1 diag1.fields_to_plot=jr jz Er Ez Bt rho divE electrons.random_theta=0 ions.random_theta=0
-dim = 2
-addToCompileString = USE_RZ=TRUE USE_FFT=TRUE BLAS_LIB=-lblas LAPACK_LIB=-llapack
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_rz.py
-aux1File = Regression/PostProcessingUtils/post_processing_utils.py
-
-[Langmuir_multi_rz_psatd_multiJ]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_rz
-runtime_params = amr.max_grid_size=32 algo.maxwell_solver=psatd diag1.electrons.variables=x y z w ux uy uz diag1.ions.variables=x y z w ux uy uz diag1.dump_rz_modes=0 algo.current_deposition=direct warpx.do_dive_cleaning=0 psatd.update_with_rho=1 warpx.n_rz_azimuthal_modes=2 electrons.random_theta=0 electrons.num_particles_per_cell_each_dim=2 4 2 ions.random_theta=0 ions.num_particles_per_cell_each_dim=2 4 2 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=4 warpx.use_filter=1
-dim = 2
-addToCompileString = USE_RZ=TRUE USE_FFT=TRUE BLAS_LIB=-lblas LAPACK_LIB=-llapack
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_rz.py
-aux1File = Regression/PostProcessingUtils/post_processing_utils.py
-
-[Langmuir_multi_single_precision]
-buildDir = .
-inputFile = Examples/Tests/langmuir/inputs_3d
-runtime_params =
-dim = 3
-addToCompileString = PRECISION=FLOAT USE_SINGLE_PRECISION_PARTICLES=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PRECISION=SINGLE
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/langmuir/analysis_3d.py
-
-[Larmor]
-buildDir = .
-inputFile = Examples/Tests/larmor/inputs_2d_mr
-runtime_params = max_step=10
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[LaserAcceleration]
-buildDir = .
-inputFile = Examples/Physics_applications/laser_acceleration/inputs_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-outputFile = LaserAcceleration_plt
-analysisRoutine = Examples/analysis_default_openpmd_regression.py
-
-[LaserAcceleration_1d]
-buildDir = .
-inputFile = Examples/Physics_applications/laser_acceleration/inputs_1d
-runtime_params =
-dim = 1
-addToCompileString = USE_OPENPMD=TRUE QED=FALSE
-cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_OPENPMD=ON -DWarpX_QED=OFF
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[LaserAcceleration_1d_fluid]
-buildDir = .
-inputFile = Examples/Physics_applications/laser_acceleration/inputs_1d_fluids
-runtime_params =
-dim = 1
-addToCompileString = USE_OPENPMD=TRUE QED=FALSE
-cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_OPENPMD=ON -DWarpX_QED=OFF
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Physics_applications/laser_acceleration/analysis_1d_fluids.py
-
-[LaserAcceleration_1d_fluid_boosted]
-buildDir = .
-inputFile = Examples/Physics_applications/laser_acceleration/inputs_1d_fluids_boosted
-runtime_params =
-dim = 1
-addToCompileString = USE_OPENPMD=TRUE QED=FALSE
-cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_OPENPMD=ON -DWarpX_QED=OFF
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Physics_applications/laser_acceleration/analysis_1d_fluids_boosted.py
-
-[LaserAccelerationBoost]
-buildDir = .
-inputFile = Examples/Physics_applications/laser_acceleration/inputs_2d_boost
-runtime_params = amr.n_cell=64 512 max_step=300
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[LaserAcceleration_BTD]
-buildDir = .
-inputFile = Examples/Tests/boosted_diags/inputs_3d
-runtime_params =
-dim = 3
-addToCompileString = USE_OPENPMD=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_OPENPMD=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/boosted_diags/analysis.py
-
-[LaserAccelerationMR]
-buildDir = .
-inputFile = Examples/Physics_applications/laser_acceleration/inputs_2d
-runtime_params =
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[LaserAccelerationRZ]
-buildDir = .
-inputFile = Examples/Physics_applications/laser_acceleration/inputs_rz
-runtime_params = diag1.dump_rz_modes=1 warpx.abort_on_warning_threshold=high
-dim = 2
-addToCompileString = USE_RZ=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[LaserAccelerationRZ_opmd]
-buildDir = .
-inputFile = Examples/Physics_applications/laser_acceleration/inputs_rz
-runtime_params = diag1.format=openpmd diag1.openpmd_backend=h5 max_step=20 diag1.fields_to_plot=Er Bt Bz jr jt jz rho part_per_cell part_per_grid rho_beam rho_electrons warpx.abort_on_warning_threshold=high
-dim = 2
-addToCompileString = USE_RZ=TRUE USE_OPENPMD=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_OPENPMD=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-outputFile = LaserAccelerationRZ_opmd_plt
-analysisRoutine = Examples/Tests/openpmd_rz/analysis_openpmd_rz.py
-
-[LaserAcceleration_single_precision_comms]
-buildDir = .
-inputFile = Examples/Physics_applications/laser_acceleration/inputs_3d
-runtime_params = warpx.do_single_precision_comms=1
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-outputFile = LaserAcceleration_single_precision_comms_plt
-analysisRoutine = Examples/analysis_default_openpmd_regression.py
-
-[LaserInjection]
-buildDir = .
-inputFile = Examples/Tests/laser_injection/inputs_3d_rt
-runtime_params = max_step=20
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/laser_injection/analysis_laser.py
-
-[LaserInjection_1d]
-buildDir = .
-inputFile = Examples/Tests/laser_injection/inputs_1d_rt
-runtime_params =
-dim = 1
-addToCompileString = USE_OPENPMD=TRUE QED=FALSE
-cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_OPENPMD=ON -DWarpX_QED=OFF
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/laser_injection/analysis_1d.py
-
-[LaserInjection_2d]
-buildDir = .
-inputFile = Examples/Tests/laser_injection/inputs_2d_rt
-runtime_params =
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/laser_injection/analysis_2d.py
-
-[LaserInjectionFromBINARYFile]
-buildDir = .
-inputFile = Examples/Tests/laser_injection_from_file/analysis_2d_binary.py
-aux1File = Examples/Tests/laser_injection_from_file/inputs.2d_test_binary
-customRunCmd = ./analysis_2d_binary.py
-runtime_params =
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-selfTest = 1
-stSuccessString = Passed
-
-[LaserInjectionFromLASYFile]
-buildDir = .
-inputFile = Examples/Tests/laser_injection_from_file/analysis_3d.py
-aux1File = Examples/Tests/laser_injection_from_file/inputs.3d_test
-customRunCmd = ./analysis_3d.py
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-selfTest = 1
-stSuccessString = Passed
-
-[LaserInjectionFromLASYFile_1d]
-buildDir = .
-inputFile = Examples/Tests/laser_injection_from_file/analysis_1d.py
-aux1File = Examples/Tests/laser_injection_from_file/inputs.1d_test
-customRunCmd = ./analysis_1d.py
-runtime_params =
-dim = 1
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=1
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-selfTest = 1
-stSuccessString = Passed
-
-[LaserInjectionFromLASYFile_1d_boost]
-buildDir = .
-inputFile = Examples/Tests/laser_injection_from_file/analysis_1d_boost.py
-aux1File = Examples/Tests/laser_injection_from_file/inputs.1d_boost_test
-customRunCmd = ./analysis_1d_boost.py
-runtime_params =
-dim = 1
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=1
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-selfTest = 1
-stSuccessString = Passed
-
-[LaserInjectionFromLASYFile_2d]
-buildDir = .
-inputFile = Examples/Tests/laser_injection_from_file/analysis_2d.py
-aux1File = Examples/Tests/laser_injection_from_file/inputs.2d_test
-customRunCmd = ./analysis_2d.py
-runtime_params =
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-selfTest = 1
-stSuccessString = Passed
-
-[LaserInjectionFromLASYFile_RZ]
-buildDir = .
-inputFile = Examples/Tests/laser_injection_from_file/analysis_RZ.py
-aux1File = Examples/Tests/laser_injection_from_file/inputs.RZ_test
-customRunCmd = ./analysis_RZ.py
-runtime_params =
-dim = 2
-addToCompileString = USE_RZ=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-selfTest = 1
-stSuccessString = Passed
-
-[LaserInjectionFromRZLASYFile]
-buildDir = .
-inputFile = Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py
-aux1File = Examples/Tests/laser_injection_from_file/inputs.from_RZ_file_test
-customRunCmd = ./analysis_from_RZ_file.py
-runtime_params =
-dim = 2
-addToCompileString = USE_RZ=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-selfTest = 1
-stSuccessString = Passed
-
-[LaserIonAcc2d]
-buildDir = .
-inputFile = Examples/Physics_applications/laser_ion/inputs_2d
-outputFile = LaserIonAcc2d_plt
-runtime_params =
-dim = 2
-addToCompileString = USE_OPENPMD=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_OPENPMD=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_openpmd_regression.py
-
-[LaserOnFine]
-buildDir = .
-inputFile = Examples/Tests/laser_on_fine/inputs_2d
-runtime_params = max_step=50
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[leveling_thinning]
-buildDir = .
-inputFile = Examples/Tests/resampling/inputs_leveling_thinning
-runtime_params =
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/resampling/analysis_leveling_thinning.py
-
-[LoadExternalFieldRZGrid]
-buildDir = .
-inputFile = Examples/Tests/LoadExternalField/inputs_rz_grid_fields
-runtime_params = warpx.abort_on_warning_threshold=medium chk.file_prefix=LoadExternalFieldRZGrid_chk chk.file_min_digits=5
-dim = 2
-addToCompileString = USE_RZ=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_OPENPMD=ON
-restartTest = 1
-restartFileNum = 150
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/LoadExternalField/analysis_rz.py
-
-[LoadExternalFieldRZParticles]
-buildDir = .
-inputFile = Examples/Tests/LoadExternalField/inputs_rz_particle_fields
-runtime_params = warpx.abort_on_warning_threshold=medium chk.file_prefix=LoadExternalFieldRZParticles_chk chk.file_min_digits=5
-dim = 2
-addToCompileString = USE_RZ=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_OPENPMD=ON
-restartTest = 1
-restartFileNum = 150
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/LoadExternalField/analysis_rz.py
-
-[magnetostatic_eb_3d]
-buildDir = .
-inputFile = Examples/Tests/magnetostatic_eb/inputs_3d
-runtime_params = warpx.abort_on_warning_threshold = medium
-dim = 3
-addToCompileString = USE_EB=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 2
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[Maxwell_Hybrid_QED_solver]
-buildDir = .
-inputFile = Examples/Tests/maxwell_hybrid_qed/inputs_2d
-runtime_params = warpx.cfl=0.7071067811865475
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/maxwell_hybrid_qed/analysis_Maxwell_QED_Hybrid.py
-
-[momentum-conserving-gather]
-buildDir = .
-inputFile = Examples/Physics_applications/plasma_acceleration/inputs_2d
-runtime_params = amr.max_level=1 amr.n_cell=32 512 max_step=400 algo.field_gathering=momentum-conserving
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[multi_J_rz_psatd]
-buildDir = .
-inputFile = Examples/Tests/multi_j/inputs_rz
-runtime_params = warpx.abort_on_warning_threshold=medium psatd.J_in_time=linear
-dim = 2
-addToCompileString = USE_RZ=TRUE USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[nci_corrector]
-buildDir = .
-inputFile = Examples/Tests/nci_fdtd_stability/inputs_2d
-runtime_params = amr.max_level=0 particles.use_fdtd_nci_corr=1
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py
-
-[nci_correctorMR]
-buildDir = .
-inputFile = Examples/Tests/nci_fdtd_stability/inputs_2d
-runtime_params = amr.max_level=1 particles.use_fdtd_nci_corr=1 amr.n_cell=64 64 warpx.fine_tag_lo=-20.e-6 -20.e-6 warpx.fine_tag_hi=20.e-6 20.e-6
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py
-
-[parabolic_channel_initialization_2d_single_precision]
-buildDir = .
-inputFile = Examples/Tests/initial_plasma_profile/inputs
-dim = 2
-addToCompileString = PRECISION=FLOAT USE_SINGLE_PRECISION_PARTICLES=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PRECISION=SINGLE
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-runtime_params =
-analysisRoutine = Examples/Tests/initial_plasma_profile/analysis.py
-
-[particle_absorption]
-buildDir = .
-inputFile = Examples/Tests/particle_boundary_process/inputs_absorption
-runtime_params =
-dim = 3
-addToCompileString = USE_EB=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/particle_boundary_process/analysis_absorption.py
-
-[particle_boundaries_3d]
-buildDir = .
-inputFile = Examples/Tests/boundaries/inputs_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/boundaries/analysis.py
-
-[particle_fields_diags]
-buildDir = .
-inputFile = Examples/Tests/particle_fields_diags/inputs
-aux1File = Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py
-runtime_params =
-dim = 3
-addToCompileString = USE_OPENPMD=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_OPENPMD=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/particle_fields_diags/analysis_particle_diags.py
-
-[particle_fields_diags_single_precision]
-buildDir = .
-inputFile = Examples/Tests/particle_fields_diags/inputs
-aux1File = Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py
-runtime_params =
-dim = 3
-addToCompileString = PRECISION=FLOAT USE_SINGLE_PRECISION_PARTICLES=TRUE USE_OPENPMD=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PRECISION=SINGLE -DWarpX_OPENPMD=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/particle_fields_diags/analysis_particle_diags_single.py
-
-[particle_pusher]
-buildDir = .
-inputFile = Examples/Tests/particle_pusher/inputs_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/particle_pusher/analysis_pusher.py
-
-[particle_scrape]
-buildDir = .
-inputFile = Examples/Tests/particle_boundary_scrape/inputs_scrape
-runtime_params =
-dim = 3
-addToCompileString = USE_EB=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/particle_boundary_scrape/analysis_scrape.py
-
-[particles_in_pml]
-buildDir = .
-inputFile = Examples/Tests/particles_in_pml/inputs_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/particles_in_pml/analysis_particles_in_pml.py
-
-[particles_in_pml_2d]
-buildDir = .
-inputFile = Examples/Tests/particles_in_pml/inputs_2d
-runtime_params =
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/particles_in_pml/analysis_particles_in_pml.py
-
-[particles_in_pml_2d_MR]
-buildDir = .
-inputFile = Examples/Tests/particles_in_pml/inputs_mr_2d
-runtime_params =
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/particles_in_pml/analysis_particles_in_pml.py
-
-[particles_in_pml_3d_MR]
-buildDir = .
-inputFile = Examples/Tests/particles_in_pml/inputs_mr_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/particles_in_pml/analysis_particles_in_pml.py
-
-[PEC_field]
-buildDir = .
-inputFile = Examples/Tests/pec/inputs_field_PEC_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/pec/analysis_pec.py
-
-[PEC_field_mr]
-buildDir = .
-inputFile = Examples/Tests/pec/inputs_field_PEC_mr_3d
-runtime_params = warpx.abort_on_warning_threshold=medium
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/pec/analysis_pec_mr.py
-
-[PEC_particle]
-buildDir = .
-inputFile = Examples/Tests/pec/inputs_particle_PEC_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[photon_pusher]
-buildDir = .
-inputFile = Examples/Tests/photon_pusher/inputs_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/photon_pusher/analysis_photon_pusher.py
-
-[PlasmaAccelerationBoost2d]
-buildDir = .
-inputFile = Examples/Physics_applications/plasma_acceleration/inputs_2d_boost
-runtime_params = amr.n_cell=64 256 max_step=20
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[PlasmaAccelerationBoost3d]
-buildDir = .
-inputFile = Examples/Physics_applications/plasma_acceleration/inputs_3d_boost
-runtime_params = amr.n_cell=64 64 128 max_step=5
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[PlasmaAccelerationBoost3d_hybrid]
-buildDir = .
-inputFile = Examples/Physics_applications/plasma_acceleration/inputs_3d_boost
-runtime_params = amr.n_cell=64 64 128 max_step=25 warpx.grid_type=hybrid warpx.do_current_centering=0
-dim = 3
-addToCompileString =
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[PlasmaAccelerationMR]
-buildDir = .
-inputFile = Examples/Physics_applications/plasma_acceleration/inputs_2d
-runtime_params = amr.max_level=1 amr.n_cell=32 512 max_step=400
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[Plasma_lens]
-buildDir = .
-inputFile = Examples/Tests/plasma_lens/inputs_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/plasma_lens/analysis.py
-
-[Plasma_lens_boosted]
-buildDir = .
-inputFile = Examples/Tests/plasma_lens/inputs_boosted_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/plasma_lens/analysis.py
-
-[Plasma_lens_short]
-buildDir = .
-inputFile = Examples/Tests/plasma_lens/inputs_short_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/plasma_lens/analysis.py
-
-[PlasmaMirror]
-buildDir = .
-inputFile = Examples/Physics_applications/plasma_mirror/inputs_2d
-runtime_params = amr.n_cell=256 128 max_step=20
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[pml_psatd_dive_divb_cleaning]
-buildDir = .
-inputFile = Examples/Tests/pml/inputs_3d
-runtime_params = warpx.do_similar_dm_pml=0 warpx.abort_on_warning_threshold=medium ablastr.fillboundary_always_sync=1
-dim = 3
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/analysis_default_regression.py
-
-[pml_psatd_rz]
-buildDir = .
-inputFile = Examples/Tests/pml/inputs_rz
-runtime_params = warpx.cfl=0.7 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_RZ=TRUE USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/pml/analysis_pml_psatd_rz.py
-
-[pml_x_ckc]
-buildDir = .
-inputFile = Examples/Tests/pml/inputs_2d
-runtime_params = algo.maxwell_solver=ckc
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/pml/analysis_pml_ckc.py
-
-[pml_x_galilean]
-buildDir = .
-inputFile = Examples/Tests/pml/inputs_2d
-runtime_params = algo.maxwell_solver=psatd psatd.update_with_rho=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz rho divE warpx.cfl=0.7071067811865475 warpx.do_pml_dive_cleaning=1 warpx.do_pml_divb_cleaning=1 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium psatd.v_galilean=0. 0. 0.99 warpx.grid_type=collocated
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/pml/analysis_pml_psatd.py
-
-[pml_x_psatd]
-buildDir = .
-inputFile = Examples/Tests/pml/inputs_2d
-runtime_params = algo.maxwell_solver=psatd psatd.update_with_rho=1 diag1.fields_to_plot = Ex Ey Ez Bx By Bz rho divE warpx.cfl = 0.7071067811865475 warpx.do_pml_dive_cleaning=0 warpx.do_pml_divb_cleaning=0 chk.file_prefix=pml_x_psatd_chk chk.file_min_digits=5 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_FFT=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON
-restartTest = 1
-restartFileNum = 150
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/pml/analysis_pml_psatd.py
-
-[pml_x_yee]
-buildDir = .
-inputFile = Examples/Tests/pml/inputs_2d
-runtime_params = algo.maxwell_solver=yee chk.file_prefix=pml_x_yee_chk chk.file_min_digits=5
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 1
-restartFileNum = 150
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/pml/analysis_pml_yee.py
-
-[projection_divb_cleaner_rz]
-buildDir = .
-inputFile = Examples/Tests/projection_divb_cleaner/inputs_rz
-runtime_params = warpx.abort_on_warning_threshold=medium
-dim = 2
-addToCompileString = USE_RZ=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_OPENPMD=ON
-restartTest = 0
-useMPI = 1
-numprocs = 1
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/projection_divb_cleaner/analysis_rz.py
-
-[Proton_Boron_Fusion_2D]
-buildDir = .
-inputFile = Examples/Tests/nuclear_fusion/inputs_proton_boron_2d
-runtime_params =
-dim = 2
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=2
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py
-
-[Proton_Boron_Fusion_3D]
-buildDir = .
-inputFile = Examples/Tests/nuclear_fusion/inputs_proton_boron_3d
-runtime_params =
-dim = 3
-addToCompileString =
-cmakeSetupOpts = -DWarpX_DIMS=3
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py
-
-[Python_background_mcc]
-buildDir = .
-inputFile = Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py
-runtime_params =
-customRunCmd = python3 PICMI_inputs_2d.py
-dim = 2
-addToCompileString = USE_PYTHON_MAIN=TRUE
-cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON
-target = pip_install
-restartTest = 0
-useMPI = 1
-numprocs = 2
-useOMP = 1
-numthreads = 1
-analysisRoutine = Examples/Physics_applications/capacitive_discharge/analysis_2d.py
-
-[Python_background_mcc_1d]
-buildDir = .
-inputFile = Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_1d.py --test --pythonsolver -dim = 1 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_OPENPMD=TRUE QED=FALSE -cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -DWarpX_QED=OFF -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Physics_applications/capacitive_discharge/analysis_1d.py - -[Python_background_mcc_1d_tridiag] -buildDir = . -inputFile = Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_1d.py --test -dim = 1 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_OPENPMD=TRUE QED=FALSE -cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -DWarpX_QED=OFF -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Physics_applications/capacitive_discharge/analysis_1d.py - -[Python_collisionXZ] -buildDir = . -inputFile = Examples/Tests/collision/PICMI_inputs_2d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_2d.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/collision/analysis_collision_2d.py -aux1File = Regression/PostProcessingUtils/post_processing_utils.py - -[Python_dirichletbc] -buildDir = . -inputFile = Examples/Tests/electrostatic_dirichlet_bc/PICMI_inputs_2d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_2d.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/electrostatic_dirichlet_bc/analysis.py - -[Python_dsmc_1d] -buildDir = . -inputFile = Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_1d.py --test --dsmc -dim = 1 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_OPENPMD=TRUE QED=FALSE -cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -DWarpX_QED=OFF -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py - -[Python_ElectrostaticSphereEB] -buildDir = . -inputFile = Examples/Tests/electrostatic_sphere_eb/PICMI_inputs_3d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_3d.py -dim = 3 -addToCompileString = USE_EB=TRUE USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/electrostatic_sphere_eb/analysis.py - -[Python_gaussian_beam] -buildDir = . 
-inputFile = Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py -customRunCmd = python3 PICMI_inputs_gaussian_beam.py -runtime_params = -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_gaussian_beam_no_field_output] -buildDir = . -inputFile = Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py -customRunCmd = python3 PICMI_inputs_gaussian_beam.py --fields_to_plot none -runtime_params = -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = - -[Python_gaussian_beam_opmd] -buildDir = . -inputFile = Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py -customRunCmd = python3 PICMI_inputs_gaussian_beam.py --diagformat=openpmd -runtime_params = -dim = 3 -addToCompileString = USE_OPENPMD=TRUE USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_OPENPMD=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = - -[Python_gaussian_beam_opmd_no_field_output] -buildDir = . -inputFile = Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py -customRunCmd = python PICMI_inputs_gaussian_beam.py --diagformat=openpmd --fields_to_plot none -runtime_params = -dim = 3 -addToCompileString = USE_OPENPMD=TRUE USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_OPENPMD=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = - -[Python_id_cpu_read] -buildDir = . -inputFile = Examples/Tests/restart/PICMI_inputs_id_cpu_read.py -runtime_params = -customRunCmd = python3 PICMI_inputs_id_cpu_read.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 - -[Python_ionization] -buildDir = . -inputFile = Examples/Tests/ionization/PICMI_inputs_2d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_2d.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/ionization/analysis_ionization.py - -[Python_Langmuir] -buildDir = . -inputFile = Examples/Tests/langmuir/PICMI_inputs_3d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_3d.py -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_Langmuir_2d] -buildDir = . 
-inputFile = Examples/Tests/langmuir/PICMI_inputs_2d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_2d.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_Langmuir_rz_multimode] -buildDir = . -inputFile = Examples/Tests/langmuir/PICMI_inputs_rz.py -runtime_params = -customRunCmd = python3 PICMI_inputs_rz.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_RZ=TRUE -cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_LaserAcceleration] -buildDir = . -inputFile = Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_3d.py -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_LaserAcceleration_1d] -buildDir = . -inputFile = Examples/Physics_applications/laser_acceleration/PICMI_inputs_1d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_1d.py -dim = 1 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_OPENPMD=TRUE QED=FALSE -cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -DWarpX_QED=OFF -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_LaserAccelerationMR] -buildDir = . -inputFile = Examples/Physics_applications/laser_acceleration/PICMI_inputs_2d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_2d.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_LaserAccelerationRZ] -buildDir = . -inputFile = Examples/Physics_applications/laser_acceleration/PICMI_inputs_rz.py -runtime_params = -customRunCmd = python3 PICMI_inputs_rz.py -dim = 2 -addToCompileString = USE_RZ=TRUE USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_LaserIonAcc2d] -buildDir = . -inputFile = Examples/Physics_applications/laser_ion/PICMI_inputs_2d.py -outputFile= diags/Python_LaserIonAcc2d_plt -runtime_params = -customRunCmd = python3 PICMI_inputs_2d.py -dim = 2 -addToCompileString = USE_OPENPMD=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_OPENPMD=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_openpmd_regression.py - -[Python_LoadExternalGridField3D] -buildDir = . 
-inputFile = Examples/Tests/LoadExternalField/PICMI_inputs_3d_grid_fields.py -runtime_params = -customRunCmd = python3 PICMI_inputs_3d_grid_fields.py -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/LoadExternalField/analysis_3d.py - -[Python_LoadExternalParticleField3D] -buildDir = . -inputFile = Examples/Tests/LoadExternalField/PICMI_inputs_3d_particle_fields.py -runtime_params = -customRunCmd = python3 PICMI_inputs_3d_particle_fields.py -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/LoadExternalField/analysis_3d.py - -[Python_magnetostatic_eb_3d] -buildDir = . -inputFile = Examples/Tests/magnetostatic_eb/PICMI_inputs_3d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_3d.py -dim = 3 -addToCompileString = USE_EB=TRUE USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_EB=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 2 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_magnetostatic_eb_rz] -buildDir = . -inputFile = Examples/Tests/magnetostatic_eb/PICMI_inputs_rz.py -runtime_params = -customRunCmd = python3 PICMI_inputs_rz.py -dim = 2 -addToCompileString = USE_EB=TRUE USE_PYTHON_MAIN=TRUE USE_RZ=TRUE -cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_EB=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 2 -analysisRoutine = Examples/Tests/magnetostatic_eb/analysis_rz.py - -[Python_ohms_law_solver_EM_modes_1d] -buildDir = . -inputFile = Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py -runtime_params = warpx.abort_on_warning_threshold = medium -customRunCmd = python3 PICMI_inputs.py --test --dim 1 --bdir z -dim = 1 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_OPENPMD=TRUE QED=FALSE -cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -DWarpX_QED=OFF -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/ohm_solver_EM_modes/analysis.py - -[Python_ohms_law_solver_EM_modes_rz] -buildDir = . -inputFile = Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py -runtime_params = warpx.abort_on_warning_threshold = medium -customRunCmd = python3 PICMI_inputs_rz.py --test -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_OPENPMD=TRUE QED=FALSE USE_RZ=TRUE -cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_APP=OFF -DWarpX_QED=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/ohm_solver_EM_modes/analysis_rz.py - -[Python_ohms_law_solver_ion_beam_1d] -buildDir = . 
-inputFile = Examples/Tests/ohm_solver_ion_beam_instability/PICMI_inputs.py -runtime_params = warpx.abort_on_warning_threshold = medium -customRunCmd = python3 PICMI_inputs.py --test --dim 1 --resonant -dim = 1 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_OPENPMD=TRUE QED=FALSE -cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -DWarpX_QED=OFF -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/ohm_solver_ion_beam_instability/analysis.py - -[Python_ohms_law_solver_landau_damping_2d] -buildDir = . -inputFile = Examples/Tests/ohm_solver_ion_Landau_damping/PICMI_inputs.py -runtime_params = warpx.abort_on_warning_threshold = medium -customRunCmd = python3 PICMI_inputs.py --test --dim 2 --temp_ratio 0.1 -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_OPENPMD=TRUE QED=FALSE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -DWarpX_QED=OFF -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py - -[Python_ohms_law_solver_magnetic_reconnection_2d] -buildDir = . -inputFile = Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py -runtime_params = warpx.abort_on_warning_threshold = medium -customRunCmd = python3 PICMI_inputs.py --test -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_OPENPMD=TRUE QED=FALSE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -DWarpX_QED=OFF -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py - -[Python_particle_attr_access] -buildDir = . -inputFile = Examples/Tests/particle_data_python/PICMI_inputs_2d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_2d.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/particle_data_python/analysis.py - -[Python_particle_attr_access_unique] -buildDir = . -inputFile = Examples/Tests/particle_data_python/PICMI_inputs_2d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_2d.py --unique -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/particle_data_python/analysis.py - -[Python_particle_reflection] -buildDir = . -inputFile = Examples/Tests/particle_boundary_process/PICMI_inputs_reflection.py -runtime_params = -customRunCmd = python3 PICMI_inputs_reflection.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/particle_boundary_process/analysis_reflection.py - -[Python_particle_scrape] -buildDir = . 
-inputFile = Examples/Tests/particle_boundary_scrape/PICMI_inputs_scrape.py -runtime_params = -customRunCmd = python3 PICMI_inputs_scrape.py -dim = 3 -addToCompileString = USE_EB=TRUE USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/particle_boundary_scrape/analysis_scrape.py - -# TODO: Enable in pyAMReX, then enable lines in PICMI_inputs_2d.py again -# https://github.com/AMReX-Codes/pyamrex/issues/163 -[Python_pass_mpi_comm] -buildDir = . -inputFile = Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_2d.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -# TODO: comment in again once enabled -#analysisRoutine = Examples/Tests/pass_mpi_communicator/analysis.py - -[Python_PlasmaAcceleration] -buildDir = . -inputFile = Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration.py -runtime_params = -customRunCmd = python3 PICMI_inputs_plasma_acceleration.py -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_PlasmaAcceleration1d] -buildDir = . -inputFile = Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_1d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_plasma_acceleration_1d.py -dim = 1 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_OPENPMD=TRUE QED=FALSE -cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -DWarpX_QED=OFF -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_PlasmaAccelerationMR] -buildDir = . -inputFile = Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_mr.py -runtime_params = -customRunCmd = python3 PICMI_inputs_plasma_acceleration_mr.py -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_plasma_lens] -buildDir = . -inputFile = Examples/Tests/plasma_lens/PICMI_inputs_3d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_3d.py -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/plasma_lens/analysis.py - -[Python_prev_positions] -buildDir = . 
-inputFile = Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_prev_pos_2d.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_projection_divb_cleaner_3d] -buildDir = . -inputFile = Examples/Tests/projection_divb_cleaner/PICMI_inputs_3d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_3d.py -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_projection_divb_cleaner_callback_3d] -buildDir = . -inputFile = Examples/Tests/projection_divb_cleaner/PICMI_inputs_3D_pyload.py -runtime_params = -customRunCmd = python3 PICMI_inputs_3D_pyload.py -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_OPENPMD=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Python_reduced_diags_loadbalancecosts_timers] -buildDir = . -inputFile = Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py -runtime_params = algo.load_balance_costs_update=Timers -customRunCmd = python3 PICMI_inputs_loadbalancecosts.py -dim = 3 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py - -[Python_restart_eb] -buildDir = . -inputFile = Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py -runtime_params = -customRunCmd = python3 PICMI_inputs_restart_eb.py -dim = 3 -addToCompileString = USE_EB=TRUE USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 1 -restartFileNum = 30 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/restart/analysis_restart.py - -[Python_restart_runtime_components] -buildDir = . -inputFile = Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py -runtime_params = -customRunCmd = python3 PICMI_inputs_runtime_component_analyze.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 1 -restartFileNum = 5 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 - -[Python_wrappers] -buildDir = . -inputFile = Examples/Tests/python_wrappers/PICMI_inputs_2d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_2d.py -dim = 2 -addToCompileString = USE_FFT=TRUE USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[qed_breit_wheeler_2d] -buildDir = . 
-inputFile = Examples/Tests/qed/breit_wheeler/inputs_2d -aux1File = Examples/Tests/qed/breit_wheeler/analysis_core.py -runtime_params = warpx.abort_on_warning_threshold = high -dim = 2 -addToCompileString = QED=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_QED=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/qed/breit_wheeler/analysis_yt.py - -[qed_breit_wheeler_2d_opmd] -buildDir = . -inputFile = Examples/Tests/qed/breit_wheeler/inputs_2d -aux1File = Examples/Tests/qed/breit_wheeler/analysis_core.py -runtime_params = diag1.format = openpmd diag1.openpmd_backend = h5 warpx.abort_on_warning_threshold = high -dim = 2 -addToCompileString = QED=TRUE USE_OPENPMD=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_QED=ON -DWarpX_OPENPMD=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -outputFile = qed_breit_wheeler_2d_opmd_plt -analysisRoutine = Examples/Tests/qed/breit_wheeler/analysis_opmd.py - -[qed_breit_wheeler_3d] -buildDir = . -inputFile = Examples/Tests/qed/breit_wheeler/inputs_3d -aux1File = Examples/Tests/qed/breit_wheeler/analysis_core.py -runtime_params = warpx.abort_on_warning_threshold = high -dim = 3 -addToCompileString = QED=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_QED=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/qed/breit_wheeler/analysis_yt.py - -[qed_breit_wheeler_3d_opmd] -buildDir = . -inputFile = Examples/Tests/qed/breit_wheeler/inputs_3d -aux1File = Examples/Tests/qed/breit_wheeler/analysis_core.py -runtime_params = diag1.format = openpmd diag1.openpmd_backend = h5 warpx.abort_on_warning_threshold = high -dim = 3 -addToCompileString = QED=TRUE USE_OPENPMD=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_QED=ON -DWarpX_OPENPMD=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -outputFile = qed_breit_wheeler_3d_opmd_plt -analysisRoutine = Examples/Tests/qed/breit_wheeler/analysis_opmd.py - -[qed_quantum_sync_2d] -buildDir = . -inputFile = Examples/Tests/qed/quantum_synchrotron/inputs_2d -runtime_params = warpx.abort_on_warning_threshold = high -dim = 2 -addToCompileString = QED=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_QED=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/qed/quantum_synchrotron/analysis.py - -[qed_quantum_sync_3d] -buildDir = . -inputFile = Examples/Tests/qed/quantum_synchrotron/inputs_3d -runtime_params = warpx.abort_on_warning_threshold = high -dim = 3 -addToCompileString = QED=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_QED=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/qed/quantum_synchrotron/analysis.py - -[qed_schwinger1] -buildDir = . -inputFile = Examples/Tests/qed/schwinger/inputs_3d_schwinger -runtime_params = warpx.E_external_grid = 1.e16 0 0 warpx.B_external_grid = 16792888.570516706 5256650.141557486 18363530.799561853 -dim = 3 -addToCompileString = QED=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_QED=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/qed/schwinger/analysis_schwinger.py - -[qed_schwinger2] -buildDir = . 
-inputFile = Examples/Tests/qed/schwinger/inputs_3d_schwinger -runtime_params = warpx.E_external_grid = 1.e18 0 0 warpx.B_external_grid = 1679288857.0516706 525665014.1557486 1836353079.9561853 qed_schwinger.xmin = -2.5e-7 qed_schwinger.xmax = 2.49e-7 -dim = 3 -addToCompileString = QED=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_QED=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/qed/schwinger/analysis_schwinger.py - -[qed_schwinger3] -buildDir = . -inputFile = Examples/Tests/qed/schwinger/inputs_3d_schwinger -runtime_params = warpx.E_external_grid = 0 1.090934525450495e+17 0 -dim = 3 -addToCompileString = QED=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_QED=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/qed/schwinger/analysis_schwinger.py - -[qed_schwinger4] -buildDir = . -inputFile = Examples/Tests/qed/schwinger/inputs_3d_schwinger -runtime_params = warpx.E_external_grid = 0 0 2.5e+20 warpx.B_external_grid = 0 833910140000. 0 qed_schwinger.ymin = -2.5e-7 qed_schwinger.zmax = 2.49e-7 -dim = 3 -addToCompileString = QED=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_QED=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/qed/schwinger/analysis_schwinger.py - -[radiation_reaction] -buildDir = . -inputFile = Examples/Tests/radiation_reaction/test_const_B_analytical/inputs_3d -runtime_params = -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py - -[reduced_diags] -buildDir = . -inputFile = Examples/Tests/reduced_diags/inputs -aux1File = Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py -runtime_params = -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/reduced_diags/analysis_reduced_diags.py - -[reduced_diags_loadbalancecosts_heuristic] -buildDir = . -inputFile = Examples/Tests/reduced_diags/inputs_loadbalancecosts -runtime_params = algo.load_balance_costs_update=Heuristic -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py - -[reduced_diags_loadbalancecosts_timers] -buildDir = . -inputFile = Examples/Tests/reduced_diags/inputs_loadbalancecosts -runtime_params = algo.load_balance_costs_update=Timers -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py - -[reduced_diags_loadbalancecosts_timers_psatd] -buildDir = . -inputFile = Examples/Tests/reduced_diags/inputs_loadbalancecosts -runtime_params = algo.load_balance_costs_update=Timers -dim = 3 -addToCompileString = USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py - -[reduced_diags_single_precision] -buildDir = . 
-inputFile = Examples/Tests/reduced_diags/inputs -aux1File = Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py -runtime_params = -dim = 3 -addToCompileString = PRECISION=FLOAT USE_SINGLE_PRECISION_PARTICLES=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PRECISION=SINGLE -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/reduced_diags/analysis_reduced_diags_single.py - -[RefinedInjection] -buildDir = . -inputFile = Examples/Physics_applications/laser_acceleration/inputs_2d -runtime_params = warpx.refine_plasma=1 amr.ref_ratio_vect = 2 1 -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py - -[relativistic_space_charge_initialization] -buildDir = . -inputFile = Examples/Tests/relativistic_space_charge_initialization/inputs_3d -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -runtime_params = -analysisRoutine = Examples/Tests/relativistic_space_charge_initialization/analysis.py - -[RepellingParticles] -buildDir = . -inputFile = Examples/Tests/repelling_particles/inputs_2d -runtime_params = -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/repelling_particles/analysis_repelling.py - -[resample_velocity_coincidence_thinning] -buildDir = . -inputFile = Examples/Tests/resampling/inputs_1d_velocity_coincidence_thinning -runtime_params = -dim = 1 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=1 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[resample_velocity_coincidence_thinning_cartesian] -buildDir = . -inputFile = Examples/Tests/resampling/inputs_1d_velocity_coincidence_thinning_cartesian -runtime_params = -dim = 1 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=1 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[restart] -buildDir = . -inputFile = Examples/Tests/restart/inputs -runtime_params = chk.file_prefix=restart_chk chk.file_min_digits=5 -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 1 -restartFileNum = 5 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/restart/analysis_restart.py - -[restart_psatd] -buildDir = . -inputFile = Examples/Tests/restart/inputs -runtime_params = algo.maxwell_solver=psatd psatd.use_default_v_galilean=1 particles.use_fdtd_nci_corr=0 chk.file_prefix=restart_psatd_chk chk.file_min_digits=5 boundary.field_lo=periodic periodic damped boundary.field_hi=periodic periodic damped psatd.current_correction=0 warpx.abort_on_warning_threshold=medium -dim = 3 -addToCompileString = USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON -restartTest = 1 -restartFileNum = 5 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/restart/analysis_restart.py - -[restart_psatd_time_avg] -buildDir = . 
-inputFile = Examples/Tests/restart/inputs -runtime_params = algo.maxwell_solver=psatd psatd.use_default_v_galilean=1 particles.use_fdtd_nci_corr=0 chk.file_prefix=restart_psatd_time_avg_chk chk.file_min_digits=5 boundary.field_lo=periodic periodic damped boundary.field_hi=periodic periodic damped psatd.do_time_averaging=1 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium -dim = 3 -addToCompileString = USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON -restartTest = 1 -restartFileNum = 5 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/restart/analysis_restart.py - -[RigidInjection_BTD] -buildDir = . -inputFile = Examples/Tests/rigid_injection/inputs_2d_BoostedFrame -runtime_params = -dim = 2 -addToCompileString = USE_OPENPMD=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_OPENPMD=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/rigid_injection/analysis_rigid_injection_BoostedFrame.py - -[RigidInjection_lab] -buildDir = . -inputFile = Examples/Tests/rigid_injection/inputs_2d_LabFrame -runtime_params = -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/rigid_injection/analysis_rigid_injection_LabFrame.py - -[scraping] -buildDir = . -inputFile = Examples/Tests/scraping/inputs_rz -runtime_params = warpx.abort_on_warning_threshold = medium -dim = 2 -addToCompileString = USE_EB=TRUE USE_RZ=TRUE USE_OPENPMD=TRUE -cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_EB=ON -DWarpX_OPENPMD=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/scraping/analysis_rz.py - -[scraping_filter] -buildDir = . -inputFile = Examples/Tests/scraping/inputs_rz_filter -runtime_params = warpx.abort_on_warning_threshold = medium -dim = 2 -addToCompileString = USE_EB=TRUE USE_RZ=TRUE USE_OPENPMD=TRUE -cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_EB=ON -DWarpX_OPENPMD=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/scraping/analysis_rz_filter.py - -[silver_mueller_1d] -buildDir = . -inputFile = Examples/Tests/silver_mueller/inputs_1d -runtime_params = -dim = 1 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=1 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/silver_mueller/analysis_silver_mueller.py - -[silver_mueller_2d_x] -buildDir = . -inputFile = Examples/Tests/silver_mueller/inputs_2d_x -runtime_params = -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/silver_mueller/analysis_silver_mueller.py - -[silver_mueller_2d_z] -buildDir = . -inputFile = Examples/Tests/silver_mueller/inputs_2d_z -runtime_params = -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/silver_mueller/analysis_silver_mueller.py - -[silver_mueller_rz_z] -buildDir = . -inputFile = Examples/Tests/silver_mueller/inputs_rz_z -runtime_params = -dim = 2 -addToCompileString = USE_RZ=TRUE -cmakeSetupOpts = -DWarpX_DIMS=RZ -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/silver_mueller/analysis_silver_mueller.py - -[space_charge_initialization] -buildDir = . 
-inputFile = Examples/Tests/space_charge_initialization/inputs_3d -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -runtime_params = -analysisRoutine = Examples/Tests/space_charge_initialization/analysis.py - -[space_charge_initialization_2d] -buildDir = . -inputFile = Examples/Tests/space_charge_initialization/inputs_3d -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -runtime_params = geometry.dims=2 -analysisRoutine = Examples/Tests/space_charge_initialization/analysis.py - -[subcyclingMR] -buildDir = . -inputFile = Examples/Tests/subcycling/inputs_2d -runtime_params = -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[Uniform_2d] -buildDir = . -inputFile = Examples/Physics_applications/uniform_plasma/inputs_2d -runtime_params = -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/analysis_default_regression.py - -[uniform_plasma_restart] -buildDir = . -inputFile = Examples/Physics_applications/uniform_plasma/inputs_3d -runtime_params = chk.file_prefix=uniform_plasma_restart_chk chk.file_min_digits=5 -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 1 -restartFileNum = 6 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/restart/analysis_restart.py - -[uniform_plasma_multiJ] -buildDir = . -inputFile = Examples/Tests/nci_psatd_stability/inputs_3d -runtime_params = psatd.solution_type=first-order psatd.J_in_time=constant psatd.rho_in_time=constant warpx.do_dive_cleaning=1 warpx.do_divb_cleaning=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=1 diag1.fields_to_plot=Bx By Bz divE Ex Ey Ez F G jx jy jz rho warpx.abort_on_warning_threshold=medium -dim = 3 -addToCompileString = USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_multiJ.py - -[VayDeposition2D] -buildDir = . -inputFile = Examples/Tests/vay_deposition/inputs_2d -runtime_params = -dim = 2 -addToCompileString = USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_FFT=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/vay_deposition/analysis.py - -[VayDeposition3D] -buildDir = . -inputFile = Examples/Tests/vay_deposition/inputs_3d -runtime_params = -dim = 3 -addToCompileString = USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/vay_deposition/analysis.py - -[NodalElectrostaticSolver] -buildDir = . -inputFile = Examples/Tests/nodal_electrostatic/inputs_3d -runtime_params = warpx.abort_on_warning_threshold=high -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 1 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/nodal_electrostatic/analysis_3d.py - -[BeamBeamCollision] -buildDir = . 
-inputFile = Examples/Physics_applications/beam-beam_collision/inputs -runtime_params = warpx.abort_on_warning_threshold=high -dim = 3 -addToCompileString = USE_OPENPMD=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_OPENPMD=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -outputFile = BeamBeamCollision_plt -analysisRoutine = Examples/analysis_default_openpmd_regression.py - -[spacecraft_charging] -buildDir = . -inputFile = Examples/Physics_applications/spacecraft_charging/PICMI_inputs_rz.py -runtime_params = -customRunCmd = python3 PICMI_inputs_rz.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_RZ=TRUE -cmakeSetupOpts = -DWarpX_DIMS="RZ" -DWarpX_EB=ON -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -outputFile = spacecraft_charging_plt -analysisRoutine = Examples/Physics_applications/spacecraft_charging/analysis.py - -[Point_of_contact_EB_3d] -buildDir = . -inputFile = Examples/Tests/point_of_contact_EB/inputs_3d -runtime_params = -dim = 3 -addToCompileString = USE_EB=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_EB=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -outputFile = Point_of_contact_EB_3d_plt -analysisRoutine = Examples/Tests/point_of_contact_EB/analysis.py - -[Point_of_contact_EB_rz] -buildDir = . -inputFile = Examples/Tests/point_of_contact_EB/inputs_rz -runtime_params = -dim = 2 -addToCompileString = USE_RZ=TRUE USE_EB=TRUE -cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_EB=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -outputFile = Point_of_contact_EB_rz_plt -analysisRoutine = Examples/Tests/point_of_contact_EB/analysis.py - -[ThetaImplicitPicard_1d] -buildDir = . -inputFile = Examples/Tests/Implicit/inputs_1d -runtime_params = warpx.abort_on_warning_threshold=high amr.max_grid_size=32 -dim = 1 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=1 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/Implicit/analysis_1d.py - -[ThetaImplicitJFNK_VandB_2d] -buildDir = . -inputFile = Examples/Tests/Implicit/inputs_vandb_jfnk_2d -runtime_params = warpx.abort_on_warning_threshold=high -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/Implicit/analysis_vandb_jfnk_2d.py - -[ThetaImplicitJFNK_VandB_2d_PICMI] -buildDir = . -inputFile = Examples/Tests/Implicit/PICMI_inputs_vandb_jfnk_2d.py -runtime_params = -customRunCmd = python3 PICMI_inputs_vandb_jfnk_2d.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/Implicit/analysis_vandb_jfnk_2d.py - -[SemiImplicitPicard_1d] -buildDir = . -inputFile = Examples/Tests/Implicit/inputs_1d_semiimplicit -runtime_params = warpx.abort_on_warning_threshold=high amr.max_grid_size=32 -dim = 1 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=1 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/Implicit/analysis_1d.py - -[EnergyConservingThermalPlasma] -buildDir = . 
-inputFile = Examples/Tests/energy_conserving_thermal_plasma/inputs_2d_electrostatic -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/energy_conserving_thermal_plasma/analysis.py - -[focusing_gaussian_beam] -buildDir = . -inputFile = Examples/Tests/gaussian_beam/inputs_focusing_beam -runtime_params = -dim = 3 -addToCompileString = USE_OPENPMD=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_OPENPMD=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/gaussian_beam/analysis_focusing_beam.py - -[particle_boundary_interaction] -buildDir = . -inputFile = Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py -runtime_params = -customRunCmd = python3 PICMI_inputs_rz.py -dim = 2 -addToCompileString = USE_PYTHON_MAIN=TRUE USE_RZ=TRUE -cmakeSetupOpts = -DWarpX_DIMS="RZ" -DWarpX_EB=ON -DWarpX_PYTHON=ON -target = pip_install -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -outputFile = particle_boundary_interaction_plt -analysisRoutine = Examples/Tests/particle_boundary_interaction/analysis.py - -[particle_thermal_boundary] -buildDir = . -inputFile = Examples/Tests/particle_thermal_boundary/inputs_2d -runtime_params = -dim = 2 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_OPENPMD=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/particle_thermal_boundary/analysis_2d.py - -[openbc_poisson_solver] -buildDir = . -inputFile = Examples/Tests/openbc_poisson_solver/inputs_3d -runtime_params = warpx.abort_on_warning_threshold = high -dim = 3 -addToCompileString = USE_OPENPMD=TRUE USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON -DWarpX_OPENPMD=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/openbc_poisson_solver/analysis.py - -[diff_lumi_diag] -buildDir = . -inputFile = Examples/Tests/diff_lumi_diag/inputs -runtime_params = warpx.abort_on_warning_threshold = medium -dim = 3 -addToCompileString = USE_OPENPMD=TRUE USE_FFT=TRUE -cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_FFT=ON -DWarpX_OPENPMD=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = Examples/Tests/diff_lumi_diag/analysis.py -aux1File = Tools/PostProcessing/read_raw_data.py diff --git a/Regression/prepare_file_ci.py b/Regression/prepare_file_ci.py deleted file mode 100644 index cb9bf0304f3..00000000000 --- a/Regression/prepare_file_ci.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright 2018-2019 Andrew Myers, Luca Fedeli, Maxence Thevenet -# Remi Lehe -# -# This file is part of WarpX. 
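For orientation, every removed block above follows the same flat key/value schema (inputFile, runtime_params, dim, cmakeSetupOpts, numprocs, analysisRoutine, ...). A minimal sketch of inspecting one such block with Python's configparser — the local file path and the chosen section name are assumptions for illustration, not part of this patch:

    # Editorial sketch, not part of the patch: read one test block from a
    # local copy of the removed WarpX-tests.ini (assumed to exist here).
    from configparser import ConfigParser

    cfg = ConfigParser()
    cfg.read("WarpX-tests.ini")          # assumed local copy of the removed file
    block = cfg["diff_lumi_diag"]        # any section name from the blocks above
    print(block.get("inputFile"))        # the WarpX inputs deck to run
    print(block.get("cmakeSetupOpts"))   # extra CMake flags, e.g. -DWarpX_DIMS=3
    print(block.getint("numprocs"))      # MPI ranks requested by the harness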
-# -# License: BSD-3-Clause-LBNL - -import os - -# This script modifies `WarpX-test.ini` (which is used for nightly builds) -# and creates the file `ci-test.ini` (which is used for continuous -# integration) -# The subtests that are selected are controlled by WARPX_TEST_DIM -# The architecture (CPU/GPU) is selected by WARPX_TEST_ARCH -import re - -# Get relevant environment variables -arch = os.environ.get("WARPX_TEST_ARCH", "CPU") - -ci_regular_cartesian_1d = os.environ.get("WARPX_CI_REGULAR_CARTESIAN_1D") == "TRUE" -ci_regular_cartesian_2d = os.environ.get("WARPX_CI_REGULAR_CARTESIAN_2D") == "TRUE" -ci_regular_cartesian_3d = os.environ.get("WARPX_CI_REGULAR_CARTESIAN_3D") == "TRUE" -ci_psatd = os.environ.get("WARPX_CI_PSATD", "TRUE") == "TRUE" -ci_single_precision = os.environ.get("WARPX_CI_SINGLE_PRECISION") == "TRUE" -ci_rz_or_nompi = os.environ.get("WARPX_CI_RZ_OR_NOMPI") == "TRUE" -ci_qed = os.environ.get("WARPX_CI_QED") == "TRUE" -ci_eb = os.environ.get("WARPX_CI_EB") == "TRUE" -ci_openpmd = os.environ.get("WARPX_CI_OPENPMD") == "TRUE" -ci_ccache = os.environ.get("WARPX_CI_CCACHE") == "TRUE" -ci_num_make_jobs = os.environ.get("WARPX_CI_NUM_MAKE_JOBS", None) - -# Find the directory in which the tests should be run -current_dir = os.getcwd() -test_dir = re.sub("warpx/Regression", "", current_dir) - -with open("WarpX-tests.ini") as f: - text = f.read() - -# Replace default folder name -text = re.sub("/home/regtester/AMReX_RegTesting", test_dir, text) - -# Remove the web directory -text = re.sub("[\w\-\/]*/web", "", text) - -# Add doComparison = 0 for each test -text = re.sub( - "\[(?P<name>.*)\]\nbuildDir = ", "[\g<name>]\ndoComparison = 0\nbuildDir = ", text -) - -# Change compile options when running on GPU -if arch == "GPU": - text = re.sub( - "addToCompileString =", "addToCompileString = USE_GPU=TRUE USE_OMP=FALSE ", text - ) -print("Compiling for %s" % arch) - -# Extra dependencies -if ci_openpmd: - text = re.sub( - "addToCompileString =", "addToCompileString = USE_OPENPMD=TRUE ", text - ) - -# always build with PSATD support (runtime controlled if used) -if ci_psatd: - text = re.sub("addToCompileString =", "addToCompileString = USE_FFT=TRUE ", text) - text = re.sub("USE_FFT=FALSE", "", text) - -# CCache -if ci_ccache: - text = re.sub("addToCompileString =", "addToCompileString = USE_CCACHE=TRUE ", text) - -# Add runtime options: -# > crash for unused variables -# > trap NaNs, divisions by zero, and overflows -# > abort upon any warning message by default -text = re.sub( - "runtime_params =", - "runtime_params = amrex.abort_on_unused_inputs=1 " - + "amrex.fpe_trap_invalid=1 amrex.fpe_trap_zero=1 amrex.fpe_trap_overflow=1 " - + "warpx.always_warn_immediately=1 warpx.abort_on_warning_threshold=low", - text, -) - -# Add runtime options for CPU: -# > serialize initial conditions and no dynamic scheduling in OpenMP -if arch == "CPU": - text = re.sub( - "runtime_params =", - "runtime_params = " - + "warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1", - text, - ) - -# Use less/more cores for compiling, e.g.
public CI only provides 2 cores -if ci_num_make_jobs is not None: - text = re.sub( - "numMakeJobs = \d+", "numMakeJobs = {}".format(ci_num_make_jobs), text - ) - -# Prevent emails from being sent -text = re.sub("sendEmailWhenFail = 1", "sendEmailWhenFail = 0", text) - -# Select the tests to be run -# -------------------------- - -# - Extract test blocks (they are identified by the fact that they contain "inputFile") -select_test_regex = r"(\[(.+\n)*inputFile(.+\n)*)" -test_blocks = [match[0] for match in re.findall(select_test_regex, text)] -# - Remove the test blocks from `text` (only the selected ones will be added back) -text = re.sub(select_test_regex, "", text) - - -def select_tests(blocks, match_string_list, do_test): - """Remove or keep tests from list in WarpX-tests.ini according to do_test variable""" - if do_test not in [True, False]: - raise ValueError("do_test must be True or False") - if do_test is False: - for match_string in match_string_list: - print("Selecting tests without " + match_string) - blocks = [block for block in blocks if match_string not in block] - else: - for match_string in match_string_list: - print("Selecting tests with " + match_string) - blocks = [block for block in blocks if match_string in block] - return blocks - - -if ci_regular_cartesian_1d: - test_blocks = select_tests(test_blocks, ["dim = 1"], True) - test_blocks = select_tests(test_blocks, ["USE_RZ=TRUE"], False) - test_blocks = select_tests( - test_blocks, ["PRECISION=FLOAT", "USE_SINGLE_PRECISION_PARTICLES=TRUE"], False - ) - test_blocks = select_tests(test_blocks, ["useMPI = 0"], False) - test_blocks = select_tests(test_blocks, ["QED=TRUE"], False) - test_blocks = select_tests(test_blocks, ["USE_EB=TRUE"], False) - -if ci_regular_cartesian_2d: - test_blocks = select_tests(test_blocks, ["dim = 2"], True) - test_blocks = select_tests(test_blocks, ["USE_RZ=TRUE"], False) - test_blocks = select_tests( - test_blocks, ["PRECISION=FLOAT", "USE_SINGLE_PRECISION_PARTICLES=TRUE"], False - ) - test_blocks = select_tests(test_blocks, ["useMPI = 0"], False) - test_blocks = select_tests(test_blocks, ["QED=TRUE"], False) - test_blocks = select_tests(test_blocks, ["USE_EB=TRUE"], False) - -if ci_regular_cartesian_3d: - test_blocks = select_tests(test_blocks, ["dim = 3"], True) - test_blocks = select_tests( - test_blocks, ["PRECISION=FLOAT", "USE_SINGLE_PRECISION_PARTICLES=TRUE"], False - ) - test_blocks = select_tests(test_blocks, ["useMPI = 0"], False) - test_blocks = select_tests(test_blocks, ["QED=TRUE"], False) - test_blocks = select_tests(test_blocks, ["USE_EB=TRUE"], False) - -if ci_single_precision: - test_blocks = select_tests( - test_blocks, ["PRECISION=FLOAT", "USE_SINGLE_PRECISION_PARTICLES=TRUE"], True - ) - -if ci_rz_or_nompi: - block1 = select_tests(test_blocks, ["USE_RZ=TRUE"], True) - block2 = select_tests(test_blocks, ["useMPI = 0"], True) - test_blocks = block1 + block2 - -if ci_qed: - test_blocks = select_tests(test_blocks, ["QED=TRUE"], True) - -if ci_eb: - test_blocks = select_tests(test_blocks, ["USE_RZ=TRUE"], False) - test_blocks = select_tests(test_blocks, ["USE_EB=TRUE"], True) - -# - Add the selected test blocks to the text -text = text + "\n" + "\n".join(test_blocks) - -with open("ci-tests.ini", "w") as f: - f.write(text) diff --git a/run_test.sh b/run_test.sh deleted file mode 100755 index 9e9f55d314e..00000000000 --- a/run_test.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/bin/bash - -# Copyright 2018-2020 Axel Huebl, David Grote, Edoardo Zoni -# Luca Fedeli, Maxence Thevenet, Remi Lehe 
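The selection machinery removed above reduces to one helper: select_tests keeps or drops whole ini blocks by substring match, driven by the WARPX_CI_* environment variables. A condensed, self-contained sketch of that behavior — the toy blocks are assumptions standing in for real sections:

    # Editorial sketch, condensed from the removed prepare_file_ci.py.
    def select_tests(blocks, match_string_list, do_test):
        # do_test=True: keep only blocks containing every match string;
        # do_test=False: drop every block containing any match string.
        for match_string in match_string_list:
            if do_test:
                blocks = [block for block in blocks if match_string in block]
            else:
                blocks = [block for block in blocks if match_string not in block]
        return blocks

    blocks = [
        "[test_a]\ndim = 2\naddToCompileString = QED=TRUE\n",  # toy stand-ins
        "[test_b]\ndim = 3\naddToCompileString =\n",
    ]
    print(select_tests(blocks, ["dim = 3"], True))    # -> only test_b survives
    print(select_tests(blocks, ["QED=TRUE"], False))  # -> only test_b survives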
-# -# -# This file is part of WarpX. -# -# License: BSD-3-Clause-LBNL - -# This script runs some of WarpX's standard regression tests, but -# without comparing the output to previously run simulations. -# This checks that: -# - The code compiles and runs without error -# - For some of the tests, a Python script checks that the results are -# physically correct. - -# The tests can be influenced by environment variables: -# Use `export WARPX_CI_CLEAN_TESTS=ON` in order to remove all subdirectories -# from each test directory, directly after a test has passed. -# Use `export WARPX_CI_DIM=3` or `export WARPX_CI_DIM=2` in order to -# select only the tests that correspond to this dimension. -# Use `export WARPX_TEST_ARCH=CPU` or `export WARPX_TEST_ARCH=GPU` in order -# to run the tests on CPU or GPU respectively. - -set -eu -o pipefail - -# Parse command line arguments: if test names are given as command line arguments, -# store them in variable tests_arg and define new command line argument to call -# regtest.py with option --tests (works also for single test) -tests_arg=$* -tests_run=${tests_arg:+--tests=${tests_arg}} - -# environment options -WARPX_CI_CLEAN_TESTS=${WARPX_CI_CLEAN_TESTS:-""} -WARPX_CI_TMP=${WARPX_CI_TMP:-""} - -# Remove contents and link to a previous test directory (intentionally two arguments) -rm -rf test_dir/* test_dir -# Create a temporary test directory -if [ -z "${WARPX_CI_TMP}" ]; then - tmp_dir=$(mktemp --help >/dev/null 2>&1 && mktemp -d -t ci-XXXXXXXXXX || mktemp -d "${TMPDIR:-/tmp}"/ci-XXXXXXXXXX) - if [ $? -ne 0 ]; then - echo "Cannot create temporary directory" - exit 1 - fi -else - tmp_dir=${WARPX_CI_TMP} -fi - -# Copy WarpX into current test directory -rm -rf ${tmp_dir}/warpx -mkdir -p ${tmp_dir}/warpx -cp -r ./* ${tmp_dir}/warpx - -# Link the test directory -ln -s ${tmp_dir} test_dir - -# Switch to the test directory -cd test_dir -echo "cd $PWD" - -# Prepare a virtual environment -rm -rf py-venv -python3 -m venv py-venv -source py-venv/bin/activate -python3 -m pip install --upgrade pip -python3 -m pip install --upgrade build packaging setuptools wheel -python3 -m pip install --upgrade cmake -python3 -m pip install --upgrade -r warpx/Regression/requirements.txt -python3 -m pip cache purge - -# Clone AMReX and warpx-data -git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 216ce6f37de4b65be57fc1006b3457b4fc318e03 && cd - -# warpx-data contains various required data sets -git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git -# openPMD-example-datasets contains various required data sets -mkdir -p openPMD-example-datasets -cd openPMD-example-datasets -curl -sOL https://github.com/openPMD/openPMD-example-datasets/raw/4ba1d257c5b4897c0a3cd57742bb0987343a902e/example-femm-thetaMode.h5 -curl -sOL https://github.com/openPMD/openPMD-example-datasets/raw/4ba1d257c5b4897c0a3cd57742bb0987343a902e/example-femm-3d.h5 -cd - - -# Clone the AMReX regression test utility -git clone https://github.com/AMReX-Codes/regression_testing.git - -# Prepare regression tests -mkdir -p rt-WarpX/WarpX-benchmarks -cd warpx/Regression -echo "cd $PWD" -python3 prepare_file_ci.py -cp ci-tests.ini ../../rt-WarpX -cp -r Checksum ../../regression_testing/ - -# Run tests -cd ../../regression_testing/ -echo "cd $PWD" -if [ -z "${WARPX_CI_CLEAN_TESTS}" ]; then - test_rm_dir="" -else - test_rm_dir="--clean_testdir" -fi -# run only tests specified in variable tests_arg (single test or multiple tests) -if [[ ! 
-z "${tests_arg}" ]]; then - python3 regtest.py ../rt-WarpX/ci-tests.ini ${test_rm_dir} --skip_comparison --no_update all "${tests_run}" -# run all tests (variables tests_arg and tests_run are empty) -else - python3 regtest.py ../rt-WarpX/ci-tests.ini ${test_rm_dir} --skip_comparison --no_update all -fi - -# clean up python virtual environment -cd ../ -echo "cd $PWD" -deactivate From e2bb0de94b424bfc2de257119302643f857e768c Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 6 Sep 2024 14:33:16 -0700 Subject: [PATCH 19/91] EBs: Compiled by Default, Controlled at Runtime (#4865) * Build Logic: Enable EBs by Default * EB: Introduce Runtime Parameter * Cleaning: Review Weiqun * Cleaning: EB w/ Clang Tidy * Formatting for Clarity Only changes spaces * CMake: Simplify `add_warpx_test` EB is not mutually exclusive anymore: we can run non-EB tests with EB compiled in. * Azure CI: EB is default-ON now --- .azure-pipelines.yml | 4 - .github/workflows/source/check_inputs.py | 2 +- CMakeLists.txt | 2 +- Docs/source/developers/testing.rst | 4 - Docs/source/install/cmake.rst | 4 +- Docs/source/usage/parameters.rst | 7 +- Examples/CMakeLists.txt | 13 -- .../beam_beam_collision/CMakeLists.txt | 1 - .../capacitive_discharge/CMakeLists.txt | 7 +- .../laser_acceleration/CMakeLists.txt | 14 -- .../laser_ion/CMakeLists.txt | 2 - .../plasma_acceleration/CMakeLists.txt | 8 - .../plasma_mirror/CMakeLists.txt | 1 - .../spacecraft_charging/CMakeLists.txt | 1 - .../uniform_plasma/CMakeLists.txt | 3 - .../Tests/accelerator_lattice/CMakeLists.txt | 3 - Examples/Tests/boosted_diags/CMakeLists.txt | 1 - Examples/Tests/boundaries/CMakeLists.txt | 1 - Examples/Tests/btd_rz/CMakeLists.txt | 1 - .../collider_relevant_diags/CMakeLists.txt | 1 - Examples/Tests/collision/CMakeLists.txt | 6 - Examples/Tests/diff_lumi_diag/CMakeLists.txt | 1 - Examples/Tests/divb_cleaning/CMakeLists.txt | 1 - Examples/Tests/dive_cleaning/CMakeLists.txt | 2 - .../electrostatic_dirichlet_bc/CMakeLists.txt | 2 - .../Tests/electrostatic_sphere/CMakeLists.txt | 5 - .../electrostatic_sphere_eb/CMakeLists.txt | 5 - .../embedded_boundary_cube/CMakeLists.txt | 3 - .../CMakeLists.txt | 1 - .../CMakeLists.txt | 1 - .../CMakeLists.txt | 2 - Examples/Tests/embedded_circle/CMakeLists.txt | 1 - .../CMakeLists.txt | 1 - Examples/Tests/field_probe/CMakeLists.txt | 1 - Examples/Tests/flux_injection/CMakeLists.txt | 2 - Examples/Tests/gaussian_beam/CMakeLists.txt | 2 - Examples/Tests/implicit/CMakeLists.txt | 4 - .../Tests/initial_distribution/CMakeLists.txt | 1 - .../initial_plasma_profile/CMakeLists.txt | 1 - Examples/Tests/ion_stopping/CMakeLists.txt | 1 - Examples/Tests/ionization/CMakeLists.txt | 3 - Examples/Tests/langmuir/CMakeLists.txt | 35 --- Examples/Tests/langmuir_fluids/CMakeLists.txt | 4 - Examples/Tests/larmor/CMakeLists.txt | 1 - Examples/Tests/laser_injection/CMakeLists.txt | 3 - .../laser_injection_from_file/CMakeLists.txt | 14 -- Examples/Tests/laser_on_fine/CMakeLists.txt | 1 - .../Tests/load_external_field/CMakeLists.txt | 6 - .../Tests/magnetostatic_eb/CMakeLists.txt | 3 - .../Tests/maxwell_hybrid_qed/CMakeLists.txt | 1 - .../Tests/nci_fdtd_stability/CMakeLists.txt | 2 - .../Tests/nci_psatd_stability/CMakeLists.txt | 17 -- .../Tests/nodal_electrostatic/CMakeLists.txt | 1 - Examples/Tests/nuclear_fusion/CMakeLists.txt | 6 - .../Tests/ohm_solver_em_modes/CMakeLists.txt | 2 - .../CMakeLists.txt | 1 - .../CMakeLists.txt | 1 - .../CMakeLists.txt | 1 - .../open_bc_poisson_solver/CMakeLists.txt | 1 - .../CMakeLists.txt | 1 - 
.../particle_boundary_process/CMakeLists.txt | 2 - .../particle_boundary_scrape/CMakeLists.txt | 2 - .../Tests/particle_data_python/CMakeLists.txt | 3 - .../particle_fields_diags/CMakeLists.txt | 4 +- Examples/Tests/particle_pusher/CMakeLists.txt | 1 - .../particle_thermal_boundary/CMakeLists.txt | 1 - .../Tests/particles_in_pml/CMakeLists.txt | 4 - .../pass_mpi_communicator/CMakeLists.txt | 1 - Examples/Tests/pec/CMakeLists.txt | 3 - Examples/Tests/photon_pusher/CMakeLists.txt | 1 - Examples/Tests/plasma_lens/CMakeLists.txt | 5 - Examples/Tests/pml/CMakeLists.txt | 8 - .../Tests/point_of_contact_eb/CMakeLists.txt | 2 - .../projection_divb_cleaner/CMakeLists.txt | 3 - Examples/Tests/python_wrappers/CMakeLists.txt | 1 - Examples/Tests/qed/CMakeLists.txt | 10 - .../Tests/radiation_reaction/CMakeLists.txt | 1 - Examples/Tests/reduced_diags/CMakeLists.txt | 5 - .../CMakeLists.txt | 1 - .../Tests/repelling_particles/CMakeLists.txt | 1 - Examples/Tests/resampling/CMakeLists.txt | 3 - Examples/Tests/restart/CMakeLists.txt | 9 - Examples/Tests/restart_eb/CMakeLists.txt | 4 +- Examples/Tests/rigid_injection/CMakeLists.txt | 2 - Examples/Tests/scraping/CMakeLists.txt | 2 - Examples/Tests/silver_mueller/CMakeLists.txt | 4 - Examples/Tests/single_particle/CMakeLists.txt | 1 - .../CMakeLists.txt | 2 - Examples/Tests/subcycling/CMakeLists.txt | 1 - Examples/Tests/vay_deposition/CMakeLists.txt | 2 - Source/BoundaryConditions/PML.H | 3 +- Source/BoundaryConditions/PML.cpp | 46 ++-- Source/BoundaryConditions/WarpXEvolvePML.cpp | 29 ++- .../BoundaryScrapingDiagnostics.cpp | 9 +- .../Diagnostics/ReducedDiags/ChargeOnEB.cpp | 36 +-- Source/Diagnostics/WarpXIO.cpp | 2 +- Source/EmbeddedBoundary/CMakeLists.txt | 1 + Source/EmbeddedBoundary/DistanceToEB.H | 56 +++-- Source/EmbeddedBoundary/Enabled.H | 2 +- Source/EmbeddedBoundary/ParticleScraper.H | 4 +- .../EmbeddedBoundary/WarpXFaceExtensions.cpp | 80 ++++--- Source/EmbeddedBoundary/WarpXInitEB.cpp | 35 +-- Source/Evolve/WarpXEvolve.cpp | 11 +- Source/FieldSolver/ElectrostaticSolver.cpp | 98 ++++---- .../FiniteDifferenceSolver/EvolveB.cpp | 32 ++- .../FiniteDifferenceSolver/EvolveE.cpp | 56 ++--- .../FiniteDifferenceSolver/EvolveECTRho.cpp | 6 +- .../FiniteDifferenceSolver/EvolveEPML.cpp | 37 ++- .../HybridPICModel/HybridPICModel.cpp | 76 +++--- .../HybridPICSolveE.cpp | 140 +++++------ .../MacroscopicEvolveE.cpp | 31 +-- .../MagnetostaticSolver.cpp | 1 + Source/Initialization/WarpXInitData.cpp | 101 ++++---- Source/Parallelization/WarpXRegrid.cpp | 45 ++-- Source/Particles/MultiParticleContainer.cpp | 10 +- Source/Particles/ParticleBoundaryBuffer.cpp | 221 +++++++++--------- .../Particles/PhysicalParticleContainer.cpp | 15 +- Source/Particles/WarpXParticleContainer.cpp | 9 +- Source/Utils/WarpXAlgorithmSelection.cpp | 2 +- Source/WarpX.H | 6 +- Source/WarpX.cpp | 159 ++++++++----- Source/ablastr/fields/PoissonSolver.H | 184 +++++++++------ Source/ablastr/fields/VectorPoissonSolver.H | 47 ++-- setup.py | 2 +- 124 files changed, 848 insertions(+), 1060 deletions(-) diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index 6e9884966fe..1355dc2f647 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -36,10 +36,6 @@ jobs: cylindrical_rz: WARPX_CMAKE_FLAGS: -DWarpX_DIMS=RZ -DWarpX_FFT=ON -DWarpX_PYTHON=ON WARPX_RZ_FFT: 'TRUE' - # embedded boundaries - embedded_boundaries: - WARPX_CMAKE_FLAGS: -DWarpX_DIMS='1;2;3;RZ' -DWarpX_FFT=ON -DWarpX_PYTHON=ON -DWarpX_EB=ON - WARPX_RZ_FFT: 'TRUE' # single precision #single_precision: # WARPX_CMAKE_FLAGS: 
-DWarpX_DIMS='1;2;3;RZ' -DWarpX_FFT=ON -DWarpX_PYTHON=ON -DWarpX_PRECISION=SINGLE diff --git a/.github/workflows/source/check_inputs.py b/.github/workflows/source/check_inputs.py index 3cb2d8f735e..2012d6ed672 100755 --- a/.github/workflows/source/check_inputs.py +++ b/.github/workflows/source/check_inputs.py @@ -27,7 +27,7 @@ # skip lines related to other function arguments # NOTE: update range call to reflect changes # in the interface of 'add_warpx_test' - for _ in range(3): + for _ in range(2): # skip over: dims, numprocs next(f) # strip leading whitespaces, remove end-of-line comments testinput = next(f).lstrip().split(" ")[0] diff --git a/CMakeLists.txt b/CMakeLists.txt index d20de57f81c..3b4e9199f53 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -68,7 +68,7 @@ include(CMakeDependentOption) option(WarpX_APP "Build the WarpX executable application" ON) option(WarpX_ASCENT "Ascent in situ diagnostics" OFF) option(WarpX_CATALYST "Catalyst in situ diagnostics" OFF) -option(WarpX_EB "Embedded boundary support" OFF) +option(WarpX_EB "Embedded boundary support" ON) option(WarpX_LIB "Build WarpX as a library" OFF) option(WarpX_MPI "Multi-node support (message-passing)" ON) option(WarpX_OPENPMD "openPMD I/O (HDF5, ADIOS)" ON) diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/testing.rst index fd57b61fa17..8b85976c6f0 100644 --- a/Docs/source/developers/testing.rst +++ b/Docs/source/developers/testing.rst @@ -103,7 +103,6 @@ A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as test_1d_laser_acceleration # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_laser_acceleration # inputs analysis.py # analysis diags/diag1000100 # output (plotfile) @@ -118,7 +117,6 @@ A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as test_2d_laser_acceleration_picmi # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_laser_acceleration_picmi.py # inputs analysis.py # analysis diags/diag1000100 # output (plotfile) @@ -133,7 +131,6 @@ A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as test_3d_laser_acceleration_restart # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_laser_acceleration_restart # inputs analysis_default_restart.py # analysis diags/diag1000100 # output (plotfile) @@ -150,7 +147,6 @@ A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as test_rz_laser_acceleration_picmi # name RZ # dims 2 # nprocs - OFF # eb "inputs_test_rz_laser_acceleration_picmi.py --test --dir 1" # inputs analysis.py # analysis diags/diag1/ # output (openPMD time series) diff --git a/Docs/source/install/cmake.rst b/Docs/source/install/cmake.rst index 959c8cb6ad6..60d9eecc2b4 100644 --- a/Docs/source/install/cmake.rst +++ b/Docs/source/install/cmake.rst @@ -88,7 +88,7 @@ CMake Option Default & Values Descr ``WarpX_CATALYST`` ON/**OFF** Catalyst in situ visualization ``WarpX_COMPUTE`` NOACC/**OMP**/CUDA/SYCL/HIP On-node, accelerated computing backend ``WarpX_DIMS`` **3**/2/1/RZ Simulation dimensionality. Use ``"1;2;RZ;3"`` for all. 
-``WarpX_EB`` ON/**OFF** Embedded boundary support (not supported in RZ yet) +``WarpX_EB`` **ON**/OFF Embedded boundary support (not supported in RZ yet) ``WarpX_IPO`` ON/**OFF** Compile WarpX with interprocedural optimization (aka LTO) ``WarpX_LIB`` ON/**OFF** Build WarpX as a library, e.g., for PICMI Python ``WarpX_MPI`` **ON**/OFF Multi-node support (message-passing) @@ -269,7 +269,7 @@ Environment Variable Default & Values Descr ============================= ============================================ ================================================================ ``WARPX_COMPUTE`` NOACC/**OMP**/CUDA/SYCL/HIP On-node, accelerated computing backend ``WARPX_DIMS`` ``"1;2;3;RZ"`` Simulation dimensionalities (semicolon-separated list) -``WARPX_EB`` ON/**OFF** Embedded boundary support (not supported in RZ yet) +``WARPX_EB`` **ON**/OFF Embedded boundary support (not supported in RZ yet) ``WARPX_MPI`` ON/**OFF** Multi-node support (message-passing) ``WARPX_OPENPMD`` **ON**/OFF openPMD I/O (HDF5, ADIOS) ``WARPX_PRECISION`` SINGLE/**DOUBLE** Floating point precision (single/double) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 980fb1ef2e0..1d9c0c14bbf 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -590,14 +590,15 @@ In WarpX, the embedded boundary can be defined in either of two ways: A function of `x`, `y`, `z` that defines the surface of the embedded boundary. That surface lies where the function value is 0 ; the physics simulation area is where the function value is negative ; - the interior of the embeddded boundary is where the function value is positive. + the interior of the embedded boundary is where the function value is positive. - **From an STL file:** In that case, you will need to set the following parameters in the input file. * ``eb2.stl_file`` (`string`) - The path to an STL file. In addition, you also need to set ``eb2.geom_type = stl``, - in order for the file to be read by WarpX. + The path to an `STL file `__. + In addition, you also need to set ``eb2.geom_type = stl``, in order for the file to be read by WarpX. + `See the AMReX documentation for more details `__. Whether the embedded boundary is defined with an analytical function or an STL file, you can additionally define the electric potential at the embedded boundary with an analytical function: diff --git a/Examples/CMakeLists.txt b/Examples/CMakeLists.txt index f2898b557f4..7ebb1465be4 100644 --- a/Examples/CMakeLists.txt +++ b/Examples/CMakeLists.txt @@ -20,7 +20,6 @@ endif() # name: unique name of this test # dims: 1,2,RZ,3 # nprocs: 1 or 2 (maybe refactor later on to just depend on WarpX_MPI) -# eb: needs EB support? 
(temporary until handled as runtime parameter) # inputs: inputs file or PICMI script, WarpX_MPI decides w/ or w/o MPI # analysis: analysis script, always run without MPI # output: output file(s) to analyze @@ -30,7 +29,6 @@ function(add_warpx_test name dims nprocs - eb inputs analysis output @@ -42,17 +40,6 @@ function(add_warpx_test return() endif() - # cannot run EB tests w/o EB build - if(eb AND NOT WarpX_EB) - message(WARNING "${name}: cannot run EB tests without EB build") - return() - endif() - - # do not run no-EB tests w/ EB build - if(NOT eb AND WarpX_EB) - return() - endif() - # cannot run tests with unsupported geometry if(NOT dims IN_LIST WarpX_DIMS) return() diff --git a/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt b/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt index 793675efaba..0b34eeff865 100644 --- a/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt +++ b/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_beam_beam_collision # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_beam_beam_collision # inputs analysis_default_openpmd_regression.py # analysis diags/diag1/ # output diff --git a/Examples/Physics_applications/capacitive_discharge/CMakeLists.txt b/Examples/Physics_applications/capacitive_discharge/CMakeLists.txt index 4f67131556e..5af1d0a0664 100644 --- a/Examples/Physics_applications/capacitive_discharge/CMakeLists.txt +++ b/Examples/Physics_applications/capacitive_discharge/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_background_mcc_picmi # name 1 # dims 2 # nprocs - OFF # eb "inputs_base_1d_picmi.py --test --pythonsolver" # inputs analysis_1d.py # analysis diags/diag1000050 # output @@ -16,7 +15,6 @@ add_warpx_test( test_1d_dsmc_picmi # name 1 # dims 2 # nprocs - OFF # eb "inputs_base_1d_picmi.py --test --dsmc" # inputs analysis_dsmc.py # analysis diags/diag1000050 # output @@ -27,7 +25,6 @@ add_warpx_test( test_2d_background_mcc # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_background_mcc # inputs analysis_default_regression.py # analysis diags/diag1000050 # output @@ -39,8 +36,7 @@ add_warpx_test( # test_2d_background_mcc_dp_psp # name # 2 # dims # 2 # nprocs -# OFF # eb -# inputs_test_2d_background_mcc_dp_psp # inputs +## inputs_test_2d_background_mcc_dp_psp # inputs # analysis_default_regression.py # analysis # diags/diag1000050 # output # OFF # dependency @@ -50,7 +46,6 @@ add_warpx_test( test_2d_background_mcc_picmi # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_background_mcc_picmi.py # inputs analysis_2d.py # analysis diags/diag1000050 # output diff --git a/Examples/Physics_applications/laser_acceleration/CMakeLists.txt b/Examples/Physics_applications/laser_acceleration/CMakeLists.txt index 9f4a5f1dc58..1a09a669a6d 100644 --- a/Examples/Physics_applications/laser_acceleration/CMakeLists.txt +++ b/Examples/Physics_applications/laser_acceleration/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_laser_acceleration # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_laser_acceleration # inputs analysis_default_regression.py # analysis diags/diag1000100 # output @@ -16,7 +15,6 @@ add_warpx_test( test_1d_laser_acceleration_fluid # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_laser_acceleration_fluid # inputs analysis_1d_fluid.py # analysis diags/diag1040000 # output @@ -27,7 +25,6 @@ add_warpx_test( test_1d_laser_acceleration_fluid_boosted # name 1 # dims 2 # nprocs - OFF # eb 
inputs_test_1d_laser_acceleration_fluid_boosted # inputs analysis_1d_fluid_boosted.py # analysis diags/diag1000001 # output @@ -38,7 +35,6 @@ add_warpx_test( test_1d_laser_acceleration_picmi # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_laser_acceleration_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000100 # output @@ -49,7 +45,6 @@ add_warpx_test( test_2d_laser_acceleration_boosted # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_laser_acceleration_boosted # inputs analysis_default_regression.py # analysis diags/diag1000002 # output @@ -60,7 +55,6 @@ add_warpx_test( test_2d_laser_acceleration_mr # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_laser_acceleration_mr # inputs analysis_default_regression.py # analysis diags/diag1000200 # output @@ -71,7 +65,6 @@ add_warpx_test( test_2d_laser_acceleration_mr_picmi # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_laser_acceleration_mr_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000200 # output @@ -82,7 +75,6 @@ add_warpx_test( test_2d_refined_injection # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_refined_injection # inputs analysis_refined_injection.py # analysis diags/diag1000200 # output @@ -93,7 +85,6 @@ add_warpx_test( test_3d_laser_acceleration # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_laser_acceleration # inputs analysis_default_openpmd_regression.py # analysis diags/diag1/ # output @@ -104,7 +95,6 @@ add_warpx_test( test_3d_laser_acceleration_picmi # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_laser_acceleration_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000100 # output @@ -115,7 +105,6 @@ add_warpx_test( test_3d_laser_acceleration_single_precision_comms # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_laser_acceleration_single_precision_comms # inputs analysis_default_openpmd_regression.py # analysis diags/diag1/ # output @@ -126,7 +115,6 @@ add_warpx_test( test_rz_laser_acceleration # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_laser_acceleration # inputs analysis_default_regression.py # analysis diags/diag1000010 # output @@ -137,7 +125,6 @@ add_warpx_test( test_rz_laser_acceleration_opmd # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_laser_acceleration_opmd # inputs analysis_openpmd_rz.py # analysis diags/diag1/ # output @@ -148,7 +135,6 @@ add_warpx_test( test_rz_laser_acceleration_picmi # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_laser_acceleration_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000010 # output diff --git a/Examples/Physics_applications/laser_ion/CMakeLists.txt b/Examples/Physics_applications/laser_ion/CMakeLists.txt index ba51e4d1398..f05203de0e8 100644 --- a/Examples/Physics_applications/laser_ion/CMakeLists.txt +++ b/Examples/Physics_applications/laser_ion/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_laser_ion_acc # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_laser_ion_acc # inputs analysis_default_openpmd_regression.py # analysis diags/diag1/ # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_laser_ion_acc_picmi # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_laser_ion_acc_picmi.py # inputs analysis_default_openpmd_regression.py # analysis diags/diag1/ # output diff --git a/Examples/Physics_applications/plasma_acceleration/CMakeLists.txt b/Examples/Physics_applications/plasma_acceleration/CMakeLists.txt index ec3e4b09563..00a0f80b457 100644 --- a/Examples/Physics_applications/plasma_acceleration/CMakeLists.txt +++ 
b/Examples/Physics_applications/plasma_acceleration/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_plasma_acceleration_picmi # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_plasma_acceleration_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1001000 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_plasma_acceleration_boosted # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_plasma_acceleration_boosted # inputs analysis_default_regression.py # analysis diags/diag1000020 # output @@ -27,7 +25,6 @@ add_warpx_test( test_2d_plasma_acceleration_mr # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_plasma_acceleration_mr # inputs analysis_default_regression.py # analysis diags/diag1000400 # output @@ -38,7 +35,6 @@ add_warpx_test( test_2d_plasma_acceleration_mr_momentum_conserving # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_plasma_acceleration_mr_momentum_conserving # inputs analysis_default_regression.py # analysis diags/diag1000400 # output @@ -49,7 +45,6 @@ add_warpx_test( test_3d_plasma_acceleration_boosted # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_plasma_acceleration_boosted # inputs analysis_default_regression.py # analysis diags/diag1000005 # output @@ -60,7 +55,6 @@ add_warpx_test( test_3d_plasma_acceleration_boosted_hybrid # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_plasma_acceleration_boosted_hybrid # inputs analysis_default_regression.py # analysis diags/diag1000025 # output @@ -71,7 +65,6 @@ add_warpx_test( test_3d_plasma_acceleration_mr_picmi # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_plasma_acceleration_mr_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000002 # output @@ -82,7 +75,6 @@ add_warpx_test( test_3d_plasma_acceleration_picmi # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_plasma_acceleration_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000010 # output diff --git a/Examples/Physics_applications/plasma_mirror/CMakeLists.txt b/Examples/Physics_applications/plasma_mirror/CMakeLists.txt index b90e775a4b5..073245f758a 100644 --- a/Examples/Physics_applications/plasma_mirror/CMakeLists.txt +++ b/Examples/Physics_applications/plasma_mirror/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_plasma_mirror # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_plasma_mirror # inputs analysis_default_regression.py # analysis diags/diag1000020 # output diff --git a/Examples/Physics_applications/spacecraft_charging/CMakeLists.txt b/Examples/Physics_applications/spacecraft_charging/CMakeLists.txt index 181304e9193..95349e525cc 100644 --- a/Examples/Physics_applications/spacecraft_charging/CMakeLists.txt +++ b/Examples/Physics_applications/spacecraft_charging/CMakeLists.txt @@ -6,7 +6,6 @@ add_warpx_test( test_rz_spacecraft_charging_picmi # name RZ # dims 2 # nprocs - ON # eb inputs_test_rz_spacecraft_charging_picmi.py # inputs analysis.py # analysis diags/diag1/ # output diff --git a/Examples/Physics_applications/uniform_plasma/CMakeLists.txt b/Examples/Physics_applications/uniform_plasma/CMakeLists.txt index f654dc79063..79dec989c1f 100644 --- a/Examples/Physics_applications/uniform_plasma/CMakeLists.txt +++ b/Examples/Physics_applications/uniform_plasma/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_uniform_plasma # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_uniform_plasma # inputs analysis_default_regression.py # analysis diags/diag1000010 # output @@ -16,7 +15,6 @@ add_warpx_test( test_3d_uniform_plasma # name 3 # dims 2 # nprocs - 
OFF # eb inputs_test_3d_uniform_plasma # inputs analysis_default_regression.py # analysis diags/diag1000010 # output @@ -27,7 +25,6 @@ add_warpx_test( test_3d_uniform_plasma_restart # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_uniform_plasma_restart # inputs analysis_default_restart.py # analysis diags/diag1000010 # output diff --git a/Examples/Tests/accelerator_lattice/CMakeLists.txt b/Examples/Tests/accelerator_lattice/CMakeLists.txt index 7fc6b4dc8e4..f3a28d30d4a 100644 --- a/Examples/Tests/accelerator_lattice/CMakeLists.txt +++ b/Examples/Tests/accelerator_lattice/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_hard_edged_quadrupoles # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_hard_edged_quadrupoles # inputs analysis.py # analysis diags/diag1000050 # output @@ -16,7 +15,6 @@ add_warpx_test( test_3d_hard_edged_quadrupoles_boosted # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_hard_edged_quadrupoles_boosted # inputs analysis.py # analysis diags/diag1000050 # output @@ -27,7 +25,6 @@ add_warpx_test( test_3d_hard_edged_quadrupoles_moving # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_hard_edged_quadrupoles_moving # inputs analysis.py # analysis diags/diag1000050 # output diff --git a/Examples/Tests/boosted_diags/CMakeLists.txt b/Examples/Tests/boosted_diags/CMakeLists.txt index f0a6ceaf397..8deb7f2bee2 100644 --- a/Examples/Tests/boosted_diags/CMakeLists.txt +++ b/Examples/Tests/boosted_diags/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_laser_acceleration_btd # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_laser_acceleration_btd # inputs analysis.py # analysis diags/diag1000003 # output diff --git a/Examples/Tests/boundaries/CMakeLists.txt b/Examples/Tests/boundaries/CMakeLists.txt index 928b4b95071..fccd45e2ebf 100644 --- a/Examples/Tests/boundaries/CMakeLists.txt +++ b/Examples/Tests/boundaries/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_particle_boundaries # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_particle_boundaries # inputs analysis.py # analysis diags/diag1000008 # output diff --git a/Examples/Tests/btd_rz/CMakeLists.txt b/Examples/Tests/btd_rz/CMakeLists.txt index 15a01eb1680..6a85f653c65 100644 --- a/Examples/Tests/btd_rz/CMakeLists.txt +++ b/Examples/Tests/btd_rz/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_rz_btd # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_btd # inputs analysis.py # analysis diags/diag1000289 # output diff --git a/Examples/Tests/collider_relevant_diags/CMakeLists.txt b/Examples/Tests/collider_relevant_diags/CMakeLists.txt index ad999477507..338f66970bc 100644 --- a/Examples/Tests/collider_relevant_diags/CMakeLists.txt +++ b/Examples/Tests/collider_relevant_diags/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_collider_diagnostics # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_collider_diagnostics # inputs analysis.py # analysis diags/diag1000001 # output diff --git a/Examples/Tests/collision/CMakeLists.txt b/Examples/Tests/collision/CMakeLists.txt index 4293ba248e7..36f8a1cb1d6 100644 --- a/Examples/Tests/collision/CMakeLists.txt +++ b/Examples/Tests/collision/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_collision_z # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_collision_z # inputs analysis_collision_1d.py # analysis diags/diag1000600 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_collision_xz # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_collision_xz # inputs analysis_collision_2d.py # analysis diags/diag1000150 # output @@ -27,7 
+25,6 @@ add_warpx_test( test_2d_collision_xz_picmi # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_collision_xz_picmi.py # inputs analysis_collision_2d.py # analysis diags/diag1000150 # output @@ -38,7 +35,6 @@ add_warpx_test( test_3d_collision_iso # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_collision_iso # inputs analysis_collision_3d_isotropization.py # analysis diags/diag1000100 # output @@ -49,7 +45,6 @@ add_warpx_test( test_3d_collision_xyz # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_collision_xyz # inputs analysis_collision_3d.py # analysis diags/diag1000150 # output @@ -60,7 +55,6 @@ add_warpx_test( test_rz_collision # name RZ # dims 1 # nprocs - OFF # eb inputs_test_rz_collision # inputs analysis_collision_rz.py # analysis diags/diag1000150 # output diff --git a/Examples/Tests/diff_lumi_diag/CMakeLists.txt b/Examples/Tests/diff_lumi_diag/CMakeLists.txt index 2385a758fb6..1651d74115e 100644 --- a/Examples/Tests/diff_lumi_diag/CMakeLists.txt +++ b/Examples/Tests/diff_lumi_diag/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_diff_lumi_diag # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_diff_lumi_diag # inputs analysis.py # analysis diags/diag1000080 # output diff --git a/Examples/Tests/divb_cleaning/CMakeLists.txt b/Examples/Tests/divb_cleaning/CMakeLists.txt index f0a8162212f..d4aae31472e 100644 --- a/Examples/Tests/divb_cleaning/CMakeLists.txt +++ b/Examples/Tests/divb_cleaning/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_divb_cleaning # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_divb_cleaning # inputs analysis.py # analysis diags/diag1000400 # output diff --git a/Examples/Tests/dive_cleaning/CMakeLists.txt b/Examples/Tests/dive_cleaning/CMakeLists.txt index 1e72305b673..c23c2aef539 100644 --- a/Examples/Tests/dive_cleaning/CMakeLists.txt +++ b/Examples/Tests/dive_cleaning/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_dive_cleaning # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_dive_cleaning # inputs analysis.py # analysis diags/diag1000128 # output @@ -16,7 +15,6 @@ add_warpx_test( test_3d_dive_cleaning # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_dive_cleaning # inputs analysis.py # analysis diags/diag1000128 # output diff --git a/Examples/Tests/electrostatic_dirichlet_bc/CMakeLists.txt b/Examples/Tests/electrostatic_dirichlet_bc/CMakeLists.txt index 93e837d4b59..1325d1a6bf5 100644 --- a/Examples/Tests/electrostatic_dirichlet_bc/CMakeLists.txt +++ b/Examples/Tests/electrostatic_dirichlet_bc/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_dirichlet_bc # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_dirichlet_bc # inputs analysis.py # analysis diags/diag1000100 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_dirichlet_bc_picmi # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_dirichlet_bc_picmi.py # inputs analysis.py # analysis diags/diag1000100 # output diff --git a/Examples/Tests/electrostatic_sphere/CMakeLists.txt b/Examples/Tests/electrostatic_sphere/CMakeLists.txt index e80beb08e97..41a151b7884 100644 --- a/Examples/Tests/electrostatic_sphere/CMakeLists.txt +++ b/Examples/Tests/electrostatic_sphere/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_electrostatic_sphere # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_electrostatic_sphere # inputs analysis_electrostatic_sphere.py # analysis diags/diag1000030 # output @@ -16,7 +15,6 @@ add_warpx_test( test_3d_electrostatic_sphere_lab_frame # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_electrostatic_sphere_lab_frame # 
inputs analysis_electrostatic_sphere.py # analysis diags/diag1000030 # output @@ -27,7 +25,6 @@ add_warpx_test( test_3d_electrostatic_sphere_lab_frame_mr_emass_10 # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_electrostatic_sphere_lab_frame_mr_emass_10 # inputs analysis_electrostatic_sphere.py # analysis diags/diag1000002 # output @@ -38,7 +35,6 @@ add_warpx_test( test_3d_electrostatic_sphere_rel_nodal # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_electrostatic_sphere_rel_nodal # inputs analysis_electrostatic_sphere.py # analysis diags/diag1000030 # output @@ -49,7 +45,6 @@ add_warpx_test( test_rz_electrostatic_sphere # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_electrostatic_sphere # inputs analysis_electrostatic_sphere.py # analysis diags/diag1000030 # output diff --git a/Examples/Tests/electrostatic_sphere_eb/CMakeLists.txt b/Examples/Tests/electrostatic_sphere_eb/CMakeLists.txt index ad5e8974225..7f7b1389119 100644 --- a/Examples/Tests/electrostatic_sphere_eb/CMakeLists.txt +++ b/Examples/Tests/electrostatic_sphere_eb/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_3d_electrostatic_sphere_eb # name 3 # dims 2 # nprocs - ON # eb inputs_test_3d_electrostatic_sphere_eb # inputs analysis.py # analysis diags/diag1000001 # output @@ -19,7 +18,6 @@ if(WarpX_EB) test_3d_electrostatic_sphere_eb_mixed_bc # name 3 # dims 2 # nprocs - ON # eb inputs_test_3d_electrostatic_sphere_eb_mixed_bc # inputs analysis_default_regression.py # analysis diags/diag1000001 # output @@ -32,7 +30,6 @@ if(WarpX_EB) test_3d_electrostatic_sphere_eb_picmi # name 3 # dims 2 # nprocs - ON # eb inputs_test_3d_electrostatic_sphere_eb_picmi.py # inputs analysis.py # analysis diags/diag1000002 # output @@ -45,7 +42,6 @@ if(WarpX_EB) test_rz_electrostatic_sphere_eb # name RZ # dims 2 # nprocs - ON # eb inputs_test_rz_electrostatic_sphere_eb # inputs analysis_rz.py # analysis diags/diag1000001 # output @@ -58,7 +54,6 @@ if(WarpX_EB) test_rz_electrostatic_sphere_eb_mr # name RZ # dims 2 # nprocs - ON # eb inputs_test_rz_electrostatic_sphere_eb_mr # inputs analysis_rz_mr.py # analysis diags/diag1/ # output diff --git a/Examples/Tests/embedded_boundary_cube/CMakeLists.txt b/Examples/Tests/embedded_boundary_cube/CMakeLists.txt index 3fd0a0f4c3b..0044ed04ec8 100644 --- a/Examples/Tests/embedded_boundary_cube/CMakeLists.txt +++ b/Examples/Tests/embedded_boundary_cube/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_2d_embedded_boundary_cube # name 2 # dims 1 # nprocs - ON # eb inputs_test_2d_embedded_boundary_cube # inputs analysis_fields_2d.py # analysis diags/diag1000114 # output @@ -19,7 +18,6 @@ if(WarpX_EB) test_3d_embedded_boundary_cube # name 3 # dims 1 # nprocs - ON # eb inputs_test_3d_embedded_boundary_cube # inputs analysis_fields.py # analysis diags/diag1000208 # output @@ -32,7 +30,6 @@ if(WarpX_EB) test_3d_embedded_boundary_cube_macroscopic # name 3 # dims 1 # nprocs - ON # eb inputs_test_3d_embedded_boundary_cube_macroscopic # inputs analysis_fields.py # analysis diags/diag1000208 # output diff --git a/Examples/Tests/embedded_boundary_diffraction/CMakeLists.txt b/Examples/Tests/embedded_boundary_diffraction/CMakeLists.txt index d91a94b539b..6297cf1fa5c 100644 --- a/Examples/Tests/embedded_boundary_diffraction/CMakeLists.txt +++ b/Examples/Tests/embedded_boundary_diffraction/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_rz_embedded_boundary_diffraction # name RZ # dims 2 # nprocs - ON # eb inputs_test_rz_embedded_boundary_diffraction # inputs analysis_fields.py # analysis diags/diag1/ # output 
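The many CMakeLists.txt hunks above and below all follow one pattern: delete the positional `eb` argument from `add_warpx_test`, since EB support is now compiled in by default and enabled at runtime. As a sketch of what one call site looks like once such a hunk is applied — values taken from the `embedded_boundary_diffraction` hunk just above, with the trailing dependency flag assumed to be `OFF` as in the other tests touched by this patch:

    add_warpx_test(
        test_rz_embedded_boundary_diffraction  # name
        RZ  # dims
        2  # nprocs
        inputs_test_rz_embedded_boundary_diffraction  # inputs
        analysis_fields.py  # analysis
        diags/diag1/  # output
        OFF  # dependency
    )
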
diff --git a/Examples/Tests/embedded_boundary_python_api/CMakeLists.txt b/Examples/Tests/embedded_boundary_python_api/CMakeLists.txt index cf45d9d56f3..fe820c76f22 100644 --- a/Examples/Tests/embedded_boundary_python_api/CMakeLists.txt +++ b/Examples/Tests/embedded_boundary_python_api/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_3d_embedded_boundary_picmi # name 3 # dims 1 # nprocs - ON # eb inputs_test_3d_embedded_boundary_picmi.py # inputs analysis.py # analysis diags/diag1000002 # output diff --git a/Examples/Tests/embedded_boundary_rotated_cube/CMakeLists.txt b/Examples/Tests/embedded_boundary_rotated_cube/CMakeLists.txt index c9d3b47cece..fcfe97905d8 100644 --- a/Examples/Tests/embedded_boundary_rotated_cube/CMakeLists.txt +++ b/Examples/Tests/embedded_boundary_rotated_cube/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_2d_embedded_boundary_rotated_cube # name 2 # dims 1 # nprocs - ON # eb inputs_test_2d_embedded_boundary_rotated_cube # inputs analysis_fields_2d.py # analysis diags/diag1000068 # output @@ -19,7 +18,6 @@ if(WarpX_EB) test_3d_embedded_boundary_rotated_cube # name 3 # dims 1 # nprocs - ON # eb inputs_test_3d_embedded_boundary_rotated_cube # inputs analysis_fields_3d.py # analysis diags/diag1000111 # output diff --git a/Examples/Tests/embedded_circle/CMakeLists.txt b/Examples/Tests/embedded_circle/CMakeLists.txt index 9eb8f23460b..4b9ee426569 100644 --- a/Examples/Tests/embedded_circle/CMakeLists.txt +++ b/Examples/Tests/embedded_circle/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_2d_embedded_circle # name 2 # dims 2 # nprocs - ON # eb inputs_test_2d_embedded_circle # inputs analysis.py # analysis diags/diag1000011 diff --git a/Examples/Tests/energy_conserving_thermal_plasma/CMakeLists.txt b/Examples/Tests/energy_conserving_thermal_plasma/CMakeLists.txt index 13012e7605b..c89d439b75e 100644 --- a/Examples/Tests/energy_conserving_thermal_plasma/CMakeLists.txt +++ b/Examples/Tests/energy_conserving_thermal_plasma/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_energy_conserving_thermal_plasma # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_energy_conserving_thermal_plasma # inputs analysis.py # analysis diags/diag1000500 # output diff --git a/Examples/Tests/field_probe/CMakeLists.txt b/Examples/Tests/field_probe/CMakeLists.txt index 4ef61237775..bbddbd7839e 100644 --- a/Examples/Tests/field_probe/CMakeLists.txt +++ b/Examples/Tests/field_probe/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_2d_field_probe # name 2 # dims 2 # nprocs - ON # eb inputs_test_2d_field_probe # inputs analysis.py # analysis diags/diag1000544 # output diff --git a/Examples/Tests/flux_injection/CMakeLists.txt b/Examples/Tests/flux_injection/CMakeLists.txt index 306ff2018bc..d09b83d7618 100644 --- a/Examples/Tests/flux_injection/CMakeLists.txt +++ b/Examples/Tests/flux_injection/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_flux_injection # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_flux_injection # inputs analysis_flux_injection_3d.py # analysis diags/diag1000002 # output @@ -16,7 +15,6 @@ add_warpx_test( test_rz_flux_injection # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_flux_injection # inputs analysis_flux_injection_rz.py # analysis diags/diag1000120 # output diff --git a/Examples/Tests/gaussian_beam/CMakeLists.txt b/Examples/Tests/gaussian_beam/CMakeLists.txt index 35ec08c10e3..ae0cf57ed15 100644 --- a/Examples/Tests/gaussian_beam/CMakeLists.txt +++ b/Examples/Tests/gaussian_beam/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( 
test_3d_focusing_gaussian_beam # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_focusing_gaussian_beam # inputs analysis.py # analysis diags/diag1000000 # output @@ -16,7 +15,6 @@ add_warpx_test( test_3d_gaussian_beam_picmi # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_gaussian_beam_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000010 # output diff --git a/Examples/Tests/implicit/CMakeLists.txt b/Examples/Tests/implicit/CMakeLists.txt index 11881ae4972..dabd4de66b8 100644 --- a/Examples/Tests/implicit/CMakeLists.txt +++ b/Examples/Tests/implicit/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_semi_implicit_picard # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_semi_implicit_picard # inputs analysis_1d.py # analysis diags/diag1000100 # output @@ -16,7 +15,6 @@ add_warpx_test( test_1d_theta_implicit_picard # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_theta_implicit_picard # inputs analysis_1d.py # analysis diags/diag1000100 # output @@ -27,7 +25,6 @@ add_warpx_test( test_2d_theta_implicit_jfnk_vandb # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_theta_implicit_jfnk_vandb # inputs analysis_vandb_jfnk_2d.py # analysis diags/diag1000020 # output @@ -38,7 +35,6 @@ add_warpx_test( test_2d_theta_implicit_jfnk_vandb_picmi # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_theta_implicit_jfnk_vandb_picmi.py # inputs analysis_vandb_jfnk_2d.py # analysis diags/diag1000020 # output diff --git a/Examples/Tests/initial_distribution/CMakeLists.txt b/Examples/Tests/initial_distribution/CMakeLists.txt index 14dabd7a67c..04af9708cb2 100644 --- a/Examples/Tests/initial_distribution/CMakeLists.txt +++ b/Examples/Tests/initial_distribution/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_initial_distribution # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_initial_distribution # inputs analysis.py # analysis diags/diag1000001 # output diff --git a/Examples/Tests/initial_plasma_profile/CMakeLists.txt b/Examples/Tests/initial_plasma_profile/CMakeLists.txt index fab15e8b97f..eb45e64dfab 100644 --- a/Examples/Tests/initial_plasma_profile/CMakeLists.txt +++ b/Examples/Tests/initial_plasma_profile/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_parabolic_channel_initialization # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_parabolic_channel_initialization # inputs analysis.py # analysis diags/diag1000001 # output diff --git a/Examples/Tests/ion_stopping/CMakeLists.txt b/Examples/Tests/ion_stopping/CMakeLists.txt index 1f203d76fa1..83e15287e18 100644 --- a/Examples/Tests/ion_stopping/CMakeLists.txt +++ b/Examples/Tests/ion_stopping/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_ion_stopping # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_ion_stopping # inputs analysis.py # analysis diags/diag1000010 # output diff --git a/Examples/Tests/ionization/CMakeLists.txt b/Examples/Tests/ionization/CMakeLists.txt index 32da653f301..9154173ac5f 100644 --- a/Examples/Tests/ionization/CMakeLists.txt +++ b/Examples/Tests/ionization/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_ionization_boost # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_ionization_boost # inputs analysis.py # analysis diags/diag1000420 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_ionization_lab # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_ionization_lab # inputs analysis.py # analysis diags/diag1001600 # output @@ -27,7 +25,6 @@ add_warpx_test( test_2d_ionization_picmi # name 2 # dims 2 # nprocs - OFF # eb 
inputs_test_2d_ionization_picmi.py # inputs analysis.py # analysis diags/diag1001600 # output diff --git a/Examples/Tests/langmuir/CMakeLists.txt b/Examples/Tests/langmuir/CMakeLists.txt index 1223a23e4d2..3f44d364276 100644 --- a/Examples/Tests/langmuir/CMakeLists.txt +++ b/Examples/Tests/langmuir/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_langmuir_multi # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_langmuir_multi # inputs analysis_1d.py # analysis diags/diag1000080 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_langmuir_multi_mr # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_mr # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -27,7 +25,6 @@ add_warpx_test( test_2d_langmuir_multi_mr_anisotropic # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_mr_anisotropic # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -38,7 +35,6 @@ add_warpx_test( test_2d_langmuir_multi_mr_momentum_conserving # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_mr_momentum_conserving # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -50,7 +46,6 @@ if(WarpX_FFT) test_2d_langmuir_multi_mr_psatd # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_mr_psatd # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -62,7 +57,6 @@ add_warpx_test( test_2d_langmuir_multi_nodal # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_nodal # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -73,7 +67,6 @@ add_warpx_test( test_2d_langmuir_multi_picmi # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000040 # output @@ -85,7 +78,6 @@ if(WarpX_FFT) test_2d_langmuir_multi_psatd # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_psatd # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -98,7 +90,6 @@ if(WarpX_FFT) test_2d_langmuir_multi_psatd_current_correction # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_psatd_current_correction # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -111,7 +102,6 @@ if(WarpX_FFT) test_2d_langmuir_multi_psatd_current_correction_nodal # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_psatd_current_correction_nodal # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -124,7 +114,6 @@ if(WarpX_FFT) test_2d_langmuir_multi_psatd_momentum_conserving # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_psatd_momentum_conserving # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -137,7 +126,6 @@ if(WarpX_FFT) test_2d_langmuir_multi_psatd_multiJ # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_psatd_multiJ # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -150,7 +138,6 @@ if(WarpX_FFT) test_2d_langmuir_multi_psatd_multiJ_nodal # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_psatd_multiJ_nodal # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -163,7 +150,6 @@ if(WarpX_FFT) test_2d_langmuir_multi_psatd_nodal # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_psatd_nodal # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -176,7 +162,6 @@ if(WarpX_FFT) test_2d_langmuir_multi_psatd_vay_deposition # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_psatd_vay_deposition # inputs analysis_2d.py # analysis diags/diag1000080 # 
output @@ -189,7 +174,6 @@ if(WarpX_FFT) test_2d_langmuir_multi_psatd_vay_deposition_nodal # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_psatd_vay_deposition_nodal # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -202,7 +186,6 @@ if(WarpX_FFT) test_2d_langmuir_multi_psatd_vay_deposition_particle_shape_4 # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_langmuir_multi_psatd_vay_deposition_particle_shape_4 # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -214,7 +197,6 @@ add_warpx_test( test_3d_langmuir_multi # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_langmuir_multi # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -225,7 +207,6 @@ add_warpx_test( test_3d_langmuir_multi_nodal # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_langmuir_multi_nodal # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -236,7 +217,6 @@ add_warpx_test( test_3d_langmuir_multi_picmi # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_langmuir_multi_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000040 # output @@ -248,7 +228,6 @@ if(WarpX_FFT) test_3d_langmuir_multi_psatd # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_langmuir_multi_psatd # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -261,7 +240,6 @@ if(WarpX_FFT) test_3d_langmuir_multi_psatd_current_correction # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_langmuir_multi_psatd_current_correction # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -274,7 +252,6 @@ if(WarpX_FFT) test_3d_langmuir_multi_psatd_current_correction_nodal # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_langmuir_multi_psatd_current_correction_nodal # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -287,7 +264,6 @@ if(WarpX_FFT) test_3d_langmuir_multi_psatd_div_cleaning # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_langmuir_multi_psatd_div_cleaning # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -300,7 +276,6 @@ if(WarpX_FFT) test_3d_langmuir_multi_psatd_momentum_conserving # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_langmuir_multi_psatd_momentum_conserving # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -313,7 +288,6 @@ if(WarpX_FFT) test_3d_langmuir_multi_psatd_multiJ # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_langmuir_multi_psatd_multiJ # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -326,7 +300,6 @@ if(WarpX_FFT) test_3d_langmuir_multi_psatd_multiJ_nodal # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_langmuir_multi_psatd_multiJ_nodal # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -339,7 +312,6 @@ if(WarpX_FFT) test_3d_langmuir_multi_psatd_nodal # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_langmuir_multi_psatd_nodal # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -352,7 +324,6 @@ if(WarpX_FFT) test_3d_langmuir_multi_psatd_vay_deposition # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_langmuir_multi_psatd_vay_deposition # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -365,7 +336,6 @@ if(WarpX_FFT) test_3d_langmuir_multi_psatd_vay_deposition_nodal # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_langmuir_multi_psatd_vay_deposition_nodal # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -377,7 +347,6 @@ add_warpx_test( test_rz_langmuir_multi # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_langmuir_multi # inputs analysis_rz.py # analysis 
diags/diag1000080 # output @@ -388,7 +357,6 @@ add_warpx_test( test_rz_langmuir_multi_picmi # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_langmuir_multi_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000040 # output @@ -400,7 +368,6 @@ if(WarpX_FFT) test_rz_langmuir_multi_psatd # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_langmuir_multi_psatd # inputs analysis_rz.py # analysis diags/diag1000080 # output @@ -413,7 +380,6 @@ if(WarpX_FFT) test_rz_langmuir_multi_psatd_current_correction # name RZ # dims 1 # nprocs - OFF # eb inputs_test_rz_langmuir_multi_psatd_current_correction # inputs analysis_rz.py # analysis diags/diag1000080 # output @@ -426,7 +392,6 @@ if(WarpX_FFT) test_rz_langmuir_multi_psatd_multiJ # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_langmuir_multi_psatd_multiJ # inputs analysis_rz.py # analysis diags/diag1000080 # output diff --git a/Examples/Tests/langmuir_fluids/CMakeLists.txt b/Examples/Tests/langmuir_fluids/CMakeLists.txt index 8f3ab3ebc78..054e9c80d3a 100644 --- a/Examples/Tests/langmuir_fluids/CMakeLists.txt +++ b/Examples/Tests/langmuir_fluids/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_langmuir_fluid # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_langmuir_fluid # inputs analysis_1d.py # analysis diags/diag1000080 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_langmuir_fluid # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_langmuir_fluid # inputs analysis_2d.py # analysis diags/diag1000080 # output @@ -27,7 +25,6 @@ add_warpx_test( test_3d_langmuir_fluid # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_langmuir_fluid # inputs analysis_3d.py # analysis diags/diag1000040 # output @@ -38,7 +35,6 @@ add_warpx_test( test_rz_langmuir_fluid # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_langmuir_fluid # inputs analysis_rz.py # analysis diags/diag1000080 # output diff --git a/Examples/Tests/larmor/CMakeLists.txt b/Examples/Tests/larmor/CMakeLists.txt index 3ddcc394c98..6a3368a4fca 100644 --- a/Examples/Tests/larmor/CMakeLists.txt +++ b/Examples/Tests/larmor/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_larmor # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_larmor # inputs analysis_default_regression.py # analysis diags/diag1000010 # output diff --git a/Examples/Tests/laser_injection/CMakeLists.txt b/Examples/Tests/laser_injection/CMakeLists.txt index 577b8bdcebc..cec027deb70 100644 --- a/Examples/Tests/laser_injection/CMakeLists.txt +++ b/Examples/Tests/laser_injection/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_laser_injection # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_laser_injection # inputs analysis_1d.py # analysis diags/diag1000240 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_laser_injection # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_laser_injection # inputs analysis_2d.py # analysis diags/diag1000240 # output @@ -27,7 +25,6 @@ add_warpx_test( test_3d_laser_injection # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_laser_injection # inputs analysis_3d.py # analysis diags/diag1000020 # output diff --git a/Examples/Tests/laser_injection_from_file/CMakeLists.txt b/Examples/Tests/laser_injection_from_file/CMakeLists.txt index a4f09f6895d..4b4024b9029 100644 --- a/Examples/Tests/laser_injection_from_file/CMakeLists.txt +++ b/Examples/Tests/laser_injection_from_file/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_laser_injection_from_lasy_file_prepare # name 1 # dims 1 # nprocs - OFF # eb 
inputs_test_1d_laser_injection_from_lasy_file_prepare.py # inputs OFF # analysis OFF # output @@ -16,7 +15,6 @@ add_warpx_test( test_1d_laser_injection_from_lasy_file # name 1 # dims 1 # nprocs - OFF # eb inputs_test_1d_laser_injection_from_lasy_file # inputs analysis_1d.py # analysis diags/diag1000251 # output @@ -27,7 +25,6 @@ add_warpx_test( test_1d_laser_injection_from_lasy_file_boost_prepare # name 1 # dims 1 # nprocs - OFF # eb inputs_test_1d_laser_injection_from_lasy_file_boost_prepare.py # inputs OFF # analysis OFF # output @@ -38,7 +35,6 @@ add_warpx_test( test_1d_laser_injection_from_lasy_file_boost # name 1 # dims 1 # nprocs - OFF # eb inputs_test_1d_laser_injection_from_lasy_file_boost # inputs analysis_1d_boost.py # analysis diags/diag1000001 # output @@ -49,7 +45,6 @@ add_warpx_test( test_2d_laser_injection_from_binary_file_prepare # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_laser_injection_from_binary_file_prepare.py # inputs OFF # analysis OFF # output @@ -60,7 +55,6 @@ add_warpx_test( test_2d_laser_injection_from_binary_file # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_laser_injection_from_binary_file # inputs analysis_2d_binary.py # analysis diags/diag1000250 # output @@ -71,7 +65,6 @@ add_warpx_test( test_2d_laser_injection_from_lasy_file_prepare # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_laser_injection_from_lasy_file_prepare.py # inputs OFF # analysis OFF # output @@ -82,7 +75,6 @@ add_warpx_test( test_2d_laser_injection_from_lasy_file # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_laser_injection_from_lasy_file # inputs analysis_2d.py # analysis diags/diag1000251 # output @@ -93,7 +85,6 @@ add_warpx_test( test_3d_laser_injection_from_lasy_file_prepare # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_laser_injection_from_lasy_file_prepare.py # inputs OFF # analysis OFF # output @@ -104,7 +95,6 @@ add_warpx_test( test_3d_laser_injection_from_lasy_file # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_laser_injection_from_lasy_file # inputs analysis_3d.py # analysis diags/diag1000251 # output @@ -115,7 +105,6 @@ add_warpx_test( test_rz_laser_injection_from_lasy_file_prepare # name RZ # dims 1 # nprocs - OFF # eb inputs_test_rz_laser_injection_from_lasy_file_prepare.py # inputs OFF # analysis OFF # output @@ -126,7 +115,6 @@ add_warpx_test( test_rz_laser_injection_from_lasy_file # name RZ # dims 1 # nprocs - OFF # eb inputs_test_rz_laser_injection_from_lasy_file # inputs analysis_rz.py # analysis diags/diag1000252 # output @@ -137,7 +125,6 @@ add_warpx_test( test_rz_laser_injection_from_RZ_lasy_file_prepare # name RZ # dims 1 # nprocs - OFF # eb inputs_test_rz_laser_injection_from_RZ_lasy_file_prepare.py # inputs OFF # analysis OFF # output @@ -148,7 +135,6 @@ add_warpx_test( test_rz_laser_injection_from_RZ_lasy_file # name RZ # dims 1 # nprocs - OFF # eb inputs_test_rz_laser_injection_from_RZ_lasy_file # inputs analysis_from_RZ_file.py # analysis diags/diag1000612 # output diff --git a/Examples/Tests/laser_on_fine/CMakeLists.txt b/Examples/Tests/laser_on_fine/CMakeLists.txt index 794d5e68c66..479374137df 100644 --- a/Examples/Tests/laser_on_fine/CMakeLists.txt +++ b/Examples/Tests/laser_on_fine/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_laser_on_fine # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_laser_on_fine # inputs analysis_default_regression.py # analysis diags/diag1000050 # output diff --git a/Examples/Tests/load_external_field/CMakeLists.txt b/Examples/Tests/load_external_field/CMakeLists.txt index 
93b0a1436be..0713dc877df 100644 --- a/Examples/Tests/load_external_field/CMakeLists.txt +++ b/Examples/Tests/load_external_field/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_load_external_field_grid_picmi # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_load_external_field_grid_picmi.py # inputs analysis_3d.py # analysis diags/diag1000300 # output @@ -16,7 +15,6 @@ add_warpx_test( test_3d_load_external_field_particle_picmi # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_load_external_field_particle_picmi.py # inputs analysis_3d.py # analysis diags/diag1000300 # output @@ -27,7 +25,6 @@ add_warpx_test( test_rz_load_external_field_grid # name RZ # dims 1 # nprocs - OFF # eb inputs_test_rz_load_external_field_grid # inputs analysis_rz.py # analysis diags/diag1000300 # output @@ -38,7 +35,6 @@ add_warpx_test( test_rz_load_external_field_grid_restart # name RZ # dims 1 # nprocs - OFF # eb inputs_test_rz_load_external_field_grid_restart # inputs analysis_default_restart.py # analysis diags/diag1000300 # output @@ -49,7 +45,6 @@ add_warpx_test( test_rz_load_external_field_particles # name RZ # dims 1 # nprocs - OFF # eb inputs_test_rz_load_external_field_particles # inputs analysis_rz.py # analysis diags/diag1000300 # output @@ -60,7 +55,6 @@ add_warpx_test( test_rz_load_external_field_particles_restart # name RZ # dims 1 # nprocs - OFF # eb inputs_test_rz_load_external_field_particles_restart # inputs analysis_default_restart.py # analysis diags/diag1000300 # output diff --git a/Examples/Tests/magnetostatic_eb/CMakeLists.txt b/Examples/Tests/magnetostatic_eb/CMakeLists.txt index db97a6e11c2..3eb2da03136 100644 --- a/Examples/Tests/magnetostatic_eb/CMakeLists.txt +++ b/Examples/Tests/magnetostatic_eb/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_3d_magnetostatic_eb # name 3 # dims 1 # nprocs - ON # eb inputs_test_3d_magnetostatic_eb # inputs analysis_default_regression.py # analysis diags/diag1000001 # output @@ -19,7 +18,6 @@ if(WarpX_EB) test_3d_magnetostatic_eb_picmi # name 3 # dims 1 # nprocs - ON # eb inputs_test_3d_magnetostatic_eb_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000001 # output @@ -32,7 +30,6 @@ if(WarpX_EB) test_rz_magnetostatic_eb_picmi # name RZ # dims 1 # nprocs - ON # eb inputs_test_rz_magnetostatic_eb_picmi.py # inputs analysis_rz.py # analysis diags/diag1000001 # output diff --git a/Examples/Tests/maxwell_hybrid_qed/CMakeLists.txt b/Examples/Tests/maxwell_hybrid_qed/CMakeLists.txt index 9e315b7536d..2c65c0a6ecb 100644 --- a/Examples/Tests/maxwell_hybrid_qed/CMakeLists.txt +++ b/Examples/Tests/maxwell_hybrid_qed/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_FFT) test_2d_maxwell_hybrid_qed_solver # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_maxwell_hybrid_qed_solver # inputs analysis.py # analysis diags/diag1000300 # output diff --git a/Examples/Tests/nci_fdtd_stability/CMakeLists.txt b/Examples/Tests/nci_fdtd_stability/CMakeLists.txt index 73d0f38beec..e58e5bfb58f 100644 --- a/Examples/Tests/nci_fdtd_stability/CMakeLists.txt +++ b/Examples/Tests/nci_fdtd_stability/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_nci_corrector # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_nci_corrector # inputs analysis_ncicorr.py # analysis diags/diag1000600 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_nci_corrector_mr # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_nci_corrector_mr # inputs analysis_ncicorr.py # analysis diags/diag1000600 # output diff --git a/Examples/Tests/nci_psatd_stability/CMakeLists.txt 
b/Examples/Tests/nci_psatd_stability/CMakeLists.txt index 6a27abdc783..ed087fc4190 100644 --- a/Examples/Tests/nci_psatd_stability/CMakeLists.txt +++ b/Examples/Tests/nci_psatd_stability/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_FFT) test_2d_averaged_galilean_psatd # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_averaged_galilean_psatd # inputs analysis_galilean.py # analysis diags/diag1000400 # output @@ -19,7 +18,6 @@ if(WarpX_FFT) test_2d_averaged_galilean_psatd_hybrid # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_averaged_galilean_psatd_hybrid # inputs analysis_galilean.py # analysis diags/diag1000400 # output @@ -32,7 +30,6 @@ if(WarpX_FFT) test_2d_comoving_psatd_hybrid # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_comoving_psatd_hybrid # inputs analysis_default_regression.py # analysis diags/diag1000400 # output @@ -45,7 +42,6 @@ if(WarpX_FFT) test_2d_galilean_psatd # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_galilean_psatd # inputs analysis_galilean.py # analysis diags/diag1000400 # output @@ -58,7 +54,6 @@ if(WarpX_FFT) test_2d_galilean_psatd_current_correction # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_galilean_psatd_current_correction # inputs analysis_galilean.py # analysis diags/diag1000400 # output @@ -71,7 +66,6 @@ if(WarpX_FFT) test_2d_galilean_psatd_current_correction_psb # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_galilean_psatd_current_correction_psb # inputs analysis_galilean.py # analysis diags/diag1000400 # output @@ -84,7 +78,6 @@ if(WarpX_FFT) test_2d_galilean_psatd_hybrid # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_galilean_psatd_hybrid # inputs analysis_default_regression.py # analysis diags/diag1000400 # output @@ -97,7 +90,6 @@ if(WarpX_FFT) test_3d_averaged_galilean_psatd # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_averaged_galilean_psatd # inputs analysis_galilean.py # analysis diags/diag1000160 # output @@ -110,7 +102,6 @@ if(WarpX_FFT) test_3d_averaged_galilean_psatd_hybrid # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_averaged_galilean_psatd_hybrid # inputs analysis_galilean.py # analysis diags/diag1000160 # output @@ -123,7 +114,6 @@ if(WarpX_FFT) test_3d_galilean_psatd # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_galilean_psatd # inputs analysis_galilean.py # analysis diags/diag1000300 # output @@ -136,7 +126,6 @@ if(WarpX_FFT) test_3d_galilean_psatd_current_correction # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_galilean_psatd_current_correction # inputs analysis_galilean.py # analysis diags/diag1000300 # output @@ -149,7 +138,6 @@ if(WarpX_FFT) test_3d_galilean_psatd_current_correction_psb # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_galilean_psatd_current_correction_psb # inputs analysis_galilean.py # analysis diags/diag1000300 # output @@ -162,7 +150,6 @@ if(WarpX_FFT) test_3d_uniform_plasma_multiJ # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_uniform_plasma_multiJ # inputs analysis_multiJ.py # analysis diags/diag1000300 # output @@ -175,7 +162,6 @@ if(WarpX_FFT) test_rz_galilean_psatd # name RZ # dims 1 # nprocs - OFF # eb inputs_test_rz_galilean_psatd # inputs analysis_galilean.py # analysis diags/diag1000400 # output @@ -188,7 +174,6 @@ if(WarpX_FFT) test_rz_galilean_psatd_current_correction # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_galilean_psatd_current_correction # inputs analysis_galilean.py # analysis diags/diag1000400 # output @@ -201,7 +186,6 @@ if(WarpX_FFT) test_rz_galilean_psatd_current_correction_psb # name RZ # dims 1 # nprocs 
- OFF # eb inputs_test_rz_galilean_psatd_current_correction_psb # inputs analysis_galilean.py # analysis diags/diag1000400 # output @@ -214,7 +198,6 @@ if(WarpX_FFT) test_rz_multiJ_psatd # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_multiJ_psatd # inputs analysis_default_regression.py # analysis diags/diag1000050 # output diff --git a/Examples/Tests/nodal_electrostatic/CMakeLists.txt b/Examples/Tests/nodal_electrostatic/CMakeLists.txt index 62627eb576a..915298f15ab 100644 --- a/Examples/Tests/nodal_electrostatic/CMakeLists.txt +++ b/Examples/Tests/nodal_electrostatic/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_nodal_electrostatic_solver # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_nodal_electrostatic_solver # inputs analysis.py # analysis diags/diag1000010 # output diff --git a/Examples/Tests/nuclear_fusion/CMakeLists.txt b/Examples/Tests/nuclear_fusion/CMakeLists.txt index 4ed47607c8d..c3ee8848e59 100644 --- a/Examples/Tests/nuclear_fusion/CMakeLists.txt +++ b/Examples/Tests/nuclear_fusion/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_proton_boron_fusion # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_proton_boron_fusion # inputs analysis_proton_boron_fusion.py # analysis diags/diag1000001 # output @@ -16,7 +15,6 @@ add_warpx_test( test_3d_deuterium_deuterium_fusion # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_deuterium_deuterium_fusion # inputs analysis_two_product_fusion.py # analysis diags/diag1000001 # output @@ -27,7 +25,6 @@ add_warpx_test( test_3d_deuterium_deuterium_fusion_intraspecies # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_deuterium_deuterium_fusion_intraspecies # inputs analysis_deuterium_deuterium_3d_intraspecies.py # analysis diags/diag1000010 # output @@ -38,7 +35,6 @@ add_warpx_test( test_3d_deuterium_tritium_fusion # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_deuterium_tritium_fusion # inputs analysis_two_product_fusion.py # analysis diags/diag1000001 # output @@ -49,7 +45,6 @@ add_warpx_test( test_3d_proton_boron_fusion # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_proton_boron_fusion # inputs analysis_proton_boron_fusion.py # analysis diags/diag1000001 # output @@ -60,7 +55,6 @@ add_warpx_test( test_rz_deuterium_tritium_fusion # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_deuterium_tritium_fusion # inputs analysis_two_product_fusion.py # analysis diags/diag1000001 # output diff --git a/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt b/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt index ce5bed2c587..e689c83a1e4 100644 --- a/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_ohm_solver_em_modes_picmi # name 1 # dims 2 # nprocs - OFF # eb "inputs_test_1d_ohm_solver_em_modes_picmi.py --test --dim 1 --bdir z" # inputs analysis.py # analysis diags/field_diag000250 # output @@ -16,7 +15,6 @@ add_warpx_test( test_rz_ohm_solver_em_modes_picmi # name RZ # dims 2 # nprocs - OFF # eb "inputs_test_rz_ohm_solver_em_modes_picmi.py --test" # inputs analysis_rz.py # analysis diags/diag1000100 # output diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt b/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt index e5017318f19..3b2d0bb794b 100644 --- a/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_ohm_solver_landau_damping_picmi # name 2 # dims 2 # nprocs - OFF # 
eb "inputs_test_2d_ohm_solver_landau_damping_picmi.py --test --dim 2 --temp_ratio 0.1" # inputs analysis.py # analysis diags/diag1000100 # output diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt b/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt index a6c978ba3ef..53a9bbdeada 100644 --- a/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_ohm_solver_ion_beam_picmi # name 1 # dims 2 # nprocs - OFF # eb "inputs_test_1d_ohm_solver_ion_beam_picmi.py --test --dim 1 --resonant" # inputs analysis.py # analysis diags/diag1002500 # output diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/CMakeLists.txt b/Examples/Tests/ohm_solver_magnetic_reconnection/CMakeLists.txt index 849d4c3b2a3..cef47a7d95e 100644 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_ohm_solver_magnetic_reconnection_picmi # name 2 # dims 2 # nprocs - OFF # eb "inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py --test" # inputs analysis.py # analysis diags/diag1000020 # output diff --git a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt index 1f921ae98b2..c5ec4583da1 100644 --- a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt +++ b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_FFT) test_3d_open_bc_poisson_solver # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_open_bc_poisson_solver # inputs analysis.py # analysis diags/diag1000001 # output diff --git a/Examples/Tests/particle_boundary_interaction/CMakeLists.txt b/Examples/Tests/particle_boundary_interaction/CMakeLists.txt index b7517ef9bc4..5bbb34c0d95 100644 --- a/Examples/Tests/particle_boundary_interaction/CMakeLists.txt +++ b/Examples/Tests/particle_boundary_interaction/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_rz_particle_boundary_interaction_picmi # name RZ # dims 2 # nprocs - ON # eb inputs_test_rz_particle_boundary_interaction_picmi.py # inputs analysis.py # analysis diags/diag1/ # output diff --git a/Examples/Tests/particle_boundary_process/CMakeLists.txt b/Examples/Tests/particle_boundary_process/CMakeLists.txt index a674c72abe3..a7081fe9090 100644 --- a/Examples/Tests/particle_boundary_process/CMakeLists.txt +++ b/Examples/Tests/particle_boundary_process/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_particle_reflection_picmi # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_particle_reflection_picmi.py # inputs analysis_reflection.py # analysis diags/diag1000010 # output @@ -17,7 +16,6 @@ if(WarpX_EB) test_3d_particle_absorption # name 3 # dims 2 # nprocs - ON # eb inputs_test_3d_particle_absorption # inputs analysis_absorption.py # analysis diags/diag1000060 # output diff --git a/Examples/Tests/particle_boundary_scrape/CMakeLists.txt b/Examples/Tests/particle_boundary_scrape/CMakeLists.txt index 361f99bfb09..9b303afcc0f 100644 --- a/Examples/Tests/particle_boundary_scrape/CMakeLists.txt +++ b/Examples/Tests/particle_boundary_scrape/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_3d_particle_scrape # name 3 # dims 2 # nprocs - ON # eb inputs_test_3d_particle_scrape # inputs analysis_scrape.py # analysis diags/diag1000060 # output @@ -19,7 +18,6 @@ if(WarpX_EB) test_3d_particle_scrape_picmi # name 3 # dims 2 # nprocs - ON # eb 
inputs_test_3d_particle_scrape_picmi.py # inputs analysis_scrape.py # analysis diags/diag1000060 # output diff --git a/Examples/Tests/particle_data_python/CMakeLists.txt b/Examples/Tests/particle_data_python/CMakeLists.txt index 45bed4e9cf6..e58fe72670a 100644 --- a/Examples/Tests/particle_data_python/CMakeLists.txt +++ b/Examples/Tests/particle_data_python/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_particle_attr_access_picmi # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_particle_attr_access_picmi.py # inputs analysis.py # analysis diags/diag1000010 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_particle_attr_access_unique_picmi # name 2 # dims 2 # nprocs - OFF # eb "inputs_test_2d_particle_attr_access_picmi.py --unique" # inputs analysis.py # analysis diags/diag1000010 # output @@ -27,7 +25,6 @@ add_warpx_test( test_2d_prev_positions_picmi # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_prev_positions_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000010 # output diff --git a/Examples/Tests/particle_fields_diags/CMakeLists.txt b/Examples/Tests/particle_fields_diags/CMakeLists.txt index b35ffe46713..a83818b6966 100644 --- a/Examples/Tests/particle_fields_diags/CMakeLists.txt +++ b/Examples/Tests/particle_fields_diags/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_particle_fields_diags # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_particle_fields_diags # inputs analysis_particle_diags.py # analysis diags/diag1000200 # output @@ -17,8 +16,7 @@ add_warpx_test( # test_3d_particle_fields_diags_single_precision # name # 3 # dims # 2 # nprocs -# OFF # eb -# inputs_test_3d_particle_fields_diags # inputs +## inputs_test_3d_particle_fields_diags # inputs # analysis_particle_diags_single.py # analysis # diags/diag1000200 # output # OFF # dependency diff --git a/Examples/Tests/particle_pusher/CMakeLists.txt b/Examples/Tests/particle_pusher/CMakeLists.txt index 583106014a5..3d8f1496587 100644 --- a/Examples/Tests/particle_pusher/CMakeLists.txt +++ b/Examples/Tests/particle_pusher/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_particle_pusher # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_particle_pusher # inputs analysis.py # analysis diags/diag1010000 # output diff --git a/Examples/Tests/particle_thermal_boundary/CMakeLists.txt b/Examples/Tests/particle_thermal_boundary/CMakeLists.txt index 26478b59c07..eeae6660e02 100644 --- a/Examples/Tests/particle_thermal_boundary/CMakeLists.txt +++ b/Examples/Tests/particle_thermal_boundary/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_particle_thermal_boundary # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_particle_thermal_boundary # inputs analysis.py # analysis diags/diag1002000 # output diff --git a/Examples/Tests/particles_in_pml/CMakeLists.txt b/Examples/Tests/particles_in_pml/CMakeLists.txt index e8f1a13601d..c1782dc4d1f 100644 --- a/Examples/Tests/particles_in_pml/CMakeLists.txt +++ b/Examples/Tests/particles_in_pml/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_particles_in_pml # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_particles_in_pml # inputs analysis_particles_in_pml.py # analysis diags/diag1000180 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_particles_in_pml_mr # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_particles_in_pml_mr # inputs analysis_particles_in_pml.py # analysis diags/diag1000300 # output @@ -27,7 +25,6 @@ add_warpx_test( test_3d_particles_in_pml # name 3 # dims 2 # nprocs - OFF # eb 
inputs_test_3d_particles_in_pml # inputs analysis_particles_in_pml.py # analysis diags/diag1000120 # output @@ -38,7 +35,6 @@ add_warpx_test( test_3d_particles_in_pml_mr # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_particles_in_pml_mr # inputs analysis_particles_in_pml.py # analysis diags/diag1000200 # output diff --git a/Examples/Tests/pass_mpi_communicator/CMakeLists.txt b/Examples/Tests/pass_mpi_communicator/CMakeLists.txt index f68986d363a..ac60636b931 100644 --- a/Examples/Tests/pass_mpi_communicator/CMakeLists.txt +++ b/Examples/Tests/pass_mpi_communicator/CMakeLists.txt @@ -9,7 +9,6 @@ add_warpx_test( test_2d_pass_mpi_comm_picmi # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_pass_mpi_comm_picmi.py # inputs OFF #analysis.py # analysis OFF # output diff --git a/Examples/Tests/pec/CMakeLists.txt b/Examples/Tests/pec/CMakeLists.txt index 69c68ec5329..ec710f7d919 100644 --- a/Examples/Tests/pec/CMakeLists.txt +++ b/Examples/Tests/pec/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_pec_field # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_pec_field # inputs analysis_pec.py # analysis diags/diag1000125 # output @@ -16,7 +15,6 @@ add_warpx_test( test_3d_pec_field_mr # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_pec_field_mr # inputs analysis_pec_mr.py # analysis diags/diag1000125 # output @@ -27,7 +25,6 @@ add_warpx_test( test_3d_pec_particle # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_pec_particle # inputs analysis_default_regression.py # analysis diags/diag1000020 # output diff --git a/Examples/Tests/photon_pusher/CMakeLists.txt b/Examples/Tests/photon_pusher/CMakeLists.txt index 491906e0466..7926d8faeaf 100644 --- a/Examples/Tests/photon_pusher/CMakeLists.txt +++ b/Examples/Tests/photon_pusher/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_photon_pusher # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_photon_pusher # inputs analysis.py # analysis diags/diag1000050 # output diff --git a/Examples/Tests/plasma_lens/CMakeLists.txt b/Examples/Tests/plasma_lens/CMakeLists.txt index cdba552db9e..bc13ae433bc 100644 --- a/Examples/Tests/plasma_lens/CMakeLists.txt +++ b/Examples/Tests/plasma_lens/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_plasma_lens # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_plasma_lens # inputs analysis.py # analysis diags/diag1000084 # output @@ -16,7 +15,6 @@ add_warpx_test( test_3d_plasma_lens_boosted # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_plasma_lens_boosted # inputs analysis.py # analysis diags/diag1000084 # output @@ -27,7 +25,6 @@ add_warpx_test( test_3d_plasma_lens_hard_edged # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_plasma_lens_hard_edged # inputs analysis.py # analysis diags/diag1000084 # output @@ -38,7 +35,6 @@ add_warpx_test( test_3d_plasma_lens_picmi # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_plasma_lens_picmi.py # inputs analysis.py # analysis diags/diag1000084 # output @@ -49,7 +45,6 @@ add_warpx_test( test_3d_plasma_lens_short # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_plasma_lens_short # inputs analysis.py # analysis diags/diag1000084 # output diff --git a/Examples/Tests/pml/CMakeLists.txt b/Examples/Tests/pml/CMakeLists.txt index 92847dfff24..c63412dc763 100644 --- a/Examples/Tests/pml/CMakeLists.txt +++ b/Examples/Tests/pml/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_pml_x_ckc # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_pml_x_ckc # inputs analysis_pml_ckc.py # analysis diags/diag1000300 # output @@ -17,7 +16,6 @@ 
if(WarpX_FFT) test_2d_pml_x_galilean # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_pml_x_galilean # inputs analysis_pml_psatd.py # analysis diags/diag1000300 # output @@ -30,7 +28,6 @@ if(WarpX_FFT) test_2d_pml_x_psatd # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_pml_x_psatd # inputs analysis_pml_psatd.py # analysis diags/diag1000300 # output @@ -43,7 +40,6 @@ if(WarpX_FFT) test_2d_pml_x_psatd_restart # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_pml_x_psatd_restart # inputs analysis_default_restart.py # analysis diags/diag1000300 # output @@ -55,7 +51,6 @@ add_warpx_test( test_2d_pml_x_yee # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_pml_x_yee # inputs analysis_pml_yee.py # analysis diags/diag1000300 # output @@ -66,7 +61,6 @@ add_warpx_test( test_2d_pml_x_yee_restart # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_pml_x_yee_restart # inputs analysis_default_restart.py # analysis diags/diag1000300 # output @@ -78,7 +72,6 @@ if(WarpX_FFT) test_3d_pml_psatd_dive_divb_cleaning # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_pml_psatd_dive_divb_cleaning # inputs analysis_default_regression.py # analysis diags/diag1000100 # output @@ -91,7 +84,6 @@ if(WarpX_FFT) test_rz_pml_psatd # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_pml_psatd # inputs analysis_pml_psatd_rz.py # analysis diags/diag1000500 # output diff --git a/Examples/Tests/point_of_contact_eb/CMakeLists.txt b/Examples/Tests/point_of_contact_eb/CMakeLists.txt index 25bf4b977de..b8d7ba1131f 100644 --- a/Examples/Tests/point_of_contact_eb/CMakeLists.txt +++ b/Examples/Tests/point_of_contact_eb/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_3d_point_of_contact_eb # name 3 # dims 2 # nprocs - ON # eb inputs_test_3d_point_of_contact_eb # inputs analysis.py # analysis diags/diag1/ # output @@ -19,7 +18,6 @@ if(WarpX_EB) test_rz_point_of_contact_eb # name RZ # dims 2 # nprocs - ON # eb inputs_test_rz_point_of_contact_eb # inputs analysis.py # analysis diags/diag1/ # output diff --git a/Examples/Tests/projection_divb_cleaner/CMakeLists.txt b/Examples/Tests/projection_divb_cleaner/CMakeLists.txt index 91dd6bdc592..307ae7656c5 100644 --- a/Examples/Tests/projection_divb_cleaner/CMakeLists.txt +++ b/Examples/Tests/projection_divb_cleaner/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_projection_divb_cleaner_callback_picmi # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_projection_divb_cleaner_callback_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000001 # output @@ -16,7 +15,6 @@ add_warpx_test( test_3d_projection_divb_cleaner_picmi # name 3 # dims 1 # nprocs - OFF # eb inputs_test_3d_projection_divb_cleaner_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000001 # output @@ -27,7 +25,6 @@ add_warpx_test( test_rz_projection_divb_cleaner # name RZ # dims 1 # nprocs - OFF # eb inputs_test_rz_projection_divb_cleaner # inputs analysis.py # analysis diags/diag1000001 # output diff --git a/Examples/Tests/python_wrappers/CMakeLists.txt b/Examples/Tests/python_wrappers/CMakeLists.txt index 83fc6e16f7d..0045a181606 100644 --- a/Examples/Tests/python_wrappers/CMakeLists.txt +++ b/Examples/Tests/python_wrappers/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_FFT) test_2d_python_wrappers_picmi # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_python_wrappers_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000100 # output diff --git a/Examples/Tests/qed/CMakeLists.txt b/Examples/Tests/qed/CMakeLists.txt index 
77690642f07..5dd786f26a1 100644 --- a/Examples/Tests/qed/CMakeLists.txt +++ b/Examples/Tests/qed/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_qed_breit_wheeler # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_qed_breit_wheeler # inputs analysis_breit_wheeler_yt.py # analysis diags/diag1000002 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_qed_breit_wheeler_opmd # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_qed_breit_wheeler_opmd # inputs analysis_breit_wheeler_opmd.py # analysis diags/diag1/ # output @@ -27,7 +25,6 @@ add_warpx_test( test_2d_qed_quantum_sync # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_qed_quantum_sync # inputs analysis_quantum_sync.py # analysis diags/diag1000002 # output @@ -38,7 +35,6 @@ add_warpx_test( test_3d_qed_breit_wheeler # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_qed_breit_wheeler # inputs analysis_breit_wheeler_yt.py # analysis diags/diag1000002 # output @@ -49,7 +45,6 @@ add_warpx_test( test_3d_qed_breit_wheeler_opmd # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_qed_breit_wheeler_opmd # inputs analysis_breit_wheeler_opmd.py # analysis diags/diag1/ # output @@ -60,7 +55,6 @@ add_warpx_test( test_3d_qed_quantum_sync # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_qed_quantum_sync # inputs analysis_quantum_sync.py # analysis diags/diag1000002 # output @@ -71,7 +65,6 @@ add_warpx_test( test_3d_qed_schwinger_1 # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_qed_schwinger_1 # inputs analysis_schwinger.py # analysis diags/diag1000001 # output @@ -82,7 +75,6 @@ add_warpx_test( test_3d_qed_schwinger_2 # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_qed_schwinger_2 # inputs analysis_schwinger.py # analysis diags/diag1000001 # output @@ -93,7 +85,6 @@ add_warpx_test( test_3d_qed_schwinger_3 # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_qed_schwinger_3 # inputs analysis_schwinger.py # analysis diags/diag1000001 # output @@ -104,7 +95,6 @@ add_warpx_test( test_3d_qed_schwinger_4 # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_qed_schwinger_4 # inputs analysis_schwinger.py # analysis diags/diag1000001 # output diff --git a/Examples/Tests/radiation_reaction/CMakeLists.txt b/Examples/Tests/radiation_reaction/CMakeLists.txt index 63814f30f29..8696cf0f9b7 100644 --- a/Examples/Tests/radiation_reaction/CMakeLists.txt +++ b/Examples/Tests/radiation_reaction/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_radiation_reaction # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_radiation_reaction # inputs analysis.py # analysis diags/diag1000064 # output diff --git a/Examples/Tests/reduced_diags/CMakeLists.txt b/Examples/Tests/reduced_diags/CMakeLists.txt index a09d5403270..cd4f6392892 100644 --- a/Examples/Tests/reduced_diags/CMakeLists.txt +++ b/Examples/Tests/reduced_diags/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_reduced_diags # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_reduced_diags # inputs analysis_reduced_diags.py # analysis diags/diag1000200 # output @@ -16,7 +15,6 @@ add_warpx_test( test_3d_reduced_diags_load_balance_costs_heuristic # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_reduced_diags_load_balance_costs_heuristic # inputs analysis_reduced_diags_load_balance_costs.py # analysis diags/diag1000003 # output @@ -27,7 +25,6 @@ add_warpx_test( test_3d_reduced_diags_load_balance_costs_timers # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_reduced_diags_load_balance_costs_timers # inputs analysis_reduced_diags_load_balance_costs.py # analysis 
diags/diag1000003 # output @@ -38,7 +35,6 @@ add_warpx_test( test_3d_reduced_diags_load_balance_costs_timers_picmi # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_reduced_diags_load_balance_costs_timers_picmi.py # inputs analysis_reduced_diags_load_balance_costs.py # analysis diags/diag1000003 # output @@ -50,7 +46,6 @@ if(WarpX_FFT) test_3d_reduced_diags_load_balance_costs_timers_psatd # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_reduced_diags_load_balance_costs_timers_psatd # inputs analysis_reduced_diags_load_balance_costs.py # analysis diags/diag1000003 # output diff --git a/Examples/Tests/relativistic_space_charge_initialization/CMakeLists.txt b/Examples/Tests/relativistic_space_charge_initialization/CMakeLists.txt index 9ee2a63d2d2..d89fb8b31b6 100644 --- a/Examples/Tests/relativistic_space_charge_initialization/CMakeLists.txt +++ b/Examples/Tests/relativistic_space_charge_initialization/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_3d_relativistic_space_charge_initialization # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_relativistic_space_charge_initialization # inputs analysis.py # analysis diags/diag1000001 # output diff --git a/Examples/Tests/repelling_particles/CMakeLists.txt b/Examples/Tests/repelling_particles/CMakeLists.txt index ed662b67332..056f670a860 100644 --- a/Examples/Tests/repelling_particles/CMakeLists.txt +++ b/Examples/Tests/repelling_particles/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_repelling_particles # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_repelling_particles # inputs analysis.py # analysis diags/diag1000200 # output diff --git a/Examples/Tests/resampling/CMakeLists.txt b/Examples/Tests/resampling/CMakeLists.txt index 10d51e0ea47..46e34858014 100644 --- a/Examples/Tests/resampling/CMakeLists.txt +++ b/Examples/Tests/resampling/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_resample_velocity_coincidence_thinning # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_resample_velocity_coincidence_thinning # inputs analysis_default_regression.py # analysis diags/diag1000004 # output @@ -16,7 +15,6 @@ add_warpx_test( test_1d_resample_velocity_coincidence_thinning_cartesian # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_resample_velocity_coincidence_thinning_cartesian # inputs analysis_default_regression.py # analysis diags/diag1000004 # output @@ -27,7 +25,6 @@ add_warpx_test( test_2d_leveling_thinning # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_leveling_thinning # inputs analysis.py # analysis diags/diag1000008 # output diff --git a/Examples/Tests/restart/CMakeLists.txt b/Examples/Tests/restart/CMakeLists.txt index 33770495dc6..bb3e90059c9 100644 --- a/Examples/Tests/restart/CMakeLists.txt +++ b/Examples/Tests/restart/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_id_cpu_read_picmi # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_id_cpu_read_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000010 # output @@ -19,7 +18,6 @@ add_warpx_test( test_2d_runtime_components_picmi # name 2 # dims 1 # nprocs - OFF # eb inputs_test_2d_runtime_components_picmi.py # inputs OFF #analysis_default_regression.py # analysis OFF #diags/diag1000010 # output @@ -33,7 +31,6 @@ add_warpx_test( test_2d_runtime_components_picmi_restart # name 2 # dims 1 # nprocs - OFF # eb "inputs_test_2d_runtime_components_picmi.py amr.restart='../test_2d_runtime_components_picmi/diags/chk000005'" # inputs OFF #analysis_default_restart.py # analysis OFF #diags/diag1000010 # output @@ -44,7 +41,6 @@ 
add_warpx_test( test_3d_acceleration # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_acceleration # inputs analysis_default_regression.py # analysis diags/diag1000010 # output @@ -55,7 +51,6 @@ add_warpx_test( test_3d_acceleration_restart # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_acceleration_restart # inputs analysis_default_restart.py # analysis diags/diag1000010 # output @@ -67,7 +62,6 @@ if(WarpX_FFT) test_3d_acceleration_psatd # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_acceleration_psatd # inputs analysis_default_regression.py # analysis diags/diag1000010 # output @@ -80,7 +74,6 @@ if(WarpX_FFT) test_3d_acceleration_psatd_restart # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_acceleration_psatd_restart # inputs analysis_default_restart.py # analysis diags/diag1000010 # output @@ -93,7 +86,6 @@ if(WarpX_FFT) test_3d_acceleration_psatd_time_avg # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_acceleration_psatd_time_avg # inputs analysis_default_regression.py # analysis diags/diag1000010 # output @@ -106,7 +98,6 @@ if(WarpX_FFT) test_3d_acceleration_psatd_time_avg_restart # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_acceleration_psatd_time_avg_restart # inputs analysis_default_restart.py # analysis diags/diag1000010 # output diff --git a/Examples/Tests/restart_eb/CMakeLists.txt b/Examples/Tests/restart_eb/CMakeLists.txt index 54d1d3ea574..50f808c3e1f 100644 --- a/Examples/Tests/restart_eb/CMakeLists.txt +++ b/Examples/Tests/restart_eb/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_3d_eb_picmi # name 3 # dims 1 # nprocs - ON # eb inputs_test_3d_eb_picmi.py # inputs analysis_default_regression.py # analysis diags/diag1000060 # output @@ -20,8 +19,7 @@ endif() # test_3d_eb_picmi_restart # name # 3 # dims # 1 # nprocs -# ON # eb -# "inputs_test_3d_eb_picmi.py amr.restart='../test_3d_eb_picmi/diags/chk000030'" # inputs +## "inputs_test_3d_eb_picmi.py amr.restart='../test_3d_eb_picmi/diags/chk000030'" # inputs # analysis_default_restart.py # analysis # diags/diag1000060 # output # test_3d_eb_picmi # dependency diff --git a/Examples/Tests/rigid_injection/CMakeLists.txt b/Examples/Tests/rigid_injection/CMakeLists.txt index 210cc86418f..21004c3248c 100644 --- a/Examples/Tests/rigid_injection/CMakeLists.txt +++ b/Examples/Tests/rigid_injection/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_rigid_injection_btd # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_rigid_injection_btd # inputs analysis_rigid_injection_btd.py # analysis diags/diag1000001 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_rigid_injection_lab # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_rigid_injection_lab # inputs analysis_rigid_injection_lab.py # analysis diags/diag1000289 # output diff --git a/Examples/Tests/scraping/CMakeLists.txt b/Examples/Tests/scraping/CMakeLists.txt index 94ec04e35d7..a0fd04b6b3f 100644 --- a/Examples/Tests/scraping/CMakeLists.txt +++ b/Examples/Tests/scraping/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_EB) test_rz_scraping # name RZ # dims 2 # nprocs - ON # eb inputs_test_rz_scraping # inputs analysis_rz.py # analysis diags/diag1000037 # output @@ -19,7 +18,6 @@ if(WarpX_EB) test_rz_scraping_filter # name RZ # dims 2 # nprocs - ON # eb inputs_test_rz_scraping_filter # inputs analysis_rz_filter.py # analysis diags/diag1000037 # output diff --git a/Examples/Tests/silver_mueller/CMakeLists.txt b/Examples/Tests/silver_mueller/CMakeLists.txt index 5b9cd278ef0..7866d23dc1f 100644 --- a/Examples/Tests/silver_mueller/CMakeLists.txt +++ 
b/Examples/Tests/silver_mueller/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_1d_silver_mueller # name 1 # dims 2 # nprocs - OFF # eb inputs_test_1d_silver_mueller # inputs analysis.py # analysis diags/diag1000500 # output @@ -16,7 +15,6 @@ add_warpx_test( test_2d_silver_mueller_x # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_silver_mueller_x # inputs analysis.py # analysis diags/diag1000500 # output @@ -27,7 +25,6 @@ add_warpx_test( test_2d_silver_mueller_z # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_silver_mueller_z # inputs analysis.py # analysis diags/diag1000500 # output @@ -38,7 +35,6 @@ add_warpx_test( test_rz_silver_mueller_z # name RZ # dims 2 # nprocs - OFF # eb inputs_test_rz_silver_mueller_z # inputs analysis.py # analysis diags/diag1000500 # output diff --git a/Examples/Tests/single_particle/CMakeLists.txt b/Examples/Tests/single_particle/CMakeLists.txt index b2719bee681..bee870f0b17 100644 --- a/Examples/Tests/single_particle/CMakeLists.txt +++ b/Examples/Tests/single_particle/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_bilinear_filter # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_bilinear_filter # inputs analysis.py # analysis diags/diag1000001 # output diff --git a/Examples/Tests/space_charge_initialization/CMakeLists.txt b/Examples/Tests/space_charge_initialization/CMakeLists.txt index af07d677775..6ca1f4ad04c 100644 --- a/Examples/Tests/space_charge_initialization/CMakeLists.txt +++ b/Examples/Tests/space_charge_initialization/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_space_charge_initialization # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_space_charge_initialization # inputs analysis.py # analysis diags/diag1000001 # output @@ -16,7 +15,6 @@ add_warpx_test( test_3d_space_charge_initialization # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_space_charge_initialization # inputs analysis.py # analysis diags/diag1000001 # output diff --git a/Examples/Tests/subcycling/CMakeLists.txt b/Examples/Tests/subcycling/CMakeLists.txt index ccea031f5a4..688f54ac01c 100644 --- a/Examples/Tests/subcycling/CMakeLists.txt +++ b/Examples/Tests/subcycling/CMakeLists.txt @@ -5,7 +5,6 @@ add_warpx_test( test_2d_subcycling_mr # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_subcycling_mr # inputs analysis_default_regression.py # analysis diags/diag1000250 # output diff --git a/Examples/Tests/vay_deposition/CMakeLists.txt b/Examples/Tests/vay_deposition/CMakeLists.txt index 9ebe4ec0dba..ce8d51d3c2a 100644 --- a/Examples/Tests/vay_deposition/CMakeLists.txt +++ b/Examples/Tests/vay_deposition/CMakeLists.txt @@ -6,7 +6,6 @@ if(WarpX_FFT) test_2d_vay_deposition # name 2 # dims 2 # nprocs - OFF # eb inputs_test_2d_vay_deposition # inputs analysis.py # analysis diags/diag1000050 # output @@ -19,7 +18,6 @@ if(WarpX_FFT) test_3d_vay_deposition # name 3 # dims 2 # nprocs - OFF # eb inputs_test_3d_vay_deposition # inputs analysis.py # analysis diags/diag1000025 # output diff --git a/Source/BoundaryConditions/PML.H b/Source/BoundaryConditions/PML.H index 8f3ca1da1d7..ba9e2c3ab5d 100644 --- a/Source/BoundaryConditions/PML.H +++ b/Source/BoundaryConditions/PML.H @@ -147,6 +147,7 @@ public: bool do_pml_dive_cleaning, bool do_pml_divb_cleaning, const amrex::IntVect& fill_guards_fields, const amrex::IntVect& fill_guards_current, + bool eb_enabled, int max_guard_EB, amrex::Real v_sigma_sb, amrex::IntVect do_pml_Lo = amrex::IntVect::TheUnitVector(), amrex::IntVect do_pml_Hi = amrex::IntVect::TheUnitVector()); @@ -260,7 +261,7 @@ private: } 
 #ifdef AMREX_USE_EB
-    amrex::EBFArrayBoxFactory const& fieldEBFactory () const noexcept {
+    [[nodiscard]] amrex::EBFArrayBoxFactory const& fieldEBFactory () const noexcept {
         return static_cast<amrex::EBFArrayBoxFactory const&>(*pml_field_factory);
     }
 #endif
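Reviewer note: the pattern this series applies everywhere is to keep the compile-time capability check (AMREX_USE_EB) and layer a runtime decision on top of it. Condensed into a minimal sketch (the function name and body are illustrative, not part of this patch):

    // Two-level gate: compile-time capability plus runtime opt-in.
    void do_eb_sensitive_work (bool eb_enabled)
    {
    #ifndef AMREX_USE_EB
        // Misconfiguration guard: the runtime flag must not be set when EB
        // support was compiled out (same assert as in PML.cpp below).
        WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!eb_enabled,
            "eb_enabled is true but EB support was not compiled in.");
    #endif
    #ifdef AMREX_USE_EB
        if (eb_enabled) {
            // EB-specific path
            return;
        }
    #endif
        // regular (non-EB) path
    }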
diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp
index 340005e9211..f413831c74d 100644
--- a/Source/BoundaryConditions/PML.cpp
+++ b/Source/BoundaryConditions/PML.cpp
@@ -556,6 +556,7 @@ PML::PML (const int lev, const BoxArray& grid_ba,
           const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning,
           const amrex::IntVect& fill_guards_fields,
           const amrex::IntVect& fill_guards_current,
+          bool eb_enabled,
           int max_guard_EB, const amrex::Real v_sigma_sb,
           const amrex::IntVect do_pml_Lo, const amrex::IntVect do_pml_Hi)
     : m_dive_cleaning(do_pml_dive_cleaning),
@@ -565,6 +566,10 @@ PML::PML (const int lev, const BoxArray& grid_ba,
       m_geom(geom),
       m_cgeom(cgeom)
 {
+#ifndef AMREX_USE_EB
+    WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!eb_enabled, "PML: eb_enabled is true but was not compiled in.");
+#endif
+
     // When `do_pml_in_domain` is true, the PML overlap with the last `ncell` of the physical domain or fine patch(es)
     // (instead of extending `ncell` outside of the physical domain or fine patch(es))
     // In order to implement this, we define a new reduced Box Array ensuring that it does not
@@ -673,13 +678,20 @@ PML::PML (const int lev, const BoxArray& grid_ba,
     }
 
 #ifdef AMREX_USE_EB
-    pml_field_factory = amrex::makeEBFabFactory(*geom, ba, dm,
-                                                {max_guard_EB, max_guard_EB, max_guard_EB},
-                                                amrex::EBSupport::full);
-#else
-    amrex::ignore_unused(max_guard_EB);
-    pml_field_factory = std::make_unique<amrex::FArrayBoxFactory>();
+    if (eb_enabled) {
+        pml_field_factory = amrex::makeEBFabFactory(
+            *geom,
+            ba,
+            dm,
+            {max_guard_EB, max_guard_EB, max_guard_EB},
+            amrex::EBSupport::full
+        );
+    } else
 #endif
+    {
+        amrex::ignore_unused(max_guard_EB);
+        pml_field_factory = std::make_unique<amrex::FArrayBoxFactory>();
+    }
 
     // Allocate diagonal components (xx,yy,zz) only with divergence cleaning
     const int ncompe = (m_dive_cleaning) ? 3 : 2;
@@ -707,20 +719,22 @@ PML::PML (const int lev, const BoxArray& grid_ba,
     WarpX::AllocInitMultiFab(pml_j_fp[2], ba_jz, dm, 1, ngb, lev, "pml_j_fp[z]", 0.0_rt);
 
 #ifdef AMREX_USE_EB
-    const amrex::IntVect max_guard_EB_vect = amrex::IntVect(max_guard_EB);
-    WarpX::AllocInitMultiFab(pml_edge_lengths[0], ba_Ex, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[x]", 0.0_rt);
-    WarpX::AllocInitMultiFab(pml_edge_lengths[1], ba_Ey, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[y]", 0.0_rt);
-    WarpX::AllocInitMultiFab(pml_edge_lengths[2], ba_Ez, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[z]", 0.0_rt);
+    if (eb_enabled) {
+        const amrex::IntVect max_guard_EB_vect = amrex::IntVect(max_guard_EB);
+        WarpX::AllocInitMultiFab(pml_edge_lengths[0], ba_Ex, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[x]", 0.0_rt);
+        WarpX::AllocInitMultiFab(pml_edge_lengths[1], ba_Ey, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[y]", 0.0_rt);
+        WarpX::AllocInitMultiFab(pml_edge_lengths[2], ba_Ez, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[z]", 0.0_rt);
 
-    if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee ||
-        WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC ||
-        WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) {
+        if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee ||
+            WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC ||
+            WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) {
 
-        auto const eb_fact = fieldEBFactory();
+            auto const eb_fact = fieldEBFactory();
 
-        WarpX::ComputeEdgeLengths(pml_edge_lengths, eb_fact);
-        WarpX::ScaleEdges(pml_edge_lengths, WarpX::CellSize(lev));
+            WarpX::ComputeEdgeLengths(pml_edge_lengths, eb_fact);
+            WarpX::ScaleEdges(pml_edge_lengths, WarpX::CellSize(lev));
+        }
     }
 #endif
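The factory selection above is the core of the change: with EB compiled in, the runtime flag now picks between an EB-aware and a plain field factory. The same logic extracted into a helper, as a sketch (the helper name and signature are illustrative, the AMReX calls are the ones used in the hunk):

    #include <AMReX_EBFabFactory.H>
    #include <AMReX_FabFactory.H>
    #include <memory>

    // Sketch: choose the FabFactory for field allocation at runtime.
    std::unique_ptr<amrex::FabFactory<amrex::FArrayBox>>
    make_field_factory (bool eb_enabled,
                        amrex::Geometry const& geom,
                        amrex::BoxArray const& ba,
                        amrex::DistributionMapping const& dm,
                        int ngrow)
    {
    #ifdef AMREX_USE_EB
        if (eb_enabled) {
            // EB-aware factory: carries cut-cell metadata in ngrow guard cells.
            return amrex::makeEBFabFactory(geom, ba, dm,
                                           {ngrow, ngrow, ngrow},
                                           amrex::EBSupport::full);
        }
    #endif
        amrex::ignore_unused(geom, ba, dm, ngrow);
        return std::make_unique<amrex::FArrayBoxFactory>();
    }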
diff --git a/Source/BoundaryConditions/WarpXEvolvePML.cpp b/Source/BoundaryConditions/WarpXEvolvePML.cpp
index ec696689373..bbe969052a3 100644
--- a/Source/BoundaryConditions/WarpXEvolvePML.cpp
+++ b/Source/BoundaryConditions/WarpXEvolvePML.cpp
@@ -269,13 +269,16 @@ WarpX::DampJPML (int lev, PatchType patch_type)
         const Real* sigma_star_cumsum_fac_j_z = sigba[mfi].sigma_star_cumsum_fac[1].data();
 #endif
 
-#ifdef AMREX_USE_EB
-        const auto& pml_edge_lenghts = pml[lev]->Get_edge_lengths();
-
-        auto const& pml_lxfab = pml_edge_lenghts[0]->array(mfi);
-        auto const& pml_lyfab = pml_edge_lenghts[1]->array(mfi);
-        auto const& pml_lzfab = pml_edge_lenghts[2]->array(mfi);
-#endif
+        amrex::Array4<amrex::Real> pml_lxfab, pml_lyfab, pml_lzfab;
+        if (m_eb_enabled) {
+            const auto &pml_edge_lengths = pml[lev]->Get_edge_lengths();
+
+            pml_lxfab = pml_edge_lengths[0]->array(mfi);
+            pml_lyfab = pml_edge_lengths[1]->array(mfi);
+            pml_lzfab = pml_edge_lengths[2]->array(mfi);
+        } else {
+            amrex::ignore_unused(pml_lxfab, pml_lyfab, pml_lzfab);
+        }
 
         const Box& tjx = mfi.tilebox( pml_j[0]->ixType().toIntVect() );
         const Box& tjy = mfi.tilebox( pml_j[1]->ixType().toIntVect() );
@@ -301,27 +304,21 @@ WarpX::DampJPML (int lev, PatchType patch_type)
         amrex::ParallelFor( tjx, tjy, tjz,
             [=] AMREX_GPU_DEVICE (int i, int j, int k)
             {
-#ifdef AMREX_USE_EB
-                if(pml_lxfab(i, j, k) <= 0) return;
-#endif
+                if (pml_lxfab && pml_lxfab(i, j, k) <= 0) { return; }
 
                 damp_jx_pml(i, j, k, pml_jxfab, sigma_star_cumsum_fac_j_x,
                             sigma_cumsum_fac_j_y, sigma_cumsum_fac_j_z,
                             xs_lo,y_lo, z_lo);
             },
             [=] AMREX_GPU_DEVICE (int i, int j, int k)
             {
-#ifdef AMREX_USE_EB
-                if(pml_lyfab(i, j, k) <= 0) return;
-#endif
+                if (pml_lyfab && pml_lyfab(i, j, k) <= 0) { return; }
 
                 damp_jy_pml(i, j, k, pml_jyfab, sigma_cumsum_fac_j_x,
                             sigma_star_cumsum_fac_j_y, sigma_cumsum_fac_j_z,
                             x_lo,ys_lo, z_lo);
             },
             [=] AMREX_GPU_DEVICE (int i, int j, int k)
             {
-#ifdef AMREX_USE_EB
-                if(pml_lzfab(i, j, k)<=0) return;
-#endif
+                if (pml_lzfab && pml_lzfab(i, j, k)<=0) { return; }
 
                 damp_jz_pml(i, j, k, pml_jzfab, sigma_cumsum_fac_j_x,
                             sigma_cumsum_fac_j_y, sigma_star_cumsum_fac_j_z,
diff --git a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp
index da1e5fdcc00..3757082ab4d 100644
--- a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp
+++ b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp
@@ -6,6 +6,7 @@
  */
 #include "BoundaryScrapingDiagnostics.H"
 
+#include "EmbeddedBoundary/Enabled.H"
 #include "ComputeDiagFunctors/ComputeDiagFunctor.H"
 #include "Diagnostics/Diagnostics.H"
 #include "Diagnostics/FlushFormats/FlushFormat.H"
@@ -39,11 +40,11 @@ BoundaryScrapingDiagnostics::ReadParameters ()
     // num_buffers corresponds to the number of boundaries
     // (upper/lower domain boundary in each dimension)
-    // + the EB boundary if available
     m_num_buffers = AMREX_SPACEDIM*2;
-#ifdef AMREX_USE_EB
-    m_num_buffers += 1;
-#endif
+
+    // + the EB boundary if available
+    bool const eb_enabled = EB::enabled();
+    if (eb_enabled) { m_num_buffers += 1; }
 
     // Do a few checks
 #ifndef WARPX_USE_OPENPMD
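The DampJPML change above relies on amrex::Array4's boolean conversion: a default-constructed Array4 holds a null pointer and tests false, so one GPU kernel can serve both the EB and non-EB configurations without preprocessor branches. The idiom in isolation, with illustrative names:

    // Sketch: one kernel, EB on or off decided at runtime.
    amrex::Array4<amrex::Real> lx;   // default-constructed: null, tests false
    if (eb_enabled) { lx = edge_lengths[0]->array(mfi); }

    amrex::ParallelFor(box,
        [=] AMREX_GPU_DEVICE (int i, int j, int k)
        {
            // With EB disabled, lx is empty and the guard short-circuits;
            // with EB enabled, fully covered edges (length <= 0) are skipped.
            if (lx && lx(i,j,k) <= 0) { return; }
            // ... regular update ...
        });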
diff --git a/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp b/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp
index 2991831420e..190822ded9c 100644
--- a/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp
+++ b/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp
@@ -8,6 +8,7 @@
 #include "ChargeOnEB.H"
 
 #include "Diagnostics/ReducedDiags/ReducedDiags.H"
+#include "EmbeddedBoundary/Enabled.H"
 #include "FieldSolver/Fields.H"
 #include "Utils/TextMsg.H"
 #include "Utils/WarpXConst.H"
@@ -24,11 +25,13 @@
 #include <AMReX_REAL.H>
 #include <AMReX_RealBox.H>
 
+#include <stdexcept>
 #include <string>
 
 using namespace amrex;
 using namespace warpx::fields;
 
+
 // constructor
 ChargeOnEB::ChargeOnEB (const std::string& rd_name)
 : ReducedDiags{rd_name}
@@ -44,6 +47,10 @@ ChargeOnEB::ChargeOnEB (const std::string& rd_name)
         "ChargeOnEB reduced diagnostics only works when compiling with EB support");
 #endif
 
+    if (!EB::enabled()) {
+        throw std::runtime_error("ChargeOnEB reduced diagnostics only works when EBs are enabled at runtime");
+    }
+
     // resize data array
     m_data.resize(1, 0.0_rt);
 
@@ -87,6 +94,9 @@ void ChargeOnEB::ComputeDiags (const int step)
     // Judge whether the diags should be done
     if (!m_intervals.contains(step+1)) { return; }
+    if (!EB::enabled()) {
+        throw std::runtime_error("ComputeDiags only works when EBs are enabled at runtime");
+    }
 
 #if ((defined WARPX_DIM_3D) && (defined AMREX_USE_EB))
     // get a reference to WarpX instance
     auto & warpx = WarpX::GetInstance();
@@ -132,9 +142,9 @@ void ChargeOnEB::ComputeDiags (const int step)
 
         // Skip boxes that do not intersect with the embedded boundary
         // (i.e. either fully covered or fully regular)
-        amrex::FabType fab_type = eb_flag[mfi].getType(box);
-        if (fab_type == amrex::FabType::regular) continue;
-        if (fab_type == amrex::FabType::covered) continue;
+        const amrex::FabType fab_type = eb_flag[mfi].getType(box);
+        if (fab_type == amrex::FabType::regular) { continue; }
+        if (fab_type == amrex::FabType::covered) { continue; }
 
         // Extract data for electric field
         const amrex::Array4<const amrex::Real> & Ex_arr = Ex.array(mfi);
@@ -153,7 +163,7 @@ void ChargeOnEB::ComputeDiags (const int step)
             [=] AMREX_GPU_DEVICE (int i, int j, int k) {
 
                 // Only cells that are partially covered do contribute to the integral
-                if (eb_flag_arr(i,j,k).isRegular() || eb_flag_arr(i,j,k).isCovered()) return;
+                if (eb_flag_arr(i,j,k).isRegular() || eb_flag_arr(i,j,k).isCovered()) { return; }
 
                 // Find nodal point which is outside of the EB
                 // (eb_normal points towards the *interior* of the EB)
@@ -164,14 +174,14 @@ void ChargeOnEB::ComputeDiags (const int step)
                 // Find cell-centered point which is outside of the EB
                 // (eb_normal points towards the *interior* of the EB)
                 int i_c = i;
-                if ((eb_bnd_normal_arr(i,j,k,0)>0) && (eb_bnd_cent_arr(i,j,k,0)<=0)) i_c -= 1;
-                if ((eb_bnd_normal_arr(i,j,k,0)<0) && (eb_bnd_cent_arr(i,j,k,0)>=0)) i_c += 1;
+                if ((eb_bnd_normal_arr(i,j,k,0)>0) && (eb_bnd_cent_arr(i,j,k,0)<=0)) { i_c -= 1; }
+                if ((eb_bnd_normal_arr(i,j,k,0)<0) && (eb_bnd_cent_arr(i,j,k,0)>=0)) { i_c += 1; }
                 int j_c = j;
-                if ((eb_bnd_normal_arr(i,j,k,1)>0) && (eb_bnd_cent_arr(i,j,k,1)<=0)) j_c -= 1;
-                if ((eb_bnd_normal_arr(i,j,k,1)<0) && (eb_bnd_cent_arr(i,j,k,1)>=0)) j_c += 1;
+                if ((eb_bnd_normal_arr(i,j,k,1)>0) && (eb_bnd_cent_arr(i,j,k,1)<=0)) { j_c -= 1; }
+                if ((eb_bnd_normal_arr(i,j,k,1)<0) && (eb_bnd_cent_arr(i,j,k,1)>=0)) { j_c += 1; }
                 int k_c = k;
-                if ((eb_bnd_normal_arr(i,j,k,2)>0) && (eb_bnd_cent_arr(i,j,k,2)<=0)) k_c -= 1;
-                if ((eb_bnd_normal_arr(i,j,k,2)<0) && (eb_bnd_cent_arr(i,j,k,2)>=0)) k_c += 1;
+                if ((eb_bnd_normal_arr(i,j,k,2)>0) && (eb_bnd_cent_arr(i,j,k,2)<=0)) { k_c -= 1; }
+                if ((eb_bnd_normal_arr(i,j,k,2)<0) && (eb_bnd_cent_arr(i,j,k,2)>=0)) { k_c += 1; }
 
                 // Compute contribution to the surface integral $\int dS \cdot E$)
                 amrex::Real local_integral_contribution = 0;
@@ -182,9 +192,9 @@ void ChargeOnEB::ComputeDiags (const int step)
                 // Add weighting if requested by user
                 if (do_parser_weighting) {
                     // Get the 3D position of the centroid of surface element
-                    amrex::Real x = (i + 0.5_rt + eb_bnd_cent_arr(i,j,k,0))*dx[0] + real_box.lo(0);
-                    amrex::Real y = (j + 0.5_rt + eb_bnd_cent_arr(i,j,k,1))*dx[1] + real_box.lo(1);
-                    amrex::Real z = (k + 0.5_rt + eb_bnd_cent_arr(i,j,k,2))*dx[2] + real_box.lo(2);
+                    const amrex::Real x = (i + 0.5_rt + eb_bnd_cent_arr(i,j,k,0))*dx[0] + real_box.lo(0);
+                    const amrex::Real y = (j + 0.5_rt + eb_bnd_cent_arr(i,j,k,1))*dx[1] + real_box.lo(1);
+                    const amrex::Real z = (k + 0.5_rt + eb_bnd_cent_arr(i,j,k,2))*dx[2] + real_box.lo(2);
                     // Apply weighting
                     local_integral_contribution *= fun_weightingparser(x, y, z);
                 }
diff --git a/Source/Diagnostics/WarpXIO.cpp b/Source/Diagnostics/WarpXIO.cpp
index a17abe04178..4082ff7b3d5 100644
--- a/Source/Diagnostics/WarpXIO.cpp
+++ b/Source/Diagnostics/WarpXIO.cpp
@@ -393,7 +393,7 @@ WarpX::InitFromCheckpoint ()
         }
     }
 
-    InitializeEBGridData(maxLevel());
+    if (m_eb_enabled) { InitializeEBGridData(maxLevel()); }
 
     // Initialize particles
     mypc->AllocData();
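EB::enabled() is the runtime switch behind all of these guards; its implementation lives in EmbeddedBoundary/Enabled.cpp, which this patch adds to the build below but whose body is not part of this diff. A plausible sketch, assuming the decision is inferred from the two EB-related input parameters that do appear elsewhere in this patch (warpx.eb_implicit_function and eb2.geom_type); the actual WarpX logic may differ:

    #include "Enabled.H"

    #include <AMReX_ParmParse.H>

    #include <string>

    namespace EB
    {
        bool enabled ()
        {
            // Sketch only: treat EBs as enabled whenever the user provided
            // any EB-defining input parameter.
            amrex::ParmParse const pp_warpx("warpx");
            amrex::ParmParse const pp_eb2("eb2");

            std::string impf;
            bool const has_implicit_function =
                pp_warpx.query("eb_implicit_function", impf);
            bool const has_geom_type = pp_eb2.contains("geom_type");

            return has_implicit_function || has_geom_type;
        }
    } // namespace EB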
diff --git a/Source/EmbeddedBoundary/CMakeLists.txt b/Source/EmbeddedBoundary/CMakeLists.txt
index a102087f8c9..2fa5e3e602b 100644
--- a/Source/EmbeddedBoundary/CMakeLists.txt
+++ b/Source/EmbeddedBoundary/CMakeLists.txt
@@ -2,6 +2,7 @@ foreach(D IN LISTS WarpX_DIMS)
     warpx_set_suffix_dims(SD ${D})
     target_sources(lib_${SD}
       PRIVATE
+        Enabled.cpp
         WarpXInitEB.cpp
         WarpXFaceExtensions.cpp
         WarpXFaceInfoBox.H
diff --git a/Source/EmbeddedBoundary/DistanceToEB.H b/Source/EmbeddedBoundary/DistanceToEB.H
index 7ee47c1172c..0c13724380c 100644
--- a/Source/EmbeddedBoundary/DistanceToEB.H
+++ b/Source/EmbeddedBoundary/DistanceToEB.H
@@ -14,8 +14,6 @@
 #include <AMReX_REAL.H>
 #include <AMReX_RealVect.H>
 
-#ifdef AMREX_USE_EB
-
 namespace DistanceToEB
 {
 
@@ -28,7 +26,8 @@ amrex::Real dot_product (const amrex::RealVect& a, const amrex::RealVect& b) noe
 AMREX_GPU_HOST_DEVICE AMREX_INLINE
 void normalize (amrex::RealVect& a) noexcept
 {
-    amrex::Real inv_norm = 1.0/std::sqrt(dot_product(a,a));
+    using namespace amrex::literals;
+    amrex::Real const inv_norm = 1.0_rt / std::sqrt(dot_product(a, a));
     AMREX_D_DECL(a[0] *= inv_norm,
                  a[1] *= inv_norm,
                  a[2] *= inv_norm);
@@ -46,6 +45,7 @@ amrex::RealVect interp_normal (int i, int j, int k, const amrex::Real W[AMREX_SP
                                amrex::Array4<const amrex::Real> const& phi,
                                amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> const& dxi) noexcept
 {
+    using namespace amrex::literals;
 #if (defined WARPX_DIM_3D)
     amrex::RealVect normal{0.0, 0.0, 0.0};
 
@@ -53,11 +53,11 @@ amrex::RealVect interp_normal (int i, int j, int k, const amrex::Real W[AMREX_SP
     for (int kk = 0; kk < 2; ++kk) {
         for (int jj=0; jj< 2; ++jj) {
             for (int ii = 0; ii < 2; ++ii) {
-                int icstart = ic + iic;
-                amrex::Real sign = (ii%2)*2. - 1.;
-                int wccomp = static_cast<int>(iic%2);
-                int w1comp = static_cast<int>(jj%2);
-                int w2comp = static_cast<int>(kk%2);
+                int const icstart = ic + iic;
+                amrex::Real const sign = (ii%2)*2._rt - 1._rt;
+                int const wccomp = static_cast<int>(iic%2);
+                int const w1comp = static_cast<int>(jj%2);
+                int const w2comp = static_cast<int>(kk%2);
                 normal[0] += sign * phi(icstart + ii, j + jj, k + kk) * dxi[0] * Wc[0][wccomp] * W[1][w1comp] * W[2][w2comp];
             }
         }
@@ -67,11 +67,11 @@ amrex::RealVect interp_normal (int i, int j, int k, const amrex::Real W[AMREX_SP
     for (int kk = 0; kk < 2; ++kk) {
         for (int ii=0; ii< 2; ++ii) {
             for (int jj = 0; jj < 2; ++jj) {
-                int jcstart = jc + iic;
-                amrex::Real sign = (jj%2)*2. - 1.;
-                int wccomp = static_cast<int>(iic%2);
-                int w1comp = static_cast<int>(ii%2);
-                int w2comp = static_cast<int>(kk%2);
+                int const jcstart = jc + iic;
+                amrex::Real const sign = (jj%2)*2._rt - 1._rt;
+                int const wccomp = static_cast<int>(iic%2);
+                int const w1comp = static_cast<int>(ii%2);
+                int const w2comp = static_cast<int>(kk%2);
                 normal[1] += sign * phi(i + ii, jcstart + jj, k + kk) * dxi[1] * W[0][w1comp] * Wc[1][wccomp] * W[2][w2comp];
             }
         }
@@ -81,11 +81,11 @@ amrex::RealVect interp_normal (int i, int j, int k, const amrex::Real W[AMREX_SP
     for (int jj = 0; jj < 2; ++jj) {
         for (int ii=0; ii< 2; ++ii) {
             for (int kk = 0; kk < 2; ++kk) {
-                int kcstart = kc + iic;
-                amrex::Real sign = (kk%2)*2. - 1.;
-                int wccomp = static_cast<int>(iic%2);
-                int w1comp = static_cast<int>(ii%2);
-                int w2comp = static_cast<int>(jj%2);
+                int const kcstart = kc + iic;
+                amrex::Real const sign = (kk%2)*2._rt - 1._rt;
+                int const wccomp = static_cast<int>(iic%2);
+                int const w1comp = static_cast<int>(ii%2);
+                int const w2comp = static_cast<int>(jj%2);
                 normal[2] += sign * phi(i + ii, j + jj, kcstart + kk) * dxi[2] * W[0][w1comp] * W[1][w2comp] * Wc[2][wccomp];
             }
         }
@@ -97,10 +97,10 @@ amrex::RealVect interp_normal (int i, int j, int k, const amrex::Real W[AMREX_SP
     for (int iic = 0; iic < 2; ++iic) {
         for (int jj=0; jj< 2; ++jj) {
             for (int ii = 0; ii < 2; ++ii) {
-                int icstart = ic + iic;
-                amrex::Real sign = (ii%2)*2. - 1.;
-                int wccomp = static_cast<int>(iic%2);
-                int w1comp = static_cast<int>(jj%2);
+                int const icstart = ic + iic;
+                amrex::Real const sign = (ii%2)*2._rt - 1._rt;
+                int const wccomp = static_cast<int>(iic%2);
+                int const w1comp = static_cast<int>(jj%2);
                 normal[0] += sign * phi(icstart + ii, j + jj, k) * dxi[0] * Wc[0][wccomp] * W[1][w1comp];
             }
         }
@@ -108,10 +108,10 @@ amrex::RealVect interp_normal (int i, int j, int k, const amrex::Real W[AMREX_SP
     for (int iic = 0; iic < 2; ++iic) {
         for (int ii=0; ii< 2; ++ii) {
             for (int jj = 0; jj < 2; ++jj) {
-                int jcstart = jc + iic;
-                amrex::Real sign = (jj%2)*2. - 1.;
-                int wccomp = static_cast<int>(iic%2);
-                int w1comp = static_cast<int>(ii%2);
+                int const jcstart = jc + iic;
+                amrex::Real const sign = (jj%2)*2._rt - 1._rt;
+                int const wccomp = static_cast<int>(iic%2);
+                int const w1comp = static_cast<int>(ii%2);
                 normal[1] += sign * phi(i + ii, jcstart + jj, k) * dxi[1] * W[0][w1comp] * Wc[1][wccomp];
             }
         }
@@ -120,13 +120,11 @@ amrex::RealVect interp_normal (int i, int j, int k, const amrex::Real W[AMREX_SP
 
 #else
     amrex::ignore_unused(i, j, k, ic, jc, kc, W, Wc, phi, dxi);
-    amrex::RealVect normal{0.0, 0.0};
+    amrex::RealVect normal(0.0);
     WARPX_ABORT_WITH_MESSAGE("Error: interp_distance not yet implemented in 1D");
 #endif
     return normal;
 }
 
 }
-
-#endif // AMREX_USE_EB
 #endif // WARPX_DISTANCETOEB_H_
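With the #ifdef wrapper removed, the DistanceToEB helpers are now compiled unconditionally. For context, interp_normal and normalize are meant to be used together; a sketch of a small device helper built on the two functions above (the parameter order follows the ignore_unused list in the 1D branch; the helper name is not part of WarpX, and how W/Wc are computed is shown in ParticleScraper.H below):

    // Sketch: given nodal (i,j,k,W) and cell-centered (ic,jc,kc,Wc)
    // interpolation data for one particle, recover the unit EB surface
    // normal from the nodal level-set phi.
    AMREX_GPU_HOST_DEVICE AMREX_INLINE
    amrex::RealVect eb_unit_normal (int i, int j, int k,
                                    const amrex::Real W[AMREX_SPACEDIM][2],
                                    int ic, int jc, int kc,
                                    const amrex::Real Wc[AMREX_SPACEDIM][2],
                                    amrex::Array4<const amrex::Real> const& phi,
                                    amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> const& dxi)
    {
        amrex::RealVect normal =
            DistanceToEB::interp_normal(i, j, k, W, ic, jc, kc, Wc, phi, dxi);
        DistanceToEB::normalize(normal);   // unit length; orientation follows the sign convention of phi
        return normal;
    }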
diff --git a/Source/EmbeddedBoundary/Enabled.H b/Source/EmbeddedBoundary/Enabled.H
index 90ea5f35101..af01272e262 100644
--- a/Source/EmbeddedBoundary/Enabled.H
+++ b/Source/EmbeddedBoundary/Enabled.H
@@ -7,7 +7,7 @@
 #ifndef WARPX_EB_ENABLED_H_
 #define WARPX_EB_ENABLED_H_
 
-#include <AMReX.H>
+#include <AMReX_Config.H>
 
 namespace EB
 {
diff --git a/Source/EmbeddedBoundary/ParticleScraper.H b/Source/EmbeddedBoundary/ParticleScraper.H
index 1e915b39381..c5d9cc68c60 100644
--- a/Source/EmbeddedBoundary/ParticleScraper.H
+++ b/Source/EmbeddedBoundary/ParticleScraper.H
@@ -176,7 +176,7 @@ scrapeParticlesAtEB (PC& pc, const amrex::Vector<const amrex::MultiFab*>& distan
         [=] AMREX_GPU_DEVICE (const int ip, amrex::RandomEngine const& engine) noexcept
         {
             // skip particles that are already flagged for removal
-            if (!amrex::ParticleIDWrapper{ptd.m_idcpu[ip]}.is_valid()) return;
+            if (!amrex::ParticleIDWrapper{ptd.m_idcpu[ip]}.is_valid()) { return; }
 
             amrex::ParticleReal xp, yp, zp;
             getPosition(ip, xp, yp, zp);
@@ -185,7 +185,7 @@ scrapeParticlesAtEB (PC& pc, const amrex::Vector<const amrex::MultiFab*>& distan
             amrex::Real W[AMREX_SPACEDIM][2];
             ablastr::particles::compute_weights(
                 xp, yp, zp, plo, dxi, i, j, k, W);
-            amrex::Real phi_value = ablastr::particles::interp_field_nodal(i, j, k, W, phi);
+            amrex::Real const phi_value = ablastr::particles::interp_field_nodal(i, j, k, W, phi);
 
             if (phi_value < 0.0)
             {
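scrapeParticlesAtEB runs a user callback for every particle that ends up inside the boundary (phi < 0). A hypothetical invocation that simply invalidates such particles; the callback signature (ptd, ip, engine) is assumed from the kernel above, and in WarpX proper a ParticleBoundaryProcess functor plays this role:

    // Sketch: absorb every particle that crossed the EB surface.
    scrapeParticlesAtEB(pc, amrex::GetVecOfConstPtrs(distance_to_eb),
        [=] AMREX_GPU_DEVICE (auto& ptd, int ip, amrex::RandomEngine const&) noexcept
        {
            // Flag the particle for removal; it is deleted on the next redistribute.
            amrex::ParticleIDWrapper{ptd.m_idcpu[ip]}.make_invalid();
        });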
diff --git a/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp b/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp
index 21c13f23845..717aa26b021 100644
--- a/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp
+++ b/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp
@@ -6,6 +6,7 @@
  */
 
 #include "WarpXFaceInfoBox.H"
+#include "EmbeddedBoundary/Enabled.H"
 
 #include "Utils/TextMsg.H"
 #include "WarpX.H"
@@ -163,43 +164,49 @@ ComputeSStab(const int i, const int j, const int k,
 amrex::Array1D<int, 0, 2>
-WarpX::CountExtFaces() {
+WarpX::CountExtFaces () {
     amrex::Array1D<int, 0, 2> sums{0, 0, 0};
 #ifdef AMREX_USE_EB
+    if (EB::enabled()) {
 #ifndef WARPX_DIM_RZ
 #ifdef WARPX_DIM_XZ
-    // In 2D we change the extrema of the for loop so that we only have the case idim=1
-    for(int idim = 1; idim < AMREX_SPACEDIM; ++idim) {
+        // In 2D we change the extrema of the for loop so that we only have the case idim=1
+        for(int idim = 1; idim < AMREX_SPACEDIM; ++idim) {
 #elif defined(WARPX_DIM_3D)
     for(int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
 #else
-    WARPX_ABORT_WITH_MESSAGE(
-        "CountExtFaces: Only implemented in 2D3V and 3D3V");
+        WARPX_ABORT_WITH_MESSAGE(
+            "CountExtFaces: Only implemented in 2D3V and 3D3V");
 #endif
-        amrex::ReduceOps<amrex::ReduceOpSum> reduce_ops;
-        amrex::ReduceData<int> reduce_data(reduce_ops);
-        for (amrex::MFIter mfi(*m_flag_ext_face[maxLevel()][idim]); mfi.isValid(); ++mfi) {
-            amrex::Box const &box = mfi.validbox();
-            auto const &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi);
-            reduce_ops.eval(box, reduce_data,
-            [=] AMREX_GPU_DEVICE(int i, int j, int k) -> amrex::GpuTuple<int> {
-                return flag_ext_face(i, j, k);
-            });
-        }
+            amrex::ReduceOps<amrex::ReduceOpSum> reduce_ops;
+            amrex::ReduceData<int> reduce_data(reduce_ops);
+            for (amrex::MFIter mfi(*m_flag_ext_face[maxLevel()][idim]); mfi.isValid(); ++mfi) {
+                amrex::Box const &box = mfi.validbox();
+                auto const &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi);
+                reduce_ops.eval(box, reduce_data,
+                    [=] AMREX_GPU_DEVICE(int i, int j, int k) -> amrex::GpuTuple<int> {
+                        return flag_ext_face(i, j, k);
+                    });
+            }
 
-    auto r = reduce_data.value();
-    sums(idim) = amrex::get<0>(r);
-    }
+            auto r = reduce_data.value();
+            sums(idim) = amrex::get<0>(r);
+        }
 
-    amrex::ParallelDescriptor::ReduceIntSum(&(sums(0)), AMREX_SPACEDIM);
+        amrex::ParallelDescriptor::ReduceIntSum(&(sums(0)), AMREX_SPACEDIM);
 #endif
+    }
 #endif
     return sums;
 }
 
 void
-WarpX::ComputeFaceExtensions(){
+WarpX::ComputeFaceExtensions ()
+{
+    if (!EB::enabled()) {
+        throw std::runtime_error("ComputeFaceExtensions only works when EBs are enabled at runtime");
+    }
 #ifdef AMREX_USE_EB
     amrex::Array1D<int, 0, 2> N_ext_faces = CountExtFaces();
     ablastr::warn_manager::WMRecordWarning("Embedded Boundary",
@@ -421,7 +428,11 @@ ComputeNBorrowEightFacesExtension(const amrex::Dim3 cell, const amrex::Real S_ex
 
 void
-WarpX::ComputeOneWayExtensions() {
+WarpX::ComputeOneWayExtensions ()
+{
+    if (!EB::enabled()) {
+        throw std::runtime_error("ComputeOneWayExtensions only works when EBs are enabled at runtime");
+    }
 #ifdef AMREX_USE_EB
 #ifndef WARPX_DIM_RZ
     auto const eb_fact = fieldEBFactory(maxLevel());
@@ -452,7 +463,7 @@ WarpX::ComputeOneWayExtensions() {
             auto &borrowing = (*m_borrowing[maxLevel()][idim])[mfi];
             auto const &borrowing_inds_pointer = borrowing.inds_pointer.array();
             auto const &borrowing_size = borrowing.size.array();
-            amrex::Long ncells = box.numPts();
+            amrex::Long const ncells = box.numPts();
             int* borrowing_inds = borrowing.inds.data();
             FaceInfoBox::Neighbours* borrowing_neigh_faces = borrowing.neigh_faces.data();
             amrex::Real* borrowing_area = borrowing.area.data();
@@ -503,7 +514,7 @@ WarpX::ComputeOneWayExtensions() {
             for (int i_n = -1; i_n < 2; i_n++) {
                 for (int j_n = -1; j_n < 2; j_n++) {
                     //This if makes sure that we don't visit the "diagonal neighbours"
-                    if( !(i_n == j_n || i_n == -j_n)){
+                    if (i_n != j_n && i_n != -j_n){
                         // Here a face is available if it doesn't need to be extended itself and if its
                         // area exceeds Sz_ext. Here we need to take into account if the intruded face
                         // has given away already some area, so we use Sz_red rather than Sz.
@@ -545,8 +556,14 @@ WarpX::ComputeOneWayExtensions() {
 
 void
-WarpX::ComputeEightWaysExtensions() {
+WarpX::ComputeEightWaysExtensions ()
+{
+    if (!EB::enabled()) {
+        throw std::runtime_error("ComputeEightWaysExtensions only works when EBs are enabled at runtime");
+    }
 #ifdef AMREX_USE_EB
+    using namespace amrex::literals;
+
 #ifndef WARPX_DIM_RZ
     auto const &cell_size = CellSize(maxLevel());
 
@@ -574,7 +591,7 @@ WarpX::ComputeEightWaysExtensions() {
             auto &borrowing = (*m_borrowing[maxLevel()][idim])[mfi];
             auto const &borrowing_inds_pointer = borrowing.inds_pointer.array();
             auto const &borrowing_size = borrowing.size.array();
-            amrex::Long ncells = box.numPts();
+            amrex::Long const ncells = box.numPts();
             int* borrowing_inds = borrowing.inds.data();
             FaceInfoBox::Neighbours* borrowing_neigh_faces = borrowing.neigh_faces.data();
             amrex::Real* borrowing_area = borrowing.area.data();
@@ -650,7 +667,7 @@ WarpX::ComputeEightWaysExtensions() {
                 neg_face = false;
                 for (int i_n = -1; i_n < 2; i_n++) {
                     for (int j_n = -1; j_n < 2; j_n++) {
-                        if(local_avail(i_n + 1, j_n + 1)){
+                        if (local_avail(i_n + 1, j_n + 1) != 0_rt){
                             const amrex::Real patch = S_ext * GetNeigh(S, i, j, k, i_n, j_n, idim) / denom;
                             if(GetNeigh(S_mod, i, j, k, i_n, j_n, idim) - patch <= 0) {
                                 neg_face = true;
@@ -675,7 +692,7 @@ WarpX::ComputeEightWaysExtensions() {
                 int count = 0;
                 for (int i_n = -1; i_n < 2; i_n++) {
                     for (int j_n = -1; j_n < 2; j_n++) {
-                        if(local_avail(i_n + 1, j_n + 1)){
+                        if(local_avail(i_n + 1, j_n + 1) != 0_rt){
                             const amrex::Real patch = S_ext * GetNeigh(S, i, j, k, i_n, j_n, idim) / denom;
                             borrowing_inds[ps + count] = ps + count;
                             FaceInfoBox::addConnectedNeighbor(i_n, j_n, ps + count,
@@ -703,7 +720,11 @@ WarpX::ComputeEightWaysExtensions() {
 }
 
 void
-WarpX::ApplyBCKCorrection(const int idim) {
+WarpX::ApplyBCKCorrection (const int idim)
+{
+    if (!EB::enabled()) {
+        throw std::runtime_error("ApplyBCKCorrection only works when EBs are enabled at runtime");
+    }
 #if defined(AMREX_USE_EB) and !defined(WARPX_DIM_RZ)
 
     const std::array<amrex::Real,3> &cell_size = CellSize(maxLevel());
@@ -736,7 +757,8 @@ WarpX::ApplyBCKCorrection(const int idim) {
 }
 
 void
-WarpX::ShrinkBorrowing() {
+WarpX::ShrinkBorrowing ()
+{
     for(int idim = 0; idim < AMREX_SPACEDIM; idim++) {
         for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) {
             auto &borrowing = (*m_borrowing[maxLevel()][idim])[mfi];
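CountExtFaces above is an instance of AMReX's ReduceOps/ReduceData pattern. The same idiom in a self-contained form, as a sketch (the function is hypothetical; it counts cells whose flag value is positive and combines the result across MPI ranks):

    #include <AMReX_MultiFab.H>
    #include <AMReX_ParallelDescriptor.H>
    #include <AMReX_Reduce.H>

    // Sketch: sum an integer predicate over all valid cells of a MultiFab.
    int count_flags (amrex::MultiFab const& mf)
    {
        amrex::ReduceOps<amrex::ReduceOpSum> reduce_ops;
        amrex::ReduceData<int> reduce_data(reduce_ops);
        for (amrex::MFIter mfi(mf); mfi.isValid(); ++mfi) {
            amrex::Box const& box = mfi.validbox();
            auto const& arr = mf.const_array(mfi);
            reduce_ops.eval(box, reduce_data,
                [=] AMREX_GPU_DEVICE (int i, int j, int k) -> amrex::GpuTuple<int> {
                    return static_cast<int>(arr(i,j,k) > 0.0);
                });
        }
        int sum = amrex::get<0>(reduce_data.value());   // local (per-rank) sum
        amrex::ParallelDescriptor::ReduceIntSum(sum);   // global sum
        return sum;
    }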
impf.empty()) { auto eb_if_parser = utils::parser::makeParser(impf, {"x", "y", "z"}); - ParserIF pif(eb_if_parser.compile<3>()); + ParserIF const pif(eb_if_parser.compile<3>()); auto gshop = amrex::EB2::makeShop(pif, eb_if_parser); // The last argument of amrex::EB2::Build is the maximum coarsening level // to which amrex should try to coarsen the EB. It will stop after coarsening @@ -100,13 +106,12 @@ WarpX::InitEB () } else { amrex::ParmParse pp_eb2("eb2"); if (!pp_eb2.contains("geom_type")) { - std::string geom_type = "all_regular"; + std::string const geom_type = "all_regular"; pp_eb2.add("geom_type", geom_type); // use all_regular by default } // See the comment above on amrex::EB2::Build for the hard-wired number 20. amrex::EB2::Build(Geom(maxLevel()), maxLevel(), maxLevel()+20); } - #endif } @@ -124,16 +129,16 @@ WarpX::ComputeEdgeLengths (std::array< std::unique_ptr, 3 >& ed for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi){ #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) for (int idim = 0; idim < 3; ++idim){ - if(idim == 1) continue; + if(idim == 1) { continue; } #elif defined(WARPX_DIM_3D) for (int idim = 0; idim < AMREX_SPACEDIM; ++idim){ #else WARPX_ABORT_WITH_MESSAGE( "ComputeEdgeLengths: Only implemented in 2D3V and 3D3V"); #endif - amrex::Box box = mfi.tilebox(edge_lengths[idim]->ixType().toIntVect(), - edge_lengths[idim]->nGrowVect()); - amrex::FabType fab_type = flags[mfi].getType(box); + amrex::Box const box = mfi.tilebox(edge_lengths[idim]->ixType().toIntVect(), + edge_lengths[idim]->nGrowVect()); + amrex::FabType const fab_type = flags[mfi].getType(box); auto const &edge_lengths_dim = edge_lengths[idim]->array(mfi); if (fab_type == amrex::FabType::regular) { @@ -149,7 +154,7 @@ WarpX::ComputeEdgeLengths (std::array< std::unique_ptr, 3 >& ed } else { #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) int idim_amrex = idim; - if(idim == 2) idim_amrex = 1; + if(idim == 2) { idim_amrex = 1; } auto const &edge_cent = edge_centroid[idim_amrex]->const_array(mfi); #elif defined(WARPX_DIM_3D) auto const &edge_cent = edge_centroid[idim]->const_array(mfi); @@ -207,9 +212,9 @@ WarpX::ComputeFaceAreas (std::array< std::unique_ptr, 3 >& face WARPX_ABORT_WITH_MESSAGE( "ComputeFaceAreas: Only implemented in 2D3V and 3D3V"); #endif - amrex::Box box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), - face_areas[idim]->nGrowVect()); - amrex::FabType fab_type = flags[mfi].getType(box); + amrex::Box const box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), + face_areas[idim]->nGrowVect()); + amrex::FabType const fab_type = flags[mfi].getType(box); auto const &face_areas_dim = face_areas[idim]->array(mfi); if (fab_type == amrex::FabType::regular) { // every cell in box is all regular @@ -247,7 +252,7 @@ WarpX::ScaleEdges (std::array< std::unique_ptr, 3 >& edge_lengt for (amrex::MFIter mfi(*edge_lengths[0]); mfi.isValid(); ++mfi) { #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) for (int idim = 0; idim < 3; ++idim){ - if(idim == 1) continue; + if(idim == 1) { continue; } #elif defined(WARPX_DIM_3D) for (int idim = 0; idim < AMREX_SPACEDIM; ++idim){ #else @@ -397,7 +402,11 @@ WarpX::MarkCells(){ #endif void -WarpX::ComputeDistanceToEB () { +WarpX::ComputeDistanceToEB () +{ + if (!EB::enabled()) { + throw std::runtime_error("ComputeDistanceToEB only works when EBs are enabled at runtime"); + } #ifdef AMREX_USE_EB BL_PROFILE("ComputeDistanceToEB"); const amrex::EB2::IndexSpace& eb_is = amrex::EB2::IndexSpace::top(); diff --git a/Source/Evolve/WarpXEvolve.cpp 
b/Source/Evolve/WarpXEvolve.cpp index 5a2dbdf2f30..bc18d0da75d 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -525,11 +525,12 @@ void WarpX::HandleParticlesAtBoundaries (int step, amrex::Real cur_time, int num } // interact the particles with EB walls (if present) -#ifdef AMREX_USE_EB - mypc->ScrapeParticlesAtEB(amrex::GetVecOfConstPtrs(m_distance_to_eb)); - m_particle_boundary_buffer->gatherParticlesFromEmbeddedBoundaries(*mypc, amrex::GetVecOfConstPtrs(m_distance_to_eb)); - mypc->deleteInvalidParticles(); -#endif + if (m_eb_enabled) { + mypc->ScrapeParticlesAtEB(amrex::GetVecOfConstPtrs(m_distance_to_eb)); + m_particle_boundary_buffer->gatherParticlesFromEmbeddedBoundaries( + *mypc, amrex::GetVecOfConstPtrs(m_distance_to_eb)); + mypc->deleteInvalidParticles(); + } if (sort_intervals.contains(step+1)) { if (verbose) { diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp index 189f2f2bb0a..80110f5eb18 100644 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolver.cpp @@ -289,11 +289,10 @@ WarpX::AddSpaceChargeFieldLabFrame () // Compute the electric field. Note that if an EB is used the electric // field will be calculated in the computePhi call. -#ifndef AMREX_USE_EB - computeE( Efield_fp, phi_fp, beta ); -#else - if ( IsPythonCallbackInstalled("poissonsolver") ) computeE( Efield_fp, phi_fp, beta ); -#endif + if (!m_eb_enabled) { computeE( Efield_fp, phi_fp, beta ); } + else { + if (IsPythonCallbackInstalled("poissonsolver")) { computeE(Efield_fp, phi_fp, beta); } + } // Compute the magnetic field computeB( Bfield_fp, phi_fp, beta ); @@ -323,64 +322,66 @@ WarpX::computePhi (const amrex::Vector >& rho, Real const required_precision, Real absolute_tolerance, int const max_iters, - int const verbosity) const -{ + int const verbosity) const { // create a vector to our fields, sorted by level - amrex::Vector sorted_rho; - amrex::Vector sorted_phi; + amrex::Vector sorted_rho; + amrex::Vector sorted_phi; for (int lev = 0; lev <= finest_level; ++lev) { sorted_rho.emplace_back(rho[lev].get()); sorted_phi.emplace_back(phi[lev].get()); } -#if defined(AMREX_USE_EB) - std::optional post_phi_calculation; - - // EB: use AMReX to directly calculate the electric field since with EB's the - // simple finite difference scheme in WarpX::computeE sometimes fails - if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || - electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) +#ifdef AMREX_USE_EB + // TODO: double check no overhead occurs on "m_eb_enabled == false" + std::optional > eb_farray_box_factory; +#else + std::optional > const eb_farray_box_factory; +#endif + if (m_eb_enabled) { - // TODO: maybe make this a helper function or pass Efield_fp directly - amrex::Vector< - amrex::Array - > e_field; - for (int lev = 0; lev <= finest_level; ++lev) { - e_field.push_back( + // EB: use AMReX to directly calculate the electric field since with EB's the + // simple finite difference scheme in WarpX::computeE sometimes fails + if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || + electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) + { + // TODO: maybe make this a helper function or pass Efield_fp directly + amrex::Vector< + amrex::Array + > e_field; + for (int lev = 0; lev <= finest_level; ++lev) { + e_field.push_back( # if defined(WARPX_DIM_1D_Z) - amrex::Array{ - getFieldPointer(FieldType::Efield_fp, lev, 2) - 
} + amrex::Array{ + getFieldPointer(FieldType::Efield_fp, lev, 2) + } # elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::Array{ - getFieldPointer(FieldType::Efield_fp, lev, 0), - getFieldPointer(FieldType::Efield_fp, lev, 2) - } + amrex::Array{ + getFieldPointer(FieldType::Efield_fp, lev, 0), + getFieldPointer(FieldType::Efield_fp, lev, 2) + } # elif defined(WARPX_DIM_3D) - amrex::Array{ - getFieldPointer(FieldType::Efield_fp, lev, 0), - getFieldPointer(FieldType::Efield_fp, lev, 1), - getFieldPointer(FieldType::Efield_fp, lev, 2) - } + amrex::Array{ + getFieldPointer(FieldType::Efield_fp, lev, 0), + getFieldPointer(FieldType::Efield_fp, lev, 1), + getFieldPointer(FieldType::Efield_fp, lev, 2) + } # endif - ); + ); + } + post_phi_calculation = ElectrostaticSolver::EBCalcEfromPhiPerLevel(e_field); } - post_phi_calculation = ElectrostaticSolver::EBCalcEfromPhiPerLevel(e_field); - } - std::optional > eb_farray_box_factory; - amrex::Vector< - amrex::EBFArrayBoxFactory const * - > factories; - for (int lev = 0; lev <= finest_level; ++lev) { - factories.push_back(&WarpX::fieldEBFactory(lev)); - } - eb_farray_box_factory = factories; -#else - const std::optional post_phi_calculation; - const std::optional > eb_farray_box_factory; +#ifdef AMREX_USE_EB + amrex::Vector< + amrex::EBFArrayBoxFactory const * + > factories; + for (int lev = 0; lev <= finest_level; ++lev) { + factories.push_back(&WarpX::fieldEBFactory(lev)); + } + eb_farray_box_factory = factories; #endif + } bool const is_solver_igf_on_lev0 = WarpX::poisson_solver_id == PoissonSolverAlgo::IntegratedGreenFunction; @@ -399,6 +400,7 @@ WarpX::computePhi (const amrex::Vector >& rho, WarpX::grid_type, this->m_poisson_boundary_handler, is_solver_igf_on_lev0, + m_eb_enabled, WarpX::do_single_precision_comms, this->ref_ratio, post_phi_calculation, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp index fbc1397b413..4fe9fc76e10 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp @@ -87,12 +87,9 @@ void FiniteDifferenceSolver::EvolveB ( } else if (m_fdtd_algo == ElectromagneticSolverAlgo::CKC) { EvolveBCartesian ( Bfield, Efield, Gfield, lev, dt ); -#ifdef AMREX_USE_EB } else if (m_fdtd_algo == ElectromagneticSolverAlgo::ECT) { - EvolveBCartesianECT(Bfield, face_areas, area_mod, ECTRhofield, Venl, flag_info_cell, borrowing, lev, dt); -#endif #endif } else { WARPX_ABORT_WITH_MESSAGE("EvolveB: Unknown algorithm"); @@ -245,9 +242,9 @@ void FiniteDifferenceSolver::EvolveBCartesianECT ( amrex::Array4 const &S = face_areas[idim]->array(mfi); amrex::Array4 const &S_mod = area_mod[idim]->array(mfi); - auto &borrowing_dim = (*borrowing[idim])[mfi]; - auto borrowing_dim_neigh_faces = borrowing_dim.neigh_faces.data(); - auto borrowing_dim_area = borrowing_dim.area.data(); + auto & borrowing_dim = (*borrowing[idim])[mfi]; + auto * borrowing_dim_neigh_faces = borrowing_dim.neigh_faces.data(); + auto * borrowing_dim_area = borrowing_dim.area.data(); auto const &borrowing_inds = (*borrowing[idim])[mfi].inds.data(); auto const &borrowing_size = (*borrowing[idim])[mfi].size.array(); @@ -259,24 +256,23 @@ void FiniteDifferenceSolver::EvolveBCartesianECT ( //Take care of the unstable cells amrex::ParallelFor(tb, [=] AMREX_GPU_DEVICE(int i, int j, int k) { - if (S(i, j, k) <= 0) return; + if (S(i, j, k) <= 0) { return; } - if (!(flag_info_cell_dim(i, j, k) == 0)) - return; + if (!(flag_info_cell_dim(i, j, k) 
== 0)) { return; } Venl_dim(i, j, k) = Rho(i, j, k) * S(i, j, k); amrex::Real rho_enl; // First we compute the rho of the enlarged face for (int offset = 0; offset const& Ffield, int lev, amrex::Real const dt ) { -#ifdef AMREX_USE_EB if (m_fdtd_algo != ElectromagneticSolverAlgo::ECT) { amrex::ignore_unused(face_areas, ECTRhofield); } -#else - amrex::ignore_unused(face_areas, ECTRhofield); -#endif // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) @@ -129,11 +126,12 @@ void FiniteDifferenceSolver::EvolveECartesian ( Array4 const& jy = Jfield[1]->array(mfi); Array4 const& jz = Jfield[2]->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lx = edge_lengths[0]->array(mfi); - amrex::Array4 const& ly = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + amrex::Array4 lx, ly, lz; + if (EB::enabled()) { + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); @@ -152,10 +150,9 @@ void FiniteDifferenceSolver::EvolveECartesian ( amrex::ParallelFor(tex, tey, tez, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries - if (lx(i, j, k) <= 0) return; -#endif + if (lx && lx(i, j, k) <= 0) { return; } + Ex(i, j, k) += c2 * dt * ( - T_Algo::DownwardDz(By, coefs_z, n_coefs_z, i, j, k) + T_Algo::DownwardDy(Bz, coefs_y, n_coefs_y, i, j, k) @@ -163,16 +160,15 @@ void FiniteDifferenceSolver::EvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries #ifdef WARPX_DIM_3D - if (ly(i,j,k) <= 0) return; + if (ly && ly(i,j,k) <= 0) { return; } #elif defined(WARPX_DIM_XZ) //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered amrex::ignore_unused(ly); - if (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0) return; -#endif + if (lx && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0)) { return; } #endif + Ey(i, j, k) += c2 * dt * ( - T_Algo::DownwardDx(Bz, coefs_x, n_coefs_x, i, j, k) + T_Algo::DownwardDz(Bx, coefs_z, n_coefs_z, i, j, k) @@ -180,10 +176,8 @@ void FiniteDifferenceSolver::EvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries - if (lz(i,j,k) <= 0) return; -#endif + if (lz && lz(i,j,k) <= 0) { return; } Ez(i, j, k) += c2 * dt * ( - T_Algo::DownwardDy(Bx, coefs_y, n_coefs_y, i, j, k) + T_Algo::DownwardDx(By, coefs_x, n_coefs_x, i, j, k) @@ -265,10 +259,11 @@ void FiniteDifferenceSolver::EvolveECylindrical ( Array4 const& jt = Jfield[1]->array(mfi); Array4 const& jz = Jfield[2]->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lr = edge_lengths[0]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + amrex::Array4 lr, lz; + if (EB::enabled()) { + lr = edge_lengths[0]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); @@ -292,10 +287,9 @@ void FiniteDifferenceSolver::EvolveECylindrical ( amrex::ParallelFor(ter, tet, tez, [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip field push if this cell is 
fully covered by embedded boundaries - if (lr(i, j, 0) <= 0) return; -#endif + if (lr && lr(i, j, 0) <= 0) { return; } + Real const r = rmin + (i + 0.5_rt)*dr; // r on cell-centered point (Er is cell-centered in r) Er(i, j, 0, 0) += c2 * dt*( - T_Algo::DownwardDz(Bt, coefs_z, n_coefs_z, i, j, 0, 0) @@ -313,11 +307,10 @@ void FiniteDifferenceSolver::EvolveECylindrical ( }, [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries // The Et field is at a node, so we need to check if the node is covered - if (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0) return; -#endif + if (lr && (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0)) { return; } + Real const r = rmin + i*dr; // r on a nodal grid (Et is nodal in r) if (r != 0) { // Off-axis, regular Maxwell equations Et(i, j, 0, 0) += c2 * dt*( @@ -359,10 +352,9 @@ void FiniteDifferenceSolver::EvolveECylindrical ( }, [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries - if (lz(i, j, 0) <= 0) return; -#endif + if (lz && lz(i, j, 0) <= 0) { return; } + Real const r = rmin + i*dr; // r on a nodal grid (Ez is nodal in r) if (r != 0) { // Off-axis, regular Maxwell equations Ez(i, j, 0, 0) += c2 * dt*( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp index 95f899c98e1..8abdab71300 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp @@ -112,7 +112,7 @@ void FiniteDifferenceSolver::EvolveRhoCartesianECT ( amrex::ParallelFor(trhox, trhoy, trhoz, [=] AMREX_GPU_DEVICE(int i, int j, int k) { - if (Sx(i, j, k) <= 0) return; + if (Sx(i, j, k) <= 0) { return; } // If we implement ECT in 1D we will need to take care of this #ifndef differently #ifndef WARPX_DIM_XZ @@ -122,7 +122,7 @@ void FiniteDifferenceSolver::EvolveRhoCartesianECT ( }, [=] AMREX_GPU_DEVICE(int i, int j, int k) { - if (Sy(i, j, k) <= 0) return; + if (Sy(i, j, k) <= 0) { return; } #ifdef WARPX_DIM_XZ Rhoy(i, j, k) = (Ez(i, j, k) * lz(i, j, k) - Ez(i + 1, j, k) * lz(i + 1, j, k) + @@ -136,7 +136,7 @@ void FiniteDifferenceSolver::EvolveRhoCartesianECT ( }, [=] AMREX_GPU_DEVICE(int i, int j, int k) { - if (Sz(i, j, k) <= 0) return; + if (Sz(i, j, k) <= 0) { return; } // If we implement ECT in 1D we will need to take care of this #ifndef differently #ifndef WARPX_DIM_XZ diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp index 93352ce9896..a1ba6e44a8c 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp @@ -16,6 +16,7 @@ #else # include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H" #endif +#include "EmbeddedBoundary/Enabled.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" @@ -109,11 +110,12 @@ void FiniteDifferenceSolver::EvolveEPMLCartesian ( Array4 const& By = Bfield[1]->array(mfi); Array4 const& Bz = Bfield[2]->array(mfi); -#ifdef AMREX_USE_EB - Array4 const& lx = edge_lengths[0]->array(mfi); - Array4 const& ly = edge_lengths[1]->array(mfi); - Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + amrex::Array4 lx, ly, lz; + if (EB::enabled()) { + lx = 
edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); @@ -132,9 +134,7 @@ void FiniteDifferenceSolver::EvolveEPMLCartesian ( amrex::ParallelFor(tex, tey, tez, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB - if(lx(i, j, k) <= 0) return; -#endif + if (lx && lx(i, j, k) <= 0) { return; } Ex(i, j, k, PMLComp::xz) -= c2 * dt * ( T_Algo::DownwardDz(By, coefs_z, n_coefs_z, i, j, k, PMLComp::yx) @@ -145,16 +145,15 @@ void FiniteDifferenceSolver::EvolveEPMLCartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries #ifdef WARPX_DIM_3D - if (ly(i,j,k) <= 0) return; + if (ly && ly(i,j,k) <= 0) { return; } #elif defined(WARPX_DIM_XZ) //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered amrex::ignore_unused(ly); - if (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0) return; -#endif + if (lx && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0)) { return; } #endif + Ey(i, j, k, PMLComp::yx) -= c2 * dt * ( T_Algo::DownwardDx(Bz, coefs_x, n_coefs_x, i, j, k, PMLComp::zx) + T_Algo::DownwardDx(Bz, coefs_x, n_coefs_x, i, j, k, PMLComp::zy) ); @@ -164,9 +163,7 @@ void FiniteDifferenceSolver::EvolveEPMLCartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB - if(lz(i, j, k) <= 0) return; -#endif + if (lz && lz(i, j, k) <= 0) { return; } Ez(i, j, k, PMLComp::zy) -= c2 * dt * ( T_Algo::DownwardDy(Bx, coefs_y, n_coefs_y, i, j, k, PMLComp::xy) @@ -244,13 +241,7 @@ void FiniteDifferenceSolver::EvolveEPMLCartesian ( } ); } - - } - -#ifndef AMREX_USE_EB - amrex::ignore_unused(edge_lengths); -#endif - + } // MFIter } -#endif // corresponds to ifndef WARPX_DIM_RZ +#endif // ifndef WARPX_DIM_RZ diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 6a72bb3569c..652dbfe7cdd 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -9,6 +9,7 @@ #include "HybridPICModel.H" +#include "EmbeddedBoundary/Enabled.H" #include "FieldSolver/Fields.H" #include "WarpX.H" @@ -226,23 +227,23 @@ void HybridPICModel::InitData () // Initialize external current - note that this approach skips the check // if the current is time dependent which is what needs to be done to // write time independent fields on the first step. 
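// ---------------------------------------------------------------------------
// Editor's sketch: the EvolveE/EvolveEPML hunks above replace "#ifdef
// AMREX_USE_EB" captures with default-constructed amrex::Array4 views that
// are bound only when EB::enabled() is true. A default-constructed Array4
// wraps a null pointer and converts to false, so the covered-cell test on
// device costs a single branch. Names (edge_lengths, mfi, tex) follow the
// hunks above; the update body is elided.

amrex::Array4<amrex::Real> lx;            // empty (false) unless EB is active
if (EB::enabled()) {
    lx = edge_lengths[0]->array(mfi);     // bind the edge-length view
}
amrex::ParallelFor(tex,
    [=] AMREX_GPU_DEVICE (int i, int j, int k)
    {
        // Skip the push only when EB data exists and marks the edge covered
        if (lx && lx(i, j, k) <= 0) { return; }
        // ... regular curl update ...
    });
// ---------------------------------------------------------------------------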
- for (int lev = 0; lev <= warpx.finestLevel(); ++lev) - { + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { + auto edge_lengths = std::array, 3>(); #ifdef AMREX_USE_EB - auto& edge_lengths_x = warpx.getField(FieldType::edge_lengths, lev, 0); - auto& edge_lengths_y = warpx.getField(FieldType::edge_lengths, lev, 1); - auto& edge_lengths_z = warpx.getField(FieldType::edge_lengths, lev, 2); - - const auto edge_lengths = std::array< std::unique_ptr, 3 >{ - std::make_unique( - edge_lengths_x, amrex::make_alias, 0, edge_lengths_x.nComp()), - std::make_unique( - edge_lengths_y, amrex::make_alias, 0, edge_lengths_y.nComp()), - std::make_unique( - edge_lengths_z, amrex::make_alias, 0, edge_lengths_z.nComp()) - }; -#else - const auto edge_lengths = std::array< std::unique_ptr, 3 >(); + if (EB::enabled()) { + auto const & edge_lengths_x = warpx.getField(FieldType::edge_lengths, lev, 0); + auto const & edge_lengths_y = warpx.getField(FieldType::edge_lengths, lev, 1); + auto const & edge_lengths_z = warpx.getField(FieldType::edge_lengths, lev, 2); + + edge_lengths = std::array< std::unique_ptr, 3 >{ + std::make_unique( + edge_lengths_x, amrex::make_alias, 0, edge_lengths_x.nComp()), + std::make_unique( + edge_lengths_y, amrex::make_alias, 0, edge_lengths_y.nComp()), + std::make_unique( + edge_lengths_z, amrex::make_alias, 0, edge_lengths_z.nComp()) + }; + } #endif GetCurrentExternal(edge_lengths, lev); } @@ -289,31 +290,26 @@ void HybridPICModel::GetCurrentExternal ( for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) { - const amrex::Box& tbx = mfi.tilebox( x_nodal_flag, mfx->nGrowVect() ); - const amrex::Box& tby = mfi.tilebox( y_nodal_flag, mfy->nGrowVect() ); - const amrex::Box& tbz = mfi.tilebox( z_nodal_flag, mfz->nGrowVect() ); + const amrex::Box& tbx = mfi.tilebox( x_nodal_flag, mfx->nGrowVect() ); + const amrex::Box& tby = mfi.tilebox( y_nodal_flag, mfy->nGrowVect() ); + const amrex::Box& tbz = mfi.tilebox( z_nodal_flag, mfz->nGrowVect() ); - auto const& mfxfab = mfx->array(mfi); - auto const& mfyfab = mfy->array(mfi); - auto const& mfzfab = mfz->array(mfi); + auto const& mfxfab = mfx->array(mfi); + auto const& mfyfab = mfy->array(mfi); + auto const& mfzfab = mfz->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lx = edge_lengths[0]->array(mfi); - amrex::Array4 const& ly = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ignore_unused(ly); -#endif -#else - amrex::ignore_unused(edge_lengths); -#endif + amrex::Array4 lx, ly, lz; + if (EB::enabled()) { + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } amrex::ParallelFor (tbx, tby, tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary -#ifdef AMREX_USE_EB - if (lx(i, j, k) <= 0) return; -#endif + if (lx && lx(i, j, k) <= 0) { return; } + // Shift required in the x-, y-, or z- position // depending on the index type of the multifab #if defined(WARPX_DIM_1D_Z) @@ -340,9 +336,8 @@ void HybridPICModel::GetCurrentExternal ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary -#ifdef AMREX_USE_EB - if (ly(i, j, k) <= 0) return; -#endif + if (ly && ly(i, j, k) <= 0) { return; } + #if defined(WARPX_DIM_1D_Z) const amrex::Real x = 0._rt; const amrex::Real y = 0._rt; @@ -367,9 +362,8 @@ void HybridPICModel::GetCurrentExternal ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip 
if node is covered by an embedded boundary -#ifdef AMREX_USE_EB - if (lz(i, j, k) <= 0) return; -#endif + if (lz && lz(i, j, k) <= 0) { return; } + #if defined(WARPX_DIM_1D_Z) const amrex::Real x = 0._rt; const amrex::Real y = 0._rt; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 456c542a534..5da46f23e54 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -9,6 +9,7 @@ #include "FiniteDifferenceSolver.H" +#include "EmbeddedBoundary/Enabled.H" #ifdef WARPX_DIM_RZ # include "FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H" #else @@ -67,10 +68,6 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // for the profiler amrex::LayoutData* cost = WarpX::getCosts(lev); -#ifndef AMREX_USE_EB - amrex::ignore_unused(edge_lengths); -#endif - // reset Jfield Jfield[0]->setVal(0); Jfield[1]->setVal(0); @@ -95,11 +92,13 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( Array4 const& Bt = Bfield[1]->array(mfi); Array4 const& Bz = Bfield[2]->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lr = edge_lengths[0]->array(mfi); - amrex::Array4 const& lt = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + amrex::Array4 lr, lt, lz; + + if (EB::enabled()) { + lr = edge_lengths[0]->array(mfi); + lt = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); @@ -125,10 +124,8 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // Jr calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries - if (lr(i, j, 0) <= 0) return; -#endif + if (lr && lr(i, j, 0) <= 0) { return; } // Mode m=0 Jr(i, j, 0, 0) = one_over_mu0 * ( - T_Algo::DownwardDz(Bt, coefs_z, n_coefs_z, i, j, 0, 0) @@ -151,11 +148,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // Jt calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // In RZ Jt is associated with a mesh node, so we need to check if the mesh node is covered - amrex::ignore_unused(lt); - if (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0) return; -#endif + if (lr && (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0)) { return; } + // r on a nodal point (Jt is nodal in r) Real const r = rmin + i*dr; // Off-axis, regular curl @@ -199,10 +194,8 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // Jz calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries - if (lz(i, j, 0) <= 0) return; -#endif + if (lz && lz(i, j, 0) <= 0) { return; } // r on a nodal point (Jz is nodal in r) Real const r = rmin + i*dr; // Off-axis, regular curl @@ -258,10 +251,6 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // for the profiler amrex::LayoutData* cost = WarpX::getCosts(lev); -#ifndef AMREX_USE_EB - amrex::ignore_unused(edge_lengths); -#endif - // reset Jfield Jfield[0]->setVal(0); Jfield[1]->setVal(0); @@ -272,25 +261,25 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif for ( MFIter mfi(*Jfield[0], TilingIfNotGPU()); 
mfi.isValid(); ++mfi ) { - if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) - { + if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) { amrex::Gpu::synchronize(); } auto wt = static_cast(amrex::second()); // Extract field data for this grid/tile - Array4 const& Jx = Jfield[0]->array(mfi); - Array4 const& Jy = Jfield[1]->array(mfi); - Array4 const& Jz = Jfield[2]->array(mfi); - Array4 const& Bx = Bfield[0]->const_array(mfi); - Array4 const& By = Bfield[1]->const_array(mfi); - Array4 const& Bz = Bfield[2]->const_array(mfi); - -#ifdef AMREX_USE_EB - amrex::Array4 const& lx = edge_lengths[0]->array(mfi); - amrex::Array4 const& ly = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + Array4 const &Jx = Jfield[0]->array(mfi); + Array4 const &Jy = Jfield[1]->array(mfi); + Array4 const &Jz = Jfield[2]->array(mfi); + Array4 const &Bx = Bfield[0]->const_array(mfi); + Array4 const &By = Bfield[1]->const_array(mfi); + Array4 const &Bz = Bfield[2]->const_array(mfi); + + amrex::Array4 lx, ly, lz; + if (EB::enabled()) { + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); @@ -313,10 +302,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // Jx calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries - if (lx(i, j, k) <= 0) return; -#endif + if (lx && lx(i, j, k) <= 0) { return; } + Jx(i, j, k) = one_over_mu0 * ( - T_Algo::DownwardDz(By, coefs_z, n_coefs_z, i, j, k) + T_Algo::DownwardDy(Bz, coefs_y, n_coefs_y, i, j, k) @@ -325,15 +313,13 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // Jy calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries #ifdef WARPX_DIM_3D - if (ly(i,j,k) <= 0) return; + if (ly && ly(i,j,k) <= 0) { return; } #elif defined(WARPX_DIM_XZ) // In XZ Jy is associated with a mesh node, so we need to check if the mesh node is covered amrex::ignore_unused(ly); - if (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0) return; -#endif + if (lx && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0)) { return; } #endif Jy(i, j, k) = one_over_mu0 * ( - T_Algo::DownwardDx(Bz, coefs_x, n_coefs_x, i, j, k) @@ -343,10 +329,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // Jz calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries - if (lz(i,j,k) <= 0) return; -#endif + if (lz && lz(i,j,k) <= 0) { return; } + Jz(i, j, k) = one_over_mu0 * ( - T_Algo::DownwardDy(Bx, coefs_y, n_coefs_y, i, j, k) + T_Algo::DownwardDx(By, coefs_x, n_coefs_x, i, j, k) @@ -415,10 +400,6 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( int lev, HybridPICModel const* hybrid_model, const bool include_resistivity_term ) { -#ifndef AMREX_USE_EB - amrex::ignore_unused(edge_lengths); -#endif - // Both steps below do not currently support m > 0 and should be // modified if such support wants to be added WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -561,11 +542,12 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& rho = rhofield->const_array(mfi); Array4 const& Pe = 
Pefield->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lr = edge_lengths[0]->array(mfi); - amrex::Array4 const& lt = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + amrex::Array4 lr, lz; + if (EB::enabled()) { + lr = edge_lengths[0]->array(mfi); + // edge_lengths[1] is `lt` and is not needed + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); @@ -586,10 +568,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Er calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries - if (lr(i, j, 0) <= 0) return; -#endif + if (lr && lr(i, j, 0) <= 0) { return; } + // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); @@ -627,11 +608,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Et calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // In RZ Et is associated with a mesh node, so we need to check if the mesh node is covered - amrex::ignore_unused(lt); - if (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0) return; -#endif + if (lr && (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0)) { return; } + // r on a nodal grid (Et is nodal in r) Real const r = rmin + i*dr; // Mode m=0: // Ensure that Et remains 0 on axis @@ -672,10 +651,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Ez calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip field solve if this cell is fully covered by embedded boundaries - if (lz(i,j,0) <= 0) { return; } -#endif + if (lz && lz(i,j,0) <= 0) { return; } + // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, 0, 0); @@ -733,10 +711,6 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( int lev, HybridPICModel const* hybrid_model, const bool include_resistivity_term ) { -#ifndef AMREX_USE_EB - amrex::ignore_unused(edge_lengths); -#endif - // for the profiler amrex::LayoutData* cost = WarpX::getCosts(lev); @@ -873,11 +847,12 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& rho = rhofield->const_array(mfi); Array4 const& Pe = Pefield->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lx = edge_lengths[0]->array(mfi); - amrex::Array4 const& ly = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + amrex::Array4 lx, ly, lz; + if (EB::enabled()) { + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); @@ -896,10 +871,9 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Ex calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries - if (lx(i, j, k) <= 0) return; -#endif + if (lx && lx(i, j, k) <= 0) { return; } + // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ex_stag, coarsen, i, j, k, 0); @@ -933,16 +907,14 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( }, // Ey calculation - [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB + [=] AMREX_GPU_DEVICE (int i, int j, 
int k) { // Skip field solve if this cell is fully covered by embedded boundaries #ifdef WARPX_DIM_3D - if (ly(i,j,k) <= 0) { return; } + if (ly && ly(i,j,k) <= 0) { return; } #elif defined(WARPX_DIM_XZ) //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered amrex::ignore_unused(ly); - if (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0) { return; } -#endif + if (lx && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0)) { return; } #endif // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ey_stag, coarsen, i, j, k, 0); @@ -980,7 +952,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( [=] AMREX_GPU_DEVICE (int i, int j, int k){ #ifdef AMREX_USE_EB // Skip field solve if this cell is fully covered by embedded boundaries - if (lz(i,j,k) <= 0) { return; } + if (lz && lz(i,j,k) <= 0) { return; } #endif // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, k, 0); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp index 3aee7697073..1a9b79a8acb 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp @@ -7,6 +7,7 @@ # include "FiniteDifferenceAlgorithms/CartesianCKCAlgorithm.H" # include "FiniteDifferenceAlgorithms/FieldAccessorFunctors.H" #endif +#include "EmbeddedBoundary/Enabled.H" #include "MacroscopicProperties/MacroscopicProperties.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" @@ -139,10 +140,14 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( Array4 const& jy = Jfield[1]->array(mfi); Array4 const& jz = Jfield[2]->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lx = edge_lengths[0]->array(mfi); - amrex::Array4 const& ly = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); + amrex::Array4 lx, ly, lz; + if (EB::enabled()) { + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } +#ifdef WARPX_DIM_XZ + amrex::ignore_unused(ly); #endif // material prop // @@ -174,10 +179,9 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( // Loop over the cells and update the fields amrex::ParallelFor(tex, tey, tez, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries - if (lx(i, j, k) <= 0) return; -#endif + if (lx && lx(i, j, k) <= 0) { return; } + // Interpolate conductivity, sigma, to Ex position on the grid amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, Ex_stag, macro_cr, i, j, k, scomp); @@ -193,15 +197,13 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if (ly(i,j,k) <= 0) return; + if (ly && ly(i,j,k) <= 0) { return; } #elif defined(WARPX_DIM_XZ) //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered - amrex::ignore_unused(ly); - if (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j, k)<=0 || lz(i, j-1, k)<=0) return; -#endif + if (lx && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j, k)<=0 || lz(i, j-1, k)<=0)) { return; } #endif + // Interpolate conductivity, sigma, to Ey position on the grid amrex::Real const 
sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, Ey_stag, macro_cr, i, j, k, scomp); @@ -218,10 +220,9 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries - if (lz(i,j,k) <= 0) return; -#endif + if (lz && lz(i,j,k) <= 0) { return; } + // Interpolate conductivity, sigma, to Ez position on the grid amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, Ez_stag, macro_cr, i, j, k, scomp); diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp index 26ac1ac96c8..d715e64cdaa 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp @@ -184,6 +184,7 @@ WarpX::computeVectorPotential (const amrex::Vectordmap, this->grids, this->m_vector_poisson_boundary_handler, + m_eb_enabled, WarpX::do_single_precision_comms, this->ref_ratio, post_A_calculation, diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index a27a46b7e88..5e5ebb19921 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -717,6 +717,7 @@ WarpX::InitPML () psatd_solution_type, J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, amrex::IntVect(0), amrex::IntVect(0), + m_eb_enabled, guard_cells.ng_FieldSolver.max(), v_particle_pml, do_pml_Lo[0], do_pml_Hi[0]); @@ -756,6 +757,7 @@ WarpX::InitPML () do_moving_window, pml_has_particles, do_pml_in_domain, psatd_solution_type, J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, amrex::IntVect(0), amrex::IntVect(0), + m_eb_enabled, guard_cells.ng_FieldSolver.max(), v_particle_pml, do_pml_Lo[lev], do_pml_Hi[lev]); @@ -934,7 +936,7 @@ WarpX::InitLevelData (int lev, Real /*time*/) } #ifdef AMREX_USE_EB - InitializeEBGridData(lev); + if (m_eb_enabled) { InitializeEBGridData(lev); } #endif // if the input string for the B-field is "parse_b_ext_grid_function", @@ -979,11 +981,13 @@ WarpX::InitLevelData (int lev, Real /*time*/) && (lev <= maxlevel_extEMfield_init)) { #ifdef AMREX_USE_EB - // We initialize ECTRhofield consistently with the Efield - if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - m_fdtd_solver_fp[lev]->EvolveECTRho( - Efield_fp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], lev); + if (m_eb_enabled) { + // We initialize ECTRhofield consistently with the Efield + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { + m_fdtd_solver_fp[lev]->EvolveECTRho( + Efield_fp[lev], m_edge_lengths[lev], + m_face_areas[lev], ECTRhofield[lev], lev); + } } #endif @@ -1012,11 +1016,13 @@ WarpX::InitLevelData (int lev, Real /*time*/) 'E', lev, PatchType::coarse); #ifdef AMREX_USE_EB - if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - // We initialize ECTRhofield consistently with the Efield - m_fdtd_solver_cp[lev]->EvolveECTRho(Efield_cp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], lev); + if (m_eb_enabled) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { + // We initialize ECTRhofield consistently with the Efield + m_fdtd_solver_cp[lev]->EvolveECTRho(Efield_cp[lev], m_edge_lengths[lev], + m_face_areas[lev], ECTRhofield[lev], lev); + } } #endif } @@ -1041,7 +1047,7 @@ 
WarpX::InitializeExternalFieldsOnGridUsingParser ( ParserExecutor<3> const& zfield_parser, std::array< std::unique_ptr, 3 > const& edge_lengths, std::array< std::unique_ptr, 3 > const& face_areas, - const char field, + [[maybe_unused]] const char field, const int lev, PatchType patch_type) { @@ -1057,49 +1063,45 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); const amrex::IntVect z_nodal_flag = mfz->ixType().toIntVect(); - for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) - { - const amrex::Box& tbx = mfi.tilebox( x_nodal_flag, mfx->nGrowVect() ); - const amrex::Box& tby = mfi.tilebox( y_nodal_flag, mfy->nGrowVect() ); - const amrex::Box& tbz = mfi.tilebox( z_nodal_flag, mfz->nGrowVect() ); - - auto const& mfxfab = mfx->array(mfi); - auto const& mfyfab = mfy->array(mfi); - auto const& mfzfab = mfz->array(mfi); - -#ifdef AMREX_USE_EB - amrex::Array4 const& lx = edge_lengths[0]->array(mfi); - amrex::Array4 const& ly = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); - amrex::Array4 const& Sx = face_areas[0]->array(mfi); - amrex::Array4 const& Sy = face_areas[1]->array(mfi); - amrex::Array4 const& Sz = face_areas[2]->array(mfi); + for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) { + const amrex::Box &tbx = mfi.tilebox(x_nodal_flag, mfx->nGrowVect()); + const amrex::Box &tby = mfi.tilebox(y_nodal_flag, mfy->nGrowVect()); + const amrex::Box &tbz = mfi.tilebox(z_nodal_flag, mfz->nGrowVect()); + + auto const &mfxfab = mfx->array(mfi); + auto const &mfyfab = mfy->array(mfi); + auto const &mfzfab = mfz->array(mfi); + + amrex::Array4 lx, ly, lz, Sx, Sy, Sz; + if (m_eb_enabled) { + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + Sx = face_areas[0]->array(mfi); + Sy = face_areas[1]->array(mfi); + Sz = face_areas[2]->array(mfi); + } #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const amrex::Dim3 lx_lo = amrex::lbound(lx); - const amrex::Dim3 lx_hi = amrex::ubound(lx); - const amrex::Dim3 lz_lo = amrex::lbound(lz); - const amrex::Dim3 lz_hi = amrex::ubound(lz); + amrex::Dim3 lx_lo, lx_hi, lz_lo, lz_hi; #endif - + if (m_eb_enabled) { #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ignore_unused(ly, Sx, Sz); -#elif defined(WARPX_DIM_1D_Z) - amrex::ignore_unused(lx, ly, lz, Sx, Sy, Sz); -#endif - -#else - amrex::ignore_unused(edge_lengths, face_areas, field); + lx_lo = amrex::lbound(lx); + lx_hi = amrex::ubound(lx); + lz_lo = amrex::lbound(lz); + lz_hi = amrex::ubound(lz); #endif + } amrex::ParallelFor (tbx, tby, tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k) { #ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if((field=='E' and lx(i, j, k)<=0) or (field=='B' and Sx(i, j, k)<=0)) return; + if(lx && ((field=='E' and lx(i, j, k)<=0) or (field=='B' and Sx(i, j, k)<=0))) { return; } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ex is associated with a x-edge, while Bx is associated with a z-edge - if((field=='E' and lx(i, j, k)<=0) or (field=='B' and lz(i, j, k)<=0)) return; + if(lx && ((field=='E' and lx(i, j, k)<=0) or (field=='B' and lz(i, j, k)<=0))) { return; } #endif #endif // Shift required in the x-, y-, or z- position @@ -1129,14 +1131,15 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( [=] AMREX_GPU_DEVICE (int i, int j, int k) { #ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if((field=='E' and ly(i, j, k)<=0) or (field=='B' and Sy(i, j, k)<=0)) return; + if(ly && 
((field=='E' and ly(i, j, k)<=0) or (field=='B' and Sy(i, j, k)<=0))) { return; } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ey is associated with a mesh node, so we need to check if the mesh node is covered - if((field=='E' and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 + if(lx && + ((field=='E' and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 || lx(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)<=0 || lz(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)<=0 || lz(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)<=0)) or - (field=='B' and Sy(i,j,k)<=0)) return; + (field=='B' and Sy(i,j,k)<=0))) { return; } #endif #endif #if defined(WARPX_DIM_1D_Z) @@ -1164,10 +1167,10 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( [=] AMREX_GPU_DEVICE (int i, int j, int k) { #ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if((field=='E' and lz(i, j, k)<=0) or (field=='B' and Sz(i, j, k)<=0)) return; + if(lz && ((field=='E' and lz(i, j, k)<=0) or (field=='B' and Sz(i, j, k)<=0))) { return; } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ez is associated with a z-edge, while Bz is associated with a x-edge - if((field=='E' and lz(i, j, k)<=0) or (field=='B' and lx(i, j, k)<=0)) return; + if(lz && ((field=='E' and lz(i, j, k)<=0) or (field=='B' and lx(i, j, k)<=0))) { return; } #endif #endif #if defined(WARPX_DIM_1D_Z) @@ -1268,9 +1271,7 @@ void WarpX::InitializeEBGridData (int lev) if (lev == maxLevel()) { // Throw a warning if EB is on and particle_shape > 1 - bool flag_eb_on = not fieldEBFactory(lev).isAllRegular(); - - if ((nox > 1 or noy > 1 or noz > 1) and flag_eb_on) + if ((nox > 1 or noy > 1 or noz > 1) and m_eb_enabled) { ablastr::warn_manager::WMRecordWarning("Particles", "when algo.particle_shape > 1, numerical artifacts will be present when\n" diff --git a/Source/Parallelization/WarpXRegrid.cpp b/Source/Parallelization/WarpXRegrid.cpp index d33bd17ccdd..112db68f488 100644 --- a/Source/Parallelization/WarpXRegrid.cpp +++ b/Source/Parallelization/WarpXRegrid.cpp @@ -215,20 +215,20 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi RemakeMultiFab(m_hybrid_pic_model->current_fp_ampere[lev][idim], false); RemakeMultiFab(m_hybrid_pic_model->current_fp_external[lev][idim],true); } -#ifdef AMREX_USE_EB - if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { - RemakeMultiFab(m_edge_lengths[lev][idim], false); - RemakeMultiFab(m_face_areas[lev][idim], false); - if(WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT){ - RemakeMultiFab(Venl[lev][idim], false); - RemakeMultiFab(m_flag_info_face[lev][idim], false); - RemakeMultiFab(m_flag_ext_face[lev][idim], false); - RemakeMultiFab(m_area_mod[lev][idim], false); - RemakeMultiFab(ECTRhofield[lev][idim], false); - m_borrowing[lev][idim] = std::make_unique>(amrex::convert(ba, Bfield_fp[lev][idim]->ixType().toIntVect()), dm); + if (m_eb_enabled) { + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { + RemakeMultiFab(m_edge_lengths[lev][idim], false); + RemakeMultiFab(m_face_areas[lev][idim], false); + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { + RemakeMultiFab(Venl[lev][idim], false); + RemakeMultiFab(m_flag_info_face[lev][idim], false); + RemakeMultiFab(m_flag_ext_face[lev][idim], false); + RemakeMultiFab(m_area_mod[lev][idim], false); + RemakeMultiFab(ECTRhofield[lev][idim], false); + m_borrowing[lev][idim] = std::make_unique>(amrex::convert(ba, 
Bfield_fp[lev][idim]->ixType().toIntVect()), dm); + } } } -#endif } RemakeMultiFab(F_fp[lev], true); @@ -242,18 +242,19 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi RemakeMultiFab(m_hybrid_pic_model->electron_pressure_fp[lev], false); } -#ifdef AMREX_USE_EB - RemakeMultiFab(m_distance_to_eb[lev], false); - - int max_guard = guard_cells.ng_FieldSolver.max(); - m_field_factory[lev] = amrex::makeEBFabFactory(Geom(lev), ba, dm, - {max_guard, max_guard, max_guard}, - amrex::EBSupport::full); + if (m_eb_enabled) { + RemakeMultiFab(m_distance_to_eb[lev], false); - InitializeEBGridData(lev); -#else - m_field_factory[lev] = std::make_unique(); +#ifdef AMREX_USE_EB + int const max_guard = guard_cells.ng_FieldSolver.max(); + m_field_factory[lev] = amrex::makeEBFabFactory(Geom(lev), ba, dm, + {max_guard, max_guard, max_guard}, + amrex::EBSupport::full); #endif + InitializeEBGridData(lev); + } else { + m_field_factory[lev] = std::make_unique(); + } #ifdef WARPX_USE_FFT if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index fc496217388..cc3cd89cd7d 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -39,10 +39,8 @@ #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXProfilerWrapper.H" #include "Utils/WarpXUtil.H" -#ifdef AMREX_USE_EB -# include "EmbeddedBoundary/ParticleScraper.H" -# include "EmbeddedBoundary/ParticleBoundaryProcess.H" -#endif +#include "EmbeddedBoundary/ParticleScraper.H" +#include "EmbeddedBoundary/ParticleBoundaryProcess.H" #include "WarpX.H" @@ -958,13 +956,9 @@ void MultiParticleContainer::CheckIonizationProductSpecies() void MultiParticleContainer::ScrapeParticlesAtEB (const amrex::Vector& distance_to_eb) { -#ifdef AMREX_USE_EB for (auto& pc : allcontainers) { scrapeParticlesAtEB(*pc, distance_to_eb, ParticleBoundaryProcess::Absorb()); } -#else - amrex::ignore_unused(distance_to_eb); -#endif } #ifdef WARPX_QED diff --git a/Source/Particles/ParticleBoundaryBuffer.cpp b/Source/Particles/ParticleBoundaryBuffer.cpp index 7b2ebce92b9..bc113e8e3a3 100644 --- a/Source/Particles/ParticleBoundaryBuffer.cpp +++ b/Source/Particles/ParticleBoundaryBuffer.cpp @@ -6,6 +6,7 @@ */ #include "WarpX.H" +#include "EmbeddedBoundary/Enabled.H" #include "EmbeddedBoundary/DistanceToEB.H" #include "Particles/ParticleBoundaryBuffer.H" #include "Particles/MultiParticleContainer.H" @@ -23,8 +24,10 @@ #include #include #include + using namespace amrex::literals; + struct IsOutsideDomainBoundary { amrex::GpuArray m_plo; amrex::GpuArray m_phi; @@ -46,7 +49,6 @@ struct IsOutsideDomainBoundary { } }; -#ifdef AMREX_USE_EB struct FindEmbeddedBoundaryIntersection { const int m_step_index; const int m_delta_index; @@ -86,13 +88,13 @@ struct FindEmbeddedBoundaryIntersection { amrex::ParticleReal const uz = dst.m_rdata[PIdx::uz][dst_i]; // Temporary variables to avoid implicit capture - amrex::Real dt = m_dt; - amrex::Array4 phiarr = m_phiarr; - amrex::GpuArray dxi = m_dxi; - amrex::GpuArray plo = m_plo; + amrex::Real const dt = m_dt; + amrex::Array4 const phiarr = m_phiarr; + amrex::GpuArray const dxi = m_dxi; + amrex::GpuArray const plo = m_plo; // Bisection algorithm to find the point where phi(x,y,z)=0 (i.e. 
on the embedded boundary) - amrex::Real dt_fraction = amrex::bisect( 0.0, 1.0, + amrex::Real const dt_fraction = amrex::bisect( 0.0, 1.0, [=] (amrex::Real dt_frac) { int i, j, k; amrex::Real W[AMREX_SPACEDIM][2]; @@ -100,7 +102,7 @@ struct FindEmbeddedBoundaryIntersection { UpdatePosition(x_temp, y_temp, z_temp, ux, uy, uz, -dt_frac*dt); ablastr::particles::compute_weights( x_temp, y_temp, z_temp, plo, dxi, i, j, k, W); - amrex::Real phi_value = ablastr::particles::interp_field_nodal(i, j, k, W, phiarr); + amrex::Real const phi_value = ablastr::particles::interp_field_nodal(i, j, k, W, phiarr); return phi_value; } ); @@ -146,7 +148,7 @@ struct FindEmbeddedBoundaryIntersection { dst.m_rdata[PIdx::z][dst_i] = z_temp; dst.m_rdata[PIdx::theta][dst_i] = std::atan2(y_temp, x_temp); //save normal components - amrex::Real theta=std::atan2(y_temp, x_temp); + amrex::Real const theta = std::atan2(y_temp, x_temp); dst.m_runtime_rdata[m_normal_index][dst_i] = normal[0]*std::cos(theta); dst.m_runtime_rdata[m_normal_index+1][dst_i] = normal[0]*std::sin(theta); dst.m_runtime_rdata[m_normal_index+2][dst_i] = normal[1]; @@ -165,7 +167,6 @@ struct FindEmbeddedBoundaryIntersection { amrex::ParticleIDWrapper{dst.m_idcpu[dst_i]}.make_valid(); } }; -#endif struct CopyAndTimestamp { int m_step_index; @@ -239,6 +240,8 @@ ParticleBoundaryBuffer::ParticleBoundaryBuffer () constexpr auto idx_zhi = 5; #endif + bool const eb_enabled = EB::enabled(); + for (int ispecies = 0; ispecies < numSpecies(); ++ispecies) { const amrex::ParmParse pp_species(getSpeciesNames()[ispecies]); @@ -258,9 +261,9 @@ ParticleBoundaryBuffer::ParticleBoundaryBuffer () pp_species.query("save_particles_at_zlo", m_do_boundary_buffer[idx_zlo][ispecies]); pp_species.query("save_particles_at_zhi", m_do_boundary_buffer[idx_zhi][ispecies]); #endif -#ifdef AMREX_USE_EB - pp_species.query("save_particles_at_eb", m_do_boundary_buffer[AMREX_SPACEDIM*2][ispecies]); -#endif + + if (eb_enabled) { pp_species.query("save_particles_at_eb", m_do_boundary_buffer[AMREX_SPACEDIM*2][ispecies]); } + // Set the flag whether the boundary is active or any species for (int i = 0; i < numBoundaries(); ++i) { if (m_do_boundary_buffer[i][ispecies]) { m_do_any_boundary[i] = 1; } @@ -283,10 +286,7 @@ ParticleBoundaryBuffer::ParticleBoundaryBuffer () m_boundary_names[idx_zlo] = "zlo"; m_boundary_names[idx_zhi] = "zhi"; #endif -#ifdef AMREX_USE_EB - m_boundary_names[AMREX_SPACEDIM*2] = "eb"; -#endif - + if (eb_enabled) { m_boundary_names[AMREX_SPACEDIM*2] = "eb"; } } void ParticleBoundaryBuffer::printNumParticles () const { @@ -306,17 +306,17 @@ void ParticleBoundaryBuffer::printNumParticles () const { } } } -#ifdef AMREX_USE_EB - auto& buffer = m_particle_containers[2*AMREX_SPACEDIM]; - for (int i = 0; i < numSpecies(); ++i) - { - const auto np = buffer[i].isDefined() ? buffer[i].TotalNumberOfParticles(false) : 0; - amrex::Print() << Utils::TextMsg::Info( - "Species " + getSpeciesNames()[i] + " has " - + std::to_string(np) + " particles in the EB boundary buffer" - ); + + if (EB::enabled()) { + auto const & buffer = m_particle_containers[2 * AMREX_SPACEDIM]; + for (int i = 0; i < numSpecies(); ++i) { + const auto np = buffer[i].isDefined() ? 
buffer[i].TotalNumberOfParticles(false) : 0; + amrex::Print() << Utils::TextMsg::Info( + "Species " + getSpeciesNames()[i] + " has " + + std::to_string(np) + " particles in the EB boundary buffer" + ); + } } -#endif } void ParticleBoundaryBuffer::redistribute () { @@ -464,107 +464,108 @@ void ParticleBoundaryBuffer::gatherParticlesFromDomainBoundaries (MultiParticleC void ParticleBoundaryBuffer::gatherParticlesFromEmbeddedBoundaries ( MultiParticleContainer& mypc, const amrex::Vector& distance_to_eb) { -#ifdef AMREX_USE_EB - WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::EB"); + if (EB::enabled()) { + WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::EB"); - using PIter = amrex::ParConstIterSoA; - const auto& warpx_instance = WarpX::GetInstance(); - const amrex::Geometry& geom = warpx_instance.Geom(0); - auto plo = geom.ProbLoArray(); - auto& buffer = m_particle_containers[m_particle_containers.size()-1]; - for (int i = 0; i < numSpecies(); ++i) - { - if (!m_do_boundary_buffer[AMREX_SPACEDIM*2][i]) continue; - const auto& pc = mypc.GetParticleContainer(i); - if (!buffer[i].isDefined()) + using PIter = amrex::ParConstIterSoA; + const auto &warpx_instance = WarpX::GetInstance(); + const amrex::Geometry &geom = warpx_instance.Geom(0); + auto plo = geom.ProbLoArray(); + + auto& buffer = m_particle_containers[m_particle_containers.size()-1]; + for (int i = 0; i < numSpecies(); ++i) { - buffer[i] = pc.make_alike(); - buffer[i].AddIntComp("stepScraped", false); - buffer[i].AddRealComp("deltaTimeScraped", false); - buffer[i].AddRealComp("nx", false); - buffer[i].AddRealComp("ny", false); - buffer[i].AddRealComp("nz", false); + if (!m_do_boundary_buffer[AMREX_SPACEDIM*2][i]) { continue; } + const auto& pc = mypc.GetParticleContainer(i); + if (!buffer[i].isDefined()) + { + buffer[i] = pc.make_alike(); + buffer[i].AddIntComp("stepScraped", false); + buffer[i].AddRealComp("deltaTimeScraped", false); + buffer[i].AddRealComp("nx", false); + buffer[i].AddRealComp("ny", false); + buffer[i].AddRealComp("nz", false); - } + } - auto& species_buffer = buffer[i]; - for (int lev = 0; lev < pc.numLevels(); ++lev){ - for(PIter pti(pc, lev); pti.isValid(); ++pti){ - species_buffer.DefineAndReturnParticleTile( - lev, pti.index(), pti.LocalTileIndex()); + auto& species_buffer = buffer[i]; + for (int lev = 0; lev < pc.numLevels(); ++lev) { + for (PIter pti(pc, lev); pti.isValid(); ++pti) { + species_buffer.DefineAndReturnParticleTile( + lev, pti.index(), pti.LocalTileIndex()); + } } - } - for (int lev = 0; lev < pc.numLevels(); ++lev) - { - const auto& plevel = pc.GetParticles(lev); - auto dxi = warpx_instance.Geom(lev).InvCellSizeArray(); + for (int lev = 0; lev < pc.numLevels(); ++lev) + { + const auto& plevel = pc.GetParticles(lev); + auto dxi = warpx_instance.Geom(lev).InvCellSizeArray(); #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for(PIter pti(pc, lev); pti.isValid(); ++pti) - { - auto phiarr = (*distance_to_eb[lev])[pti].array(); // signed distance function - auto index = std::make_pair(pti.index(), pti.LocalTileIndex()); - if(plevel.find(index) == plevel.end()) continue; - - const auto getPosition = GetParticlePosition(pti); - auto& ptile_buffer = species_buffer.DefineAndReturnParticleTile(lev, pti.index(), - pti.LocalTileIndex()); - const auto& ptile = plevel.at(index); - auto np = ptile.numParticles(); - if (np == 0) { continue; } - - using SrcData = WarpXParticleContainer::ParticleTileType::ConstParticleTileDataType; - auto predicate = [=] 
AMREX_GPU_HOST_DEVICE (const SrcData& /*src*/, const int ip) - /* NVCC 11.3.109 chokes in C++17 on this: noexcept */ - { - amrex::ParticleReal xp, yp, zp; - getPosition(ip, xp, yp, zp); - - amrex::Real phi_value = ablastr::particles::doGatherScalarFieldNodal( - xp, yp, zp, phiarr, dxi, plo - ); - return phi_value < 0.0 ? 1 : 0; - }; - - const auto ptile_data = ptile.getConstParticleTileData(); - - amrex::ReduceOps reduce_op; - amrex::ReduceData reduce_data(reduce_op); - { - WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::count_out_of_boundsEB"); - reduce_op.eval(np, reduce_data, [=] AMREX_GPU_HOST_DEVICE (int ip) - { return predicate(ptile_data, ip) ? 1 : 0; }); - } + for (PIter pti(pc, lev); pti.isValid(); ++pti) { + auto phiarr = (*distance_to_eb[lev])[pti].array(); // signed distance function + auto index = std::make_pair(pti.index(), pti.LocalTileIndex()); + if (plevel.find(index) == plevel.end()) { continue; } + + const auto getPosition = GetParticlePosition(pti); + auto &ptile_buffer = species_buffer.DefineAndReturnParticleTile(lev, pti.index(), + pti.LocalTileIndex()); + const auto &ptile = plevel.at(index); + auto np = ptile.numParticles(); + if (np == 0) { continue; } + + using SrcData = WarpXParticleContainer::ParticleTileType::ConstParticleTileDataType; + auto predicate = [=] AMREX_GPU_HOST_DEVICE(const SrcData & /*src*/, const int ip) + /* NVCC 11.3.109 chokes in C++17 on this: noexcept */ + { + amrex::ParticleReal xp, yp, zp; + getPosition(ip, xp, yp, zp); - auto dst_index = ptile_buffer.numParticles(); - { - WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::resize_eb"); - ptile_buffer.resize(dst_index + amrex::get<0>(reduce_data.value())); - } - auto& warpx = WarpX::GetInstance(); - const auto dt = warpx.getdt(pti.GetLevel()); - auto string_to_index_intcomp = buffer[i].getParticleRuntimeiComps(); - const int step_scraped_index = string_to_index_intcomp.at("stepScraped"); - auto string_to_index_realcomp = buffer[i].getParticleRuntimeComps(); - const int delta_index = string_to_index_realcomp.at("deltaTimeScraped"); - const int normal_index = string_to_index_realcomp.at("nx"); - const int step = warpx_instance.getistep(0); + amrex::Real const phi_value = ablastr::particles::doGatherScalarFieldNodal( + xp, yp, zp, phiarr, dxi, plo + ); + return phi_value < 0.0 ? 1 : 0; + }; - { - WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::filterTransformEB"); - amrex::filterAndTransformParticles(ptile_buffer, ptile, predicate, - FindEmbeddedBoundaryIntersection{step_scraped_index,delta_index, normal_index, step, dt, phiarr, dxi, plo}, 0, dst_index); + const auto ptile_data = ptile.getConstParticleTileData(); + amrex::ReduceOps reduce_op; + amrex::ReduceData reduce_data(reduce_op); + { + WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::count_out_of_boundsEB"); + reduce_op.eval(np, reduce_data, + [=] AMREX_GPU_HOST_DEVICE(int ip) { return predicate(ptile_data, ip) ? 
1 : 0; }); + } + + auto dst_index = ptile_buffer.numParticles(); + { + WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::resize_eb"); + ptile_buffer.resize(dst_index + amrex::get<0>(reduce_data.value())); + } + auto &warpx = WarpX::GetInstance(); + const auto dt = warpx.getdt(pti.GetLevel()); + auto string_to_index_intcomp = buffer[i].getParticleRuntimeiComps(); + const int step_scraped_index = string_to_index_intcomp.at("stepScraped"); + auto string_to_index_realcomp = buffer[i].getParticleRuntimeComps(); + const int delta_index = string_to_index_realcomp.at("deltaTimeScraped"); + const int normal_index = string_to_index_realcomp.at("nx"); + const int step = warpx_instance.getistep(0); + + { + WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::filterTransformEB"); + amrex::filterAndTransformParticles(ptile_buffer, ptile, predicate, + FindEmbeddedBoundaryIntersection{step_scraped_index, + delta_index, normal_index, + step, dt, phiarr, dxi, plo}, + 0, dst_index); + + } } } } } -#else - amrex::ignore_unused(mypc, distance_to_eb); -#endif } int ParticleBoundaryBuffer::getNumParticlesInContainer( diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 3a30467c20d..1ad43755464 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -39,6 +39,7 @@ #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" #include "Utils/WarpXProfilerWrapper.H" +#include "EmbeddedBoundary/Enabled.H" #ifdef AMREX_USE_EB # include "EmbeddedBoundary/ParticleBoundaryProcess.H" # include "EmbeddedBoundary/ParticleScraper.H" @@ -1483,8 +1484,11 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int // Remove particles that are inside the embedded boundaries #ifdef AMREX_USE_EB - auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); - scrapeParticlesAtEB( *this, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + if (EB::enabled()) + { + auto &distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); + scrapeParticlesAtEB(*this, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + } #endif // The function that calls this is responsible for redistributing particles. @@ -1980,8 +1984,11 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, // Remove particles that are inside the embedded boundaries #ifdef AMREX_USE_EB - auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); - scrapeParticlesAtEB(tmp_pc, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + if (EB::enabled()) + { + auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); + scrapeParticlesAtEB(tmp_pc, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + } #endif // Redistribute the new particles that were added to the temporary container. 
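The hunks above replace compile-time-only `#ifdef AMREX_USE_EB` blocks with the runtime `EB::enabled()` switch, so a binary built with EB support can still run with embedded boundaries turned off (via the `warpx.eb_enabled` input). A minimal sketch of the resulting guard pattern, using only the calls that appear in the hunks above; the free function `scrape_if_eb_enabled` is hypothetical, for illustration only:

    // Sketch only: compile-time guard for EB-only symbols,
    // runtime guard for EB behavior.
    // scrape_if_eb_enabled() is a hypothetical helper, not part of WarpX.
    #include "EmbeddedBoundary/Enabled.H"
    #ifdef AMREX_USE_EB
    #   include "EmbeddedBoundary/ParticleBoundaryProcess.H"
    #   include "EmbeddedBoundary/ParticleScraper.H"
    #endif

    void scrape_if_eb_enabled (WarpXParticleContainer& pc)
    {
    #ifdef AMREX_USE_EB
        if (EB::enabled()) {  // runtime flag, parsed from warpx.eb_enabled
            auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB();
            scrapeParticlesAtEB(pc, amrex::GetVecOfConstPtrs(distance_to_eb),
                                ParticleBoundaryProcess::Absorb());
        }
    #endif
        // Without AMREX_USE_EB this is a no-op; with it, the EB work still
        // runs only when the user enables embedded boundaries at runtime.
    }

The `#ifdef` now only fences off symbols that do not exist in non-EB builds, while `if (EB::enabled())` makes the actual behavior a runtime choice; this is what lets this patch flip the `WARPX_EB` default to `ON` in `setup.py` further below.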
diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp
index 3fd70ee5795..05f8dd609f0 100644
--- a/Source/Particles/WarpXParticleContainer.cpp
+++ b/Source/Particles/WarpXParticleContainer.cpp
@@ -13,6 +13,7 @@
 #include "Deposition/ChargeDeposition.H"
 #include "Deposition/CurrentDeposition.H"
 #include "Deposition/SharedDepositionUtils.H"
+#include "EmbeddedBoundary/Enabled.H"
 #include "Pusher/GetAndSetPosition.H"
 #include "Pusher/UpdatePosition.H"
 #include "ParticleBoundaries_K.H"
@@ -300,9 +301,11 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, long n,

     // Remove particles that are inside the embedded boundaries
 #ifdef AMREX_USE_EB
-    auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB();
-    scrapeParticlesAtEB( *this, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb());
-    deleteInvalidParticles();
+    if (EB::enabled()) {
+        auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB();
+        scrapeParticlesAtEB( *this, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb());
+        deleteInvalidParticles();
+    }
 #endif
 }

diff --git a/Source/Utils/WarpXAlgorithmSelection.cpp b/Source/Utils/WarpXAlgorithmSelection.cpp
index 9e2360315b8..edcf5991c71 100644
--- a/Source/Utils/WarpXAlgorithmSelection.cpp
+++ b/Source/Utils/WarpXAlgorithmSelection.cpp
@@ -154,7 +154,7 @@ const std::map ReductionType_algo_to_int = {
 };

 int
-GetAlgorithmInteger(const amrex::ParmParse& pp, const char* pp_search_key )
+GetAlgorithmInteger (const amrex::ParmParse& pp, const char* pp_search_key )
 {
     // Read user input ; use "default" if it is not found
     std::string algo = "default";
diff --git a/Source/WarpX.H b/Source/WarpX.H
index 4573808461e..903e97549dd 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -997,6 +997,8 @@ public:
      */
     [[nodiscard]] amrex::IntVect get_numprocs() const {return numprocs;}

+    /** Whether embedded boundaries are enabled */
+    bool m_eb_enabled = false;
     bool m_boundary_potential_specified = false;
     ElectrostaticSolver::PoissonBoundaryHandler m_poisson_boundary_handler;
     void ComputeSpaceChargeField (bool reset_fields);
@@ -1063,7 +1065,7 @@ public:
                           amrex::ParserExecutor<3> const& zfield_parser,
                           std::array< std::unique_ptr, 3 > const& edge_lengths,
                           std::array< std::unique_ptr, 3 > const& face_areas,
-                          char field,
+                          [[maybe_unused]] char field,
                           int lev, PatchType patch_type);

     /**
@@ -1130,7 +1132,7 @@ public:
     void BuildBufferMasksInBox ( amrex::Box tbx, amrex::IArrayBox &buffer_mask,
                                  const amrex::IArrayBox &guard_mask, int ng );
 #ifdef AMREX_USE_EB
-    amrex::EBFArrayBoxFactory const& fieldEBFactory (int lev) const noexcept {
+    [[nodiscard]] amrex::EBFArrayBoxFactory const& fieldEBFactory (int lev) const noexcept {
         return static_cast(*m_field_factory[lev]);
     }
 #endif
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 02f85422d12..272c7aa0aff 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -84,6 +84,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -259,7 +260,7 @@ WarpX::WarpX ()

     BackwardCompatibility();

-    InitEB();
+    if (m_eb_enabled) { InitEB(); }

     ablastr::utils::SignalHandling::InitSignalHandling();

@@ -540,10 +541,14 @@ WarpX::ReadParameters ()
     {
         const ParmParse pp_algo("algo");
         electromagnetic_solver_id = static_cast(GetAlgorithmInteger(pp_algo, "maxwell_solver"));
+
+        if (electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT && !EB::enabled()) {
+            throw std::runtime_error("The ECT solver requires embedded boundaries to be enabled at runtime.");
+        }
     }

     {
-        const ParmParse pp_warpx("warpx");
+ ParmParse const pp_warpx("warpx"); //"Synthetic" warning messages may be injected in the Warning Manager via // inputfile for debug&testing purposes. @@ -783,6 +788,14 @@ WarpX::ReadParameters () "The FFT Poisson solver is not implemented in labframe-electromagnetostatic mode yet." ); + m_eb_enabled = EB::enabled(); +#if !defined(AMREX_USE_EB) + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !m_eb_enabled, + "Embedded boundaries are requested via warpx.eb_enabled but were not compiled!" + ); +#endif + // Parse the input file for domain boundary potentials const ParmParse pp_boundary("boundary"); bool potential_specified = false; @@ -793,16 +806,9 @@ WarpX::ReadParameters () potential_specified |= pp_boundary.query("potential_hi_y", m_poisson_boundary_handler.potential_yhi_str); potential_specified |= pp_boundary.query("potential_lo_z", m_poisson_boundary_handler.potential_zlo_str); potential_specified |= pp_boundary.query("potential_hi_z", m_poisson_boundary_handler.potential_zhi_str); -#if defined(AMREX_USE_EB) - potential_specified |= pp_warpx.query("eb_potential(x,y,z,t)", m_poisson_boundary_handler.potential_eb_str); - - if (!EB::enabled()) { - throw std::runtime_error( - "Currently, users MUST use EB if it was compiled in. " - "This will change with https://github.com/ECP-WarpX/WarpX/pull/4865 ." - ); + if (m_eb_enabled) { + potential_specified |= pp_warpx.query("eb_potential(x,y,z,t)", m_poisson_boundary_handler.potential_eb_str); } -#endif m_boundary_potential_specified = potential_specified; if (potential_specified & (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC)) { ablastr::warn_manager::WMRecordWarning( @@ -2203,14 +2209,17 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d bilinear_filter.stencil_length_each_dir); + + if (m_eb_enabled) { #ifdef AMREX_USE_EB - int max_guard = guard_cells.ng_FieldSolver.max(); - m_field_factory[lev] = amrex::makeEBFabFactory(Geom(lev), ba, dm, - {max_guard, max_guard, max_guard}, - amrex::EBSupport::full); -#else - m_field_factory[lev] = std::make_unique(); + int const max_guard = guard_cells.ng_FieldSolver.max(); + m_field_factory[lev] = amrex::makeEBFabFactory(Geom(lev), ba, dm, + {max_guard, max_guard, max_guard}, + amrex::EBSupport::full); #endif + } else { + m_field_factory[lev] = std::make_unique(); + } if (mypc->nSpeciesDepositOnMainGrid() && n_current_deposition_buffer == 0) { @@ -2429,51 +2438,81 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm AllocInitMultiFab(Efield_avg_fp[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_fp[z]", 0.0_rt); } -#ifdef AMREX_USE_EB - constexpr int nc_ls = 1; - amrex::IntVect ng_ls(2); - AllocInitMultiFab(m_distance_to_eb[lev], amrex::convert(ba, IntVect::TheNodeVector()), dm, nc_ls, ng_ls, lev, "m_distance_to_eb"); - - // EB info are needed only at the finest level - if (lev == maxLevel()) - { - if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { - AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_edge_lengths[x]"); - AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_edge_lengths[y]"); - AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_edge_lengths[z]"); - AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, 
guard_cells.ng_FieldSolver, lev, "m_face_areas[x]"); - AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_face_areas[y]"); - AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_face_areas[z]"); - } - if(WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_edge_lengths[x]"); - AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_edge_lengths[y]"); - AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_edge_lengths[z]"); - AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_face_areas[x]"); - AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_face_areas[y]"); - AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_face_areas[z]"); - AllocInitMultiFab(m_flag_info_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_info_face[x]"); - AllocInitMultiFab(m_flag_info_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_info_face[y]"); - AllocInitMultiFab(m_flag_info_face[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_info_face[z]"); - AllocInitMultiFab(m_flag_ext_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[x]"); - AllocInitMultiFab(m_flag_ext_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[y]"); - AllocInitMultiFab(m_flag_ext_face[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[z]"); - AllocInitMultiFab(m_area_mod[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_area_mod[x]"); - AllocInitMultiFab(m_area_mod[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_area_mod[y]"); - AllocInitMultiFab(m_area_mod[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_area_mod[z]"); - m_borrowing[lev][0] = std::make_unique>(amrex::convert(ba, Bx_nodal_flag), dm); - m_borrowing[lev][1] = std::make_unique>(amrex::convert(ba, By_nodal_flag), dm); - m_borrowing[lev][2] = std::make_unique>(amrex::convert(ba, Bz_nodal_flag), dm); - AllocInitMultiFab(Venl[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "Venl[x]"); - AllocInitMultiFab(Venl[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "Venl[y]"); - AllocInitMultiFab(Venl[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "Venl[z]"); - - AllocInitMultiFab(ECTRhofield[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "ECTRhofield[x]", 0.0_rt); - AllocInitMultiFab(ECTRhofield[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "ECTRhofield[y]", 0.0_rt); - AllocInitMultiFab(ECTRhofield[lev][2], 
amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "ECTRhofield[z]", 0.0_rt); + if (m_eb_enabled) { + constexpr int nc_ls = 1; + amrex::IntVect const ng_ls(2); + AllocInitMultiFab(m_distance_to_eb[lev], amrex::convert(ba, IntVect::TheNodeVector()), dm, nc_ls, ng_ls, lev, + "m_distance_to_eb"); + + // EB info are needed only at the finest level + if (lev == maxLevel()) { + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { + AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_edge_lengths[x]"); + AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_edge_lengths[y]"); + AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_edge_lengths[z]"); + AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_face_areas[x]"); + AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_face_areas[y]"); + AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_face_areas[z]"); + } + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { + AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_edge_lengths[x]"); + AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_edge_lengths[y]"); + AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_edge_lengths[z]"); + AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_face_areas[x]"); + AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_face_areas[y]"); + AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_face_areas[z]"); + AllocInitMultiFab(m_flag_info_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_flag_info_face[x]"); + AllocInitMultiFab(m_flag_info_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_flag_info_face[y]"); + AllocInitMultiFab(m_flag_info_face[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_flag_info_face[z]"); + AllocInitMultiFab(m_flag_ext_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[x]"); + AllocInitMultiFab(m_flag_ext_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[y]"); + AllocInitMultiFab(m_flag_ext_face[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[z]"); + AllocInitMultiFab(m_area_mod[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_area_mod[x]"); + AllocInitMultiFab(m_area_mod[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_area_mod[y]"); + AllocInitMultiFab(m_area_mod[lev][2], 
amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_area_mod[z]"); + m_borrowing[lev][0] = std::make_unique>( + amrex::convert(ba, Bx_nodal_flag), dm); + m_borrowing[lev][1] = std::make_unique>( + amrex::convert(ba, By_nodal_flag), dm); + m_borrowing[lev][2] = std::make_unique>( + amrex::convert(ba, Bz_nodal_flag), dm); + AllocInitMultiFab(Venl[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "Venl[x]"); + AllocInitMultiFab(Venl[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "Venl[y]"); + AllocInitMultiFab(Venl[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "Venl[z]"); + + AllocInitMultiFab(ECTRhofield[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "ECTRhofield[x]", 0.0_rt); + AllocInitMultiFab(ECTRhofield[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "ECTRhofield[y]", 0.0_rt); + AllocInitMultiFab(ECTRhofield[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "ECTRhofield[z]", 0.0_rt); + } } } -#endif int rho_ncomps = 0; if( (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame) || diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index 26a4c72208d..fbd8e5f14be 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -46,9 +46,7 @@ #include #include #include -#if defined(AMREX_USE_EB) || defined(WARPX_DIM_RZ) -# include -#endif +#include #ifdef AMREX_USE_EB # include #endif @@ -85,6 +83,7 @@ namespace ablastr::fields { * \param[in] grid_type Integer that corresponds to the type of grid used in the simulation (collocated, staggered, hybrid) * \param[in] boundary_handler a handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler * \param[in] is_solver_igf_on_lev0 boolean to select the Poisson solver: 1 for FFT on level 0 & Multigrid on other levels, 0 for Multigrid on all levels + * \param[in] eb_enabled solve with embedded boundaries * \param[in] do_single_precision_comms perform communications in single precision * \param[in] rel_ref_ratio mesh refinement ratio between levels (default: 1) * \param[in] post_phi_calculation perform a calculation per level directly after phi was calculated; required for embedded boundaries (default: none) @@ -100,17 +99,18 @@ void computePhi (amrex::Vector const & rho, amrex::Vector & phi, std::array const beta, - amrex::Real const relative_tolerance, + amrex::Real relative_tolerance, amrex::Real absolute_tolerance, - int const max_iters, - int const verbosity, + int max_iters, + int verbosity, amrex::Vector const& geom, amrex::Vector const& dmap, amrex::Vector const& grids, utils::enums::GridType grid_type, T_BoundaryHandler const boundary_handler, bool is_solver_igf_on_lev0, - bool const do_single_precision_comms = false, + bool eb_enabled = false, + bool do_single_precision_comms = false, std::optional > rel_ref_ratio = std::nullopt, [[maybe_unused]] T_PostPhiCalculationFunctor post_phi_calculation = std::nullopt, [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB @@ -127,6 +127,11 @@ computePhi (amrex::Vector const & rho, rel_ref_ratio = amrex::Vector{{amrex::IntVect(AMREX_D_DECL(1, 1, 1))}}; } +#if !defined(AMREX_USE_EB) + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(!eb_enabled, + "Embedded boundary solve requested but not 
compiled in"); +#endif + auto const finest_level = static_cast(rho.size() - 1); // determine if rho is zero everywhere @@ -146,21 +151,17 @@ computePhi (amrex::Vector const & rho, ); } -#if !(defined(AMREX_USE_EB) || defined(WARPX_DIM_RZ)) amrex::LPInfo info; -#else - const amrex::LPInfo info; -#endif for (int lev=0; lev<=finest_level; lev++) { // Set the value of beta - amrex::Array beta_solver = + amrex::Array beta_solver = #if defined(WARPX_DIM_1D_Z) - {{ beta[2] }}; // beta_x and beta_z + {{ beta[2] }}; // beta_x and beta_z #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - {{ beta[0], beta[2] }}; // beta_x and beta_z + {{ beta[0], beta[2] }}; // beta_x and beta_z #else - {{ beta[0], beta[1], beta[2] }}; + {{ beta[0], beta[1], beta[2] }}; #endif #if !defined(ABLASTR_USE_FFT) @@ -173,13 +174,13 @@ computePhi (amrex::Vector const & rho, "The FFT Poisson solver is currently only implemented for 3D!"); #endif -#if (defined(ABLASTR_USE_FFT) && defined(WARPX_DIM_3D)) +#if (defined(ABLASTR_USE_FFT) && defined(WARPX_DIM_3D)) // Use the Integrated Green Function solver (FFT) on the coarsest level if it was selected if(is_solver_igf_on_lev0 && lev==0){ amrex::Array const dx_igf {AMREX_D_DECL(geom[lev].CellSize(0)/std::sqrt(1._rt-beta_solver[0]*beta_solver[0]), - geom[lev].CellSize(1)/std::sqrt(1._rt-beta_solver[1]*beta_solver[1]), - geom[lev].CellSize(2)/std::sqrt(1._rt-beta_solver[2]*beta_solver[2]))}; + geom[lev].CellSize(1)/std::sqrt(1._rt-beta_solver[1]*beta_solver[1]), + geom[lev].CellSize(2)/std::sqrt(1._rt-beta_solver[2]*beta_solver[2]))}; if ( max_norm_b == 0 ) { phi[lev]->setVal(0); } else { @@ -192,77 +193,106 @@ computePhi (amrex::Vector const & rho, // Use the Multigrid (MLMG) solver if selected or on refined patches // but first scale rho appropriately using namespace ablastr::constant::SI; - rho[lev]->mult(-1._rt/ep0); // TODO: when do we "un-multiply" this? We need to document this side-effect! - -#if !(defined(AMREX_USE_EB) || defined(WARPX_DIM_RZ)) - // Determine whether to use semi-coarsening - amrex::Array dx_scaled - {AMREX_D_DECL(geom[lev].CellSize(0)/std::sqrt(1._rt-beta_solver[0]*beta_solver[0]), - geom[lev].CellSize(1)/std::sqrt(1._rt-beta_solver[1]*beta_solver[1]), - geom[lev].CellSize(2)/std::sqrt(1._rt-beta_solver[2]*beta_solver[2]))}; - int max_semicoarsening_level = 0; - int semicoarsening_direction = -1; - const auto min_dir = static_cast(std::distance(dx_scaled.begin(), - std::min_element(dx_scaled.begin(),dx_scaled.end()))); - const auto max_dir = static_cast(std::distance(dx_scaled.begin(), - std::max_element(dx_scaled.begin(),dx_scaled.end()))); - if (dx_scaled[max_dir] > dx_scaled[min_dir]) { - semicoarsening_direction = max_dir; - max_semicoarsening_level = static_cast - (std::log2(dx_scaled[max_dir]/dx_scaled[min_dir])); - } - if (max_semicoarsening_level > 0) { - info.setSemicoarsening(true); - info.setMaxSemicoarseningLevel(max_semicoarsening_level); - info.setSemicoarseningDirection(semicoarsening_direction); - } + rho[lev]->mult(-1._rt / ep0); // TODO: when do we "un-multiply" this? We need to document this side-effect! + +#ifdef WARPX_DIM_RZ + constexpr bool is_rz = true; +#else + constexpr bool is_rz = false; #endif -#if defined(AMREX_USE_EB) || defined(WARPX_DIM_RZ) - // In the presence of EB or RZ: the solver assumes that the beam is - // propagating along one of the axes of the grid, i.e. that only *one* - // of the components of `beta` is non-negligible. 
- amrex::MLEBNodeFDLaplacian linop( {geom[lev]}, {grids[lev]}, {dmap[lev]}, info + if (!eb_enabled && !is_rz) { + // Determine whether to use semi-coarsening + amrex::Array dx_scaled + {AMREX_D_DECL(geom[lev].CellSize(0) / std::sqrt(1._rt - beta_solver[0] * beta_solver[0]), + geom[lev].CellSize(1) / std::sqrt(1._rt - beta_solver[1] * beta_solver[1]), + geom[lev].CellSize(2) / std::sqrt(1._rt - beta_solver[2] * beta_solver[2]))}; + int max_semicoarsening_level = 0; + int semicoarsening_direction = -1; + const auto min_dir = static_cast(std::distance(dx_scaled.begin(), + std::min_element(dx_scaled.begin(), dx_scaled.end()))); + const auto max_dir = static_cast(std::distance(dx_scaled.begin(), + std::max_element(dx_scaled.begin(), dx_scaled.end()))); + if (dx_scaled[max_dir] > dx_scaled[min_dir]) { + semicoarsening_direction = max_dir; + max_semicoarsening_level = static_cast(std::log2(dx_scaled[max_dir] / dx_scaled[min_dir])); + } + if (max_semicoarsening_level > 0) { + info.setSemicoarsening(true); + info.setMaxSemicoarseningLevel(max_semicoarsening_level); + info.setSemicoarseningDirection(semicoarsening_direction); + } + } + + std::unique_ptr linop; + if (eb_enabled || is_rz) { + // In the presence of EB or RZ: the solver assumes that the beam is + // propagating along one of the axes of the grid, i.e. that only *one* + // of the components of `beta` is non-negligible. + auto linop_nodelap = std::make_unique(); + if (eb_enabled) { #if defined(AMREX_USE_EB) - , {eb_farray_box_factory.value()[lev]} + linop_nodelap->define( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info, + amrex::Vector{eb_farray_box_factory.value()[lev]} + ); #endif - ); + } + else { + // TODO: rather use MLNodeTensorLaplacian (for RZ w/o EB) here? Semi-Coarsening would be nice here + linop_nodelap->define( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info + ); + } - // Note: this assumes that the beam is propagating along - // one of the axes of the grid, i.e. that only *one* of the - // components of `beta` is non-negligible. // we use this + // Note: this assumes that the beam is propagating along + // one of the axes of the grid, i.e. that only *one* of the + // components of `beta` is non-negligible. 
// we use this #if defined(WARPX_DIM_RZ) - linop.setSigma({0._rt, 1._rt-beta_solver[1]*beta_solver[1]}); + linop_nodelap->setRZ(true); + linop_nodelap->setSigma({0._rt, 1._rt-beta_solver[1]*beta_solver[1]}); #else - linop.setSigma({AMREX_D_DECL( - 1._rt-beta_solver[0]*beta_solver[0], - 1._rt-beta_solver[1]*beta_solver[1], - 1._rt-beta_solver[2]*beta_solver[2])}); + linop_nodelap->setSigma({AMREX_D_DECL( + 1._rt-beta_solver[0]*beta_solver[0], + 1._rt-beta_solver[1]*beta_solver[1], + 1._rt-beta_solver[2]*beta_solver[2])}); #endif #if defined(AMREX_USE_EB) - // if the EB potential only depends on time, the potential can be passed - // as a float instead of a callable - if (boundary_handler.phi_EB_only_t) { - linop.setEBDirichlet(boundary_handler.potential_eb_t(current_time.value())); - } - else - linop.setEBDirichlet(boundary_handler.getPhiEB(current_time.value())); -#endif -#else - // In the absence of EB and RZ: use a more generic solver - // that can handle beams propagating in any direction - amrex::MLNodeTensorLaplacian linop( {geom[lev]}, {grids[lev]}, - {dmap[lev]}, info ); - linop.setBeta( beta_solver ); // for the non-axis-aligned solver + if (eb_enabled) { + // if the EB potential only depends on time, the potential can be passed + // as a float instead of a callable + if (boundary_handler.phi_EB_only_t) { + linop_nodelap->setEBDirichlet(boundary_handler.potential_eb_t(current_time.value())); + } else { + linop_nodelap->setEBDirichlet(boundary_handler.getPhiEB(current_time.value())); + } + } #endif + linop = std::move(linop_nodelap); + } else { + // In the absence of EB and RZ: use a more generic solver + // that can handle beams propagating in any direction + auto linop_tenslap = std::make_unique( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info + ); + linop_tenslap->setBeta(beta_solver); // for the non-axis-aligned solver + linop = std::move(linop_tenslap); + } // Solve the Poisson equation - linop.setDomainBC( boundary_handler.lobc, boundary_handler.hibc ); -#ifdef WARPX_DIM_RZ - linop.setRZ(true); -#endif - amrex::MLMG mlmg(linop); // actual solver defined here + linop->setDomainBC(boundary_handler.lobc, boundary_handler.hibc); + + amrex::MLMG mlmg(*linop); // actual solver defined here mlmg.setVerbose(verbosity); mlmg.setMaxIter(max_iters); mlmg.setAlwaysUseBNorm(always_use_bnorm); @@ -287,7 +317,7 @@ computePhi (amrex::Vector const & rho, amrex::BoxArray ba = phi[lev+1]->boxArray(); const amrex::IntVect& refratio = rel_ref_ratio.value()[lev]; ba.coarsen(refratio); - const int ncomp = linop.getNComp(); + const int ncomp = linop->getNComp(); const int ng = (grid_type == utils::enums::GridType::Collocated) ? 
1 : 0; amrex::MultiFab phi_cp(ba, phi[lev+1]->DistributionMap(), ncomp, ng); if (ng > 0) { diff --git a/Source/ablastr/fields/VectorPoissonSolver.H b/Source/ablastr/fields/VectorPoissonSolver.H index 3ef96c30c84..f6dd2a99cf1 100644 --- a/Source/ablastr/fields/VectorPoissonSolver.H +++ b/Source/ablastr/fields/VectorPoissonSolver.H @@ -71,6 +71,7 @@ namespace ablastr::fields { * \param[in] dmap the distribution mapping per level (e.g., from AmrMesh) * \param[in] grids the grids per level (e.g., from AmrMesh) * \param[in] boundary_handler a handler for boundary conditions, for example @see MagnetostaticSolver::VectorPoissonBoundaryHandler + * \param[in] eb_enabled solve with embedded boundaries * \param[in] do_single_precision_comms perform communications in single precision * \param[in] rel_ref_ratio mesh refinement ratio between levels (default: 1) * \param[in] post_A_calculation perform a calculation per level directly after A was calculated; required for embedded boundaries (default: none) @@ -85,15 +86,16 @@ template< void computeVectorPotential ( amrex::Vector > const & curr, amrex::Vector > & A, - amrex::Real const relative_tolerance, + amrex::Real relative_tolerance, amrex::Real absolute_tolerance, - int const max_iters, - int const verbosity, + int max_iters, + int verbosity, amrex::Vector const& geom, amrex::Vector const& dmap, amrex::Vector const& grids, T_BoundaryHandler const boundary_handler, - bool const do_single_precision_comms = false, + bool eb_enabled = false, + bool do_single_precision_comms = false, std::optional > rel_ref_ratio = std::nullopt, [[maybe_unused]] T_PostACalculationFunctor post_A_calculation = std::nullopt, [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB @@ -108,6 +110,11 @@ computeVectorPotential ( amrex::Vector > co rel_ref_ratio = amrex::Vector{{amrex::IntVect(AMREX_D_DECL(1, 1, 1))}}; } +#if !defined(AMREX_USE_EB) + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(!eb_enabled, + "Embedded boundary solve requested but not compiled in"); +#endif + auto const finest_level = static_cast(curr.size()) - 1; // scale J appropriately; also determine if current is zero everywhere @@ -134,24 +141,18 @@ computeVectorPotential ( amrex::Vector > co // Loop over dimensions of A to solve each component individually for (int lev=0; lev<=finest_level; lev++) { - amrex::MLEBNodeFDLaplacian linopx( - {geom[lev]}, {grids[lev]}, {dmap[lev]}, info -#if defined(AMREX_USE_EB) - , {eb_farray_box_factory.value()[lev]} -#endif - ); - amrex::MLEBNodeFDLaplacian linopy( - {geom[lev]}, {grids[lev]}, {dmap[lev]}, info -#if defined(AMREX_USE_EB) - , {eb_farray_box_factory.value()[lev]} -#endif - ); - amrex::MLEBNodeFDLaplacian linopz( - {geom[lev]}, {grids[lev]}, {dmap[lev]}, info -#if defined(AMREX_USE_EB) - , {eb_farray_box_factory.value()[lev]} + amrex::MLEBNodeFDLaplacian linopx, linopy, linopz; + if (eb_enabled) { +#ifdef AMREX_USE_EB + linopx.define({geom[lev]}, {grids[lev]}, {dmap[lev]}, info, {eb_farray_box_factory.value()[lev]}); + linopy.define({geom[lev]}, {grids[lev]}, {dmap[lev]}, info, {eb_farray_box_factory.value()[lev]}); + linopz.define({geom[lev]}, {grids[lev]}, {dmap[lev]}, info, {eb_farray_box_factory.value()[lev]}); #endif - ); + } else { + linopx.define({geom[lev]}, {grids[lev]}, {dmap[lev]}, info); + linopy.define({geom[lev]}, {grids[lev]}, {dmap[lev]}, info); + linopz.define({geom[lev]}, {grids[lev]}, {dmap[lev]}, info); + } amrex::Array linop = {&linopx,&linopy,&linopz}; amrex::Array,3> mlmg; @@ -163,9 +164,9 @@ computeVectorPotential ( 
amrex::Vector > co // Note: this assumes that beta is zero linop[adim]->setSigma({AMREX_D_DECL(1._rt, 1._rt, 1._rt)}); -#if defined(AMREX_USE_EB) // Set Homogeneous Dirichlet Boundary on EB - linop[adim]->setEBDirichlet(0_rt); +#if defined(AMREX_USE_EB) + if (eb_enabled) { linop[adim]->setEBDirichlet(0_rt); } #endif #ifdef WARPX_DIM_RZ diff --git a/setup.py b/setup.py index 713fb788319..efc18d900cf 100644 --- a/setup.py +++ b/setup.py @@ -195,7 +195,7 @@ def build_extension(self, ext): # consistent across platforms (especially Windows) WARPX_COMPUTE = env.pop("WARPX_COMPUTE", "OMP") WARPX_MPI = env.pop("WARPX_MPI", "OFF") -WARPX_EB = env.pop("WARPX_EB", "OFF") +WARPX_EB = env.pop("WARPX_EB", "ON") WARPX_OPENPMD = env.pop("WARPX_OPENPMD", "ON") WARPX_PRECISION = env.pop("WARPX_PRECISION", "DOUBLE") WARPX_PARTICLE_PRECISION = env.pop("WARPX_PARTICLE_PRECISION", WARPX_PRECISION) From d254be696977c1bcfa5740740f52d05bb151d934 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Mon, 9 Sep 2024 09:04:07 -0700 Subject: [PATCH 20/91] Minor clean-up in `WarpX::ComputeDivB` (#5225) Signed-off-by: roelof-groenewald --- Source/WarpX.cpp | 31 +------------------------------ 1 file changed, 1 insertion(+), 30 deletions(-) diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 272c7aa0aff..c55eedb87a5 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -3054,36 +3054,7 @@ WarpX::ComputeDivB (amrex::MultiFab& divB, int const dcomp, const std::array& B, const std::array& dx) { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(grid_type != GridType::Collocated, - "ComputeDivB not implemented with warpx.grid_type=Collocated."); - - const Real dxinv = 1._rt/dx[0], dyinv = 1._rt/dx[1], dzinv = 1._rt/dx[2]; - -#ifdef WARPX_DIM_RZ - const Real rmin = GetInstance().Geom(0).ProbLo(0); -#endif - -#ifdef AMREX_USE_OMP -#pragma omp parallel if (Gpu::notInLaunchRegion()) -#endif - for (MFIter mfi(divB, TilingIfNotGPU()); mfi.isValid(); ++mfi) - { - const Box& bx = mfi.tilebox(); - amrex::Array4 const& Bxfab = B[0]->array(mfi); - amrex::Array4 const& Byfab = B[1]->array(mfi); - amrex::Array4 const& Bzfab = B[2]->array(mfi); - amrex::Array4 const& divBfab = divB.array(mfi); - - ParallelFor(bx, - [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept - { - warpx_computedivb(i, j, k, dcomp, divBfab, Bxfab, Byfab, Bzfab, dxinv, dyinv, dzinv -#ifdef WARPX_DIM_RZ - ,rmin -#endif - ); - }); - } + ComputeDivB(divB, dcomp, B, dx, IntVect::TheZeroVector()); } void From 50e1326e10cdd50790d53c0da026227d8f315f72 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Mon, 9 Sep 2024 09:29:08 -0700 Subject: [PATCH 21/91] Hybrid-PIC: Only calculate grad Pe when longitudinal part of E matters (#5224) * rename `include_resistivity_term` to `solve_for_Faraday` * only include grad Pe term when the longitudinal part of E matters Signed-off-by: roelof-groenewald * reset 2d benchmarks --------- Signed-off-by: roelof-groenewald --- ...st_2d_ohm_solver_landau_damping_picmi.json | 20 +++--- ...hm_solver_magnetic_reconnection_picmi.json | 20 +++--- .../FiniteDifferenceSolver.H | 8 +-- .../HybridPICModel/HybridPICModel.H | 15 ++-- .../HybridPICModel/HybridPICModel.cpp | 33 ++++----- .../HybridPICSolveE.cpp | 68 +++++++++++-------- .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 8 +-- 7 files changed, 84 insertions(+), 88 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/test_2d_ohm_solver_landau_damping_picmi.json 
b/Regression/Checksum/benchmarks_json/test_2d_ohm_solver_landau_damping_picmi.json index e61206d19e3..b8328651b2e 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_ohm_solver_landau_damping_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_2d_ohm_solver_landau_damping_picmi.json @@ -1,21 +1,21 @@ { "lev=0": { "Bx": 0.0, - "By": 7.079679609748623e-06, + "By": 0.0, "Bz": 0.0, - "Ex": 2726044.0536666242, + "Ex": 2726044.0531522455, "Ey": 0.0, - "Ez": 4060168.6414095857, - "jx": 177543428.8941278, - "jy": 187432087.03814715, - "jz": 594259755.4658135 + "Ez": 4060168.641739453, + "jx": 177543428.93227622, + "jy": 187432087.0391676, + "jz": 594259755.4796929 }, "ions": { - "particle_momentum_x": 9.141594694084731e-17, + "particle_momentum_x": 9.14159469411586e-17, "particle_momentum_y": 9.135546407258978e-17, - "particle_momentum_z": 9.137866220861254e-17, - "particle_position_x": 1197.3344862524336, - "particle_position_y": 153269.17690371818, + "particle_momentum_z": 9.137866220831626e-17, + "particle_position_x": 1197.3344862525519, + "particle_position_y": 153269.1769037183, "particle_weight": 8.032598963696067e+16 } } diff --git a/Regression/Checksum/benchmarks_json/test_2d_ohm_solver_magnetic_reconnection_picmi.json b/Regression/Checksum/benchmarks_json/test_2d_ohm_solver_magnetic_reconnection_picmi.json index 2fab9d1f95b..9a4e9056250 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_ohm_solver_magnetic_reconnection_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_2d_ohm_solver_magnetic_reconnection_picmi.json @@ -1,18 +1,18 @@ { "lev=0": { - "Bx": 1524.8789585503644, + "Bx": 1524.8789586064377, "By": 639.8314135126764, - "Bz": 7.476021044429126, - "Ex": 56499974.120022975, - "Ey": 75064070.52054195, - "Ez": 309548487.2375785 + "Bz": 7.475443144751569, + "Ex": 56498215.028332025, + "Ey": 75045238.66546318, + "Ez": 309589044.22395706 }, "ions": { - "particle_momentum_x": 7.142955224072218e-15, - "particle_momentum_y": 7.138078059799475e-15, - "particle_momentum_z": 7.141134511766018e-15, - "particle_position_x": 11170689.202742986, - "particle_position_y": 5585328.083267269, + "particle_momentum_x": 7.142955285069654e-15, + "particle_momentum_y": 7.138077993721551e-15, + "particle_momentum_z": 7.141133937530391e-15, + "particle_position_x": 11170689.202379484, + "particle_position_y": 5585328.091626547, "particle_weight": 9.036667901693183e+18 } } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 00e87525a75..6a33c89c184 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -149,7 +149,7 @@ class FiniteDifferenceSolver * \param[in] edge_lengths length of edges along embedded boundaries * \param[in] lev level number for the calculation * \param[in] hybrid_model instance of the hybrid-PIC model - * \param[in] include_resistivity_term boolean flag for whether to include resistivity + * \param[in] solve_for_Faraday boolean flag for whether the E-field is solved to be used in Faraday's equation */ void HybridPICSolveE ( std::array< std::unique_ptr, 3>& Efield, std::array< std::unique_ptr, 3>& Jfield, @@ -160,7 +160,7 @@ class FiniteDifferenceSolver std::unique_ptr const& Pefield, std::array< std::unique_ptr, 3 > const& edge_lengths, int lev, HybridPICModel const* hybrid_model, - bool include_resistivity_term ); + bool solve_for_Faraday ); /** * \brief 
Calculation of total current using Ampere's law (without @@ -245,7 +245,7 @@ class FiniteDifferenceSolver std::unique_ptr const& Pefield, std::array< std::unique_ptr, 3 > const& edge_lengths, int lev, HybridPICModel const* hybrid_model, - bool include_resistivity_term ); + bool solve_for_Faraday ); template void CalculateCurrentAmpereCylindrical ( @@ -350,7 +350,7 @@ class FiniteDifferenceSolver std::unique_ptr const& Pefield, std::array< std::unique_ptr, 3 > const& edge_lengths, int lev, HybridPICModel const* hybrid_model, - bool include_resistivity_term ); + bool solve_for_Faraday ); template void CalculateCurrentAmpereCartesian ( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 15208727559..3a49d5fad4b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -91,7 +91,7 @@ public: amrex::Vector, 3>> const& Bfield, amrex::Vector> const& rhofield, amrex::Vector, 3>> const& edge_lengths, - bool include_resistivity_term); + bool solve_for_Faraday); void HybridPICSolveE ( std::array< std::unique_ptr, 3>& Efield, @@ -99,7 +99,7 @@ public: std::array< std::unique_ptr, 3> const& Bfield, std::unique_ptr const& rhofield, std::array< std::unique_ptr, 3> const& edge_lengths, - int lev, bool include_resistivity_term); + int lev, bool solve_for_Faraday); void HybridPICSolveE ( std::array< std::unique_ptr, 3>& Efield, @@ -107,7 +107,7 @@ public: std::array< std::unique_ptr, 3> const& Bfield, std::unique_ptr const& rhofield, std::array< std::unique_ptr, 3> const& edge_lengths, - int lev, PatchType patch_type, bool include_resistivity_term); + int lev, PatchType patch_type, bool solve_for_Faraday); void BfieldEvolveRK ( amrex::Vector, 3>>& Bfield, @@ -138,12 +138,11 @@ public: /** * \brief - * Function to calculate the electron pressure at a given timestep type - * using the simulation charge density. Used in the Ohm's law solver - * (kinetic-fluid hybrid model). + * Function to calculate the electron pressure using the simulation charge + * density. Used in the Ohm's law solver (kinetic-fluid hybrid model). 
*/ - void CalculateElectronPressure ( DtType a_dt_type); - void CalculateElectronPressure (int lev, DtType a_dt_type); + void CalculateElectronPressure (); + void CalculateElectronPressure (int lev); /** * \brief Fill the electron pressure multifab given the kinetic particle diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 652dbfe7cdd..70efc04e259 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -426,14 +426,14 @@ void HybridPICModel::HybridPICSolveE ( amrex::Vector, 3>> const& Bfield, amrex::Vector> const& rhofield, amrex::Vector, 3>> const& edge_lengths, - const bool include_resistivity_term) + const bool solve_for_Faraday) { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { HybridPICSolveE( Efield[lev], Jfield[lev], Bfield[lev], rhofield[lev], - edge_lengths[lev], lev, include_resistivity_term + edge_lengths[lev], lev, solve_for_Faraday ); } } @@ -444,13 +444,13 @@ void HybridPICModel::HybridPICSolveE ( std::array< std::unique_ptr, 3> const& Bfield, std::unique_ptr const& rhofield, std::array< std::unique_ptr, 3> const& edge_lengths, - const int lev, const bool include_resistivity_term) + const int lev, const bool solve_for_Faraday) { WARPX_PROFILE("WarpX::HybridPICSolveE()"); HybridPICSolveE( Efield, Jfield, Bfield, rhofield, edge_lengths, lev, - PatchType::fine, include_resistivity_term + PatchType::fine, solve_for_Faraday ); if (lev > 0) { @@ -466,7 +466,7 @@ void HybridPICModel::HybridPICSolveE ( std::unique_ptr const& rhofield, std::array< std::unique_ptr, 3> const& edge_lengths, const int lev, PatchType patch_type, - const bool include_resistivity_term) + const bool solve_for_Faraday) { auto& warpx = WarpX::GetInstance(); @@ -475,36 +475,29 @@ void HybridPICModel::HybridPICSolveE ( Efield, current_fp_ampere[lev], Jfield, current_fp_external[lev], Bfield, rhofield, electron_pressure_fp[lev], - edge_lengths, lev, this, include_resistivity_term + edge_lengths, lev, this, solve_for_Faraday ); warpx.ApplyEfieldBoundary(lev, patch_type); } -void HybridPICModel::CalculateElectronPressure(DtType a_dt_type) +void HybridPICModel::CalculateElectronPressure() { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - CalculateElectronPressure(lev, a_dt_type); + CalculateElectronPressure(lev); } } -void HybridPICModel::CalculateElectronPressure(const int lev, DtType a_dt_type) +void HybridPICModel::CalculateElectronPressure(const int lev) { WARPX_PROFILE("WarpX::CalculateElectronPressure()"); auto& warpx = WarpX::GetInstance(); - // The full step uses rho^{n+1}, otherwise use the old or averaged - // charge density. - if (a_dt_type == DtType::Full) { - FillElectronPressureMF( - electron_pressure_fp[lev], warpx.getFieldPointer(FieldType::rho_fp, lev) - ); - } else { - FillElectronPressureMF( - electron_pressure_fp[lev], rho_fp_temp[lev].get() - ); - } + // Calculate the electron pressure using rho^{n+1}. 
+ FillElectronPressureMF( + electron_pressure_fp[lev], warpx.getFieldPointer(FieldType::rho_fp, lev) + ); warpx.ApplyElectronPressureBoundary(lev, PatchType::fine); electron_pressure_fp[lev]->FillBoundary(warpx.Geom(lev).periodicity()); } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 5da46f23e54..baeaf7a6c18 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -360,7 +360,7 @@ void FiniteDifferenceSolver::HybridPICSolveE ( std::unique_ptr const& Pefield, std::array< std::unique_ptr, 3 > const& edge_lengths, int lev, HybridPICModel const* hybrid_model, - const bool include_resistivity_term) + const bool solve_for_Faraday) { // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) @@ -369,14 +369,14 @@ void FiniteDifferenceSolver::HybridPICSolveE ( HybridPICSolveECylindrical ( Efield, Jfield, Jifield, Jextfield, Bfield, rhofield, Pefield, - edge_lengths, lev, hybrid_model, include_resistivity_term + edge_lengths, lev, hybrid_model, solve_for_Faraday ); #else HybridPICSolveECartesian ( Efield, Jfield, Jifield, Jextfield, Bfield, rhofield, Pefield, - edge_lengths, lev, hybrid_model, include_resistivity_term + edge_lengths, lev, hybrid_model, solve_for_Faraday ); #endif @@ -398,7 +398,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( std::unique_ptr const& Pefield, std::array< std::unique_ptr, 3 > const& edge_lengths, int lev, HybridPICModel const* hybrid_model, - const bool include_resistivity_term ) + const bool solve_for_Faraday ) { // Both steps below do not currently support m > 0 and should be // modified if such support wants to be added @@ -417,7 +417,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const auto rho_floor = hybrid_model->m_n_floor * PhysConst::q_e; const auto resistivity_has_J_dependence = hybrid_model->m_resistivity_has_J_dependence; - const bool include_hyper_resistivity_term = (eta_h > 0.0) && include_resistivity_term; + const bool include_hyper_resistivity_term = (eta_h > 0.0) && solve_for_Faraday; // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations @@ -576,7 +576,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; - if (include_resistivity_term && resistivity_has_J_dependence) { + if (solve_for_Faraday && resistivity_has_J_dependence) { const Real jr_val = Interp(Jr, Jr_stag, Er_stag, coarsen, i, j, 0, 0); const Real jt_val = Interp(Jt, Jt_stag, Er_stag, coarsen, i, j, 0, 0); const Real jz_val = Interp(Jz, Jz_stag, Er_stag, coarsen, i, j, 0, 0); @@ -586,8 +586,10 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // safety condition since we divide by rho_val later if (rho_val < rho_floor) { rho_val = rho_floor; } - // Get the gradient of the electron pressure - auto grad_Pe = T_Algo::UpwardDr(Pe, coefs_r, n_coefs_r, i, j, 0, 0); + // Get the gradient of the electron pressure if the longitudinal part of + // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 + Real grad_Pe = 0._rt; + if (!solve_for_Faraday) { grad_Pe = T_Algo::UpwardDr(Pe, coefs_r, n_coefs_r, i, j, 0, 0); } // interpolate the nodal neE values to the Yee grid auto enE_r = Interp(enE, nodal, Er_stag, coarsen, i, j, 0, 0); @@ 
-595,7 +597,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Er(i, j, 0) = (enE_r - grad_Pe) / rho_val; // Add resistivity only if E field value is used to update B - if (include_resistivity_term) { Er(i, j, 0) += eta(rho_val, jtot_val) * Jr(i, j, 0); } + if (solve_for_Faraday) { Er(i, j, 0) += eta(rho_val, jtot_val) * Jr(i, j, 0); } if (include_hyper_resistivity_term) { // r on cell-centered point (Jr is cell-centered in r) @@ -624,7 +626,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; - if (include_resistivity_term && resistivity_has_J_dependence) { + if (solve_for_Faraday && resistivity_has_J_dependence) { const Real jr_val = Interp(Jr, Jr_stag, Et_stag, coarsen, i, j, 0, 0); const Real jt_val = Interp(Jt, Jt_stag, Et_stag, coarsen, i, j, 0, 0); const Real jz_val = Interp(Jz, Jz_stag, Et_stag, coarsen, i, j, 0, 0); @@ -644,7 +646,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Et(i, j, 0) = (enE_t - grad_Pe) / rho_val; // Add resistivity only if E field value is used to update B - if (include_resistivity_term) { Et(i, j, 0) += eta(rho_val, jtot_val) * Jt(i, j, 0); } + if (solve_for_Faraday) { Et(i, j, 0) += eta(rho_val, jtot_val) * Jt(i, j, 0); } // Note: Hyper-resisitivity should be revisited here when modal decomposition is implemented }, @@ -659,7 +661,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; - if (include_resistivity_term && resistivity_has_J_dependence) { + if (solve_for_Faraday && resistivity_has_J_dependence) { const Real jr_val = Interp(Jr, Jr_stag, Ez_stag, coarsen, i, j, 0, 0); const Real jt_val = Interp(Jt, Jt_stag, Ez_stag, coarsen, i, j, 0, 0); const Real jz_val = Interp(Jz, Jz_stag, Ez_stag, coarsen, i, j, 0, 0); @@ -669,8 +671,10 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // safety condition since we divide by rho_val later if (rho_val < rho_floor) { rho_val = rho_floor; } - // Get the gradient of the electron pressure - auto grad_Pe = T_Algo::UpwardDz(Pe, coefs_z, n_coefs_z, i, j, 0, 0); + // Get the gradient of the electron pressure if the longitudinal part of + // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 + Real grad_Pe = 0._rt; + if (!solve_for_Faraday) { grad_Pe = T_Algo::UpwardDz(Pe, coefs_z, n_coefs_z, i, j, 0, 0); } // interpolate the nodal neE values to the Yee grid auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, 0, 2); @@ -678,7 +682,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Ez(i, j, 0) = (enE_z - grad_Pe) / rho_val; // Add resistivity only if E field value is used to update B - if (include_resistivity_term) { Ez(i, j, 0) += eta(rho_val, jtot_val) * Jz(i, j, 0); } + if (solve_for_Faraday) { Ez(i, j, 0) += eta(rho_val, jtot_val) * Jz(i, j, 0); } if (include_hyper_resistivity_term) { auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, 0, 0); @@ -709,7 +713,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( std::unique_ptr const& Pefield, std::array< std::unique_ptr, 3 > const& edge_lengths, int lev, HybridPICModel const* hybrid_model, - const bool include_resistivity_term ) + const bool solve_for_Faraday ) { // for the profiler amrex::LayoutData* cost = WarpX::getCosts(lev); @@ -722,7 +726,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const auto rho_floor = hybrid_model->m_n_floor * PhysConst::q_e; const auto 
resistivity_has_J_dependence = hybrid_model->m_resistivity_has_J_dependence; - const bool include_hyper_resistivity_term = (eta_h > 0.) && include_resistivity_term; + const bool include_hyper_resistivity_term = (eta_h > 0.) && solve_for_Faraday; // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations @@ -879,7 +883,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; - if (include_resistivity_term && resistivity_has_J_dependence) { + if (solve_for_Faraday && resistivity_has_J_dependence) { const Real jx_val = Interp(Jx, Jx_stag, Ex_stag, coarsen, i, j, k, 0); const Real jy_val = Interp(Jy, Jy_stag, Ex_stag, coarsen, i, j, k, 0); const Real jz_val = Interp(Jz, Jz_stag, Ex_stag, coarsen, i, j, k, 0); @@ -889,8 +893,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // safety condition since we divide by rho_val later if (rho_val < rho_floor) { rho_val = rho_floor; } - // Get the gradient of the electron pressure - auto grad_Pe = T_Algo::UpwardDx(Pe, coefs_x, n_coefs_x, i, j, k); + // Get the gradient of the electron pressure if the longitudinal part of + // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 + Real grad_Pe = 0._rt; + if (!solve_for_Faraday) { grad_Pe = T_Algo::UpwardDx(Pe, coefs_x, n_coefs_x, i, j, k); } // interpolate the nodal neE values to the Yee grid auto enE_x = Interp(enE, nodal, Ex_stag, coarsen, i, j, k, 0); @@ -898,7 +904,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ex(i, j, k) = (enE_x - grad_Pe) / rho_val; // Add resistivity only if E field value is used to update B - if (include_resistivity_term) { Ex(i, j, k) += eta(rho_val, jtot_val) * Jx(i, j, k); } + if (solve_for_Faraday) { Ex(i, j, k) += eta(rho_val, jtot_val) * Jx(i, j, k); } if (include_hyper_resistivity_term) { auto nabla2Jx = T_Algo::Dxx(Jx, coefs_x, n_coefs_x, i, j, k); @@ -921,7 +927,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; - if (include_resistivity_term && resistivity_has_J_dependence) { + if (solve_for_Faraday && resistivity_has_J_dependence) { const Real jx_val = Interp(Jx, Jx_stag, Ey_stag, coarsen, i, j, k, 0); const Real jy_val = Interp(Jy, Jy_stag, Ey_stag, coarsen, i, j, k, 0); const Real jz_val = Interp(Jz, Jz_stag, Ey_stag, coarsen, i, j, k, 0); @@ -931,8 +937,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // safety condition since we divide by rho_val later if (rho_val < rho_floor) { rho_val = rho_floor; } - // Get the gradient of the electron pressure - auto grad_Pe = T_Algo::UpwardDy(Pe, coefs_y, n_coefs_y, i, j, k); + // Get the gradient of the electron pressure if the longitudinal part of + // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 + Real grad_Pe = 0._rt; + if (!solve_for_Faraday) { grad_Pe = T_Algo::UpwardDy(Pe, coefs_y, n_coefs_y, i, j, k); } // interpolate the nodal neE values to the Yee grid auto enE_y = Interp(enE, nodal, Ey_stag, coarsen, i, j, k, 1); @@ -940,7 +948,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ey(i, j, k) = (enE_y - grad_Pe) / rho_val; // Add resistivity only if E field value is used to update B - if (include_resistivity_term) { Ey(i, j, k) += eta(rho_val, jtot_val) * Jy(i, j, k); } + if (solve_for_Faraday) { Ey(i, j, k) += eta(rho_val, jtot_val) * Jy(i, j, k); } if 
if (include_hyper_resistivity_term) { auto nabla2Jy = T_Algo::Dyy(Jy, coefs_y, n_coefs_y, i, j, k); @@ -959,7 +967,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; - if (include_resistivity_term && resistivity_has_J_dependence) { + if (solve_for_Faraday && resistivity_has_J_dependence) { const Real jx_val = Interp(Jx, Jx_stag, Ez_stag, coarsen, i, j, k, 0); const Real jy_val = Interp(Jy, Jy_stag, Ez_stag, coarsen, i, j, k, 0); const Real jz_val = Interp(Jz, Jz_stag, Ez_stag, coarsen, i, j, k, 0); @@ -969,8 +977,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // safety condition since we divide by rho_val later if (rho_val < rho_floor) { rho_val = rho_floor; } - // Get the gradient of the electron pressure - auto grad_Pe = T_Algo::UpwardDz(Pe, coefs_z, n_coefs_z, i, j, k); + // Get the gradient of the electron pressure if the longitudinal part of + // the E-field should be included, otherwise ignore it since curl(grad Pe) = 0 + Real grad_Pe = 0._rt; + if (!solve_for_Faraday) { grad_Pe = T_Algo::UpwardDz(Pe, coefs_z, n_coefs_z, i, j, k); } // interpolate the nodal neE values to the Yee grid auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, k, 2); @@ -978,7 +988,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ez(i, j, k) = (enE_z - grad_Pe) / rho_val; // Add resistivity only if E field value is used to update B - if (include_resistivity_term) { Ez(i, j, k) += eta(rho_val, jtot_val) * Jz(i, j, k); } + if (solve_for_Faraday) { Ez(i, j, k) += eta(rho_val, jtot_val) * Jz(i, j, k); } if (include_hyper_resistivity_term) { auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, k); diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 4dbe10c4e5a..c16f0193b8d 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -88,9 +88,6 @@ void WarpX::HybridPICEvolveFields () } } - // Calculate the electron pressure at t=n using rho^n - m_hybrid_pic_model->CalculateElectronPressure(DtType::FirstHalf); - // Push the B field from t=n to t=n+1/2 using the current and density // at t=n, while updating the E field along with B using the electron // momentum equation @@ -116,9+113,6 @@ void WarpX::HybridPICEvolveFields () ); } - // Calculate the electron pressure at t=n+1/2 - m_hybrid_pic_model->CalculateElectronPressure(DtType::SecondHalf); - // Now push the B field from t=n+1/2 to t=n+1 using the n+1/2 quantities for (int sub_step = 0; sub_step < sub_steps; sub_step++) { @@ -149,7 +143,7 @@ void WarpX::HybridPICEvolveFields () } // Calculate the electron pressure at t=n+1 - m_hybrid_pic_model->CalculateElectronPressure(DtType::Full); + m_hybrid_pic_model->CalculateElectronPressure(); // Update the E field to t=n+1 using the extrapolated J_i^n+1 value m_hybrid_pic_model->CalculateCurrentAmpere(Bfield_fp, m_edge_lengths); From 9c5c72348e3408e72d1e9248e1f49b3d12ee8c2f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 15:53:49 -0700 Subject: [PATCH 22/91] [pre-commit.ci] pre-commit autoupdate (#5233) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.3 → v0.6.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.3...v0.6.4) Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d35edbedc07..c581183703a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.3 + rev: v0.6.4 hooks: # Run the linter - id: ruff From 10d161550b153af9bb0ff7359a3fc98dacde24b3 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 9 Sep 2024 16:06:41 -0700 Subject: [PATCH 23/91] Constify: `DownwardD*` (#5234) --- .../CartesianCKCAlgorithm.H | 8 ++++---- .../CartesianNodalAlgorithm.H | 12 ++++++------ .../CartesianYeeAlgorithm.H | 6 +++--- .../CylindricalYeeAlgorithm.H | 12 ++++++------ 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianCKCAlgorithm.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianCKCAlgorithm.H index 737146f24a3..cf27898cb86 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianCKCAlgorithm.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianCKCAlgorithm.H @@ -26,7 +26,7 @@ struct CartesianCKCAlgorithm { static void InitializeStencilCoefficients ( - std::array& cell_size, + std::array& cell_size, amrex::Vector& stencil_coefs_x, amrex::Vector& stencil_coefs_y, amrex::Vector& stencil_coefs_z ) { @@ -129,7 +129,7 @@ struct CartesianCKCAlgorithm { * Perform derivative along x on a cell-centered grid, from a nodal field `F` */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real UpwardDx ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_x, int const /*n_coefs_x*/, int const i, int const j, int const k, int const ncomp=0 ) { @@ -186,7 +186,7 @@ struct CartesianCKCAlgorithm { * Perform derivative along y on a cell-centered grid, from a nodal field `F` */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real UpwardDy ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_y, int const n_coefs_y, int const i, int const j, int const k, int const ncomp=0 ) { @@ -244,7 +244,7 @@ struct CartesianCKCAlgorithm { * Perform derivative along z on a cell-centered grid, from a nodal field `F` */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real UpwardDz ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_z, int const n_coefs_z, int const i, int const j, int const k, int const ncomp=0 ) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianNodalAlgorithm.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianNodalAlgorithm.H index b693ed8785f..46940e7e306 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianNodalAlgorithm.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianNodalAlgorithm.H @@ -69,7 +69,7 @@ struct CartesianNodalAlgorithm { * account the staggering; but for `CartesianNodalAlgorithm`, they are equivalent) */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real UpwardDx ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_x, int const /*n_coefs_x*/, int const i, int const j, int const k, int const ncomp=0 ) { @@ -89,7 +89,7 @@ 
struct CartesianNodalAlgorithm { * account the staggering; but for `CartesianNodalAlgorithm`, they are equivalent) */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real DownwardDx ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_x, int const n_coefs_x, int const i, int const j, int const k, int const ncomp=0 ) { @@ -109,7 +109,7 @@ struct CartesianNodalAlgorithm { * account the staggering; but for `CartesianNodalAlgorithm`, they are equivalent) */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real UpwardDy ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_y, int const /*n_coefs_y*/, int const i, int const j, int const k, int const ncomp=0 ) { @@ -129,7 +129,7 @@ struct CartesianNodalAlgorithm { * account the staggering; but for `CartesianNodalAlgorithm`, they are equivalent) */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real DownwardDy ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_y, int const n_coefs_y, int const i, int const j, int const k, int const ncomp=0 ) { @@ -143,7 +143,7 @@ struct CartesianNodalAlgorithm { * account the staggering; but for `CartesianNodalAlgorithm`, they are equivalent) */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real UpwardDz ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_z, int const /*n_coefs_z*/, int const i, int const j, int const k, int const ncomp=0 ) { @@ -164,7 +164,7 @@ struct CartesianNodalAlgorithm { * account the staggering; but for `CartesianNodalAlgorithm`, they are equivalent) */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real DownwardDz ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_z, int const n_coefs_z, int const i, int const j, int const k, int const ncomp=0 ) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H index c4978287aec..485698802b6 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H @@ -67,7 +67,7 @@ struct CartesianYeeAlgorithm { * Perform derivative along x on a cell-centered grid, from a nodal field `F`*/ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real UpwardDx ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_x, int const /*n_coefs_x*/, int const i, int const j, int const k, int const ncomp=0 ) { @@ -123,7 +123,7 @@ struct CartesianYeeAlgorithm { * Perform derivative along y on a cell-centered grid, from a nodal field `F`*/ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real UpwardDy ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_y, int const n_coefs_y, int const i, int const j, int const k, int const ncomp=0 ) { @@ -189,7 +189,7 @@ struct CartesianYeeAlgorithm { * Perform derivative along z on a cell-centered grid, from a nodal field `F`*/ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real UpwardDz ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_z, int const /*n_coefs_z*/, int const i, int const j, int const k, int const ncomp=0 ) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H 
b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H index 436f0a83f31..d20e1ef829b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H @@ -74,7 +74,7 @@ struct CylindricalYeeAlgorithm { * The input parameter `r` is given at the cell-centered position */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real UpwardDrr_over_r ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const r, amrex::Real const dr, amrex::Real const * const coefs_r, int const n_coefs_r, int const i, int const j, int const k, int const comp ) { @@ -92,7 +92,7 @@ struct CylindricalYeeAlgorithm { * The input parameter `r` is given at the cell-centered position */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real DownwardDrr_over_r ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const r, amrex::Real const dr, amrex::Real const * const coefs_r, int const n_coefs_r, int const i, int const j, int const k, int const comp ) { @@ -108,7 +108,7 @@ struct CylindricalYeeAlgorithm { * Perform derivative along r on a cell-centered grid, from a nodal field `F` */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real UpwardDr ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_r, int const n_coefs_r, int const i, int const j, int const k, int const comp ) { @@ -123,7 +123,7 @@ struct CylindricalYeeAlgorithm { * Perform derivative along r on a nodal grid, from a cell-centered field `F` */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real DownwardDr ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_r, int const n_coefs_r, int const i, int const j, int const k, int const comp ) { @@ -156,7 +156,7 @@ struct CylindricalYeeAlgorithm { * Perform derivative along z on a cell-centered grid, from a nodal field `F` */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real UpwardDz ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_z, int const n_coefs_z, int const i, int const j, int const k, int const comp ) { @@ -170,7 +170,7 @@ struct CylindricalYeeAlgorithm { * Perform derivative along z on a nodal grid, from a cell-centered field `F` */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE static amrex::Real DownwardDz ( - amrex::Array4 const& F, + amrex::Array4 const& F, amrex::Real const * const coefs_z, int const n_coefs_z, int const i, int const j, int const k, int const comp ) { From ab1943893406e096266113fd1fca5f489acc32b1 Mon Sep 17 00:00:00 2001 From: Thomas Marks Date: Mon, 9 Sep 2024 17:47:35 -0700 Subject: [PATCH 24/91] Improvements to maxParticleVelocity (#5169) * Improve maxParticleVelocity * gpu fix 1 * gpu fix 2 * gpu fix 3 - change to general reduction * Remove separate OMP bit * Add const qualifier for clang tidy * Refactor maxVelocity function --------- Co-authored-by: Remi Lehe --- Source/Particles/MultiParticleContainer.H | 2 ++ Source/Particles/MultiParticleContainer.cpp | 9 +++++++ Source/Particles/WarpXParticleContainer.cpp | 27 +++++++++++++-------- 3 files changed, 28 insertions(+), 10 deletions(-) diff --git a/Source/Particles/MultiParticleContainer.H b/Source/Particles/MultiParticleContainer.H index f007d5088e3..97e4e1bc4da 100644 --- a/Source/Particles/MultiParticleContainer.H +++ b/Source/Particles/MultiParticleContainer.H @@ -89,6 +89,8 @@ public: return 
allcontainers[index]->meanParticleVelocity(); } + amrex::ParticleReal maxParticleVelocity(); + void AllocData (); void InitData (); diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index cc3cd89cd7d..23af4177228 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -397,6 +397,15 @@ MultiParticleContainer::GetParticleContainerFromName (const std::string& name) c return *allcontainers[i]; } +amrex::ParticleReal +MultiParticleContainer::maxParticleVelocity() { + amrex::ParticleReal max_v = 0.0_prt; + for (const auto &pc : allcontainers) { + max_v = std::max(max_v, pc->maxParticleVelocity()); + } + return max_v; +} + void MultiParticleContainer::AllocData () { diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index 05f8dd609f0..591190a7a19 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -1425,26 +1425,33 @@ std::array WarpXParticleContainer::meanParticleVelocity(bool lo amrex::ParticleReal WarpXParticleContainer::maxParticleVelocity(bool local) { - amrex::ParticleReal max_v = 0.0; + const amrex::ParticleReal inv_clight_sq = 1.0_prt/(PhysConst::c*PhysConst::c); + ReduceOps reduce_op; + ReduceData reduce_data(reduce_op); const int nLevels = finestLevel(); - for (int lev = 0; lev <= nLevels; ++lev) - { #ifdef AMREX_USE_OMP -#pragma omp parallel reduction(max:max_v) +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif + for (int lev = 0; lev <= nLevels; ++lev) { for (WarpXParIter pti(*this, lev); pti.isValid(); ++pti) { - auto& ux = pti.GetAttribs(PIdx::ux); - auto& uy = pti.GetAttribs(PIdx::uy); - auto& uz = pti.GetAttribs(PIdx::uz); - for (unsigned long i = 0; i < ux.size(); i++) { - max_v = std::max(max_v, std::sqrt(ux[i]*ux[i] + uy[i]*uy[i] + uz[i]*uz[i])); - } + auto *const ux = pti.GetAttribs(PIdx::ux).data(); + auto *const uy = pti.GetAttribs(PIdx::uy).data(); + auto *const uz = pti.GetAttribs(PIdx::uz).data(); + + reduce_op.eval(pti.numParticles(), reduce_data, + [=] AMREX_GPU_DEVICE (int ip) + { return (ux[ip]*ux[ip] + uy[ip]*uy[ip] + uz[ip]*uz[ip]) * inv_clight_sq; }); } } + const amrex::ParticleReal max_usq = amrex::get<0>(reduce_data.value()); + + const amrex::ParticleReal gaminv = 1.0_prt/std::sqrt(1.0_prt + max_usq); + amrex::ParticleReal max_v = gaminv * std::sqrt(max_usq) * PhysConst::c; + if (!local) { ParallelAllReduce::Max(max_v, ParallelDescriptor::Communicator()); } return max_v; } From 335cd75a6ff59748f9da3152685cfc432b1a05ce Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 9 Sep 2024 19:26:37 -0700 Subject: [PATCH 25/91] Cleanup dimension macros around ignore_unused (#5238) --- Source/BoundaryConditions/WarpX_PEC.cpp | 50 ------------------- .../FiniteDifferenceSolver/EvolveB.cpp | 29 ++++------- Source/Particles/Pusher/GetAndSetPosition.H | 14 +----- 3 files changed, 13 insertions(+), 80 deletions(-) diff --git a/Source/BoundaryConditions/WarpX_PEC.cpp b/Source/BoundaryConditions/WarpX_PEC.cpp index 0067f54d3ff..3ad0ab4663e 100644 --- a/Source/BoundaryConditions/WarpX_PEC.cpp +++ b/Source/BoundaryConditions/WarpX_PEC.cpp @@ -510,12 +510,7 @@ PEC::ApplyPECtoEfield ( amrex::ParallelFor( tex, nComp_x, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { -#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) - amrex::ignore_unused(k); -#endif -#if (defined WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif const amrex::IntVect 
iv(AMREX_D_DECL(i,j,k)); const int icomp = 0; ::SetEfieldOnPEC(icomp, domain_lo, domain_hi, iv, n, @@ -523,12 +518,7 @@ PEC::ApplyPECtoEfield ( }, tey, nComp_y, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { -#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) - amrex::ignore_unused(k); -#endif -#if (defined WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); const int icomp = 1; ::SetEfieldOnPEC(icomp, domain_lo, domain_hi, iv, n, @@ -536,12 +526,7 @@ PEC::ApplyPECtoEfield ( }, tez, nComp_z, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { -#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) - amrex::ignore_unused(k); -#endif -#if (defined WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); const int icomp = 2; ::SetEfieldOnPEC(icomp, domain_lo, domain_hi, iv, n, @@ -602,12 +587,7 @@ PEC::ApplyPECtoBfield ( amrex::ParallelFor( tbx, nComp_x, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { -#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) - amrex::ignore_unused(k); -#endif -#if (defined WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); const int icomp = 0; ::SetBfieldOnPEC(icomp, domain_lo, domain_hi, iv, n, @@ -615,12 +595,7 @@ PEC::ApplyPECtoBfield ( }, tby, nComp_y, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { -#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) - amrex::ignore_unused(k); -#endif -#if (defined WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); const int icomp = 1; ::SetBfieldOnPEC(icomp, domain_lo, domain_hi, iv, n, @@ -628,12 +603,7 @@ PEC::ApplyPECtoBfield ( }, tbz, nComp_z, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { -#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) - amrex::ignore_unused(k); -#endif -#if (defined WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); const int icomp = 2; ::SetBfieldOnPEC(icomp, domain_lo, domain_hi, iv, n, @@ -727,11 +697,7 @@ PEC::ApplyReflectiveBoundarytoRhofield ( // Loop over valid cells (i.e. cells inside the domain) amrex::ParallelFor(mfi.validbox(), nComp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { -#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) - amrex::ignore_unused(k); -#elif (defined WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif // Store the array index const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); @@ -859,11 +825,7 @@ PEC::ApplyReflectiveBoundarytoJfield( // Loop over valid cells (i.e. cells inside the domain) amrex::ParallelFor(mfi.validbox(), Jx->nComp(), [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { -#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) - amrex::ignore_unused(k); -#elif (defined WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif // Store the array index const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); @@ -894,11 +856,7 @@ PEC::ApplyReflectiveBoundarytoJfield( // Loop over valid cells (i.e. cells inside the domain) amrex::ParallelFor(mfi.validbox(), Jy->nComp(), [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { -#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) - amrex::ignore_unused(k); -#elif (defined WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif // Store the array index const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); @@ -929,11 +887,7 @@ PEC::ApplyReflectiveBoundarytoJfield( // Loop over valid cells (i.e. 
cells inside the domain) amrex::ParallelFor(mfi.validbox(), Jz->nComp(), [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { -#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) - amrex::ignore_unused(k); -#elif (defined WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif // Store the array index const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); @@ -1000,11 +954,7 @@ PEC::ApplyPECtoElectronPressure ( // Loop over valid cells (i.e. cells inside the domain) amrex::ParallelFor(mfi.validbox(), nComp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { -#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) - amrex::ignore_unused(k); -#elif (defined WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif // Store the array index const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp index 4fe9fc76e10..4a71afda671 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp @@ -48,32 +48,25 @@ using namespace amrex; * \brief Update the B field, over one timestep */ void FiniteDifferenceSolver::EvolveB ( - std::array< std::unique_ptr, 3 >& Bfield, - std::array< std::unique_ptr, 3 > const& Efield, - std::unique_ptr const& Gfield, - std::array< std::unique_ptr, 3 > const& face_areas, - std::array< std::unique_ptr, 3 > const& area_mod, - std::array< std::unique_ptr, 3 >& ECTRhofield, - std::array< std::unique_ptr, 3 >& Venl, - std::array< std::unique_ptr, 3 >& flag_info_cell, - std::array< std::unique_ptr >, 3 >& borrowing, - int lev, amrex::Real const dt ) { - -#if defined(WARPX_DIM_RZ) || !defined(AMREX_USE_EB) - amrex::ignore_unused(area_mod, ECTRhofield, Venl, flag_info_cell, borrowing); -#endif + [[maybe_unused]] std::array< std::unique_ptr, 3 >& Bfield, + [[maybe_unused]] std::array< std::unique_ptr, 3 > const& Efield, + [[maybe_unused]] std::unique_ptr const& Gfield, + [[maybe_unused]] std::array< std::unique_ptr, 3 > const& face_areas, + [[maybe_unused]] std::array< std::unique_ptr, 3 > const& area_mod, + [[maybe_unused]] std::array< std::unique_ptr, 3 >& ECTRhofield, + [[maybe_unused]] std::array< std::unique_ptr, 3 >& Venl, + [[maybe_unused]] std::array< std::unique_ptr, 3 >& flag_info_cell, + [[maybe_unused]] std::array< std::unique_ptr >, 3 >& borrowing, + [[maybe_unused]] int lev, + [[maybe_unused]] amrex::Real const dt ) { // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) #ifdef WARPX_DIM_RZ if ((m_fdtd_algo == ElectromagneticSolverAlgo::Yee)|| (m_fdtd_algo == ElectromagneticSolverAlgo::HybridPIC)){ - ignore_unused(Gfield, face_areas); EvolveBCylindrical ( Bfield, Efield, lev, dt ); #else - if(m_grid_type == GridType::Collocated || m_fdtd_algo != ElectromagneticSolverAlgo::ECT){ - amrex::ignore_unused(face_areas); - } if (m_grid_type == GridType::Collocated) { diff --git a/Source/Particles/Pusher/GetAndSetPosition.H b/Source/Particles/Pusher/GetAndSetPosition.H index 44641557756..b9e7dc91684 100644 --- a/Source/Particles/Pusher/GetAndSetPosition.H +++ b/Source/Particles/Pusher/GetAndSetPosition.H @@ -216,12 +216,7 @@ struct SetParticlePosition AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void operator() (const long i, RType x, RType y, RType z) const noexcept { -#if defined(WARPX_DIM_XZ) - amrex::ignore_unused(y); -#endif -#if defined(WARPX_DIM_1D_Z) - amrex::ignore_unused(x,y); -#endif + amrex::ignore_unused(x,y,z); #ifdef WARPX_DIM_RZ m_theta[i] = 
std::atan2(y, x); m_x[i] = std::sqrt(x*x + y*y); @@ -245,12 +240,7 @@ struct SetParticlePosition AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void AsStored (const long i, RType x, RType y, RType z) const noexcept { -#if defined(WARPX_DIM_XZ) - amrex::ignore_unused(y); -#endif -#if defined(WARPX_DIM_1D_Z) - amrex::ignore_unused(x,y); -#endif + amrex::ignore_unused(x,y,z); #ifdef WARPX_DIM_RZ m_x[i] = x; m_theta[i] = y; From 7f33c8f3a77a65b571c59f94fc75af3ec1e9ec6b Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 9 Sep 2024 20:47:04 -0700 Subject: [PATCH 26/91] EB: Cleanup & Simplify (#5223) * Review Comments by Remi Co-authored-by: Remi Lehe * WarpX Class: Remove `m_eb_enabled` Remove double book-keeping. * Review comment from Edo * Fix compilation errors --------- Co-authored-by: Remi Lehe --- .github/workflows/source/check_inputs.py | 2 +- Source/BoundaryConditions/WarpXEvolvePML.cpp | 20 +++++++------ .../Diagnostics/ReducedDiags/ChargeOnEB.cpp | 2 +- Source/Diagnostics/WarpXIO.cpp | 3 +- Source/Evolve/WarpXEvolve.cpp | 3 +- Source/FieldSolver/ElectrostaticSolver.cpp | 8 ++--- .../MacroscopicEvolveE.cpp | 18 +++++------ .../MagnetostaticSolver.cpp | 3 +- Source/Initialization/WarpXInitData.cpp | 21 ++++++++----- Source/Parallelization/WarpXRegrid.cpp | 6 ++-- Source/WarpX.H | 1 - Source/WarpX.cpp | 30 +++++++++---------- 12 files changed, 64 insertions(+), 53 deletions(-) diff --git a/.github/workflows/source/check_inputs.py b/.github/workflows/source/check_inputs.py index 2012d6ed672..84ea70cba60 100755 --- a/.github/workflows/source/check_inputs.py +++ b/.github/workflows/source/check_inputs.py @@ -27,7 +27,7 @@ # skip lines related to other function arguments # NOTE: update range call to reflect changes # in the interface of 'add_warpx_test' - for _ in range(2): # skip over: dims, numprocs + for _ in range(2): # skip over: dims, nprocs next(f) # strip leading whitespaces, remove end-of-line comments testinput = next(f).lstrip().split(" ")[0] diff --git a/Source/BoundaryConditions/WarpXEvolvePML.cpp b/Source/BoundaryConditions/WarpXEvolvePML.cpp index bbe969052a3..c6a89d80c07 100644 --- a/Source/BoundaryConditions/WarpXEvolvePML.cpp +++ b/Source/BoundaryConditions/WarpXEvolvePML.cpp @@ -11,6 +11,7 @@ #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) # include "BoundaryConditions/PML_RZ.H" #endif +#include "EmbeddedBoundary/Enabled.H" #include "PML_current.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX_PML_kernels.H" @@ -269,15 +270,16 @@ WarpX::DampJPML (int lev, PatchType patch_type) const Real* sigma_star_cumsum_fac_j_z = sigba[mfi].sigma_star_cumsum_fac[1].data(); #endif - amrex::Array4 pml_lxfab, pml_lyfab, pml_lzfab; - if (m_eb_enabled) { + // Skip the field update if this gridpoint is inside the embedded boundary + amrex::Array4 eb_lxfab, eb_lyfab, eb_lzfab; + if (EB::enabled()) { const auto &pml_edge_lenghts = pml[lev]->Get_edge_lengths(); - pml_lxfab = pml_edge_lenghts[0]->array(mfi); - pml_lyfab = pml_edge_lenghts[1]->array(mfi); - pml_lzfab = pml_edge_lenghts[2]->array(mfi); + eb_lxfab = pml_edge_lenghts[0]->array(mfi); + eb_lyfab = pml_edge_lenghts[1]->array(mfi); + eb_lzfab = pml_edge_lenghts[2]->array(mfi); } else { - amrex::ignore_unused(pml_lxfab, pml_lyfab, pml_lzfab); + amrex::ignore_unused(eb_lxfab, eb_lyfab, eb_lzfab); } const Box& tjx = mfi.tilebox( pml_j[0]->ixType().toIntVect() ); @@ -304,21 +306,21 @@ WarpX::DampJPML (int lev, PatchType patch_type) amrex::ParallelFor( tjx, tjy, tjz, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - if (pml_lxfab && 
pml_lxfab(i, j, k) <= 0) { return; } + if (eb_lxfab && eb_lxfab(i, j, k) <= 0) { return; } damp_jx_pml(i, j, k, pml_jxfab, sigma_star_cumsum_fac_j_x, sigma_cumsum_fac_j_y, sigma_cumsum_fac_j_z, xs_lo,y_lo, z_lo); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - if (pml_lyfab && pml_lyfab(i, j, k) <= 0) { return; } + if (eb_lyfab && eb_lyfab(i, j, k) <= 0) { return; } damp_jy_pml(i, j, k, pml_jyfab, sigma_cumsum_fac_j_x, sigma_star_cumsum_fac_j_y, sigma_cumsum_fac_j_z, x_lo,ys_lo, z_lo); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - if (pml_lzfab && pml_lzfab(i, j, k)<=0) { return; } + if (eb_lzfab && eb_lzfab(i, j, k) <= 0) { return; } damp_jz_pml(i, j, k, pml_jzfab, sigma_cumsum_fac_j_x, sigma_cumsum_fac_j_y, sigma_star_cumsum_fac_j_z, diff --git a/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp b/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp index 190822ded9c..01a2b5d8b23 100644 --- a/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp +++ b/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp @@ -95,7 +95,7 @@ void ChargeOnEB::ComputeDiags (const int step) if (!m_intervals.contains(step+1)) { return; } if (!EB::enabled()) { - throw std::runtime_error("ComputeDiags only works when EBs are enabled at runtime"); + throw std::runtime_error("ChargeOnEB::ComputeDiags only works when EBs are enabled at runtime"); } #if ((defined WARPX_DIM_3D) && (defined AMREX_USE_EB)) // get a reference to WarpX instance diff --git a/Source/Diagnostics/WarpXIO.cpp b/Source/Diagnostics/WarpXIO.cpp index 4082ff7b3d5..a3b902386f6 100644 --- a/Source/Diagnostics/WarpXIO.cpp +++ b/Source/Diagnostics/WarpXIO.cpp @@ -11,6 +11,7 @@ #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) # include "BoundaryConditions/PML_RZ.H" #endif +#include "EmbeddedBoundary/Enabled.H" #include "FieldIO.H" #include "Particles/MultiParticleContainer.H" #include "Utils/TextMsg.H" @@ -393,7 +394,7 @@ WarpX::InitFromCheckpoint () } } - if (m_eb_enabled) { InitializeEBGridData(maxLevel()); } + if (EB::enabled()) { InitializeEBGridData(maxLevel()); } // Initialize particles mypc->AllocData(); diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index bc18d0da75d..c668eac2e26 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -13,6 +13,7 @@ #include "BoundaryConditions/PML.H" #include "Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" +#include "EmbeddedBoundary/Enabled.H" #include "Evolve/WarpXDtType.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" #ifdef WARPX_USE_FFT @@ -525,7 +526,7 @@ void WarpX::HandleParticlesAtBoundaries (int step, amrex::Real cur_time, int num } // interact the particles with EB walls (if present) - if (m_eb_enabled) { + if (EB::enabled()) { mypc->ScrapeParticlesAtEB(amrex::GetVecOfConstPtrs(m_distance_to_eb)); m_particle_boundary_buffer->gatherParticlesFromEmbeddedBoundaries( *mypc, amrex::GetVecOfConstPtrs(m_distance_to_eb)); diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp index 80110f5eb18..b341844304a 100644 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolver.cpp @@ -7,6 +7,7 @@ #include "WarpX.H" #include "FieldSolver/ElectrostaticSolver.H" +#include "EmbeddedBoundary/Enabled.H" #include "Fluids/MultiFluidContainer.H" #include "Fluids/WarpXFluidContainer.H" #include "Parallelization/GuardCellManager.H" @@ -289,7 +290,7 @@ WarpX::AddSpaceChargeFieldLabFrame () // Compute the electric field. 
Note that if an EB is used the electric // field will be calculated in the computePhi call. - if (!m_eb_enabled) { computeE( Efield_fp, phi_fp, beta ); } + if (!EB::enabled()) { computeE( Efield_fp, phi_fp, beta ); } else { if (IsPythonCallbackInstalled("poissonsolver")) { computeE(Efield_fp, phi_fp, beta); } } @@ -333,12 +334,11 @@ WarpX::computePhi (const amrex::Vector >& rho, std::optional post_phi_calculation; #ifdef AMREX_USE_EB - // TODO: double check no overhead occurs on "m_eb_enabled == false" std::optional > eb_farray_box_factory; #else std::optional > const eb_farray_box_factory; #endif - if (m_eb_enabled) + if (EB::enabled()) { // EB: use AMReX to directly calculate the electric field since with EB's the // simple finite difference scheme in WarpX::computeE sometimes fails @@ -400,7 +400,7 @@ WarpX::computePhi (const amrex::Vector >& rho, WarpX::grid_type, this->m_poisson_boundary_handler, is_solver_igf_on_lev0, - m_eb_enabled, + EB::enabled(), WarpX::do_single_precision_comms, this->ref_ratio, post_phi_calculation, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp index 1a9b79a8acb..46e4d3efa06 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp @@ -140,14 +140,14 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( Array4 const& jy = Jfield[1]->array(mfi); Array4 const& jz = Jfield[2]->array(mfi); - amrex::Array4 lx, ly, lz; + amrex::Array4 eb_lx, eb_ly, eb_lz; if (EB::enabled()) { - lx = edge_lengths[0]->array(mfi); - ly = edge_lengths[1]->array(mfi); - lz = edge_lengths[2]->array(mfi); + eb_lx = edge_lengths[0]->array(mfi); + eb_ly = edge_lengths[1]->array(mfi); + eb_lz = edge_lengths[2]->array(mfi); } #ifdef WARPX_DIM_XZ - amrex::ignore_unused(ly); + amrex::ignore_unused(eb_ly); #endif // material prop // @@ -180,7 +180,7 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( amrex::ParallelFor(tex, tey, tez, [=] AMREX_GPU_DEVICE (int i, int j, int k){ // Skip field push if this cell is fully covered by embedded boundaries - if (lx && lx(i, j, k) <= 0) { return; } + if (eb_lx && eb_lx(i, j, k) <= 0) { return; } // Interpolate conductivity, sigma, to Ex position on the grid amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, @@ -198,10 +198,10 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( [=] AMREX_GPU_DEVICE (int i, int j, int k){ #ifdef WARPX_DIM_3D - if (ly && ly(i,j,k) <= 0) { return; } + if (eb_ly && eb_ly(i,j,k) <= 0) { return; } #elif defined(WARPX_DIM_XZ) //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered - if (lx && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j, k)<=0 || lz(i, j-1, k)<=0)) { return; } + if (eb_lx && (eb_lx(i, j, k)<=0 || eb_lx(i-1, j, k)<=0 || eb_lz(i, j, k)<=0 || eb_lz(i, j-1, k)<=0)) { return; } #endif // Interpolate conductivity, sigma, to Ey position on the grid @@ -221,7 +221,7 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( [=] AMREX_GPU_DEVICE (int i, int j, int k){ // Skip field push if this cell is fully covered by embedded boundaries - if (lz && lz(i,j,k) <= 0) { return; } + if (eb_lz && eb_lz(i, j, k) <= 0) { return; } // Interpolate conductivity, sigma, to Ez position on the grid amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, diff --git 
a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp index d715e64cdaa..305ccf02eb3 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp @@ -7,6 +7,7 @@ #include "WarpX.H" #include "FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H" +#include "EmbeddedBoundary/Enabled.H" #include "Parallelization/GuardCellManager.H" #include "Particles/MultiParticleContainer.H" #include "Particles/WarpXParticleContainer.H" @@ -184,7 +185,7 @@ WarpX::computeVectorPotential (const amrex::Vectordmap, this->grids, this->m_vector_poisson_boundary_handler, - m_eb_enabled, + EB::enabled(), WarpX::do_single_precision_comms, this->ref_ratio, post_A_calculation, diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 5e5ebb19921..de763831d98 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -16,6 +16,7 @@ #endif #include "Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" +#include "EmbeddedBoundary/Enabled.H" #include "FieldSolver/Fields.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" @@ -702,6 +703,7 @@ WarpX::InitPML () if (finest_level > 0) { do_pml = 1; } if (do_pml) { + bool const eb_enabled = EB::enabled(); #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) do_pml_Lo[0][0] = 0; // no PML at r=0, in cylindrical geometry pml_rz[0] = std::make_unique(0, boxArray(0), DistributionMap(0), &Geom(0), pml_ncell, do_pml_in_domain); @@ -717,7 +719,7 @@ WarpX::InitPML () psatd_solution_type, J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, amrex::IntVect(0), amrex::IntVect(0), - m_eb_enabled, + eb_enabled, guard_cells.ng_FieldSolver.max(), v_particle_pml, do_pml_Lo[0], do_pml_Hi[0]); @@ -757,7 +759,7 @@ WarpX::InitPML () do_moving_window, pml_has_particles, do_pml_in_domain, psatd_solution_type, J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, amrex::IntVect(0), amrex::IntVect(0), - m_eb_enabled, + eb_enabled, guard_cells.ng_FieldSolver.max(), v_particle_pml, do_pml_Lo[lev], do_pml_Hi[lev]); @@ -936,7 +938,8 @@ WarpX::InitLevelData (int lev, Real /*time*/) } #ifdef AMREX_USE_EB - if (m_eb_enabled) { InitializeEBGridData(lev); } + bool const eb_enabled = EB::enabled(); + if (eb_enabled) { InitializeEBGridData(lev); } #endif // if the input string for the B-field is "parse_b_ext_grid_function", @@ -981,7 +984,7 @@ WarpX::InitLevelData (int lev, Real /*time*/) && (lev <= maxlevel_extEMfield_init)) { #ifdef AMREX_USE_EB - if (m_eb_enabled) { + if (eb_enabled) { // We initialize ECTRhofield consistently with the Efield if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { m_fdtd_solver_fp[lev]->EvolveECTRho( @@ -1016,7 +1019,7 @@ WarpX::InitLevelData (int lev, Real /*time*/) 'E', lev, PatchType::coarse); #ifdef AMREX_USE_EB - if (m_eb_enabled) { + if (eb_enabled) { if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { // We initialize ECTRhofield consistently with the Efield m_fdtd_solver_cp[lev]->EvolveECTRho(Efield_cp[lev], m_edge_lengths[lev], @@ -1063,6 +1066,8 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); const amrex::IntVect z_nodal_flag = 
mfz->ixType().toIntVect(); + bool const eb_enabled = EB::enabled(); + for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) { const amrex::Box &tbx = mfi.tilebox(x_nodal_flag, mfx->nGrowVect()); const amrex::Box &tby = mfi.tilebox(y_nodal_flag, mfy->nGrowVect()); @@ -1073,7 +1078,7 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( auto const &mfzfab = mfz->array(mfi); amrex::Array4 lx, ly, lz, Sx, Sy, Sz; - if (m_eb_enabled) { + if (eb_enabled) { lx = edge_lengths[0]->array(mfi); ly = edge_lengths[1]->array(mfi); lz = edge_lengths[2]->array(mfi); @@ -1085,7 +1090,7 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) amrex::Dim3 lx_lo, lx_hi, lz_lo, lz_hi; #endif - if (m_eb_enabled) { + if (eb_enabled) { #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) lx_lo = amrex::lbound(lx); lx_hi = amrex::ubound(lx); @@ -1271,7 +1276,7 @@ void WarpX::InitializeEBGridData (int lev) if (lev == maxLevel()) { // Throw a warning if EB is on and particle_shape > 1 - if ((nox > 1 or noy > 1 or noz > 1) and m_eb_enabled) + if ((nox > 1 or noy > 1 or noz > 1) and EB::enabled()) { ablastr::warn_manager::WMRecordWarning("Particles", "when algo.particle_shape > 1, numerical artifacts will be present when\n" diff --git a/Source/Parallelization/WarpXRegrid.cpp b/Source/Parallelization/WarpXRegrid.cpp index 112db68f488..0a3ab8d2099 100644 --- a/Source/Parallelization/WarpXRegrid.cpp +++ b/Source/Parallelization/WarpXRegrid.cpp @@ -10,6 +10,7 @@ #include "Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" +#include "EmbeddedBoundary/Enabled.H" #include "EmbeddedBoundary/WarpXFaceInfoBox.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" #include "Initialization/ExternalField.H" @@ -177,6 +178,7 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi mf = std::move(pmf); }; + bool const eb_enabled = EB::enabled(); if (ba == boxArray(lev)) { if (ParallelDescriptor::NProcs() == 1) { return; } @@ -215,7 +217,7 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi RemakeMultiFab(m_hybrid_pic_model->current_fp_ampere[lev][idim], false); RemakeMultiFab(m_hybrid_pic_model->current_fp_external[lev][idim],true); } - if (m_eb_enabled) { + if (eb_enabled) { if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { RemakeMultiFab(m_edge_lengths[lev][idim], false); RemakeMultiFab(m_face_areas[lev][idim], false); @@ -242,7 +244,7 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi RemakeMultiFab(m_hybrid_pic_model->electron_pressure_fp[lev], false); } - if (m_eb_enabled) { + if (eb_enabled) { RemakeMultiFab(m_distance_to_eb[lev], false); #ifdef AMREX_USE_EB diff --git a/Source/WarpX.H b/Source/WarpX.H index 903e97549dd..c27807b4982 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -998,7 +998,6 @@ public: [[nodiscard]] amrex::IntVect get_numprocs() const {return numprocs;} /** Enable embedded boundaries */ - bool m_eb_enabled = false; bool m_boundary_potential_specified = false; ElectrostaticSolver::PoissonBoundaryHandler m_poisson_boundary_handler; void ComputeSpaceChargeField (bool reset_fields); diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index c55eedb87a5..a705735a541 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -260,7 +260,7 @@ WarpX::WarpX () BackwardCompatibility(); - if (m_eb_enabled) { InitEB(); } + if (EB::enabled()) { InitEB(); } 
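All of the call sites in this patch follow the same pattern: the removed data member WarpX::m_eb_enabled is replaced by direct calls to EB::enabled(), which eliminates the double bookkeeping named in the commit message. The contents of EmbeddedBoundary/Enabled.H are not shown in this diff, so the following is only a sketch of how such a memoized query can be written; the input name warpx.eb_enabled is taken from the assertion message above, and everything else is illustrative:

#include <AMReX_ParmParse.H>

namespace EB
{
    // Sketch only, not the actual contents of EmbeddedBoundary/Enabled.H:
    // a function-local static runs the ParmParse lookup once and caches the
    // answer, so no class member has to be kept in sync with the input file.
    bool enabled ()
    {
        static bool const is_enabled = [] {
            bool eb = false;                   // default: embedded boundaries off
            amrex::ParmParse pp_warpx("warpx");
            pp_warpx.query("eb_enabled", eb);  // runtime flag named in the assertion above
            return eb;
        }();
        return is_enabled;
    }
}

Because the flag is resolved lazily at first use, call sites such as the constructor's InitEB() guard above always agree with later queries, with no opportunity for the cached copy and the input file to drift apart.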
ablastr::utils::SignalHandling::InitSignalHandling(); @@ -788,10 +788,10 @@ WarpX::ReadParameters () "The FFT Poisson solver is not implemented in labframe-electromagnetostatic mode yet." ); - m_eb_enabled = EB::enabled(); + bool const eb_enabled = EB::enabled(); #if !defined(AMREX_USE_EB) WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - !m_eb_enabled, + !eb_enabled, "Embedded boundaries are requested via warpx.eb_enabled but were not compiled!" ); #endif @@ -806,7 +806,7 @@ WarpX::ReadParameters () potential_specified |= pp_boundary.query("potential_hi_y", m_poisson_boundary_handler.potential_yhi_str); potential_specified |= pp_boundary.query("potential_lo_z", m_poisson_boundary_handler.potential_zlo_str); potential_specified |= pp_boundary.query("potential_hi_z", m_poisson_boundary_handler.potential_zhi_str); - if (m_eb_enabled) { + if (eb_enabled) { potential_specified |= pp_warpx.query("eb_potential(x,y,z,t)", m_poisson_boundary_handler.potential_eb_str); } m_boundary_potential_specified = potential_specified; @@ -2208,18 +2208,18 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d use_filter, bilinear_filter.stencil_length_each_dir); - - - if (m_eb_enabled) { #ifdef AMREX_USE_EB - int const max_guard = guard_cells.ng_FieldSolver.max(); - m_field_factory[lev] = amrex::makeEBFabFactory(Geom(lev), ba, dm, - {max_guard, max_guard, max_guard}, - amrex::EBSupport::full); + bool const eb_enabled = EB::enabled(); + if (eb_enabled) { + int const max_guard = guard_cells.ng_FieldSolver.max(); + m_field_factory[lev] = amrex::makeEBFabFactory(Geom(lev), ba, dm, + {max_guard, max_guard, max_guard}, + amrex::EBSupport::full); + } else #endif - } else { - m_field_factory[lev] = std::make_unique(); - } + { + m_field_factory[lev] = std::make_unique(); + } if (mypc->nSpeciesDepositOnMainGrid() && n_current_deposition_buffer == 0) { @@ -2438,7 +2438,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm AllocInitMultiFab(Efield_avg_fp[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_fp[z]", 0.0_rt); } - if (m_eb_enabled) { + if (EB::enabled()) { constexpr int nc_ls = 1; amrex::IntVect const ng_ls(2); AllocInitMultiFab(m_distance_to_eb[lev], amrex::convert(ba, IntVect::TheNodeVector()), dm, nc_ls, ng_ls, lev, From 78ff8bf92b9cfebbca19ffdb1202cd04f7703b35 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 9 Sep 2024 21:49:22 -0700 Subject: [PATCH 27/91] Docs: fix broken links in Examples section (#5239) * Docs: fix broken links in Examples section * Fix a few more paths --------- Co-authored-by: Remi Lehe --- Docs/source/developers/particles.rst | 2 +- Docs/source/usage/examples.rst | 8 ++-- .../source/usage/examples/beam-beam_collision | 1 - .../source/usage/examples/beam_beam_collision | 1 + .../source/usage/examples/ohm_solver_EM_modes | 1 - .../source/usage/examples/ohm_solver_em_modes | 1 + Docs/source/usage/pwfa.rst | 2 +- .../{README => README.rst} | 4 +- .../{README => README.rst} | 8 ++-- .../capacitive_discharge/analysis_2d.py | 2 +- .../laser_acceleration/{README => README.rst} | 24 +++++----- .../laser_ion/{README => README.rst} | 12 ++--- .../{README => README.rst} | 16 +++---- .../plasma_mirror/{README => README.rst} | 4 +- .../spacecraft_charging/analysis.py | 2 +- .../uniform_plasma/{README => README.rst} | 16 +++---- .../inputs_test_2d_collision_xz_picmi.py | 4 +- .../gaussian_beam/{README => README.rst} | 8 ++-- .../Tests/langmuir/{README => README.rst} | 44 
+++++++++---------- ..._test_3d_load_external_field_grid_picmi.py | 2 +- ...t_3d_load_external_field_particle_picmi.py | 2 +- .../{README => README.rst} | 18 ++++---- .../{README => README.rst} | 8 ++-- .../{README => README.rst} | 10 ++--- .../{README => README.rst} | 8 ++-- .../particle_boundary_interaction/analysis.py | 2 +- .../Tests/pass_mpi_communicator/analysis.py | 2 +- .../inputs_test_2d_pass_mpi_comm_picmi.py | 4 +- .../Tests/point_of_contact_eb/analysis.py | 2 +- 29 files changed, 109 insertions(+), 109 deletions(-) delete mode 120000 Docs/source/usage/examples/beam-beam_collision create mode 120000 Docs/source/usage/examples/beam_beam_collision delete mode 120000 Docs/source/usage/examples/ohm_solver_EM_modes create mode 120000 Docs/source/usage/examples/ohm_solver_em_modes rename Examples/Physics_applications/beam_beam_collision/{README => README.rst} (97%) rename Examples/Physics_applications/capacitive_discharge/{README => README.rst} (91%) rename Examples/Physics_applications/laser_acceleration/{README => README.rst} (79%) rename Examples/Physics_applications/laser_ion/{README => README.rst} (93%) rename Examples/Physics_applications/plasma_acceleration/{README => README.rst} (76%) rename Examples/Physics_applications/plasma_mirror/{README => README.rst} (92%) rename Examples/Physics_applications/uniform_plasma/{README => README.rst} (72%) rename Examples/Tests/gaussian_beam/{README => README.rst} (75%) rename Examples/Tests/langmuir/{README => README.rst} (74%) rename Examples/Tests/ohm_solver_em_modes/{README => README.rst} (85%) rename Examples/Tests/ohm_solver_ion_Landau_damping/{README => README.rst} (84%) rename Examples/Tests/ohm_solver_ion_beam_instability/{README => README.rst} (86%) rename Examples/Tests/ohm_solver_magnetic_reconnection/{README => README.rst} (80%) diff --git a/Docs/source/developers/particles.rst b/Docs/source/developers/particles.rst index 53a0090b5c9..37260d1ed64 100644 --- a/Docs/source/developers/particles.rst +++ b/Docs/source/developers/particles.rst @@ -160,7 +160,7 @@ Attribute name ``int``/``real`` Description Default when they were created. ================== ================ ================================= ============== -A Python example that adds runtime options can be found in :download:`Examples/Tests/particle_data_python <../../../Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py>` +A Python example that adds runtime options can be found in :download:`Examples/Tests/particle_data_python <../../../Examples/Tests/particle_data_python/inputs_test_2d_prev_positions_picmi.py>` .. note:: diff --git a/Docs/source/usage/examples.rst b/Docs/source/usage/examples.rst index 0492372b4e6..244fbda6f75 100644 --- a/Docs/source/usage/examples.rst +++ b/Docs/source/usage/examples.rst @@ -43,7 +43,7 @@ Particle Accelerator & Beam Physics :maxdepth: 1 examples/gaussian_beam/README.rst - examples/beam-beam_collision/README.rst + examples/beam_beam_collision/README.rst High Energy Astrophysical Plasma Physics @@ -101,7 +101,7 @@ examples below were generated at that time. .. 
toctree:: :maxdepth: 1 - examples/ohm_solver_EM_modes/README.rst + examples/ohm_solver_em_modes/README.rst examples/ohm_solver_ion_beam_instability/README.rst examples/ohm_solver_ion_Landau_damping/README.rst @@ -127,11 +127,11 @@ Manipulating fields via Python An example of using Python to access the simulation charge density, solve the Poisson equation (using ``superLU``) and write the resulting electrostatic potential back to the simulation is given in the input file below. This example uses the ``fields.py`` module included in the ``pywarpx`` library. -* :download:`Direct Poisson solver example <../../../Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py>` +* :download:`Direct Poisson solver example <../../../Examples/Physics_applications/capacitive_discharge/inputs_test_2d_background_mcc_picmi.py>` An example of initializing the fields by accessing their data through Python, advancing the simulation for a chosen number of time steps, and plotting the fields again through Python. The simulation runs with 128 regular cells, 8 guard cells, and 10 PML cells, in each direction. Moreover, it uses div(E) and div(B) cleaning both in the regular grid and in the PML and initializes all available electromagnetic fields (E,B,F,G) identically. -* :download:`Unit pulse with PML <../../../Examples/Tests/python_wrappers/PICMI_inputs_2d.py>` +* :download:`Unit pulse with PML <../../../Examples/Tests/python_wrappers/inputs_test_2d_python_wrappers_picmi.py>` Many Further Examples, Demos and Tests diff --git a/Docs/source/usage/examples/beam-beam_collision b/Docs/source/usage/examples/beam-beam_collision deleted file mode 120000 index 8c6ac6b30b1..00000000000 --- a/Docs/source/usage/examples/beam-beam_collision +++ /dev/null @@ -1 +0,0 @@ -../../../../Examples/Physics_applications/beam-beam_collision \ No newline at end of file diff --git a/Docs/source/usage/examples/beam_beam_collision b/Docs/source/usage/examples/beam_beam_collision new file mode 120000 index 00000000000..2f46224fd8b --- /dev/null +++ b/Docs/source/usage/examples/beam_beam_collision @@ -0,0 +1 @@ +../../../../Examples/Physics_applications/beam_beam_collision/ \ No newline at end of file diff --git a/Docs/source/usage/examples/ohm_solver_EM_modes b/Docs/source/usage/examples/ohm_solver_EM_modes deleted file mode 120000 index 485be7241ae..00000000000 --- a/Docs/source/usage/examples/ohm_solver_EM_modes +++ /dev/null @@ -1 +0,0 @@ -../../../../Examples/Tests/ohm_solver_EM_modes \ No newline at end of file diff --git a/Docs/source/usage/examples/ohm_solver_em_modes b/Docs/source/usage/examples/ohm_solver_em_modes new file mode 120000 index 00000000000..03214170a1f --- /dev/null +++ b/Docs/source/usage/examples/ohm_solver_em_modes @@ -0,0 +1 @@ +../../../../Examples/Tests/ohm_solver_em_modes/ \ No newline at end of file diff --git a/Docs/source/usage/pwfa.rst b/Docs/source/usage/pwfa.rst index 5119184089c..1d3481b5589 100644 --- a/Docs/source/usage/pwfa.rst +++ b/Docs/source/usage/pwfa.rst @@ -5,7 +5,7 @@ In-Depth: PWFA As described in the :doc:`../theory/intro`, one of the key applications of the WarpX exascale computing platform is in modelling future, compact and economic plasma-based accelerators. In this section we describe the simulation setup of a realistic *electron beam driven plasma wakefield accelerator* (PWFA) configuration. 
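Looping back to the ``Manipulating fields via Python`` entry of ``examples.rst`` above: the read-solve-write round trip it describes through the ``fields.py`` module of ``pywarpx`` can be sketched in a few lines. The wrapper names below follow that module; ``solve_poisson`` is a hypothetical stand-in for the superLU-based solver of the linked script, not a pywarpx API, and a running, already-initialized simulation is assumed:

import numpy as np
from pywarpx import fields

def solve_poisson(rho):
    # Placeholder: the linked example assembles a sparse Laplacian and factors
    # it with superLU; returning zeros just keeps this sketch self-contained.
    return np.zeros_like(rho)

# Gather the charge density, solve, and write the potential back in place.
rho = fields.RhoFPWrapper()[...]
fields.PhiFPWrapper()[...] = solve_poisson(rho)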
-For illustration purposes the setup can be explored with **WarpX** using the example input file :download:`PWFA <../../../Examples/Physics_applications/plasma_acceleration/inputs_2d_boost>`. +For illustration purposes the setup can be explored with **WarpX** using the example input file :download:`PWFA <../../../Examples/Physics_applications/plasma_acceleration/inputs_test_2d_plasma_acceleration_boosted>`. The simulation setup consists of 4 particle species: drive beam (driver), witness beam (beam), plasma electrons (plasma_e), and plasma ions (plasma_p). The species physical parameters are summarized in the following table. diff --git a/Examples/Physics_applications/beam_beam_collision/README b/Examples/Physics_applications/beam_beam_collision/README.rst similarity index 97% rename from Examples/Physics_applications/beam_beam_collision/README rename to Examples/Physics_applications/beam_beam_collision/README.rst index 559a81277db..4f89365c8f0 100644 --- a/Examples/Physics_applications/beam_beam_collision/README +++ b/Examples/Physics_applications/beam_beam_collision/README.rst @@ -24,9 +24,9 @@ The PICMI input file is not available for this example yet. For `MPI-parallel `__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system. -.. literalinclude:: inputs +.. literalinclude:: inputs_test_3d_beam_beam_collision :language: ini - :caption: You can copy this file from ``Examples/Physics_applications/beam-beam_collision/inputs``. + :caption: You can copy this file from ``Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision``. Visualize diff --git a/Examples/Physics_applications/capacitive_discharge/README b/Examples/Physics_applications/capacitive_discharge/README.rst similarity index 91% rename from Examples/Physics_applications/capacitive_discharge/README rename to Examples/Physics_applications/capacitive_discharge/README.rst index 708b4528cd5..13b6b3010b3 100644 --- a/Examples/Physics_applications/capacitive_discharge/README +++ b/Examples/Physics_applications/capacitive_discharge/README.rst @@ -22,17 +22,17 @@ The implementation has been tested against the benchmark results from :cite:t:`e Run --- -The 1D PICMI input file can be used to reproduce the results from Turner et al. for a given case, ``N`` from 1 to 4, by executing ``python3 PICMI_inputs_1d.py -n N``, e.g., +The 1D PICMI input file can be used to reproduce the results from Turner et al. for a given case, ``N`` from 1 to 4, by executing ``python3 inputs_base_1d_picmi.py -n N``, e.g., .. code-block:: bash - python3 PICMI_inputs_1d.py -n 1 + python3 inputs_base_1d_picmi.py -n 1 For `MPI-parallel `__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system. -.. literalinclude:: PICMI_inputs_1d.py +.. literalinclude:: inputs_base_1d_picmi.py :language: python3 - :caption: You can copy this file from ``Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py``. 
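Because the converted README above parameterizes the Turner et al. benchmark by a case number ``N`` from 1 to 4, all four cases can be driven back to back from the shell. A minimal sketch using the renamed script:

.. code-block:: bash

   # Run the four Turner et al. benchmark cases in sequence; the rank count
   # follows the README's MPI note and can be adjusted to the machine at hand.
   for N in 1 2 3 4; do
       mpiexec -n 4 python3 inputs_base_1d_picmi.py -n ${N}
   done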
Analyze diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_2d.py b/Examples/Physics_applications/capacitive_discharge/analysis_2d.py index e9782fabe23..f52f69f4bf4 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_2d.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_2d.py @@ -2,7 +2,7 @@ # Copyright 2021 Modern Electron -# This script checks that the PICMI_inputs_2d.py run more-or-less matches the +# This script checks that the inputs_test_2d_background_mcc_picmi.py run more-or-less matches the # results from the non-PICMI run. The PICMI run is using an external Poisson # solver that directly solves the Poisson equation using matrix inversion # rather than the iterative approach from the MLMG solver. diff --git a/Examples/Physics_applications/laser_acceleration/README b/Examples/Physics_applications/laser_acceleration/README.rst similarity index 79% rename from Examples/Physics_applications/laser_acceleration/README rename to Examples/Physics_applications/laser_acceleration/README.rst index 0ba3b5382f2..66de28de7a7 100644 --- a/Examples/Physics_applications/laser_acceleration/README +++ b/Examples/Physics_applications/laser_acceleration/README.rst @@ -24,43 +24,43 @@ For `MPI-parallel `__ runs, prefix these lines with ` This example can be run **either** as: - * **Python** script: ``python3 PICMI_inputs_3d.py`` or - * WarpX **executable** using an input file: ``warpx.3d inputs_3d max_step=400`` + * **Python** script: ``python3 inputs_test_3d_laser_acceleration_picmi.py`` or + * WarpX **executable** using an input file: ``warpx.3d inputs_test_3d_laser_acceleration max_step=400`` .. tab-set:: .. tab-item:: Python: Script - .. literalinclude:: PICMI_inputs_3d.py + .. literalinclude:: inputs_test_3d_laser_acceleration_picmi.py :language: python3 - :caption: You can copy this file from ``Examples/Physics_applications/laser_acceleration/PICMI_inputs_3d.py``. + :caption: You can copy this file from ``Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_picmi.py``. .. tab-item:: Executable: Input File - .. literalinclude:: inputs_3d + .. literalinclude:: inputs_test_3d_laser_acceleration :language: ini - :caption: You can copy this file from ``Examples/Physics_applications/laser_acceleration/inputs_3d``. + :caption: You can copy this file from ``Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration``. .. tab-item:: RZ This example can be run **either** as: - * **Python** script: ``python3 PICMI_inputs_rz.py`` or - * WarpX **executable** using an input file: ``warpx.rz inputs_3d max_step=400`` + * **Python** script: ``python3 inputs_test_rz_laser_acceleration_picmi.py`` or + * WarpX **executable** using an input file: ``warpx.rz inputs_test_rz_laser_acceleration max_step=400`` .. tab-set:: .. tab-item:: Python: Script - .. literalinclude:: PICMI_inputs_rz.py + .. literalinclude:: inputs_test_rz_laser_acceleration_picmi.py :language: python3 - :caption: You can copy this file from ``Examples/Physics_applications/laser_acceleration/PICMI_inputs_rz.py``. + :caption: You can copy this file from ``Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration_picmi.py``. .. tab-item:: Executable: Input File - .. literalinclude:: inputs_rz + .. literalinclude:: inputs_test_rz_laser_acceleration :language: ini - :caption: You can copy this file from ``Examples/Physics_applications/laser_acceleration/inputs_rz``. 
+ :caption: You can copy this file from ``Examples/Physics_applications/laser_acceleration/inputs_test_rz_laser_acceleration``. Analyze ------- diff --git a/Examples/Physics_applications/laser_ion/README b/Examples/Physics_applications/laser_ion/README.rst similarity index 93% rename from Examples/Physics_applications/laser_ion/README rename to Examples/Physics_applications/laser_ion/README.rst index 29862a30518..e55cf6889d4 100644 --- a/Examples/Physics_applications/laser_ion/README +++ b/Examples/Physics_applications/laser_ion/README.rst @@ -21,8 +21,8 @@ Run This example can be run **either** as: -* **Python** script: ``mpiexec -n 2 python3 PICMI_inputs_2d.py`` or -* WarpX **executable** using an input file: ``mpiexec -n 2 warpx.2d inputs_2d`` +* **Python** script: ``mpiexec -n 2 python3 inputs_test_2d_laser_ion_acc_picmi.py`` or +* WarpX **executable** using an input file: ``mpiexec -n 2 warpx.2d inputs_test_2d_laser_ion_acc`` .. tip:: @@ -35,16 +35,16 @@ This example can be run **either** as: .. tab-item:: Python: Script - .. literalinclude:: PICMI_inputs_2d.py + .. literalinclude:: inputs_test_2d_laser_ion_acc_picmi.py :language: python3 - :caption: You can copy this file from ``Examples/Physics_applications/laser_ion/PICMI_inputs_2d.py``. + :caption: You can copy this file from ``Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py``. .. tab-item:: Executable: Input File - .. literalinclude:: inputs_2d + .. literalinclude:: inputs_test_2d_laser_ion_acc :language: ini - :caption: You can copy this file from ``Examples/Physics_applications/laser_ion/inputs_2d``. + :caption: You can copy this file from ``Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc``. Analyze ------- diff --git a/Examples/Physics_applications/plasma_acceleration/README b/Examples/Physics_applications/plasma_acceleration/README.rst similarity index 76% rename from Examples/Physics_applications/plasma_acceleration/README rename to Examples/Physics_applications/plasma_acceleration/README.rst index d5775e93aa8..0c1b1819cab 100644 --- a/Examples/Physics_applications/plasma_acceleration/README +++ b/Examples/Physics_applications/plasma_acceleration/README.rst @@ -16,7 +16,7 @@ In the Beam, Plasma & Accelerator Simulation Toolkit (BLAST), `HiPACE++ `__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system. @@ -35,17 +35,17 @@ For `MPI-parallel `__ runs, prefix these lines with ` .. note:: - TODO: This input file should use the boosted frame method, like the ``inputs_3d_boost`` file. + TODO: This input file should use the boosted frame method, like the ``inputs_test_3d_plasma_acceleration_boosted`` file. - .. literalinclude:: PICMI_inputs_plasma_acceleration.py + .. literalinclude:: inputs_test_3d_plasma_acceleration_picmi.py :language: python3 - :caption: You can copy this file from ``Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration.py``. + :caption: You can copy this file from ``Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_picmi.py``. .. tab-item:: Executable: Input File - .. literalinclude:: inputs_3d_boost + .. literalinclude:: inputs_test_3d_plasma_acceleration_boosted :language: ini - :caption: You can copy this file from ``Examples/Physics_applications/plasma_acceleration/inputs_3d_boost``. + :caption: You can copy this file from ``Examples/Physics_applications/plasma_acceleration/inputs_test_3d_plasma_acceleration_boosted``. 
Analyze
-------
diff --git a/Examples/Physics_applications/plasma_mirror/README b/Examples/Physics_applications/plasma_mirror/README.rst
similarity index 92%
rename from Examples/Physics_applications/plasma_mirror/README
rename to Examples/Physics_applications/plasma_mirror/README.rst
index 8741db09699..56f1da7db46 100644
--- a/Examples/Physics_applications/plasma_mirror/README
+++ b/Examples/Physics_applications/plasma_mirror/README.rst
@@ -33,9 +33,9 @@ For `MPI-parallel `__ runs, prefix these lines with `

    .. tab-item:: Executable: Input File

-      .. literalinclude:: inputs_2d
+      .. literalinclude:: inputs_test_2d_plasma_mirror
         :language: ini
-         :caption: You can copy this file from ``Examples/Physics_applications/plasma_mirror/inputs_2d``.
+         :caption: You can copy this file from ``Examples/Physics_applications/plasma_mirror/inputs_test_2d_plasma_mirror``.


 Analyze
diff --git a/Examples/Physics_applications/spacecraft_charging/analysis.py b/Examples/Physics_applications/spacecraft_charging/analysis.py
index 6528a3bde65..8819c435fb7 100755
--- a/Examples/Physics_applications/spacecraft_charging/analysis.py
+++ b/Examples/Physics_applications/spacecraft_charging/analysis.py
@@ -6,7 +6,7 @@
 thermal plasma. The potential on the spacecraft decreases over the time to
 reach an equilibrium floating potential.

-An input Python file PICMI_inputs_rz.py is used.
+An input Python file inputs_test_rz_spacecraft_charging_picmi.py is used.

 The test will check the curve fitting parameters v0 and tau defined by the
 following exponential function: phi(t)=v0(1-exp(-t/tau))
diff --git a/Examples/Physics_applications/uniform_plasma/README b/Examples/Physics_applications/uniform_plasma/README.rst
similarity index 72%
rename from Examples/Physics_applications/uniform_plasma/README
rename to Examples/Physics_applications/uniform_plasma/README.rst
index 50d132712c6..04a0fb4555c 100644
--- a/Examples/Physics_applications/uniform_plasma/README
+++ b/Examples/Physics_applications/uniform_plasma/README.rst
@@ -21,15 +21,15 @@ For `MPI-parallel `__ runs, prefix these lines with `

       .. note::

-         TODO: This input file should be created following the ``inputs_3d`` file.
+         TODO: This input file should be created following the ``inputs_test_3d_uniform_plasma`` file.

    .. tab-item:: Executable: Input File

-      This example can be run **either** as WarpX **executable** using an input file: ``warpx.3d inputs_3d``
+      This example can be run **either** as WarpX **executable** using an input file: ``warpx.3d inputs_test_3d_uniform_plasma``

-      .. literalinclude:: inputs_3d
+      .. literalinclude:: inputs_test_3d_uniform_plasma
         :language: ini
-         :caption: You can copy this file from ``usage/examples/lwfa/inputs_3d``.
+         :caption: You can copy this file from ``Examples/Physics_applications/uniform_plasma/inputs_test_3d_uniform_plasma``.

    .. tab-item:: 2D

@@ -39,15 +39,15 @@ For `MPI-parallel `__ runs, prefix these lines with `

       .. note::

-         TODO: This input file should be created following the ``inputs_2d`` file.
+         TODO: This input file should be created following the ``inputs_test_2d_uniform_plasma`` file.

    .. tab-item:: Executable: Input File

-      This example can be run **either** as WarpX **executable** using an input file: ``warpx.2d inputs_2d``
+      This example can be run **either** as WarpX **executable** using an input file: ``warpx.2d inputs_test_2d_uniform_plasma``

-      .. literalinclude:: inputs_2d
+      .. literalinclude:: inputs_test_2d_uniform_plasma
         :language: ini
-         :caption: You can copy this file from ``usage/examples/lwfa/inputs_2d``.
+         :caption: You can copy this file from ``Examples/Physics_applications/uniform_plasma/inputs_test_2d_uniform_plasma``.

 Analyze
 -------
diff --git a/Examples/Tests/collision/inputs_test_2d_collision_xz_picmi.py b/Examples/Tests/collision/inputs_test_2d_collision_xz_picmi.py
index f1b3e8d3b28..7bc83a1e801 100755
--- a/Examples/Tests/collision/inputs_test_2d_collision_xz_picmi.py
+++ b/Examples/Tests/collision/inputs_test_2d_collision_xz_picmi.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 #
 # --- Input file for binary Coulomb collision testing. This input script
-# --- runs the same test as inputs_2d but via Python, therefore the input
+# --- runs the same test as inputs_test_2d_collision_xz but via Python, therefore the input
 # --- values where directly copied from inputs_2d.

 from pywarpx import picmi
@@ -142,5 +142,5 @@
 ##### SIMULATION EXECUTION ######
 #################################

-# sim.write_input_file('PICMI_inputs_2d')
+# sim.write_input_file('inputs_test_2d_collision_xz')
 sim.step(max_steps)
diff --git a/Examples/Tests/gaussian_beam/README b/Examples/Tests/gaussian_beam/README.rst
similarity index 75%
rename from Examples/Tests/gaussian_beam/README
rename to Examples/Tests/gaussian_beam/README.rst
index bfca2bb2398..e3baf9842c2 100644
--- a/Examples/Tests/gaussian_beam/README
+++ b/Examples/Tests/gaussian_beam/README.rst
@@ -11,7 +11,7 @@ Run

 This example can be run **either** as:

-* **Python** script: ``python3 PICMI_inputs_gaussian_beam.py`` or
+* **Python** script: ``python3 inputs_test_3d_gaussian_beam_picmi.py`` or
 * WarpX **executable** using an input file: (*TODO*)

 For `MPI-parallel `__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system.

@@ -20,15 +20,15 @@ For `MPI-parallel `__ runs, prefix these lines with `

    .. tab-item:: Python: Script

-      .. literalinclude:: PICMI_inputs_gaussian_beam.py
+      .. literalinclude:: inputs_test_3d_gaussian_beam_picmi.py
         :language: python3
-         :caption: You can copy this file from ``Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py``.
+         :caption: You can copy this file from ``Examples/Tests/gaussian_beam/inputs_test_3d_gaussian_beam_picmi.py``.

    .. tab-item:: Executable: Input File

       .. note::

-         TODO: This input file should be created following the ``PICMI_inputs_gaussian_beam.py`` file.
+         TODO: This input file should be created following the ``inputs_test_3d_gaussian_beam_picmi.py`` file.


 Analyze
diff --git a/Examples/Tests/langmuir/README b/Examples/Tests/langmuir/README.rst
similarity index 74%
rename from Examples/Tests/langmuir/README
rename to Examples/Tests/langmuir/README.rst
index 60c0018744c..4748f428dc1 100644
--- a/Examples/Tests/langmuir/README
+++ b/Examples/Tests/langmuir/README.rst
@@ -23,19 +23,19 @@ For `MPI-parallel `__ runs, prefix these lines with `

    .. tab-item:: Python: Script

-      This example can be run as a **Python** script: ``python3 PICMI_inputs_3d.py``.
+      This example can be run as a **Python** script: ``python3 inputs_test_3d_langmuir_multi_picmi.py``.

-      .. literalinclude:: PICMI_inputs_3d.py
+      .. literalinclude:: inputs_test_3d_langmuir_multi_picmi.py
         :language: python3
-         :caption: You can copy this file from ``Examples/Tests/langmuir/PICMI_inputs_3d.py``.
+         :caption: You can copy this file from ``Examples/Tests/langmuir/inputs_test_3d_langmuir_multi_picmi.py``.

    ..
tab-item:: Executable: Input File - This example can be run as WarpX **executable** using an input file: ``warpx.3d inputs_3d`` + This example can be run as WarpX **executable** using an input file: ``warpx.3d inputs_test_3d_langmuir_multi`` - .. literalinclude:: inputs_3d + .. literalinclude:: inputs_test_3d_langmuir_multi :language: ini - :caption: You can copy this file from ``Examples/Tests/langmuir/inputs_3d``. + :caption: You can copy this file from ``Examples/Tests/langmuir/inputs_test_3d_langmuir_multi``. .. tab-item:: 2D @@ -43,19 +43,19 @@ For `MPI-parallel `__ runs, prefix these lines with ` .. tab-item:: Python: Script - This example can be run as a **Python** script: ``python3 PICMI_inputs_2d.py``. + This example can be run as a **Python** script: ``python3 inputs_test_2d_langmuir_multi_picmi.py``. - .. literalinclude:: PICMI_inputs_2d.py + .. literalinclude:: inputs_test_2d_langmuir_multi_picmi.py :language: python3 - :caption: You can copy this file from ``Examples/Tests/langmuir/PICMI_inputs_2d.py``. + :caption: You can copy this file from ``Examples/Tests/langmuir/inputs_test_2d_langmuir_multi_picmi.py``. .. tab-item:: Executable: Input File - This example can be run as WarpX **executable** using an input file: ``warpx.2d inputs_2d`` + This example can be run as WarpX **executable** using an input file: ``warpx.2d inputs_test_2d_langmuir_multi`` - .. literalinclude:: inputs_2d + .. literalinclude:: inputs_test_2d_langmuir_multi :language: ini - :caption: You can copy this file from ``Examples/Tests/langmuir/inputs_2d``. + :caption: You can copy this file from ``Examples/Tests/langmuir/inputs_test_2d_langmuir_multi``. .. tab-item:: RZ @@ -64,19 +64,19 @@ For `MPI-parallel `__ runs, prefix these lines with ` .. tab-item:: Python: Script - This example can be run as a **Python** script: ``python3 PICMI_inputs_rz.py``. + This example can be run as a **Python** script: ``python3 inputs_test_rz_langmuir_multi_picmi.py``. - .. literalinclude:: PICMI_inputs_rz.py + .. literalinclude:: inputs_test_rz_langmuir_multi_picmi.py :language: python3 - :caption: You can copy this file from ``Examples/Tests/langmuir/PICMI_inputs_rz.py``. + :caption: You can copy this file from ``Examples/Tests/langmuir/inputs_test_rz_langmuir_multi_picmi.py``. .. tab-item:: Executable: Input File - This example can be run as WarpX **executable** using an input file: ``warpx.rz inputs_rz`` + This example can be run as WarpX **executable** using an input file: ``warpx.rz inputs_test_rz_langmuir_multi`` - .. literalinclude:: inputs_rz + .. literalinclude:: inputs_test_rz_langmuir_multi :language: ini - :caption: You can copy this file from ``Examples/Tests/langmuir/inputs_rz``. + :caption: You can copy this file from ``Examples/Tests/langmuir/inputs_test_rz_langmuir_multi``. .. tab-item:: 1D @@ -87,15 +87,15 @@ For `MPI-parallel `__ runs, prefix these lines with ` .. note:: - TODO: This input file should be created, like the ``inputs_1d`` file. + TODO: This input file should be created, like the ``inputs_test_1d_langmuir_multi`` file. .. tab-item:: Executable: Input File - This example can be run as WarpX **executable** using an input file: ``warpx.1d inputs_1d`` + This example can be run as WarpX **executable** using an input file: ``warpx.1d inputs_test_1d_langmuir_multi`` - .. literalinclude:: inputs_1d + .. literalinclude:: inputs_test_1d_langmuir_multi :language: ini - :caption: You can copy this file from ``Examples/Tests/langmuir/inputs_1d``. 
+ :caption: You can copy this file from ``Examples/Tests/langmuir/inputs_test_1d_langmuir_multi``. Analyze diff --git a/Examples/Tests/load_external_field/inputs_test_3d_load_external_field_grid_picmi.py b/Examples/Tests/load_external_field/inputs_test_3d_load_external_field_grid_picmi.py index 231552d088e..ee24fb667cf 100644 --- a/Examples/Tests/load_external_field/inputs_test_3d_load_external_field_grid_picmi.py +++ b/Examples/Tests/load_external_field/inputs_test_3d_load_external_field_grid_picmi.py @@ -134,5 +134,5 @@ ##### SIMULATION EXECUTION ###### ################################# -# sim.write_input_file('PICMI_inputs_3d') +# sim.write_input_file('inputs_test_3d_load_external_field_grid') sim.step(max_steps) diff --git a/Examples/Tests/load_external_field/inputs_test_3d_load_external_field_particle_picmi.py b/Examples/Tests/load_external_field/inputs_test_3d_load_external_field_particle_picmi.py index c2ec6c1a5b7..75977a01729 100644 --- a/Examples/Tests/load_external_field/inputs_test_3d_load_external_field_particle_picmi.py +++ b/Examples/Tests/load_external_field/inputs_test_3d_load_external_field_particle_picmi.py @@ -134,5 +134,5 @@ ##### SIMULATION EXECUTION ###### ################################# -# sim.write_input_file('PICMI_inputs_3d') +# sim.write_input_file('inputs_test_3d_load_external_field_particle') sim.step(max_steps) diff --git a/Examples/Tests/ohm_solver_em_modes/README b/Examples/Tests/ohm_solver_em_modes/README.rst similarity index 85% rename from Examples/Tests/ohm_solver_em_modes/README rename to Examples/Tests/ohm_solver_em_modes/README.rst index 034ee5815f0..24d95d2bcb8 100644 --- a/Examples/Tests/ohm_solver_em_modes/README +++ b/Examples/Tests/ohm_solver_em_modes/README.rst @@ -13,11 +13,11 @@ Run The same input script can be used for 1d, 2d or 3d Cartesian simulations as well as replicating either the parallel propagating or ion-Bernstein modes as indicated below. -.. dropdown:: Script ``PICMI_inputs.py`` +.. dropdown:: Script ``inputs_test_1d_ohm_solver_em_modes_picmi.py`` - .. literalinclude:: PICMI_inputs.py + .. literalinclude:: inputs_test_1d_ohm_solver_em_modes_picmi.py :language: python3 - :caption: You can copy this file from ``Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py``. + :caption: You can copy this file from ``Examples/Tests/ohm_solver_EM_modes/inputs_test_1d_ohm_solver_em_modes_picmi.py``. For `MPI-parallel `__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system. @@ -29,7 +29,7 @@ For `MPI-parallel `__ runs, prefix these lines with ` .. code-block:: bash - python3 PICMI_inputs.py -dim {1/2/3} --bdir z + python3 inputs_test_1d_ohm_solver_em_modes_picmi.py -dim {1/2/3} --bdir z .. tab-item:: Perpendicular propagating waves @@ -37,7 +37,7 @@ For `MPI-parallel `__ runs, prefix these lines with ` .. code-block:: bash - python3 PICMI_inputs.py -dim {1/2/3} --bdir {x/y} + python3 inputs_test_1d_ohm_solver_em_modes_picmi.py -dim {1/2/3} --bdir {x/y} Analyze ------- @@ -82,17 +82,17 @@ Run The following script initializes a thermal plasma in a metallic cylinder with periodic boundaries at the cylinder ends. -.. dropdown:: Script ``PICMI_inputs_rz.py`` +.. dropdown:: Script ``inputs_test_rz_ohm_solver_em_modes_picmi.py`` - .. literalinclude:: PICMI_inputs_rz.py + .. literalinclude:: inputs_test_rz_ohm_solver_em_modes_picmi.py :language: python3 - :caption: You can copy this file from ``Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py``. 
+ :caption: You can copy this file from ``Examples/Tests/ohm_solver_EM_modes/inputs_test_rz_ohm_solver_em_modes_picmi.py``. The example can be executed using: .. code-block:: bash - python3 PICMI_inputs_rz.py + python3 inputs_test_rz_ohm_solver_em_modes_picmi.py Analyze ------- diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/README b/Examples/Tests/ohm_solver_ion_Landau_damping/README.rst similarity index 84% rename from Examples/Tests/ohm_solver_ion_Landau_damping/README rename to Examples/Tests/ohm_solver_ion_Landau_damping/README.rst index dd4f94b4edf..d54f4e16aa6 100644 --- a/Examples/Tests/ohm_solver_ion_Landau_damping/README +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/README.rst @@ -14,17 +14,17 @@ Run The same input script can be used for 1d, 2d or 3d simulations and to sweep different temperature ratios. -.. dropdown:: Script ``PICMI_inputs.py`` +.. dropdown:: Script ``inputs_test_2d_ohm_solver_landau_damping_picmi.py`` - .. literalinclude:: PICMI_inputs.py + .. literalinclude:: inputs_test_2d_ohm_solver_landau_damping_picmi.py :language: python3 - :caption: You can copy this file from ``Examples/Tests/ohm_solver_ion_Landau_damping/PICMI_inputs.py``. + :caption: You can copy this file from ``Examples/Tests/ohm_solver_ion_Landau_damping/inputs_test_2d_ohm_solver_landau_damping_picmi.py``. For `MPI-parallel `__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system. .. code-block:: bash - python3 PICMI_inputs.py -dim {1/2/3} --temp_ratio {value} + python3 inputs_test_2d_ohm_solver_landau_damping_picmi.py -dim {1/2/3} --temp_ratio {value} Analyze ------- diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/README b/Examples/Tests/ohm_solver_ion_beam_instability/README.rst similarity index 86% rename from Examples/Tests/ohm_solver_ion_beam_instability/README rename to Examples/Tests/ohm_solver_ion_beam_instability/README.rst index 59469cf4aa9..49b5f1ac0a0 100644 --- a/Examples/Tests/ohm_solver_ion_beam_instability/README +++ b/Examples/Tests/ohm_solver_ion_beam_instability/README.rst @@ -13,11 +13,11 @@ Run The same input script can be used for 1d, 2d or 3d simulations as well as replicating either the resonant or non-resonant condition as indicated below. -.. dropdown:: Script ``PICMI_inputs.py`` +.. dropdown:: Script ``inputs_test_1d_ohm_solver_ion_beam_picmi.py`` - .. literalinclude:: PICMI_inputs.py + .. literalinclude:: inputs_test_1d_ohm_solver_ion_beam_picmi.py :language: python3 - :caption: You can copy this file from ``Examples/Tests/ohm_solver_ion_beam_instability/PICMI_inputs.py``. + :caption: You can copy this file from ``Examples/Tests/ohm_solver_ion_beam_instability/inputs_test_1d_ohm_solver_ion_beam_picmi.py``. For `MPI-parallel `__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system. @@ -29,7 +29,7 @@ For `MPI-parallel `__ runs, prefix these lines with ` .. code-block:: bash - python3 PICMI_inputs.py -dim {1/2/3} --resonant + python3 inputs_test_1d_ohm_solver_ion_beam_picmi.py -dim {1/2/3} --resonant .. tab-item:: Non-resonant case @@ -37,7 +37,7 @@ For `MPI-parallel `__ runs, prefix these lines with ` .. 
code-block:: bash - python3 PICMI_inputs.py -dim {1/2/3} + python3 inputs_test_1d_ohm_solver_ion_beam_picmi.py -dim {1/2/3} Analyze ------- diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/README b/Examples/Tests/ohm_solver_magnetic_reconnection/README.rst similarity index 80% rename from Examples/Tests/ohm_solver_magnetic_reconnection/README rename to Examples/Tests/ohm_solver_magnetic_reconnection/README.rst index 943b5bd0248..5181a6381d8 100644 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/README +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/README.rst @@ -12,11 +12,11 @@ Run The following **Python** script configures and launches the simulation. -.. dropdown:: Script ``PICMI_inputs.py`` +.. dropdown:: Script ``inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py`` - .. literalinclude:: PICMI_inputs.py + .. literalinclude:: inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py :language: python3 - :caption: You can copy this file from ``Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py``. + :caption: You can copy this file from ``Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py``. Running the full simulation should take about 4 hours if executed on 1 V100 GPU. For `MPI-parallel `__ runs, prefix these lines with @@ -24,7 +24,7 @@ For `MPI-parallel `__ runs, prefix these lines with .. code-block:: bash - python3 PICMI_inputs.py + python3 inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py Analyze ------- diff --git a/Examples/Tests/particle_boundary_interaction/analysis.py b/Examples/Tests/particle_boundary_interaction/analysis.py index b80cf4b52aa..3b9d2f12b84 100755 --- a/Examples/Tests/particle_boundary_interaction/analysis.py +++ b/Examples/Tests/particle_boundary_interaction/analysis.py @@ -5,7 +5,7 @@ The sphere is centered on O and has a radius of 0.2 (EB) The electron is initially at: (0,0,-0.25) and moves with a velocity: (0.5e10,0,1.0e10) with a time step of 1e-11. -An input file PICMI_inputs_rz.py is used. +An input file inputs_test_rz_particle_boundary_interaction_picmi.py is used. """ import os diff --git a/Examples/Tests/pass_mpi_communicator/analysis.py b/Examples/Tests/pass_mpi_communicator/analysis.py index af55346e1f5..041687c4775 100755 --- a/Examples/Tests/pass_mpi_communicator/analysis.py +++ b/Examples/Tests/pass_mpi_communicator/analysis.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # This is a script that analyses the simulation results from -# the script `PICMI_inputs_2d`. +# the script `inputs_test_2d_pass_mpi_comm_picmi.py`. import sys diff --git a/Examples/Tests/pass_mpi_communicator/inputs_test_2d_pass_mpi_comm_picmi.py b/Examples/Tests/pass_mpi_communicator/inputs_test_2d_pass_mpi_comm_picmi.py index 200cea7be0f..c87a7a0045c 100755 --- a/Examples/Tests/pass_mpi_communicator/inputs_test_2d_pass_mpi_comm_picmi.py +++ b/Examples/Tests/pass_mpi_communicator/inputs_test_2d_pass_mpi_comm_picmi.py @@ -126,7 +126,7 @@ # simulation run ########################## -# TODO: Enable in pyAMReX, then enable lines in PICMI_inputs_2d.py again +# TODO: Enable in pyAMReX, then enable lines in inputs_test_2d_pass_mpi_comm_picmi.py again # https://github.com/AMReX-Codes/pyamrex/issues/163 # sim.step(max_steps, mpi_comm=new_comm) @@ -141,7 +141,7 @@ # If any of these tests fail, the terminal will print that the # program crashed. 
-# TODO: Enable in pyAMReX, then enable lines in PICMI_inputs_2d.py again +# TODO: Enable in pyAMReX, then enable lines in inputs_test_2d_pass_mpi_comm_picmi.py again # https://github.com/AMReX-Codes/pyamrex/issues/163 # comm_world_size = comm_world.size # new_comm_size = new_comm.size diff --git a/Examples/Tests/point_of_contact_eb/analysis.py b/Examples/Tests/point_of_contact_eb/analysis.py index 9fb097f99d4..3f42aa6eeca 100755 --- a/Examples/Tests/point_of_contact_eb/analysis.py +++ b/Examples/Tests/point_of_contact_eb/analysis.py @@ -5,7 +5,7 @@ It compares the numerical results with the analytical solutions. The sphere is centered on O and has a radius of 0.2 (EB) The electron is initially at: (-0.25,0,0) and moves with a normalized momentum: (1,0.5,0) -An input file PICMI_inputs_3d.py is used. +An input file inputs_test_3d_point_of_contact_eb is used. """ import os From 21c9c949baa7347e6d0689c1820d1b4204210504 Mon Sep 17 00:00:00 2001 From: johvandewetering <92386744+johvandewetering@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:45:27 -0700 Subject: [PATCH 28/91] Docs LaTeX fix for sqrt (#5240) Co-authored-by: Johannes Van de Wetering --- Docs/source/theory/collisions.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/theory/collisions.rst b/Docs/source/theory/collisions.rst index 52e36521125..ef6b83a699b 100644 --- a/Docs/source/theory/collisions.rst +++ b/Docs/source/theory/collisions.rst @@ -45,7 +45,7 @@ where :math:`u` is the speed of the particle as tracked in WarpX (i.e. :math:`u = \gamma v` with :math:`v` the particle speed), while :math:`m` and :math:`M` are the rest masses of the simulation and background species, respectively. The Lorentz factor is defined in the usual way, -:math:`\gamma \def \sqrt{1 + u^2/c^2}`. Note that if :math:`\gamma\to1` the above +:math:`\gamma \equiv \sqrt{1 + u^2/c^2}`. Note that if :math:`\gamma\to1` the above expression reduces to the classical equation :math:`E_{coll} = \frac{1}{2}\frac{Mm}{M+m} u^2`. The collision cross-sections for all scattering processes are evaluated at the energy as calculated above. 
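A quick numerical sanity check of the classical limit quoted in the collisions.rst hunk above. The short Python sketch below is illustrative only: it assumes the background particle is at rest and uses the invariant-mass form of the center-of-momentum kinetic energy, E_coll = sqrt(m^2 c^4 + M^2 c^4 + 2 gamma m M c^4) - (m + M) c^2, which is a standard way to write this quantity but is not taken verbatim from the WarpX sources.

    # Verify that E_coll -> (1/2) * M*m/(M+m) * u^2 as gamma -> 1,
    # with u = gamma*v the velocity variable tracked by WarpX (here u << c).
    import numpy as np
    from scipy.constants import c, m_e, m_p

    m, M = m_e, m_p                        # simulation and background rest masses
    u = 1.0e6                              # u = gamma*v, in m/s
    gamma = np.sqrt(1.0 + (u / c) ** 2)    # Lorentz factor as defined above

    E_rel = (np.sqrt(m**2 * c**4 + M**2 * c**4 + 2.0 * gamma * m * M * c**4)
             - (m + M) * c**2)
    E_classical = 0.5 * (M * m / (M + m)) * u**2

    print(E_rel / E_classical)             # ~1, up to O(u^2/c^2) corrections

Running this prints a ratio within about 1e-5 of unity, consistent with the stated non-relativistic reduction.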
From b341d510b9c5084f131cdab156abb8712e604c28 Mon Sep 17 00:00:00 2001 From: David Grote Date: Tue, 10 Sep 2024 11:59:39 -0700 Subject: [PATCH 29/91] Clean up in SpectralFieldData for multi-dimensions (#5244) --- .../SpectralSolver/SpectralFieldData.H | 9 +- .../SpectralSolver/SpectralFieldData.cpp | 143 ++++++------------ 2 files changed, 52 insertions(+), 100 deletions(-) diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H index 8ced43ced94..d6c4916bdac 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H @@ -176,11 +176,10 @@ class SpectralFieldData ablastr::math::anyfft::FFTplans forward_plan, backward_plan; // Correcting "shift" factors when performing FFT from/to // a cell-centered grid in real space, instead of a nodal grid - SpectralShiftFactor xshift_FFTfromCell, xshift_FFTtoCell, - zshift_FFTfromCell, zshift_FFTtoCell; -#if defined(WARPX_DIM_3D) - SpectralShiftFactor yshift_FFTfromCell, yshift_FFTtoCell; -#endif + // (0,1,2) is the dimension number + SpectralShiftFactor shift0_FFTfromCell, shift0_FFTtoCell, + shift1_FFTfromCell, shift1_FFTtoCell, + shift2_FFTfromCell, shift2_FFTtoCell; bool m_periodic_single_box; }; diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp b/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp index 20c97f4b5d4..8e7b9ed9ae4 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp @@ -142,24 +142,21 @@ SpectralFieldData::SpectralFieldData( const int lev, // By default, we assume the FFT is done from/to a nodal grid in real space // If the FFT is performed from/to a cell-centered grid in real space, // a correcting "shift" factor must be applied in spectral space. 
- xshift_FFTfromCell = k_space.getSpectralShiftFactor(dm, 0, + shift0_FFTfromCell = k_space.getSpectralShiftFactor(dm, 0, ShiftType::TransformFromCellCentered); - xshift_FFTtoCell = k_space.getSpectralShiftFactor(dm, 0, + shift0_FFTtoCell = k_space.getSpectralShiftFactor(dm, 0, ShiftType::TransformToCellCentered); -#if defined(WARPX_DIM_3D) - yshift_FFTfromCell = k_space.getSpectralShiftFactor(dm, 1, +#if AMREX_SPACEDIM > 1 + shift1_FFTfromCell = k_space.getSpectralShiftFactor(dm, 1, ShiftType::TransformFromCellCentered); - yshift_FFTtoCell = k_space.getSpectralShiftFactor(dm, 1, + shift1_FFTtoCell = k_space.getSpectralShiftFactor(dm, 1, ShiftType::TransformToCellCentered); - zshift_FFTfromCell = k_space.getSpectralShiftFactor(dm, 2, +#if AMREX_SPACEDIM > 2 + shift2_FFTfromCell = k_space.getSpectralShiftFactor(dm, 2, ShiftType::TransformFromCellCentered); - zshift_FFTtoCell = k_space.getSpectralShiftFactor(dm, 2, - ShiftType::TransformToCellCentered); -#else - zshift_FFTfromCell = k_space.getSpectralShiftFactor(dm, 1, - ShiftType::TransformFromCellCentered); - zshift_FFTtoCell = k_space.getSpectralShiftFactor(dm, 1, + shift2_FFTtoCell = k_space.getSpectralShiftFactor(dm, 2, ShiftType::TransformToCellCentered); +#endif #endif // Allocate and initialize the FFT plans @@ -221,16 +218,12 @@ SpectralFieldData::ForwardTransform (const int lev, const bool do_costs = WarpXUtilLoadBalance::doCosts(cost, mf.boxArray(), mf.DistributionMap()); // Check field index type, in order to apply proper shift in spectral space -#if (AMREX_SPACEDIM >= 2) - const bool is_nodal_x = mf.is_nodal(0); + const bool is_nodal_0 = mf.is_nodal(0); +#if AMREX_SPACEDIM > 1 + const bool is_nodal_1 = mf.is_nodal(1); +#if AMREX_SPACEDIM > 2 + const bool is_nodal_2 = mf.is_nodal(2); #endif -#if defined(WARPX_DIM_3D) - const bool is_nodal_y = mf.is_nodal(1); - const bool is_nodal_z = mf.is_nodal(2); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const bool is_nodal_z = mf.is_nodal(1); -#elif defined(WARPX_DIM_1D_Z) - const bool is_nodal_z = mf.is_nodal(0); #endif // Loop over boxes @@ -275,13 +268,14 @@ SpectralFieldData::ForwardTransform (const int lev, { const Array4 fields_arr = SpectralFieldData::fields[mfi].array(); const Array4 tmp_arr = tmpSpectralField[mfi].array(); -#if (AMREX_SPACEDIM >= 2) - const Complex* xshift_arr = xshift_FFTfromCell[mfi].dataPtr(); + + const Complex* shift0_arr = shift0_FFTfromCell[mfi].dataPtr(); +#if AMREX_SPACEDIM > 1 + const Complex* shift1_arr = shift1_FFTfromCell[mfi].dataPtr(); +#if AMREX_SPACEDIM > 2 + const Complex* shift2_arr = shift2_FFTfromCell[mfi].dataPtr(); #endif -#if defined(WARPX_DIM_3D) - const Complex* yshift_arr = yshift_FFTfromCell[mfi].dataPtr(); #endif - const Complex* zshift_arr = zshift_FFTfromCell[mfi].dataPtr(); // Loop over indices within one box const Box spectralspace_bx = tmpSpectralField[mfi].box(); @@ -289,16 +283,12 @@ SpectralFieldData::ForwardTransform (const int lev, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept { Complex spectral_field_value = tmp_arr(i,j,k); // Apply proper shift in each dimension -#if (AMREX_SPACEDIM >= 2) - if (!is_nodal_x) { spectral_field_value *= xshift_arr[i]; } + if (!is_nodal_0) { spectral_field_value *= shift0_arr[i]; } +#if AMREX_SPACEDIM > 1 + if (!is_nodal_1) { spectral_field_value *= shift1_arr[j]; } +#if AMREX_SPACEDIM > 2 + if (!is_nodal_2) { spectral_field_value *= shift2_arr[k]; } #endif -#if defined(WARPX_DIM_3D) - if (!is_nodal_y) { spectral_field_value *= yshift_arr[j]; } - if (!is_nodal_z) { 
spectral_field_value *= zshift_arr[k]; } -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - if (!is_nodal_z) { spectral_field_value *= zshift_arr[j]; } -#elif defined(WARPX_DIM_1D_Z) - if (!is_nodal_z) { spectral_field_value *= zshift_arr[i]; } #endif // Copy field into the right index fields_arr(i,j,k,field_index) = spectral_field_value; @@ -328,32 +318,9 @@ SpectralFieldData::BackwardTransform (const int lev, const bool do_costs = WarpXUtilLoadBalance::doCosts(cost, mf.boxArray(), mf.DistributionMap()); // Check field index type, in order to apply proper shift in spectral space -#if (AMREX_SPACEDIM >= 2) - const bool is_nodal_x = mf.is_nodal(0); -#endif -#if defined(WARPX_DIM_3D) - const bool is_nodal_y = mf.is_nodal(1); - const bool is_nodal_z = mf.is_nodal(2); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const bool is_nodal_z = mf.is_nodal(1); -#elif defined(WARPX_DIM_1D_Z) - const bool is_nodal_z = mf.is_nodal(0); -#endif - -#if (AMREX_SPACEDIM >= 2) - const int si = (is_nodal_x) ? 1 : 0; -#endif -#if defined(WARPX_DIM_1D_Z) - const int si = (is_nodal_z) ? 1 : 0; - const int sj = 0; - const int sk = 0; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const int sj = (is_nodal_z) ? 1 : 0; - const int sk = 0; -#elif defined(WARPX_DIM_3D) - const int sj = (is_nodal_y) ? 1 : 0; - const int sk = (is_nodal_z) ? 1 : 0; -#endif + const bool is_nodal_0 = mf.is_nodal(0); + const bool is_nodal_1 = (AMREX_SPACEDIM > 1 ? mf.is_nodal(1) : 0); + const bool is_nodal_2 = (AMREX_SPACEDIM > 2 ? mf.is_nodal(2) : 0); // Numbers of guard cells const amrex::IntVect& mf_ng = mf.nGrowVect(); @@ -375,13 +342,13 @@ SpectralFieldData::BackwardTransform (const int lev, { const Array4 field_arr = SpectralFieldData::fields[mfi].array(); const Array4 tmp_arr = tmpSpectralField[mfi].array(); -#if (AMREX_SPACEDIM >= 2) - const Complex* xshift_arr = xshift_FFTtoCell[mfi].dataPtr(); + const Complex* shift0_arr = shift0_FFTtoCell[mfi].dataPtr(); +#if AMREX_SPACEDIM > 1 + const Complex* shift1_arr = shift1_FFTtoCell[mfi].dataPtr(); +#if AMREX_SPACEDIM > 2 + const Complex* shift2_arr = shift2_FFTtoCell[mfi].dataPtr(); #endif -#if defined(WARPX_DIM_3D) - const Complex* yshift_arr = yshift_FFTtoCell[mfi].dataPtr(); #endif - const Complex* zshift_arr = zshift_FFTtoCell[mfi].dataPtr(); // Loop over indices within one box const Box spectralspace_bx = tmpSpectralField[mfi].box(); @@ -389,16 +356,12 @@ SpectralFieldData::BackwardTransform (const int lev, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept { Complex spectral_field_value = field_arr(i,j,k,field_index); // Apply proper shift in each dimension -#if (AMREX_SPACEDIM >= 2) - if (!is_nodal_x) { spectral_field_value *= xshift_arr[i]; } + if (!is_nodal_0) { spectral_field_value *= shift0_arr[i]; } +#if AMREX_SPACEDIM > 1 + if (!is_nodal_1) { spectral_field_value *= shift1_arr[j]; } +#if AMREX_SPACEDIM > 2 + if (!is_nodal_2) { spectral_field_value *= shift2_arr[k]; } #endif -#if defined(WARPX_DIM_3D) - if (!is_nodal_y) { spectral_field_value *= yshift_arr[j]; } - if (!is_nodal_z) { spectral_field_value *= zshift_arr[k]; } -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - if (!is_nodal_z) { spectral_field_value *= zshift_arr[j]; } -#elif defined(WARPX_DIM_1D_Z) - if (!is_nodal_z) { spectral_field_value *= zshift_arr[i]; } #endif // Copy field into temporary array tmp_arr(i,j,k) = spectral_field_value; @@ -419,28 +382,18 @@ SpectralFieldData::BackwardTransform (const int lev, // Total number of cells, including ghost cells (nj represents ny in 3D 
and nz in 2D) const int ni = mf_box.length(0); -#if defined(WARPX_DIM_1D_Z) - constexpr int nj = 1; - constexpr int nk = 1; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const int nj = mf_box.length(1); - constexpr int nk = 1; -#elif defined(WARPX_DIM_3D) - const int nj = mf_box.length(1); - const int nk = mf_box.length(2); -#endif + const int nj = (AMREX_SPACEDIM > 1 ? mf_box.length(1) : 1); + const int nk = (AMREX_SPACEDIM > 2 ? mf_box.length(2) : 1); + + const int si = (is_nodal_0) ? 1 : 0; + const int sj = (is_nodal_1) ? 1 : 0; + const int sk = (is_nodal_2) ? 1 : 0; + // Lower bound of the box (lo_j represents lo_y in 3D and lo_z in 2D) const int lo_i = amrex::lbound(mf_box).x; -#if defined(WARPX_DIM_1D_Z) - constexpr int lo_j = 0; - constexpr int lo_k = 0; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const int lo_j = amrex::lbound(mf_box).y; - constexpr int lo_k = 0; -#elif defined(WARPX_DIM_3D) - const int lo_j = amrex::lbound(mf_box).y; - const int lo_k = amrex::lbound(mf_box).z; -#endif + const int lo_j = (AMREX_SPACEDIM > 1 ? amrex::lbound(mf_box).y : 0); + const int lo_k = (AMREX_SPACEDIM > 2 ? amrex::lbound(mf_box).z : 0); + // If necessary, do not fill the guard cells // (shrink box by passing negative number of cells) if (!m_periodic_single_box) From 3459ccfa4b3e50de1843a9aa3db5470c65473c04 Mon Sep 17 00:00:00 2001 From: David Grote Date: Tue, 10 Sep 2024 20:35:57 -0700 Subject: [PATCH 30/91] [Hackathon] Clean up macros in Source/EmbeddedBoundary/WarpXInitEB.cpp (#5237) * Clean up macros in Source/EmbeddedBoundary/WarpXInitEB.cpp * Fix order of loops --- Source/EmbeddedBoundary/WarpXInitEB.cpp | 140 ++++++++++-------------- 1 file changed, 56 insertions(+), 84 deletions(-) diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp index b3e6290ad6c..f63e4eb45d3 100644 --- a/Source/EmbeddedBoundary/WarpXInitEB.cpp +++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp @@ -86,6 +86,11 @@ WarpX::InitEB () if (!EB::enabled()) { throw std::runtime_error("InitEB only works when EBs are enabled at runtime"); } + +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("EBs only implemented in 2D and 3D"); +#endif + #ifdef AMREX_USE_EB BL_PROFILE("InitEB"); @@ -121,21 +126,20 @@ WarpX::ComputeEdgeLengths (std::array< std::unique_ptr, 3 >& ed const amrex::EBFArrayBoxFactory& eb_fact) { BL_PROFILE("ComputeEdgeLengths"); +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("ComputeEdgeLengths only implemented in 2D and 3D"); +#endif + auto const &flags = eb_fact.getMultiEBCellFlagFab(); auto const &edge_centroid = eb_fact.getEdgeCent(); + for (int idim = 0; idim < 3; ++idim){ #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - edge_lengths[1]->setVal(0.); -#endif - for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi){ -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - for (int idim = 0; idim < 3; ++idim){ - if(idim == 1) { continue; } -#elif defined(WARPX_DIM_3D) - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim){ -#else - WARPX_ABORT_WITH_MESSAGE( - "ComputeEdgeLengths: Only implemented in 2D3V and 3D3V"); + if (idim == 1) { + edge_lengths[1]->setVal(0.); + continue; + } #endif + for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi){ amrex::Box const box = mfi.tilebox(edge_lengths[idim]->ixType().toIntVect(), edge_lengths[idim]->nGrowVect()); amrex::FabType const fab_type = flags[mfi].getType(box); @@ -154,13 +158,10 @@ 
WarpX::ComputeEdgeLengths (std::array< std::unique_ptr, 3 >& ed } else { #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) int idim_amrex = idim; - if(idim == 2) { idim_amrex = 1; } + if (idim == 2) { idim_amrex = 1; } auto const &edge_cent = edge_centroid[idim_amrex]->const_array(mfi); #elif defined(WARPX_DIM_3D) auto const &edge_cent = edge_centroid[idim]->const_array(mfi); -#else - WARPX_ABORT_WITH_MESSAGE( - "ComputeEdgeLengths: Only implemented in 2D3V and 3D3V"); #endif amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { if (edge_cent(i, j, k) == amrex::Real(-1.0)) { @@ -187,31 +188,26 @@ WarpX::ComputeFaceAreas (std::array< std::unique_ptr, 3 >& face const amrex::EBFArrayBoxFactory& eb_fact) { BL_PROFILE("ComputeFaceAreas"); +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("ComputeFaceAreas only implemented in 2D and 3D"); +#endif + auto const &flags = eb_fact.getMultiEBCellFlagFab(); #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In 2D the volume frac is actually the area frac. auto const &area_frac = eb_fact.getVolFrac(); #elif defined(WARPX_DIM_3D) auto const &area_frac = eb_fact.getAreaFrac(); -#else - WARPX_ABORT_WITH_MESSAGE( - "ComputeFaceAreas: Only implemented in 2D3V and 3D3V"); #endif + for (int idim = 0; idim < 3; ++idim) { #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - face_areas[0]->setVal(0.); - face_areas[2]->setVal(0.); -#endif - for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi) { -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - // In 2D we change the extrema of the for loop so that we only have the case idim=1 - for (int idim = 1; idim < AMREX_SPACEDIM; ++idim) { -#elif defined(WARPX_DIM_3D) - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { -#else - WARPX_ABORT_WITH_MESSAGE( - "ComputeFaceAreas: Only implemented in 2D3V and 3D3V"); + if (idim == 0 || idim == 2) { + face_areas[idim]->setVal(0.); + continue; + } #endif + for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi) { amrex::Box const box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), face_areas[idim]->nGrowVect()); amrex::FabType const fab_type = flags[mfi].getType(box); @@ -231,9 +227,6 @@ WarpX::ComputeFaceAreas (std::array< std::unique_ptr, 3 >& face auto const &face = area_frac.const_array(mfi); #elif defined(WARPX_DIM_3D) auto const &face = area_frac[idim]->const_array(mfi); -#else - WARPX_ABORT_WITH_MESSAGE( - "ComputeFaceAreas: Only implemented in 2D3V and 3D3V"); #endif amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { face_areas_dim(i, j, k) = face(i, j, k); @@ -249,16 +242,15 @@ WarpX::ScaleEdges (std::array< std::unique_ptr, 3 >& edge_lengt const std::array& cell_size) { BL_PROFILE("ScaleEdges"); - for (amrex::MFIter mfi(*edge_lengths[0]); mfi.isValid(); ++mfi) { +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("ScaleEdges only implemented in 2D and 3D"); +#endif + + for (int idim = 0; idim < 3; ++idim){ #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - for (int idim = 0; idim < 3; ++idim){ - if(idim == 1) { continue; } -#elif defined(WARPX_DIM_3D) - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim){ -#else - WARPX_ABORT_WITH_MESSAGE( - "ScaleEdges: Only implemented in 2D3V and 3D3V"); + if (idim == 1) { continue; } #endif + for (amrex::MFIter mfi(*edge_lengths[0]); mfi.isValid(); ++mfi) { const amrex::Box& box = mfi.tilebox(edge_lengths[idim]->ixType().toIntVect(), edge_lengths[idim]->nGrowVect() ); auto const 
&edge_lengths_dim = edge_lengths[idim]->array(mfi); @@ -270,38 +262,22 @@ WarpX::ScaleEdges (std::array< std::unique_ptr, 3 >& edge_lengt } void -WarpX::ScaleAreas(std::array< std::unique_ptr, 3 >& face_areas, - const std::array& cell_size) { +WarpX::ScaleAreas (std::array< std::unique_ptr, 3 >& face_areas, + const std::array& cell_size) { BL_PROFILE("ScaleAreas"); - amrex::Real full_area; +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("ScaleAreas only implemented in 2D and 3D"); +#endif - for (amrex::MFIter mfi(*face_areas[0]); mfi.isValid(); ++mfi) { + for (int idim = 0; idim < 3; ++idim) { #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - // In 2D we change the extrema of the for loop so that we only have the case idim=1 - for (int idim = 1; idim < AMREX_SPACEDIM; ++idim) { -#elif defined(WARPX_DIM_3D) - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { -#else - WARPX_ABORT_WITH_MESSAGE( - "ScaleAreas: Only implemented in 2D3V and 3D3V"); + if (idim == 0 || idim == 2) { continue; } #endif + for (amrex::MFIter mfi(*face_areas[0]); mfi.isValid(); ++mfi) { const amrex::Box& box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), face_areas[idim]->nGrowVect() ); -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - full_area = cell_size[0]*cell_size[2]; -#elif defined(WARPX_DIM_3D) - if (idim == 0) { - full_area = cell_size[1]*cell_size[2]; - } else if (idim == 1) { - full_area = cell_size[0]*cell_size[2]; - } else { - full_area = cell_size[0]*cell_size[1]; - } -#else - WARPX_ABORT_WITH_MESSAGE( - "ScaleAreas: Only implemented in 2D3V and 3D3V"); -#endif + amrex::Real const full_area = cell_size[(idim+1)%3]*cell_size[(idim+2)%3]; auto const &face_areas_dim = face_areas[idim]->array(mfi); amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { @@ -314,22 +290,21 @@ WarpX::ScaleAreas(std::array< std::unique_ptr, 3 >& face_areas, void -WarpX::MarkCells(){ +WarpX::MarkCells () { #ifndef WARPX_DIM_RZ auto const &cell_size = CellSize(maxLevel()); -#ifdef WARPX_DIM_3D - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { -#elif defined(WARPX_DIM_XZ) - m_flag_info_face[maxLevel()][0]->setVal(0.); - m_flag_info_face[maxLevel()][2]->setVal(0.); - m_flag_ext_face[maxLevel()][0]->setVal(0.); - m_flag_ext_face[maxLevel()][2]->setVal(0.); - // In 2D we change the extrema of the for loop so that we only have the case idim=1 - for (int idim = 1; idim < AMREX_SPACEDIM; ++idim) { -#else - WARPX_ABORT_WITH_MESSAGE( - "MarkCells: Only implemented in 2D3V and 3D3V"); +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) + WARPX_ABORT_WITH_MESSAGE("MarkCells only implemented in 2D and 3D"); +#endif + + for (int idim = 0; idim < 3; ++idim) { +#if defined(WARPX_DIM_XZ) + if (idim == 0 || idim == 2) { + m_flag_info_face[maxLevel()][idim]->setVal(0.); + m_flag_ext_face[maxLevel()][idim]->setVal(0.); + continue; + } #endif for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) { //amrex::Box const &box = mfi.tilebox(m_face_areas[maxLevel()][idim]->ixType().toIntVect()); @@ -352,19 +327,16 @@ WarpX::MarkCells(){ // Minimal area for this cell to be stable mod_areas_dim(i, j, k) = S(i, j, k); double S_stab; - if(idim == 0){ + if (idim == 0){ S_stab = 0.5 * std::max({ly(i, j, k) * dz, ly(i, j, k + 1) * dz, lz(i, j, k) * dy, lz(i, j + 1, k) * dy}); - }else if(idim == 1){ + }else if (idim == 1){ #ifdef WARPX_DIM_XZ S_stab = 0.5 * std::max({lx(i, j, k) * dz, lx(i, j + 1, k) * dz, lz(i, j, k) * dx, lz(i + 1, j, k) * dx}); 
#elif defined(WARPX_DIM_3D) S_stab = 0.5 * std::max({lx(i, j, k) * dz, lx(i, j, k + 1) * dz, lz(i, j, k) * dx, lz(i + 1, j, k) * dx}); -#else - WARPX_ABORT_WITH_MESSAGE( - "MarkCells: Only implemented in 2D3V and 3D3V"); #endif }else { S_stab = 0.5 * std::max({lx(i, j, k) * dy, lx(i, j + 1, k) * dy, From f04a332720f8cef53e4990a766b8a3c6698ff46f Mon Sep 17 00:00:00 2001 From: Andrew Myers Date: Wed, 11 Sep 2024 12:50:24 -0700 Subject: [PATCH 31/91] AMReX_BLProfiler.H should be included, not AMReX_TinyProfiler.H (#5250) --- Source/Initialization/WarpXAMReXInit.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/Initialization/WarpXAMReXInit.cpp b/Source/Initialization/WarpXAMReXInit.cpp index dfb127ae055..5009d2def59 100644 --- a/Source/Initialization/WarpXAMReXInit.cpp +++ b/Source/Initialization/WarpXAMReXInit.cpp @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include From 28cf684016ce86e8e0dedfafa5e2b26553a1815f Mon Sep 17 00:00:00 2001 From: David Grote Date: Thu, 12 Sep 2024 10:37:15 -0700 Subject: [PATCH 32/91] [Hackathon] Clean dimension macros in particle routines (#5248) * Cleanup of particles * Fixes in Source/Particles/LaserParticleContainer.cpp * Fixes in Source/Particles/Pusher/GetAndSetPosition.H * Revert declaration of SetParticlePosition attributes * Change std::pow to amrex::Math::powi --- Source/Particles/LaserParticleContainer.cpp | 24 ++--- .../Particles/PhysicalParticleContainer.cpp | 88 ++++++------------- Source/Particles/Pusher/GetAndSetPosition.H | 52 +++++------ Source/Particles/Pusher/UpdatePosition.H | 41 ++++----- 4 files changed, 72 insertions(+), 133 deletions(-) diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp index e9509a1ef40..d43fb240756 100644 --- a/Source/Particles/LaserParticleContainer.cpp +++ b/Source/Particles/LaserParticleContainer.cpp @@ -413,12 +413,10 @@ LaserParticleContainer::InitData (int lev) #if defined(WARPX_DIM_3D) return {m_u_X[0]*(pos[0]-m_position[0])+m_u_X[1]*(pos[1]-m_position[1])+m_u_X[2]*(pos[2]-m_position[2]), m_u_Y[0]*(pos[0]-m_position[0])+m_u_Y[1]*(pos[1]-m_position[1])+m_u_Y[2]*(pos[2]-m_position[2])}; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) -# if defined(WARPX_DIM_RZ) +#elif defined(WARPX_DIM_RZ) return {pos[0]-m_position[0], 0.0_rt}; -# else +#elif defined(WARPX_DIM_XZ) return {m_u_X[0]*(pos[0]-m_position[0])+m_u_X[2]*(pos[2]-m_position[2]), 0.0_rt}; -# endif #else return {m_u_X[2]*(pos[2]-m_position[2]), 0.0_rt}; #endif @@ -734,13 +732,12 @@ LaserParticleContainer::ComputeSpacing (int lev, Real& Sx, Real& Sy) const Sy = std::min(std::min(dx[0]/(std::abs(m_u_Y[0])+eps), dx[1]/(std::abs(m_u_Y[1])+eps)), dx[2]/(std::abs(m_u_Y[2])+eps)); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) -# if defined(WARPX_DIM_RZ) +#elif defined(WARPX_DIM_RZ) Sx = dx[0]; -# else + Sy = 1.0; +#elif defined(WARPX_DIM_XZ) Sx = std::min(dx[0]/(std::abs(m_u_X[0])+eps), dx[2]/(std::abs(m_u_X[2])+eps)); -# endif Sy = 1.0; #else Sx = 1.0; @@ -750,7 +747,7 @@ LaserParticleContainer::ComputeSpacing (int lev, Real& Sx, Real& Sy) const } void -LaserParticleContainer::ComputeWeightMobility (Real Sx, Real Sy) +LaserParticleContainer::ComputeWeightMobility ([[maybe_unused]] Real Sx, [[maybe_unused]] Real Sy) { // The mobility is the constant of proportionality between the field to // be emitted, and the corresponding velocity that the particles need to have. 
@@ -760,14 +757,7 @@ LaserParticleContainer::ComputeWeightMobility (Real Sx, Real Sy)
     m_mobility = eps/m_e_max;
     m_weight = PhysConst::ep0 / m_mobility;
     // Multiply by particle spacing
-#if defined(WARPX_DIM_3D)
-    m_weight *= Sx * Sy;
-#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
-    m_weight *= Sx;
-    amrex::ignore_unused(Sy);
-#else
-    amrex::ignore_unused(Sx,Sy);
-#endif
+    m_weight *= AMREX_D_TERM(1._rt, * Sx, * Sy);
     // When running in the boosted-frame, the input parameters (and in particular
     // the amplitude of the field) are given in the lab-frame.
     // Therefore, the mobility needs to be modified by a factor WarpX::gamma_boost.
diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp
index 1ad43755464..ec483c21bbc 100644
--- a/Source/Particles/PhysicalParticleContainer.cpp
+++ b/Source/Particles/PhysicalParticleContainer.cpp
@@ -178,16 +178,16 @@ namespace
             pos.x = lo_corner[0] + (iv[0]+r.x)*dx[0];
             pos.y = lo_corner[1] + (iv[1]+r.y)*dx[1];
             pos.z = lo_corner[2] + (iv[2]+r.z)*dx[2];
-#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+#elif defined(WARPX_DIM_XZ)
             pos.x = lo_corner[0] + (iv[0]+r.x)*dx[0];
             pos.y = 0.0_rt;
-#if defined WARPX_DIM_XZ
             pos.z = lo_corner[1] + (iv[1]+r.y)*dx[1];
-#elif defined WARPX_DIM_RZ
+#elif defined(WARPX_DIM_RZ)
             // Note that for RZ, r.y will be theta
+            pos.x = lo_corner[0] + (iv[0]+r.x)*dx[0];
+            pos.y = 0.0_rt;
             pos.z = lo_corner[1] + (iv[1]+r.z)*dx[1];
-#endif
-#else
+#elif defined(WARPX_DIM_1D_Z)
             pos.x = 0.0_rt;
             pos.y = 0.0_rt;
             pos.z = lo_corner[0] + (iv[0]+r.x)*dx[0];
@@ -222,20 +222,9 @@ namespace
 #endif
     ) noexcept
     {
-        pa[PIdx::z][ip] = 0._rt;
-#if (AMREX_SPACEDIM >= 2)
-        pa[PIdx::x][ip] = 0._rt;
-#endif
-#if defined(WARPX_DIM_3D)
-        pa[PIdx::y][ip] = 0._rt;
-#endif
-        pa[PIdx::w ][ip] = 0._rt;
-        pa[PIdx::ux][ip] = 0._rt;
-        pa[PIdx::uy][ip] = 0._rt;
-        pa[PIdx::uz][ip] = 0._rt;
-#ifdef WARPX_DIM_RZ
-        pa[PIdx::theta][ip] = 0._rt;
-#endif
+        for (int idx=0 ; idx < PIdx::nattribs ; idx++) {
+            pa[idx][ip] = 0._rt;
+        }
         if (do_field_ionization) {pi[ip] = 0;}
 #ifdef WARPX_QED
         if (has_quantum_sync) {p_optical_depth_QSR[ip] = 0._rt;}
@@ -758,6 +747,7 @@ PhysicalParticleContainer::AddPlasmaFromFile(PlasmaInjector & plasma_injector,
     const std::shared_ptr<ParticleReal> ptr_offset_z = ps["positionOffset"]["z"].loadChunk<ParticleReal>();
     auto const position_unit_z = static_cast<ParticleReal>(ps["position"]["z"].unitSI());
     auto const position_offset_unit_z = static_cast<ParticleReal>(ps["positionOffset"]["z"].unitSI());
+
     const std::shared_ptr<ParticleReal> ptr_ux = ps["momentum"]["x"].loadChunk<ParticleReal>();
     auto const momentum_unit_x = static_cast<ParticleReal>(ps["momentum"]["x"].unitSI());
     const std::shared_ptr<ParticleReal> ptr_uz = ps["momentum"]["z"].loadChunk<ParticleReal>();
     auto const momentum_unit_z = static_cast<ParticleReal>(ps["momentum"]["z"].unitSI());
@@ -1264,13 +1254,8 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int

         Real scale_fac = 0.0_rt;
         if( pcounts[index] != 0) {
-#if defined(WARPX_DIM_3D)
-            scale_fac = dx[0]*dx[1]*dx[2]/pcounts[index];
-#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
-            scale_fac = dx[0]*dx[1]/pcounts[index];
-#elif defined(WARPX_DIM_1D_Z)
-            scale_fac = dx[0]/pcounts[index];
-#endif
+            amrex::Real const dV = AMREX_D_TERM(dx[0], *dx[1], *dx[2]);
+            scale_fac = dV/pcounts[index];
         }

         for (int i_part = 0; i_part < pcounts[index]; ++i_part)
@@ -1285,29 +1270,15 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int
                 auto pos = getCellCoords(overlap_corner, dx, r, iv);
 #if defined(WARPX_DIM_3D)
-                if (!tile_realbox.contains(XDim3{pos.x,pos.y,pos.z})) {
-                    ZeroInitializeAndSetNegativeID(pa_idcpu, pa, ip,
loc_do_field_ionization, pi
-#ifdef WARPX_QED
-                            ,loc_has_quantum_sync, p_optical_depth_QSR
-                            ,loc_has_breit_wheeler, p_optical_depth_BW
-#endif
-                            );
-                    continue;
-                }
+                bool const box_contains = tile_realbox.contains(XDim3{pos.x,pos.y,pos.z});
 #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
                 amrex::ignore_unused(k);
-                if (!tile_realbox.contains(XDim3{pos.x,pos.z,0.0_rt})) {
-                    ZeroInitializeAndSetNegativeID(pa_idcpu, pa, ip, loc_do_field_ionization, pi
-#ifdef WARPX_QED
-                            ,loc_has_quantum_sync, p_optical_depth_QSR
-                            ,loc_has_breit_wheeler, p_optical_depth_BW
-#endif
-                            );
-                    continue;
-                }
-#else
+                bool const box_contains = tile_realbox.contains(XDim3{pos.x,pos.z,0.0_rt});
+#elif defined(WARPX_DIM_1D_Z)
                 amrex::ignore_unused(j,k);
-                if (!tile_realbox.contains(XDim3{pos.z,0.0_rt,0.0_rt})) {
+                bool const box_contains = tile_realbox.contains(XDim3{pos.z,0.0_rt,0.0_rt});
+#endif
+                if (!box_contains) {
                     ZeroInitializeAndSetNegativeID(pa_idcpu, pa, ip, loc_do_field_ionization, pi
 #ifdef WARPX_QED
                             ,loc_has_quantum_sync, p_optical_depth_QSR
                             ,loc_has_breit_wheeler, p_optical_depth_BW
 #endif
                             );
                     continue;
                 }
-#endif

                 // Save the x and y values to use in the insideBounds checks.
                 // This is needed with WARPX_DIM_RZ since x and y are modified.
@@ -1461,13 +1431,14 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int
                     pa[PIdx::x][ip] = pos.x;
                     pa[PIdx::y][ip] = pos.y;
                     pa[PIdx::z][ip] = pos.z;
-#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
-#ifdef WARPX_DIM_RZ
+#elif defined(WARPX_DIM_XZ)
+                    pa[PIdx::x][ip] = pos.x;
+                    pa[PIdx::z][ip] = pos.z;
+#elif defined(WARPX_DIM_RZ)
                     pa[PIdx::theta][ip] = theta;
-#endif
                     pa[PIdx::x][ip] = xb;
                     pa[PIdx::z][ip] = pos.z;
-#else
+#elif defined(WARPX_DIM_1D_Z)
                     pa[PIdx::z][ip] = pos.z;
 #endif
                 }
@@ -1967,7 +1938,7 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector,
 #elif defined(WARPX_DIM_XZ)
                     pa[PIdx::x][ip] = ppos.x;
                     pa[PIdx::z][ip] = ppos.z;
-#else
+#elif defined(WARPX_DIM_1D_Z)
                     pa[PIdx::z][ip] = ppos.z;
 #endif
                 }
@@ -2367,13 +2338,7 @@ PhysicalParticleContainer::SplitParticles (int lev)
     long np_split;
     if(split_type==0)
     {
-        #if defined(WARPX_DIM_3D)
-        np_split = 8;
-        #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
-        np_split = 4;
-        #else
-        np_split = 2;
-        #endif
+        np_split = amrex::Math::powi<AMREX_SPACEDIM>(2);
     } else {
         np_split = 2*AMREX_SPACEDIM;
     }
@@ -2832,15 +2797,12 @@ PhysicalParticleContainer::PushPX (WarpXParIter& pti,
     if (save_previous_position) {
 #if (AMREX_SPACEDIM >= 2)
         x_old = pti.GetAttribs(particle_comps["prev_x"]).dataPtr() + offset;
-#else
-        amrex::ignore_unused(x_old);
 #endif
 #if defined(WARPX_DIM_3D)
         y_old = pti.GetAttribs(particle_comps["prev_y"]).dataPtr() + offset;
-#else
-        amrex::ignore_unused(y_old);
 #endif
         z_old = pti.GetAttribs(particle_comps["prev_z"]).dataPtr() + offset;
+        amrex::ignore_unused(x_old, y_old);
     }

     // Loop over the particles and update their momentum
diff --git a/Source/Particles/Pusher/GetAndSetPosition.H b/Source/Particles/Pusher/GetAndSetPosition.H
index b9e7dc91684..ab06fe3d6cd 100644
--- a/Source/Particles/Pusher/GetAndSetPosition.H
+++ b/Source/Particles/Pusher/GetAndSetPosition.H
@@ -63,25 +63,16 @@ struct GetParticlePosition
 {
     using RType = amrex::ParticleReal;

-#if defined(WARPX_DIM_RZ) || defined(WARPX_DIM_XZ)
-    const RType* AMREX_RESTRICT m_x = nullptr;
-    const RType* AMREX_RESTRICT m_z = nullptr;
-#elif defined(WARPX_DIM_3D)
     const RType* AMREX_RESTRICT m_x = nullptr;
     const RType* AMREX_RESTRICT m_y = nullptr;
     const RType*
AMREX_RESTRICT m_z = nullptr; -#elif defined(WARPX_DIM_1D_Z) - const RType* AMREX_RESTRICT m_z = nullptr; -#endif #if defined(WARPX_DIM_RZ) const RType* m_theta = nullptr; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - static constexpr RType m_y_default = RType(0.0); -#elif defined(WARPX_DIM_1D_Z) +#endif + static constexpr RType m_x_default = RType(0.0); static constexpr RType m_y_default = RType(0.0); -#endif GetParticlePosition () = default; @@ -118,20 +109,20 @@ struct GetParticlePosition AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void operator() (const long i, RType& x, RType& y, RType& z) const noexcept { -#ifdef WARPX_DIM_RZ +#if defined(WARPX_DIM_RZ) RType const r = m_x[i]; x = r*std::cos(m_theta[i]); y = r*std::sin(m_theta[i]); z = m_z[i]; -#elif WARPX_DIM_3D +#elif defined(WARPX_DIM_3D) x = m_x[i]; y = m_y[i]; z = m_z[i]; -#elif WARPX_DIM_XZ +#elif defined(WARPX_DIM_XZ) x = m_x[i]; y = m_y_default; z = m_z[i]; -#else +#elif defined(WARPX_DIM_1D_Z) x = m_x_default; y = m_y_default; z = m_z[i]; @@ -146,19 +137,19 @@ struct GetParticlePosition AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void AsStored (const long i, RType& x, RType& y, RType& z) const noexcept { -#ifdef WARPX_DIM_RZ +#if defined(WARPX_DIM_RZ) x = m_x[i]; y = m_theta[i]; z = m_z[i]; -#elif WARPX_DIM_3D +#elif defined(WARPX_DIM_3D) x = m_x[i]; y = m_y[i]; z = m_z[i]; -#elif WARPX_DIM_XZ +#elif defined(WARPX_DIM_XZ) x = m_x[i]; y = m_y_default; z = m_z[i]; -#else +#elif defined(WARPX_DIM_1D_Z) x = m_x_default; y = m_y_default; z = m_z[i]; @@ -178,16 +169,17 @@ struct SetParticlePosition { using RType = amrex::ParticleReal; -#if defined(WARPX_DIM_RZ) || defined(WARPX_DIM_XZ) +#if defined(WARPX_DIM_3D) RType* AMREX_RESTRICT m_x; + RType* AMREX_RESTRICT m_y; RType* AMREX_RESTRICT m_z; -#elif defined(WARPX_DIM_3D) +#elif defined(WARPX_DIM_RZ) || defined(WARPX_DIM_XZ) RType* AMREX_RESTRICT m_x; - RType* AMREX_RESTRICT m_y; RType* AMREX_RESTRICT m_z; #elif defined(WARPX_DIM_1D_Z) RType* AMREX_RESTRICT m_z; #endif + #if defined(WARPX_DIM_RZ) RType* AMREX_RESTRICT m_theta; #endif @@ -217,18 +209,18 @@ struct SetParticlePosition void operator() (const long i, RType x, RType y, RType z) const noexcept { amrex::ignore_unused(x,y,z); -#ifdef WARPX_DIM_RZ +#if defined(WARPX_DIM_RZ) m_theta[i] = std::atan2(y, x); m_x[i] = std::sqrt(x*x + y*y); m_z[i] = z; -#elif WARPX_DIM_3D +#elif defined(WARPX_DIM_3D) m_x[i] = x; m_y[i] = y; m_z[i] = z; -#elif WARPX_DIM_XZ +#elif defined(WARPX_DIM_XZ) m_x[i] = x; m_z[i] = z; -#else +#elif defined(WARPX_DIM_1D_Z) m_z[i] = z; #endif } @@ -241,18 +233,18 @@ struct SetParticlePosition void AsStored (const long i, RType x, RType y, RType z) const noexcept { amrex::ignore_unused(x,y,z); -#ifdef WARPX_DIM_RZ +#if defined(WARPX_DIM_RZ) m_x[i] = x; m_theta[i] = y; m_z[i] = z; -#elif WARPX_DIM_3D +#elif defined(WARPX_DIM_3D) m_x[i] = x; m_y[i] = y; m_z[i] = z; -#elif WARPX_DIM_XZ +#elif defined(WARPX_DIM_XZ) m_x[i] = x; m_z[i] = z; -#else +#elif defined(WARPX_DIM_1D_Z) m_z[i] = z; #endif } diff --git a/Source/Particles/Pusher/UpdatePosition.H b/Source/Particles/Pusher/UpdatePosition.H index 89c2de88e47..d11ba6fe21f 100644 --- a/Source/Particles/Pusher/UpdatePosition.H +++ b/Source/Particles/Pusher/UpdatePosition.H @@ -22,7 +22,9 @@ * x^{n+1} - x^{n} = dt*u^{n+1/2}/gamma^{n+1/2} */ AMREX_GPU_HOST_DEVICE AMREX_INLINE -void UpdatePosition(amrex::ParticleReal& x, amrex::ParticleReal& y, amrex::ParticleReal& z, +void UpdatePosition([[maybe_unused]] amrex::ParticleReal& x, + [[maybe_unused]] 
amrex::ParticleReal& y, + [[maybe_unused]] amrex::ParticleReal& z, const amrex::ParticleReal ux, const amrex::ParticleReal uy, const amrex::ParticleReal uz, const amrex::Real dt ) { @@ -35,13 +37,9 @@ void UpdatePosition(amrex::ParticleReal& x, amrex::ParticleReal& y, amrex::Parti // Update positions over one time step #if (AMREX_SPACEDIM >= 2) x += ux * inv_gamma * dt; -#else - amrex::ignore_unused(x); #endif #if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) // RZ pushes particles in 3D y += uy * inv_gamma * dt; -#else - amrex::ignore_unused(y); #endif z += uz * inv_gamma * dt; } @@ -53,10 +51,12 @@ void UpdatePosition(amrex::ParticleReal& x, amrex::ParticleReal& y, amrex::Parti * See Eqs. 15 and 17 in Chen, JCP 407 (2020) 109228. */ AMREX_GPU_HOST_DEVICE AMREX_INLINE -void UpdatePositionImplicit(amrex::ParticleReal& x, amrex::ParticleReal& y, amrex::ParticleReal& z, - const amrex::ParticleReal ux_n, const amrex::ParticleReal uy_n, const amrex::ParticleReal uz_n, - const amrex::ParticleReal ux, const amrex::ParticleReal uy, const amrex::ParticleReal uz, - const amrex::Real dt ) +void UpdatePositionImplicit ([[maybe_unused]] amrex::ParticleReal& x, + [[maybe_unused]] amrex::ParticleReal& y, + [[maybe_unused]] amrex::ParticleReal& z, + const amrex::ParticleReal ux_n, const amrex::ParticleReal uy_n, const amrex::ParticleReal uz_n, + const amrex::ParticleReal ux, const amrex::ParticleReal uy, const amrex::ParticleReal uz, + const amrex::Real dt ) { using namespace amrex::literals; @@ -74,13 +74,9 @@ void UpdatePositionImplicit(amrex::ParticleReal& x, amrex::ParticleReal& y, amre // Update positions over one time step #if (AMREX_SPACEDIM >= 2) x += ux * inv_gamma * dt; -#else - amrex::ignore_unused(x); #endif #if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) // RZ pushes particles in 3D y += uy * inv_gamma * dt; -#else - amrex::ignore_unused(y); #endif z += uz * inv_gamma * dt; } @@ -90,20 +86,19 @@ void UpdatePositionImplicit(amrex::ParticleReal& x, amrex::ParticleReal& y, amre * of the particles for given electric and magnetic fields on the grid. 
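 * The result, step_norm, is the squared change of the particle displacement
 * between two successive iterations (scaled by idxg2/idyg2/idzg2, presumably
 * inverse squared cell sizes); it serves as the convergence measure for the
 * iterative implicit position update.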
*/ AMREX_GPU_HOST_DEVICE AMREX_INLINE -void PositionNorm( amrex::ParticleReal dxp, amrex::ParticleReal dyp, amrex::ParticleReal dzp, - amrex::ParticleReal& dxp_save, amrex::ParticleReal& dyp_save, amrex::ParticleReal& dzp_save, - amrex::ParticleReal idxg2, amrex::ParticleReal idyg2, amrex::ParticleReal idzg2, +void PositionNorm ([[maybe_unused]] amrex::ParticleReal dxp, + [[maybe_unused]] amrex::ParticleReal dyp, + [[maybe_unused]] amrex::ParticleReal dzp, + [[maybe_unused]] amrex::ParticleReal& dxp_save, + [[maybe_unused]] amrex::ParticleReal& dyp_save, + [[maybe_unused]] amrex::ParticleReal& dzp_save, + [[maybe_unused]] amrex::ParticleReal idxg2, + [[maybe_unused]] amrex::ParticleReal idyg2, + [[maybe_unused]] amrex::ParticleReal idzg2, amrex::ParticleReal& step_norm, const int iter ) { using namespace amrex::literals; -#if defined(WARPX_DIM_1D_Z) - amrex::ignore_unused(dxp, dxp_save, idxg2); -#endif -#if !defined(WARPX_DIM_3D) - amrex::ignore_unused(dyp, dyp_save, idyg2); -#endif - if (iter==0) { step_norm = 1.0_prt; } else { step_norm = (dzp - dzp_save)*(dzp - dzp_save)*idzg2; From dcc972c038556954818a6dc074242095a3377e27 Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Thu, 12 Sep 2024 16:51:55 -0500 Subject: [PATCH 33/91] Clean up cache of clang sanitizer CI (#5255) --- .github/workflows/cleanup-cache.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cleanup-cache.yml b/.github/workflows/cleanup-cache.yml index 3abe232b879..c71a48cdad8 100644 --- a/.github/workflows/cleanup-cache.yml +++ b/.github/workflows/cleanup-cache.yml @@ -2,7 +2,7 @@ name: CleanUpCache on: workflow_run: - workflows: [🧹 clang-tidy, 🔍 CodeQL, 🐧 CUDA, 🐧 HIP, 🐧 Intel, 🍏 macOS, 🐧 OpenMP] + workflows: [🧴 clang sanitizers, 🧹 clang-tidy, 🔍 CodeQL, 🐧 CUDA, 🐧 HIP, 🐧 Intel, 🍏 macOS, 🐧 OpenMP] types: - completed From 4d4eecd3ef37c95435908687f8eb6d1cd0ba7b60 Mon Sep 17 00:00:00 2001 From: Olga Shapoval <30510597+oshapoval@users.noreply.github.com> Date: Thu, 12 Sep 2024 17:31:10 -0700 Subject: [PATCH 34/91] Added labeling function for WarpX CI tests (#5253) * Added `label_warpx_test` function which labels a test * CI tests, that run longer than 10 seconds, labeled with slow label * Updated documentation. * Clean-up * Added labels to `test_3d_beam_beam_collision` and `test_rz_multiJ_psatd` CI tests * Update Example to `-LE` Co-authored-by: Axel Huebl --- Docs/source/developers/testing.rst | 6 ++++++ Examples/CMakeLists.txt | 16 ++++++++++++++++ .../beam_beam_collision/CMakeLists.txt | 1 + .../laser_acceleration/CMakeLists.txt | 2 ++ Examples/Tests/langmuir/CMakeLists.txt | 1 + .../Tests/nci_psatd_stability/CMakeLists.txt | 1 + .../Tests/nodal_electrostatic/CMakeLists.txt | 1 + .../Tests/ohm_solver_em_modes/CMakeLists.txt | 1 + .../ohm_solver_ion_Landau_damping/CMakeLists.txt | 1 + .../CMakeLists.txt | 1 + Examples/Tests/particles_in_pml/CMakeLists.txt | 1 + 11 files changed, 32 insertions(+) diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/testing.rst index 8b85976c6f0..905aeb17a3f 100644 --- a/Docs/source/developers/testing.rst +++ b/Docs/source/developers/testing.rst @@ -81,6 +81,12 @@ For easier debugging, it can be convenient to run the tests on your local machin ctest --test-dir build -E laser_acceleration +* Run only tests not labeled with the ``slow`` label: + + .. 
code-block:: sh + + ctest --test-dir build -LE slow + Once the execution of CTest is completed, you can find all files associated with each test in its corresponding directory under ``build/bin/``. For example, if you run the single test ``test_3d_laser_acceleration``, you can find all files associated with this test in the directory ``build/bin/test_3d_laser_acceleration/``. diff --git a/Examples/CMakeLists.txt b/Examples/CMakeLists.txt index 7ebb1465be4..fe1da3d08e6 100644 --- a/Examples/CMakeLists.txt +++ b/Examples/CMakeLists.txt @@ -217,6 +217,22 @@ function(add_warpx_test endif() endfunction() +# Add a CTest label to a WarpX test set. +# +# Labeling it here will add the label to the run test, its analysis and its cleanup. +# +# name: unique name of this test +# label: ctest LABELS property value to be added +# +function(label_warpx_test name label) + set(_test_names "${name}.run;${name}.analysis;${name}.cleanup") + foreach(_test_name IN LISTS _test_names) + if(TEST ${_test_name}) + set_property(TEST ${_test_name} APPEND PROPERTY LABELS "${label}") + endif() + endforeach() +endfunction() + # Add tests (alphabetical order) ############################################## # diff --git a/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt b/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt index 0b34eeff865..09e96f04d7f 100644 --- a/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt +++ b/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt @@ -10,3 +10,4 @@ add_warpx_test( diags/diag1/ # output OFF # dependency ) +label_warpx_test(test_3d_beam_beam_collision slow) diff --git a/Examples/Physics_applications/laser_acceleration/CMakeLists.txt b/Examples/Physics_applications/laser_acceleration/CMakeLists.txt index 1a09a669a6d..c26b06b380a 100644 --- a/Examples/Physics_applications/laser_acceleration/CMakeLists.txt +++ b/Examples/Physics_applications/laser_acceleration/CMakeLists.txt @@ -30,6 +30,7 @@ add_warpx_test( diags/diag1000001 # output OFF # dependency ) +label_warpx_test(test_1d_laser_acceleration_fluid_boosted slow) add_warpx_test( test_1d_laser_acceleration_picmi # name @@ -140,3 +141,4 @@ add_warpx_test( diags/diag1000010 # output OFF # dependency ) +label_warpx_test(test_rz_laser_acceleration_picmi slow) diff --git a/Examples/Tests/langmuir/CMakeLists.txt b/Examples/Tests/langmuir/CMakeLists.txt index 3f44d364276..bd0cea79c7a 100644 --- a/Examples/Tests/langmuir/CMakeLists.txt +++ b/Examples/Tests/langmuir/CMakeLists.txt @@ -341,6 +341,7 @@ if(WarpX_FFT) diags/diag1000040 # output OFF # dependency ) + label_warpx_test(test_3d_langmuir_multi_psatd_vay_deposition_nodal slow) endif() add_warpx_test( diff --git a/Examples/Tests/nci_psatd_stability/CMakeLists.txt b/Examples/Tests/nci_psatd_stability/CMakeLists.txt index ed087fc4190..051f81b1784 100644 --- a/Examples/Tests/nci_psatd_stability/CMakeLists.txt +++ b/Examples/Tests/nci_psatd_stability/CMakeLists.txt @@ -203,4 +203,5 @@ if(WarpX_FFT) diags/diag1000050 # output OFF # dependency ) + label_warpx_test(test_rz_multiJ_psatd slow) endif() diff --git a/Examples/Tests/nodal_electrostatic/CMakeLists.txt b/Examples/Tests/nodal_electrostatic/CMakeLists.txt index 915298f15ab..a6b3f5b0102 100644 --- a/Examples/Tests/nodal_electrostatic/CMakeLists.txt +++ b/Examples/Tests/nodal_electrostatic/CMakeLists.txt @@ -10,3 +10,4 @@ add_warpx_test( diags/diag1000010 # output OFF # dependency ) +label_warpx_test(test_3d_nodal_electrostatic_solver slow) diff --git 
a/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt b/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt index e689c83a1e4..a08c321d88d 100644 --- a/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt @@ -20,3 +20,4 @@ add_warpx_test( diags/diag1000100 # output OFF # dependency ) +label_warpx_test(test_rz_ohm_solver_em_modes_picmi slow) diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt b/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt index 3b2d0bb794b..501b1ce2ced 100644 --- a/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt @@ -10,3 +10,4 @@ add_warpx_test( diags/diag1000100 # output OFF # dependency ) +label_warpx_test(test_2d_ohm_solver_landau_damping_picmi slow) diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt b/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt index 53a9bbdeada..81c6b0d41fd 100644 --- a/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt @@ -10,3 +10,4 @@ add_warpx_test( diags/diag1002500 # output OFF # dependency ) +label_warpx_test(test_1d_ohm_solver_ion_beam_picmi slow) diff --git a/Examples/Tests/particles_in_pml/CMakeLists.txt b/Examples/Tests/particles_in_pml/CMakeLists.txt index c1782dc4d1f..fb539461ec2 100644 --- a/Examples/Tests/particles_in_pml/CMakeLists.txt +++ b/Examples/Tests/particles_in_pml/CMakeLists.txt @@ -40,3 +40,4 @@ add_warpx_test( diags/diag1000200 # output OFF # dependency ) +label_warpx_test(test_3d_particles_in_pml_mr slow) From c9220fbfe93f5e4daa4849a8cf5c17fbb9453a37 Mon Sep 17 00:00:00 2001 From: Marco Garten Date: Thu, 12 Sep 2024 18:15:51 -0700 Subject: [PATCH 35/91] Refactor theory in docs and add multiphysics (#5245) - grouped multiphysics docs in a new subfolder - moved QED docs away from developer docs - updated a doxygen function in qed --- Docs/source/developers/developers.rst | 1 - Docs/source/index.rst | 3 +-- Docs/source/theory/boosted_frame.rst | 16 +++++++++++++++- .../theory/{ => boosted_frame}/Input_output.png | Bin .../theory/{ => boosted_frame}/input_output.rst | 2 +- .../theory/{ => multiphysics}/collisions.rst | 6 +++--- Docs/source/theory/multiphysics/ionization.rst | 8 ++++++++ .../{developers => theory/multiphysics}/qed.rst | 9 +++++---- Docs/source/theory/multiphysics_extensions.rst | 13 +++++++++++++ Docs/source/usage/parameters.rst | 8 ++++---- 10 files changed, 50 insertions(+), 16 deletions(-) rename Docs/source/theory/{ => boosted_frame}/Input_output.png (100%) rename Docs/source/theory/{ => boosted_frame}/input_output.rst (99%) rename Docs/source/theory/{ => multiphysics}/collisions.rst (98%) create mode 100644 Docs/source/theory/multiphysics/ionization.rst rename Docs/source/{developers => theory/multiphysics}/qed.rst (86%) create mode 100644 Docs/source/theory/multiphysics_extensions.rst diff --git a/Docs/source/developers/developers.rst b/Docs/source/developers/developers.rst index aa2e6196377..222f665b563 100644 --- a/Docs/source/developers/developers.rst +++ b/Docs/source/developers/developers.rst @@ -15,7 +15,6 @@ Implementation Details initialization diagnostics moving_window - qed portability warning_logger python diff --git a/Docs/source/index.rst b/Docs/source/index.rst index 6216ad52290..9668620976a 100644 --- a/Docs/source/index.rst +++ b/Docs/source/index.rst @@ -111,8 +111,7 @@ Theory theory/amr 
theory/boundary_conditions theory/boosted_frame - theory/input_output - theory/collisions + theory/multiphysics_extensions theory/kinetic_fluid_hybrid_model theory/cold_fluid_model diff --git a/Docs/source/theory/boosted_frame.rst b/Docs/source/theory/boosted_frame.rst index ea1f662bd30..04c30bedc98 100644 --- a/Docs/source/theory/boosted_frame.rst +++ b/Docs/source/theory/boosted_frame.rst @@ -12,7 +12,21 @@ The simulations of plasma accelerators from first principles are extremely compu A first principle simulation of a short driver beam (laser or charged particles) propagating through a plasma that is orders of magnitude longer necessitates a very large number of time steps. Recasting the simulation in a frame of reference that is moving close to the speed of light in the direction of the driver beam leads to simulating a driver beam that appears longer propagating through a plasma that appears shorter than in the laboratory. Thus, this relativistic transformation of space and time reduces the disparity of scales, and thereby the number of time steps to complete the simulation, by orders of magnitude. -Even using a moving window, however, a full PIC simulation of a plasma accelerator can be extraordinarily demanding computationally, as many time steps are needed to resolve the crossing of the short driver beam with the plasma column. As it turns out, choosing an optimal frame of reference that travels close to the speed of light in the direction of the laser or particle beam (as opposed to the usual choice of the laboratory frame) enables speedups by orders of magnitude :cite:p:`bf-Vayprl07,bf-Vaypop2011`. This is a result of the properties of Lorentz contraction and dilation of space and time. In the frame of the laboratory, a very short driver (laser or particle) beam propagates through a much longer plasma column, necessitating millions to tens of millions of time steps for parameters in the range of the BELLA or FACET-II experiments. As sketched in :numref:`fig_Boosted_frame`, in a frame moving with the driver beam in the plasma at velocity :math:`v=\beta c` (where :math:`c` is the speed of light in vacuum), the beam length is now elongated by :math:`\approx(1+\beta)\gamma` while the plasma contracts by :math:`\gamma` (where :math:`\gamma=1/\sqrt{1-\beta^2}` is the relativistic factor associated with the frame velocity). The number of time steps that is needed to simulate a “longer” beam through a “shorter” plasma is now reduced by up to :math:`\approx(1+\beta) \gamma^2` (a detailed derivation of the speedup is given below). +Even using a moving window, however, a full PIC simulation of a plasma accelerator can be extraordinarily demanding computationally, as many time steps are needed to resolve the crossing of the short driver beam with the plasma column. +As it turns out, choosing an optimal frame of reference that travels close to the speed of light in the direction of the laser or particle beam (as opposed to the usual choice of the laboratory frame) enables speedups by orders of magnitude :cite:p:`bf-Vayprl07,bf-Vaypop2011`. +This is a result of the properties of Lorentz contraction and dilation of space and time. +In the frame of the laboratory, a very short driver (laser or particle) beam propagates through a much longer plasma column, necessitating millions to tens of millions of time steps for parameters in the range of the BELLA or FACET-II experiments. 
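+In equations, writing :math:`L_b` and :math:`L_p` for the laboratory-frame beam and plasma lengths, the boosted-frame lengths quoted in the next sentence combine into
+
+.. math::
+
+   \frac{L_p'}{L_b'} = \frac{L_p/\gamma}{(1+\beta)\gamma L_b} = \frac{1}{(1+\beta)\gamma^2}\,\frac{L_p}{L_b},
+
+which is the origin of the :math:`\approx(1+\beta)\gamma^2` reduction in the number of time steps stated below.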
+As sketched in :numref:`fig_Boosted_frame`, in a frame moving with the driver beam in the plasma at velocity :math:`v=\beta c` (where :math:`c` is the speed of light in vacuum), the beam length is now elongated by :math:`\approx(1+\beta)\gamma` while the plasma contracts by :math:`\gamma` (where :math:`\gamma=1/\sqrt{1-\beta^2}` is the relativistic factor associated with the frame velocity).
+The number of time steps that is needed to simulate a “longer” beam through a “shorter” plasma is now reduced by up to :math:`\approx(1+\beta) \gamma^2` (a detailed derivation of the speedup is given below).
+
+.. note::
+
+   For additional reading on inputs and outputs in boosted frame simulations, consider the following pages:
+
+   .. toctree::
+      :maxdepth: 1
+
+      boosted_frame/input_output
 
 The modeling of a plasma acceleration stage in a boosted frame involves the fully electromagnetic modeling of a plasma propagating at near the speed of light, for which Numerical Cerenkov
diff --git a/Docs/source/theory/Input_output.png b/Docs/source/theory/boosted_frame/Input_output.png
similarity index 100%
rename from Docs/source/theory/Input_output.png
rename to Docs/source/theory/boosted_frame/Input_output.png
diff --git a/Docs/source/theory/input_output.rst b/Docs/source/theory/boosted_frame/input_output.rst
similarity index 99%
rename from Docs/source/theory/input_output.rst
rename to Docs/source/theory/boosted_frame/input_output.rst
index 21a5f5c8d2c..f47915116df 100644
--- a/Docs/source/theory/input_output.rst
+++ b/Docs/source/theory/boosted_frame/input_output.rst
@@ -1,4 +1,4 @@
-.. _theory-io:
+.. _boosted_frame-io:
 
 Inputs and Outputs
 ==================
diff --git a/Docs/source/theory/collisions.rst b/Docs/source/theory/multiphysics/collisions.rst
similarity index 98%
rename from Docs/source/theory/collisions.rst
rename to Docs/source/theory/multiphysics/collisions.rst
index ef6b83a699b..08485345a13 100644
--- a/Docs/source/theory/collisions.rst
+++ b/Docs/source/theory/multiphysics/collisions.rst
@@ -1,4 +1,4 @@
-.. _theory-collisions:
+.. _multiphysics-collisions:
 
 Collisions
 ==========
@@ -8,7 +8,7 @@ including collisions between kinetic particles (Coulomb collisions, DSMC, nuclear fusion) as well as collisions between kinetic particles and a fixed (i.e. non-evolving) background species (MCC, background stopping).
-.. _theory-collisions-mcc:
+.. _multiphysics-collisions-mcc:
 
 Background Monte Carlo Collisions (MCC)
 ---------------------------------------
@@ -52,7 +52,7 @@ for all scattering processes are evaluated at the energy as calculated above. Once a particle is selected for a specific collision process, that process determines how the particle is scattered as outlined below.
-.. _theory-collisions-dsmc:
+.. _multiphysics-collisions-dsmc:
 
 Direct Simulation Monte Carlo (DSMC)
 ------------------------------------
diff --git a/Docs/source/theory/multiphysics/ionization.rst b/Docs/source/theory/multiphysics/ionization.rst
new file mode 100644
index 00000000000..d93781603d9
--- /dev/null
+++ b/Docs/source/theory/multiphysics/ionization.rst
@@ -0,0 +1,8 @@
+.. _multiphysics-ionization:
+
+Ionization
+==========
+
+.. note::
+
+   This section will be added soon!
diff --git a/Docs/source/developers/qed.rst b/Docs/source/theory/multiphysics/qed.rst
similarity index 86%
rename from Docs/source/developers/qed.rst
rename to Docs/source/theory/multiphysics/qed.rst
index f509d6ea386..3e961bb2898 100644
--- a/Docs/source/developers/qed.rst
+++ b/Docs/source/theory/multiphysics/qed.rst
@@ -1,7 +1,7 @@
-..
_developers-qed: +.. _multiphysics-qed: -QED -==================== +Quantum Electrodynamics (QED) +============================= Quantum synchrotron ------------------- @@ -28,7 +28,8 @@ electron-positron pairs can be created in vacuum in the function ``MultiParticleContainer::doQEDSchwinger`` in turn calls the function ``filterCreateTransformFromFAB``: -.. doxygenfunction:: filterCreateTransformFromFAB(DstTile&, DstTile&, const amrex::Box, const FABs&, const Index, const Index, FilterFunc&&, CreateFunc1&&, CreateFunc2&&, TransFunc&&) +Filter Create Transform Function +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``filterCreateTransformFromFAB`` proceeds in three steps. In the filter phase, we loop on every cell and calculate the number of physical pairs created within diff --git a/Docs/source/theory/multiphysics_extensions.rst b/Docs/source/theory/multiphysics_extensions.rst new file mode 100644 index 00000000000..6c9ab040ef2 --- /dev/null +++ b/Docs/source/theory/multiphysics_extensions.rst @@ -0,0 +1,13 @@ +.. _theory-multiphysics: + +Multi-Physics Extensions +======================== + +WarpX includes various extensions to the traditional PIC loop which enable it to model additional physics. + +.. toctree:: + :maxdepth: 1 + + multiphysics/collisions + multiphysics/ionization + multiphysics/qed diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 1d9c0c14bbf..86ab7594c5f 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -821,7 +821,7 @@ Particle initialization * ``particles.rigid_injected_species`` (`strings`, separated by spaces) List of species injected using the rigid injection method. The rigid injection method is useful when injecting a relativistic particle beam in boosted-frame - simulations; see the :ref:`input-output section ` for more details. + simulations; see the :ref:`input-output section ` for more details. For species injected using this method, particles are translated along the `+z` axis with constant velocity as long as their ``z`` coordinate verifies ``zzinject_plane``, @@ -1953,7 +1953,7 @@ Collision models ---------------- WarpX provides several particle collision models, using varying degrees of approximation. -Details about the collision models can be found in the :ref:`theory section `. +Details about the collision models can be found in the :ref:`theory section `. * ``collisions.collision_names`` (`strings`, separated by spaces) The name of each collision type. @@ -1976,10 +1976,10 @@ Details about the collision models can be found in the :ref:`theory section .species_type = 'deuterium'``) - ``dsmc`` for pair-wise, non-Coulomb collisions between kinetic species. This is a "direct simulation Monte Carlo" treatment of collisions between - kinetic species. See :ref:`DSMC section `. + kinetic species. See :ref:`DSMC section `. - ``background_mcc`` for collisions between particles and a neutral background. This is a relativistic Monte Carlo treatment for particles colliding - with a neutral background gas. See :ref:`MCC section `. + with a neutral background gas. See :ref:`MCC section `. - ``background_stopping`` for slowing of ions due to collisions with electrons or ions. This implements the approximate formulae as derived in Introduction to Plasma Physics, from Goldston and Rutherford, section 14.2. 
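The three-phase filter/create/transform pattern described in the QED page above can be sketched generically as follows. This is an illustrative sketch only: the struct, function name, and signatures below are hypothetical and are not the actual WarpX interface of ``filterCreateTransformFromFAB``.

    #include <vector>

    struct Particle { double z = 0.0; double weight = 0.0; };

    // Generic filter/create/transform pass over the cells of a tile:
    // filter(icell) returns how many particles cell icell creates,
    // create(icell, ip) initializes each new particle, and
    // transform(p) then sets its derived attributes (momentum, weight, ...).
    template <typename Filter, typename Create, typename Transform>
    void filterCreateTransformSketch (int ncells, std::vector<Particle>& dst,
                                      Filter&& filter, Create&& create,
                                      Transform&& transform)
    {
        for (int icell = 0; icell < ncells; ++icell) {
            int const npart = filter(icell);      // filter phase
            for (int ip = 0; ip < npart; ++ip) {
                Particle p = create(icell, ip);   // create phase
                transform(p);                     // transform phase
                dst.push_back(p);
            }
        }
    }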
From 73e1f84e6800296ff2f329c135b4ca3939ea4f29 Mon Sep 17 00:00:00 2001
From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
Date: Thu, 12 Sep 2024 19:36:11 -0700
Subject: [PATCH 36/91] CTest: more docs on `-R` regular expression filtering
 (#5257)

* CTest: more docs on `-R` regular expression filtering
* Simplify
* Regex101 Link
* Rephrase
* Rephrase

Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>

---------

Co-authored-by: Axel Huebl
---
 Docs/source/developers/testing.rst | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/testing.rst
index 905aeb17a3f..5bbc7d0fef4 100644
--- a/Docs/source/developers/testing.rst
+++ b/Docs/source/developers/testing.rst
@@ -81,6 +81,18 @@ For easier debugging, it can be convenient to run the tests on your local machin
      ctest --test-dir build -E laser_acceleration
 
+* Sometimes two or more tests share a large number of input parameters and differ by a small set of options.
+  Such tests typically also share a base string in their names.
+  For example, you can find three different tests named ``test_3d_langmuir_multi``, ``test_3d_langmuir_multi_nodal`` and ``test_3d_langmuir_multi_picmi``.
+  In such a case, if you wish to run the test ``test_3d_langmuir_multi`` only, this can be done again with the ``-R`` regular `expression filter <https://regex101.com>`__ via
+
+  .. code-block:: sh
+
+     ctest --test-dir build -R "test_3d_langmuir_multi\..*"
+
+  Note that filtering with ``-R "test_3d_langmuir_multi"`` would include the additional tests that have the same substring in their name and would not be sufficient to isolate a single test.
+  Note also that the escaping ``\.`` in the regular expression is necessary in order to take into account the fact that each test is automatically appended with the strings ``.run``, ``.analysis`` and possibly ``.cleanup``.
+
 * Run only tests not labeled with the ``slow`` label: .. code-block:: sh
@@ -161,7 +173,7 @@ A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as If you need a new Python package dependency for testing, please add it in `Regression/requirements.txt `__.
-Sometimes, two tests share a large number of input parameters. The shared input parameters can be collected in a "base" input file that can be passed as a runtime parameter in the actual test input files through the parameter ``FILE``.
+Sometimes two or more tests share a large number of input parameters. The shared input parameters can be collected in a "base" input file that can be passed as a runtime parameter in the actual test input files through the parameter ``FILE``.
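+For instance (the file name and parameter values below are purely illustrative), an ``inputs_base_3d`` file can hold the common parameters, and each individual test input then reduces to its overrides:
+
+.. code-block:: sh
+
+   # hypothetical test input reusing a shared base file
+   FILE = inputs_base_3d
+   warpx.grid_type = collocated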
Naming conventions for automated tests
--------------------------------------

From 32737be55ae62380329ecf7b25c0368d4ab36d17 Mon Sep 17 00:00:00 2001
From: David Grote
Date: Thu, 12 Sep 2024 19:54:49 -0700
Subject: [PATCH 37/91] Cleanup stencils for filtering (#4985)

* Clean up the FDTD stencils, simplifying the dimension macros
* Small fix
* Further clean up of macros
* Clean up of macros in BilinearFilter
* Fix in 2D for NCIGodfreyFilter
* Removed AMREX_SPACEDIM
* Some clean up
* Bug fix
* Clean up of DoFilter for GPU
* Revert previous commit
* Change (x,y,z) to (0,1,2) to be more general
---
 Source/Filter/BilinearFilter.cpp   |  33 +++----
 Source/Filter/Filter.H             |  22 +++--
 Source/Filter/Filter.cpp           | 138 +++++++++++++----------------
 Source/Filter/NCIGodfreyFilter.cpp |  19 ++--
 4 files changed, 93 insertions(+), 119 deletions(-)

diff --git a/Source/Filter/BilinearFilter.cpp b/Source/Filter/BilinearFilter.cpp
index a095bce6ae3..66976045943 100644
--- a/Source/Filter/BilinearFilter.cpp
+++ b/Source/Filter/BilinearFilter.cpp
@@ -64,34 +64,25 @@ void BilinearFilter::ComputeStencils(){ WARPX_PROFILE("BilinearFilter::ComputeStencils()"); int i = 0; for (const auto& el : npass_each_dir ) {
-        stencil_length_each_dir[i++] = static_cast<int>(el);
+        stencil_length_each_dir[i++] = static_cast<int>(el) + 1;
    }
-    stencil_length_each_dir += 1.;
+
+    m_stencil_0.resize( 1u + npass_each_dir[0] );
+    compute_stencil(m_stencil_0, npass_each_dir[0]);
+#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) || defined(WARPX_DIM_3D)
+    m_stencil_1.resize( 1u + npass_each_dir[1] );
+    compute_stencil(m_stencil_1, npass_each_dir[1]);
+#endif
#if defined(WARPX_DIM_3D)
-    // npass_each_dir = npass_x npass_y npass_z
-    stencil_x.resize( 1u + npass_each_dir[0] );
-    stencil_y.resize( 1u + npass_each_dir[1] );
-    stencil_z.resize( 1u + npass_each_dir[2] );
-    compute_stencil(stencil_x, npass_each_dir[0]);
-    compute_stencil(stencil_y, npass_each_dir[1]);
-    compute_stencil(stencil_z, npass_each_dir[2]);
-#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
-    // npass_each_dir = npass_x npass_z
-    stencil_x.resize( 1u + npass_each_dir[0] );
-    stencil_z.resize( 1u + npass_each_dir[1] );
-    compute_stencil(stencil_x, npass_each_dir[0]);
-    compute_stencil(stencil_z, npass_each_dir[1]);
-#elif defined(WARPX_DIM_1D_Z)
-    // npass_each_dir = npass_z
-    stencil_z.resize( 1u + npass_each_dir[0] );
-    compute_stencil(stencil_z, npass_each_dir[0]);
+    m_stencil_2.resize( 1u + npass_each_dir[2] );
+    compute_stencil(m_stencil_2, npass_each_dir[2]);
#endif
+
    slen = stencil_length_each_dir.dim3();
-#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+#if defined(WARPX_DIM_1D_Z) || defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
    slen.z = 1;
#endif
#if defined(WARPX_DIM_1D_Z)
    slen.y = 1;
-    slen.z = 1;
#endif
}
diff --git a/Source/Filter/Filter.H b/Source/Filter/Filter.H
index 5c648a75c4c..584f6b151d7 100644
--- a/Source/Filter/Filter.H
+++ b/Source/Filter/Filter.H
@@ -21,9 +21,9 @@ public:
    // Apply stencil on MultiFab.
    // Guard cells are handled inside this function
-    void ApplyStencil(amrex::MultiFab& dstmf,
-                      const amrex::MultiFab& srcmf, int lev, int scomp=0,
-                      int dcomp=0, int ncomp=10000);
+    void ApplyStencil (amrex::MultiFab& dstmf,
+                       const amrex::MultiFab& srcmf, int lev, int scomp=0,
+                       int dcomp=0, int ncomp=10000);

    // Apply stencil on a FabArray.
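    // This overload filters a single tile: tbx selects the box to update,
    // scomp/dcomp are the first source/destination components, ncomp the count.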
void ApplyStencil (amrex::FArrayBox& dstfab, @@ -31,20 +31,18 @@ public: int scomp=0, int dcomp=0, int ncomp=10000); // public for cuda - void DoFilter(const amrex::Box& tbx, - amrex::Array4 const& tmp, - amrex::Array4 const& dst, - int scomp, int dcomp, int ncomp); + void DoFilter (const amrex::Box& tbx, + amrex::Array4 const& tmp, + amrex::Array4 const& dst, + int scomp, int dcomp, int ncomp); - // In 2D, stencil_length_each_dir = {length(stencil_x), length(stencil_z)} + // Length of stencil in each included direction amrex::IntVect stencil_length_each_dir; protected: // Stencil along each direction. - // in 2D, stencil_y is not initialized. - amrex::Gpu::DeviceVector stencil_x, stencil_y, stencil_z; - // Length of each stencil. - // In 2D, slen = {length(stencil_x), length(stencil_z), 1} + amrex::Gpu::DeviceVector m_stencil_0, m_stencil_1, m_stencil_2; + // Length of each stencil, 1 for dimensions not included amrex::Dim3 slen; private: diff --git a/Source/Filter/Filter.cpp b/Source/Filter/Filter.cpp index 6243ce4ebbf..40dfc54f8cb 100644 --- a/Source/Filter/Filter.cpp +++ b/Source/Filter/Filter.cpp @@ -87,23 +87,21 @@ Filter::ApplyStencil (FArrayBox& dstfab, const FArrayBox& srcfab, DoFilter(tbx, src, dst, scomp, dcomp, ncomp); } -/* \brief Apply stencil (2D/3D, CPU/GPU) +/* \brief Apply stencil (CPU/GPU) */ void Filter::DoFilter (const Box& tbx, Array4 const& src, Array4 const& dst, int scomp, int dcomp, int ncomp) { -#if (AMREX_SPACEDIM >= 2) - amrex::Real const* AMREX_RESTRICT sx = stencil_x.data(); -#endif -#if defined(WARPX_DIM_3D) - amrex::Real const* AMREX_RESTRICT sy = stencil_y.data(); -#endif - amrex::Real const* AMREX_RESTRICT sz = stencil_z.data(); + AMREX_D_TERM( + amrex::Real const* AMREX_RESTRICT s0 = m_stencil_0.data();, + amrex::Real const* AMREX_RESTRICT s1 = m_stencil_1.data();, + amrex::Real const* AMREX_RESTRICT s2 = m_stencil_2.data(); + ) Dim3 slen_local = slen; -#if defined(WARPX_DIM_3D) +#if AMREX_SPACEDIM == 3 AMREX_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n, { Real d = 0.0; @@ -115,25 +113,25 @@ void Filter::DoFilter (const Box& tbx, return src.contains(jj,kk,ll) ? src(jj,kk,ll,nn) : 0.0_rt; }; - for (int iz=0; iz < slen_local.z; ++iz){ - for (int iy=0; iy < slen_local.y; ++iy){ - for (int ix=0; ix < slen_local.x; ++ix){ - Real sss = sx[ix]*sy[iy]*sz[iz]; - d += sss*( src_zeropad(i-ix,j-iy,k-iz,scomp+n) - +src_zeropad(i+ix,j-iy,k-iz,scomp+n) - +src_zeropad(i-ix,j+iy,k-iz,scomp+n) - +src_zeropad(i+ix,j+iy,k-iz,scomp+n) - +src_zeropad(i-ix,j-iy,k+iz,scomp+n) - +src_zeropad(i+ix,j-iy,k+iz,scomp+n) - +src_zeropad(i-ix,j+iy,k+iz,scomp+n) - +src_zeropad(i+ix,j+iy,k+iz,scomp+n)); + for (int i2=0; i2 < slen_local.z; ++i2){ + for (int i1=0; i1 < slen_local.y; ++i1){ + for (int i0=0; i0 < slen_local.x; ++i0){ + Real sss = s0[i0]*s1[i1]*s2[i2]; + d += sss*( src_zeropad(i-i0,j-i1,k-i2,scomp+n) + +src_zeropad(i+i0,j-i1,k-i2,scomp+n) + +src_zeropad(i-i0,j+i1,k-i2,scomp+n) + +src_zeropad(i+i0,j+i1,k-i2,scomp+n) + +src_zeropad(i-i0,j-i1,k+i2,scomp+n) + +src_zeropad(i+i0,j-i1,k+i2,scomp+n) + +src_zeropad(i-i0,j+i1,k+i2,scomp+n) + +src_zeropad(i+i0,j+i1,k+i2,scomp+n)); } } } dst(i,j,k,dcomp+n) = d; }); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) +#elif AMREX_SPACEDIM == 2 AMREX_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n, { Real d = 0.0; @@ -145,21 +143,21 @@ void Filter::DoFilter (const Box& tbx, return src.contains(jj,kk,ll) ? 
src(jj,kk,ll,nn) : 0.0_rt; }; - for (int iz=0; iz < slen_local.z; ++iz){ - for (int iy=0; iy < slen_local.y; ++iy){ - for (int ix=0; ix < slen_local.x; ++ix){ - Real sss = sx[ix]*sz[iy]; - d += sss*( src_zeropad(i-ix,j-iy,k,scomp+n) - +src_zeropad(i+ix,j-iy,k,scomp+n) - +src_zeropad(i-ix,j+iy,k,scomp+n) - +src_zeropad(i+ix,j+iy,k,scomp+n)); + for (int i2=0; i2 < slen_local.z; ++i2){ + for (int i1=0; i1 < slen_local.y; ++i1){ + for (int i0=0; i0 < slen_local.x; ++i0){ + Real sss = s0[i0]*s1[i1]; + d += sss*( src_zeropad(i-i0,j-i1,k,scomp+n) + +src_zeropad(i+i0,j-i1,k,scomp+n) + +src_zeropad(i-i0,j+i1,k,scomp+n) + +src_zeropad(i+i0,j+i1,k,scomp+n)); } } } dst(i,j,k,dcomp+n) = d; }); -#elif defined(WARPX_DIM_1D_Z) +#elif AMREX_SPACEDIM == 1 AMREX_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n, { Real d = 0.0; @@ -171,21 +169,18 @@ void Filter::DoFilter (const Box& tbx, return src.contains(jj,kk,ll) ? src(jj,kk,ll,nn) : 0.0_rt; }; - for (int iz=0; iz < slen_local.z; ++iz){ - for (int iy=0; iy < slen_local.y; ++iy){ - for (int ix=0; ix < slen_local.x; ++ix){ - Real sss = sz[iy]; - d += sss*( src_zeropad(i-ix,j,k,scomp+n) - +src_zeropad(i+ix,j,k,scomp+n)); + for (int i2=0; i2 < slen_local.z; ++i2){ + for (int i1=0; i1 < slen_local.y; ++i1){ + for (int i0=0; i0 < slen_local.x; ++i0){ + Real sss = s0[i0]; + d += sss*( src_zeropad(i-i0,j,k,scomp+n) + +src_zeropad(i+i0,j,k,scomp+n)); } } } dst(i,j,k,dcomp+n) = d; }); -#else - WARPX_ABORT_WITH_MESSAGE( - "Filter not implemented for the current geometry!"); #endif } @@ -278,13 +273,11 @@ void Filter::DoFilter (const Box& tbx, const auto lo = amrex::lbound(tbx); const auto hi = amrex::ubound(tbx); // tmp and dst are of type Array4 (Fortran ordering) -#if (AMREX_SPACEDIM >= 2) - amrex::Real const* AMREX_RESTRICT sx = stencil_x.data(); -#endif -#if defined(WARPX_DIM_3D) - amrex::Real const* AMREX_RESTRICT sy = stencil_y.data(); -#endif - amrex::Real const* AMREX_RESTRICT sz = stencil_z.data(); + AMREX_D_TERM( + amrex::Real const* AMREX_RESTRICT s0 = m_stencil_0.data();, + amrex::Real const* AMREX_RESTRICT s1 = m_stencil_1.data();, + amrex::Real const* AMREX_RESTRICT s2 = m_stencil_2.data(); + ) for (int n = 0; n < ncomp; ++n) { // Set dst value to 0. 
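        // The separable stencil below accumulates into the zeroed dst one
        // offset (i0,i1,i2) at a time, adding the mirror points tmp(i-i0,...)
        // and tmp(i+i0,...) weighted by s0[i0]*s1[i1]*s2[i2]. (The center
        // coefficient is stored halved so that the i0=0 term is not
        // double-counted; cf. the explicit "/= 2" in
        // NCIGodfreyFilter::ComputeStencils below.)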
for (int k = lo.z; k <= hi.z; ++k) { @@ -295,41 +288,32 @@ void Filter::DoFilter (const Box& tbx, } } // 3 nested loop on 3D stencil - for (int iz=0; iz < slen.z; ++iz){ - for (int iy=0; iy < slen.y; ++iy){ - for (int ix=0; ix < slen.x; ++ix){ -#if defined(WARPX_DIM_3D) - const Real sss = sx[ix]*sy[iy]*sz[iz]; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const Real sss = sx[ix]*sz[iy]; -#else - const Real sss = sz[ix]; -#endif + for (int i2=0; i2 < slen.z; ++i2){ + for (int i1=0; i1 < slen.y; ++i1){ + for (int i0=0; i0 < slen.x; ++i0){ + const Real sss = AMREX_D_TERM(s0[i0], *s1[i1], *s2[i2]); // 3 nested loop on 3D array for (int k = lo.z; k <= hi.z; ++k) { for (int j = lo.y; j <= hi.y; ++j) { AMREX_PRAGMA_SIMD for (int i = lo.x; i <= hi.x; ++i) { -#if defined(WARPX_DIM_3D) - dst(i,j,k,dcomp+n) += sss*(tmp(i-ix,j-iy,k-iz,scomp+n) - +tmp(i+ix,j-iy,k-iz,scomp+n) - +tmp(i-ix,j+iy,k-iz,scomp+n) - +tmp(i+ix,j+iy,k-iz,scomp+n) - +tmp(i-ix,j-iy,k+iz,scomp+n) - +tmp(i+ix,j-iy,k+iz,scomp+n) - +tmp(i-ix,j+iy,k+iz,scomp+n) - +tmp(i+ix,j+iy,k+iz,scomp+n)); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - dst(i,j,k,dcomp+n) += sss*(tmp(i-ix,j-iy,k,scomp+n) - +tmp(i+ix,j-iy,k,scomp+n) - +tmp(i-ix,j+iy,k,scomp+n) - +tmp(i+ix,j+iy,k,scomp+n)); -#elif defined(WARPX_DIM_1D_Z) - dst(i,j,k,dcomp+n) += sss*(tmp(i-ix,j,k,scomp+n) - +tmp(i+ix,j,k,scomp+n)); -#else - WARPX_ABORT_WITH_MESSAGE( - "Filter not implemented for the current geometry!"); +#if AMREX_SPACEDIM == 3 + dst(i,j,k,dcomp+n) += sss*(tmp(i-i0,j-i1,k-i2,scomp+n) + +tmp(i+i0,j-i1,k-i2,scomp+n) + +tmp(i-i0,j+i1,k-i2,scomp+n) + +tmp(i+i0,j+i1,k-i2,scomp+n) + +tmp(i-i0,j-i1,k+i2,scomp+n) + +tmp(i+i0,j-i1,k+i2,scomp+n) + +tmp(i-i0,j+i1,k+i2,scomp+n) + +tmp(i+i0,j+i1,k+i2,scomp+n)); +#elif AMREX_SPACEDIM == 2 + dst(i,j,k,dcomp+n) += sss*(tmp(i-i0,j-i1,k,scomp+n) + +tmp(i+i0,j-i1,k,scomp+n) + +tmp(i-i0,j+i1,k,scomp+n) + +tmp(i+i0,j+i1,k,scomp+n)); +#elif AMREX_SPACEDIM == 1 + dst(i,j,k,dcomp+n) += sss*(tmp(i-i0,j,k,scomp+n) + +tmp(i+i0,j,k,scomp+n)); #endif } } diff --git a/Source/Filter/NCIGodfreyFilter.cpp b/Source/Filter/NCIGodfreyFilter.cpp index 9567bdf1bb2..a73efb0ec64 100644 --- a/Source/Filter/NCIGodfreyFilter.cpp +++ b/Source/Filter/NCIGodfreyFilter.cpp @@ -121,17 +121,18 @@ void NCIGodfreyFilter::ComputeStencils() # endif h_stencil_z[0] /= 2._rt; - stencil_x.resize(h_stencil_x.size()); + m_stencil_0.resize(h_stencil_x.size()); + Gpu::copyAsync(Gpu::hostToDevice,h_stencil_x.begin(),h_stencil_x.end(),m_stencil_0.begin()); # if defined(WARPX_DIM_3D) - stencil_y.resize(h_stencil_y.size()); + m_stencil_1.resize(h_stencil_y.size()); + m_stencil_2.resize(h_stencil_z.size()); + Gpu::copyAsync(Gpu::hostToDevice,h_stencil_y.begin(),h_stencil_y.end(),m_stencil_1.begin()); + Gpu::copyAsync(Gpu::hostToDevice,h_stencil_z.begin(),h_stencil_z.end(),m_stencil_2.begin()); +# elif (AMREX_SPACEDIM == 2) + // In 2D, the filter applies stencil_1 to the 2nd dimension + m_stencil_1.resize(h_stencil_z.size()); + Gpu::copyAsync(Gpu::hostToDevice,h_stencil_z.begin(),h_stencil_z.end(),m_stencil_1.begin()); # endif - stencil_z.resize(h_stencil_z.size()); - - Gpu::copyAsync(Gpu::hostToDevice,h_stencil_x.begin(),h_stencil_x.end(),stencil_x.begin()); -# if defined(WARPX_DIM_3D) - Gpu::copyAsync(Gpu::hostToDevice,h_stencil_y.begin(),h_stencil_y.end(),stencil_y.begin()); -# endif - Gpu::copyAsync(Gpu::hostToDevice,h_stencil_z.begin(),h_stencil_z.end(),stencil_z.begin()); Gpu::synchronize(); } From d2e58577905e66cb955d95cba4de4f1e19f899ac Mon Sep 17 00:00:00 
2001 From: Weiqun Zhang Date: Fri, 13 Sep 2024 11:51:50 -0500 Subject: [PATCH 38/91] Use AMREX_ENUM for algorithm selection (#5252) AMREX_ENUM is strongly typed enum (i.e., C++ scoped enum). It also supports reflection (e.g., converting from enumerator to string and vice versa). This provides more safety checks at compile time and simplifies the algorithm selection code by avoiding duplication of the mapping between enumerator and string. --- .github/workflows/cuda.yml | 2 +- Source/BoundaryConditions/PML.H | 3 +- Source/BoundaryConditions/PML.cpp | 3 +- .../WarpXFieldBoundaries.cpp | 8 +- Source/BoundaryConditions/WarpX_PEC.H | 28 +- Source/BoundaryConditions/WarpX_PEC.cpp | 28 +- Source/Diagnostics/ParticleIO.cpp | 2 +- .../Diagnostics/ReducedDiags/FieldReduction.H | 2 +- .../ReducedDiags/FieldReduction.cpp | 6 +- Source/FieldSolver/ElectrostaticSolver.cpp | 4 +- .../ApplySilverMuellerBoundary.cpp | 4 +- .../FiniteDifferenceSolver.H | 8 +- .../FiniteDifferenceSolver.cpp | 2 +- .../MagnetostaticSolver.cpp | 4 +- .../PsatdAlgorithmFirstOrder.H | 8 +- .../PsatdAlgorithmFirstOrder.cpp | 4 +- .../SpectralAlgorithms/PsatdAlgorithmRZ.H | 6 +- .../SpectralAlgorithms/PsatdAlgorithmRZ.cpp | 4 +- .../SpectralSolver/SpectralFieldData.H | 5 +- .../SpectralSolver/SpectralFieldData.cpp | 4 +- .../SpectralSolver/SpectralSolver.H | 8 +- .../SpectralSolver/SpectralSolver.cpp | 6 +- .../SpectralSolver/SpectralSolverRZ.H | 4 +- .../SpectralSolver/SpectralSolverRZ.cpp | 4 +- Source/Parallelization/GuardCellManager.H | 3 +- Source/Parallelization/GuardCellManager.cpp | 2 +- Source/Particles/Gather/FieldGather.H | 8 +- Source/Particles/ParticleIO.H | 2 +- .../Particles/PhysicalParticleContainer.cpp | 2 +- Source/Particles/Pusher/PushSelector.H | 2 +- Source/Utils/CMakeLists.txt | 1 - Source/Utils/Make.package | 1 - Source/Utils/WarpXAlgorithmSelection.H | 263 +++++++---------- Source/Utils/WarpXAlgorithmSelection.cpp | 271 ------------------ Source/Utils/WarpXUtil.cpp | 45 ++- Source/WarpX.H | 50 ++-- Source/WarpX.cpp | 57 ++-- Source/ablastr/utils/Enums.H | 12 +- cmake/dependencies/AMReX.cmake | 2 +- 39 files changed, 279 insertions(+), 599 deletions(-) delete mode 100644 Source/Utils/WarpXAlgorithmSelection.cpp diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 11765013bb7..8b1c99d917e 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -131,7 +131,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
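          # Pin AMReX to a fixed commit; this PR advances the pin, presumably to
          # pick up the AMREX_ENUM/get_enum_sloppy machinery it relies on.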
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 216ce6f37de4b65be57fc1006b3457b4fc318e03 && cd - + cd ../amrex && git checkout --detach 4460afbbce250ac6b463ea2bee0d9930c5059d2f && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/Source/BoundaryConditions/PML.H b/Source/BoundaryConditions/PML.H index ba9e2c3ab5d..203c109f026 100644 --- a/Source/BoundaryConditions/PML.H +++ b/Source/BoundaryConditions/PML.H @@ -143,7 +143,8 @@ public: amrex::Real dt, int nox_fft, int noy_fft, int noz_fft, ablastr::utils::enums::GridType grid_type, int do_moving_window, int pml_has_particles, int do_pml_in_domain, - int psatd_solution_type, int J_in_time, int rho_in_time, + PSATDSolutionType psatd_solution_type, + JInTime J_in_time, RhoInTime rho_in_time, bool do_pml_dive_cleaning, bool do_pml_divb_cleaning, const amrex::IntVect& fill_guards_fields, const amrex::IntVect& fill_guards_current, diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index f413831c74d..a66dcb5c0bb 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -552,7 +552,8 @@ PML::PML (const int lev, const BoxArray& grid_ba, Real dt, int nox_fft, int noy_fft, int noz_fft, ablastr::utils::enums::GridType grid_type, int do_moving_window, int /*pml_has_particles*/, int do_pml_in_domain, - const int psatd_solution_type, const int J_in_time, const int rho_in_time, + const PSATDSolutionType psatd_solution_type, + const JInTime J_in_time, const RhoInTime rho_in_time, const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning, const amrex::IntVect& fill_guards_fields, const amrex::IntVect& fill_guards_current, diff --git a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp index 2b063b99a15..6d2525bc724 100644 --- a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp +++ b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp @@ -25,8 +25,8 @@ namespace /** Returns true if any field boundary is set to FieldBoundaryType FT, else returns false.*/ template [[nodiscard]] - bool isAnyBoundary (const amrex::Vector& field_boundary_lo, - const amrex::Vector& field_boundary_hi) + bool isAnyBoundary (const amrex::Array& field_boundary_lo, + const amrex::Array& field_boundary_hi) { const auto isFT = [](const auto& b){ return b == FT;}; @@ -37,8 +37,8 @@ namespace /** Returns true if any particle boundary is set to ParticleBoundaryType PT, else returns false.*/ template [[nodiscard]] - bool isAnyBoundary (const amrex::Vector& particle_boundary_lo, - const amrex::Vector& particle_boundary_hi) + bool isAnyBoundary (const amrex::Array& particle_boundary_lo, + const amrex::Array& particle_boundary_hi) { const auto isPT = [](const auto& b){ return b == PT;}; diff --git a/Source/BoundaryConditions/WarpX_PEC.H b/Source/BoundaryConditions/WarpX_PEC.H index a6af894beb4..c387d8c1793 100644 --- a/Source/BoundaryConditions/WarpX_PEC.H +++ b/Source/BoundaryConditions/WarpX_PEC.H @@ -31,8 +31,8 @@ namespace PEC { */ void ApplyPECtoEfield ( std::array Efield, - const amrex::Vector& field_boundary_lo, - const amrex::Vector& field_boundary_hi, + const amrex::Array& field_boundary_lo, + const amrex::Array& field_boundary_hi, const amrex::IntVect& ng_fieldgather, const amrex::Geometry& geom, int lev, PatchType patch_type, const amrex::Vector& ref_ratios, bool split_pml_field = false); @@ -52,8 +52,8 @@ namespace PEC { */ void 
ApplyPECtoBfield ( std::array Bfield, - const amrex::Vector& field_boundary_lo, - const amrex::Vector& field_boundary_hi, + const amrex::Array& field_boundary_lo, + const amrex::Array& field_boundary_hi, const amrex::IntVect& ng_fieldgather, const amrex::Geometry& geom, int lev, PatchType patch_type, const amrex::Vector& ref_ratios); @@ -73,10 +73,10 @@ namespace PEC { */ void ApplyReflectiveBoundarytoRhofield( amrex::MultiFab* rho, - const amrex::Vector& field_boundary_lo, - const amrex::Vector& field_boundary_hi, - const amrex::Vector& particle_boundary_lo, - const amrex::Vector& particle_boundary_hi, + const amrex::Array& field_boundary_lo, + const amrex::Array& field_boundary_hi, + const amrex::Array& particle_boundary_lo, + const amrex::Array& particle_boundary_hi, const amrex::Geometry& geom, int lev, PatchType patch_type, const amrex::Vector& ref_ratios); @@ -97,10 +97,10 @@ namespace PEC { void ApplyReflectiveBoundarytoJfield( amrex::MultiFab* Jx, amrex::MultiFab* Jy, amrex::MultiFab* Jz, - const amrex::Vector& field_boundary_lo, - const amrex::Vector& field_boundary_hi, - const amrex::Vector& particle_boundary_lo, - const amrex::Vector& particle_boundary_hi, + const amrex::Array& field_boundary_lo, + const amrex::Array& field_boundary_hi, + const amrex::Array& particle_boundary_lo, + const amrex::Array& particle_boundary_hi, const amrex::Geometry& geom, int lev, PatchType patch_type, const amrex::Vector& ref_ratios); @@ -117,8 +117,8 @@ namespace PEC { */ void ApplyPECtoElectronPressure ( amrex::MultiFab* Pefield, - const amrex::Vector& field_boundary_lo, - const amrex::Vector& field_boundary_hi, + const amrex::Array& field_boundary_lo, + const amrex::Array& field_boundary_hi, const amrex::Geometry& geom, int lev, PatchType patch_type, const amrex::Vector& ref_ratios); } diff --git a/Source/BoundaryConditions/WarpX_PEC.cpp b/Source/BoundaryConditions/WarpX_PEC.cpp index 3ad0ab4663e..bedc5b264b7 100644 --- a/Source/BoundaryConditions/WarpX_PEC.cpp +++ b/Source/BoundaryConditions/WarpX_PEC.cpp @@ -457,8 +457,8 @@ namespace void PEC::ApplyPECtoEfield ( std::array Efield, - const amrex::Vector& field_boundary_lo, - const amrex::Vector& field_boundary_hi, + const amrex::Array& field_boundary_lo, + const amrex::Array& field_boundary_hi, const amrex::IntVect& ng_fieldgather, const amrex::Geometry& geom, const int lev, PatchType patch_type, const amrex::Vector& ref_ratios, const bool split_pml_field) @@ -540,8 +540,8 @@ PEC::ApplyPECtoEfield ( void PEC::ApplyPECtoBfield ( std::array Bfield, - const amrex::Vector& field_boundary_lo, - const amrex::Vector& field_boundary_hi, + const amrex::Array& field_boundary_lo, + const amrex::Array& field_boundary_hi, const amrex::IntVect& ng_fieldgather, const amrex::Geometry& geom, const int lev, PatchType patch_type, const amrex::Vector& ref_ratios) { @@ -627,10 +627,10 @@ PEC::ApplyPECtoBfield ( void PEC::ApplyReflectiveBoundarytoRhofield ( amrex::MultiFab* rho, - const amrex::Vector& field_boundary_lo, - const amrex::Vector& field_boundary_hi, - const amrex::Vector& particle_boundary_lo, - const amrex::Vector& particle_boundary_hi, + const amrex::Array& field_boundary_lo, + const amrex::Array& field_boundary_hi, + const amrex::Array& particle_boundary_lo, + const amrex::Array& particle_boundary_hi, const amrex::Geometry& geom, const int lev, PatchType patch_type, const amrex::Vector& ref_ratios) { @@ -713,10 +713,10 @@ PEC::ApplyReflectiveBoundarytoRhofield ( void PEC::ApplyReflectiveBoundarytoJfield( amrex::MultiFab* Jx, amrex::MultiFab* Jy, 
amrex::MultiFab* Jz, - const amrex::Vector& field_boundary_lo, - const amrex::Vector& field_boundary_hi, - const amrex::Vector& particle_boundary_lo, - const amrex::Vector& particle_boundary_hi, + const amrex::Array& field_boundary_lo, + const amrex::Array& field_boundary_hi, + const amrex::Array& particle_boundary_lo, + const amrex::Array& particle_boundary_hi, const amrex::Geometry& geom, const int lev, PatchType patch_type, const amrex::Vector& ref_ratios) { @@ -902,8 +902,8 @@ PEC::ApplyReflectiveBoundarytoJfield( void PEC::ApplyPECtoElectronPressure ( amrex::MultiFab* Pefield, - const amrex::Vector& field_boundary_lo, - const amrex::Vector& field_boundary_hi, + const amrex::Array& field_boundary_lo, + const amrex::Array& field_boundary_hi, const amrex::Geometry& geom, const int lev, PatchType patch_type, const amrex::Vector& ref_ratios) { diff --git a/Source/Diagnostics/ParticleIO.cpp b/Source/Diagnostics/ParticleIO.cpp index bfb1867e741..bf67b51bbeb 100644 --- a/Source/Diagnostics/ParticleIO.cpp +++ b/Source/Diagnostics/ParticleIO.cpp @@ -241,7 +241,7 @@ MultiParticleContainer::WriteHeader (std::ostream& os) const void storePhiOnParticles ( PinnedMemoryParticleContainer& tmp, - int electrostatic_solver_id, bool is_full_diagnostic ) { + ElectrostaticSolverAlgo electrostatic_solver_id, bool is_full_diagnostic ) { using PinnedParIter = typename PinnedMemoryParticleContainer::ParIterType; diff --git a/Source/Diagnostics/ReducedDiags/FieldReduction.H b/Source/Diagnostics/ReducedDiags/FieldReduction.H index f467499ad56..9574caa3d5d 100644 --- a/Source/Diagnostics/ReducedDiags/FieldReduction.H +++ b/Source/Diagnostics/ReducedDiags/FieldReduction.H @@ -73,7 +73,7 @@ private: std::unique_ptr m_parser; // Type of reduction (e.g. Maximum, Minimum or Sum) - int m_reduction_type; + ReductionType m_reduction_type; public: diff --git a/Source/Diagnostics/ReducedDiags/FieldReduction.cpp b/Source/Diagnostics/ReducedDiags/FieldReduction.cpp index 683ae1921d6..9d0521eb181 100644 --- a/Source/Diagnostics/ReducedDiags/FieldReduction.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldReduction.cpp @@ -60,9 +60,7 @@ FieldReduction::FieldReduction (const std::string& rd_name) parser_string = std::regex_replace(parser_string, std::regex("\n\\s*"), " "); // read reduction type - std::string reduction_type_string; - pp_rd_name.get("reduction_type", reduction_type_string); - m_reduction_type = GetAlgorithmInteger (pp_rd_name, "reduction_type"); + pp_rd_name.get_enum_sloppy("reduction_type", m_reduction_type, "-_"); if (amrex::ParallelDescriptor::IOProcessor()) { @@ -77,7 +75,7 @@ FieldReduction::FieldReduction (const std::string& rd_name) ofs << m_sep; ofs << "[" << c++ << "]time(s)"; ofs << m_sep; - ofs << "[" << c++ << "]" + reduction_type_string + " of " + parser_string + " (SI units)"; + ofs << "[" << c++ << "]" + amrex::getEnumNameString(m_reduction_type) + " of " + parser_string + " (SI units)"; ofs << std::endl; // close file diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp index b341844304a..22d1c663a53 100644 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolver.cpp @@ -1059,7 +1059,7 @@ void ElectrostaticSolver::PoissonBoundaryHandler::definePhiBCs (const amrex::Geo else { WARPX_ABORT_WITH_MESSAGE( "Field boundary conditions have to be either periodic, PEC or neumann " - "when using the electrostatic Multigrid solver, but they are " + GetFieldBCTypeString(WarpX::field_boundary_lo[idim]) + "when using the 
electrostatic Multigrid solver, but they are " + amrex::getEnumNameString(WarpX::field_boundary_lo[idim]) ); } @@ -1074,7 +1074,7 @@ void ElectrostaticSolver::PoissonBoundaryHandler::definePhiBCs (const amrex::Geo else { WARPX_ABORT_WITH_MESSAGE( "Field boundary conditions have to be either periodic, PEC or neumann " - "when using the electrostatic Multigrid solver, but they are " + GetFieldBCTypeString(WarpX::field_boundary_hi[idim]) + "when using the electrostatic Multigrid solver, but they are " + amrex::getEnumNameString(WarpX::field_boundary_hi[idim]) ); } } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp index 5e75698903e..e6f010e6f44 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp @@ -39,8 +39,8 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( std::array< std::unique_ptr, 3 >& Bfield, amrex::Box domain_box, amrex::Real const dt, - amrex::Vector field_boundary_lo, - amrex::Vector field_boundary_hi) { + amrex::Array field_boundary_lo, + amrex::Array field_boundary_hi) { // Ensure that we are using the Yee solver WARPX_ALWAYS_ASSERT_WITH_MESSAGE( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 6a33c89c184..0a9f21e6863 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -47,7 +47,7 @@ class FiniteDifferenceSolver * \param grid_type Whether the solver is applied to a collocated or staggered grid */ FiniteDifferenceSolver ( - int fdtd_algo, + ElectromagneticSolverAlgo fdtd_algo, std::array cell_size, ablastr::utils::enums::GridType grid_type ); @@ -92,8 +92,8 @@ class FiniteDifferenceSolver std::array< std::unique_ptr, 3 >& Bfield, amrex::Box domain_box, amrex::Real dt, - amrex::Vector field_boundary_lo, - amrex::Vector field_boundary_hi); + amrex::Array field_boundary_lo, + amrex::Array field_boundary_hi); void ComputeDivE ( const std::array,3>& Efield, amrex::MultiFab& divE ); @@ -179,7 +179,7 @@ class FiniteDifferenceSolver private: - int m_fdtd_algo; + ElectromagneticSolverAlgo m_fdtd_algo; ablastr::utils::enums::GridType m_grid_type; #ifdef WARPX_DIM_RZ diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp index 9af610031c0..fdd02c5249b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp @@ -28,7 +28,7 @@ /* This function initializes the stencil coefficients for the chosen finite-difference algorithm */ FiniteDifferenceSolver::FiniteDifferenceSolver ( - int const fdtd_algo, + ElectromagneticSolverAlgo const fdtd_algo, std::array cell_size, ablastr::utils::enums::GridType grid_type): // Register the type of finite-difference algorithm diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp index 305ccf02eb3..4ae988b9d10 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp @@ -322,7 +322,7 @@ void 
MagnetostaticSolver::VectorPoissonBoundaryHandler::defineVectorPotentialBCs else { WARPX_ABORT_WITH_MESSAGE( "Field boundary conditions have to be either periodic, PEC or neumann " - "when using the magnetostatic solver, but they are " + GetFieldBCTypeString(WarpX::field_boundary_lo[idim]) + "when using the magnetostatic solver, but they are " + amrex::getEnumNameString(WarpX::field_boundary_lo[idim]) ); } @@ -342,7 +342,7 @@ void MagnetostaticSolver::VectorPoissonBoundaryHandler::defineVectorPotentialBCs else { WARPX_ABORT_WITH_MESSAGE( "Field boundary conditions have to be either periodic, PEC or neumann " - "when using the magnetostatic solver, but they are " + GetFieldBCTypeString(WarpX::field_boundary_lo[idim]) + "when using the magnetostatic solver, but they are " + amrex::getEnumNameString(WarpX::field_boundary_lo[idim]) ); } } diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H index 23c38f33a85..94a16c13ec1 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H @@ -55,8 +55,8 @@ class PsatdAlgorithmFirstOrder : public SpectralBaseAlgorithm ablastr::utils::enums::GridType grid_type, amrex::Real dt, bool div_cleaning, - int J_in_time, - int rho_in_time); + JInTime J_in_time, + RhoInTime rho_in_time); /** * \brief Updates E, B, F, and G fields in spectral space, @@ -93,8 +93,8 @@ class PsatdAlgorithmFirstOrder : public SpectralBaseAlgorithm // Other member variables amrex::Real m_dt; bool m_div_cleaning; - int m_J_in_time; - int m_rho_in_time; + JInTime m_J_in_time; + RhoInTime m_rho_in_time; }; #endif // WARPX_USE_FFT #endif // WARPX_PSATD_ALGORITHM_FIRST_ORDER_H_ diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp index 14db3c9af48..c5f60e18d24 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp @@ -38,8 +38,8 @@ PsatdAlgorithmFirstOrder::PsatdAlgorithmFirstOrder ( ablastr::utils::enums::GridType grid_type, const amrex::Real dt, const bool div_cleaning, - const int J_in_time, - const int rho_in_time + const JInTime J_in_time, + const RhoInTime rho_in_time ) // Initializer list : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, grid_type), diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H index c2efbd79935..2cbb4d7402d 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H @@ -27,8 +27,8 @@ class PsatdAlgorithmRZ : public SpectralBaseAlgorithmRZ amrex::Real dt_step, bool update_with_rho, bool time_averaging, - int J_in_time, - int rho_in_time, + JInTime J_in_time, + RhoInTime rho_in_time, bool dive_cleaning, bool divb_cleaning); // Redefine functions from base class @@ -65,7 +65,7 @@ class PsatdAlgorithmRZ : public SpectralBaseAlgorithmRZ amrex::Real m_dt; bool m_update_with_rho; bool m_time_averaging; - int m_J_in_time; + JInTime m_J_in_time; bool m_dive_cleaning; bool m_divb_cleaning; SpectralRealCoefficients C_coef, S_ck_coef, 
X1_coef, X2_coef, X3_coef; diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp index efff5c30a41..66d99e29715 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp @@ -25,8 +25,8 @@ PsatdAlgorithmRZ::PsatdAlgorithmRZ (SpectralKSpaceRZ const & spectral_kspace, amrex::Real const dt, bool const update_with_rho, const bool time_averaging, - const int J_in_time, - const int rho_in_time, + const JInTime J_in_time, + const RhoInTime rho_in_time, const bool dive_cleaning, const bool divb_cleaning): // Initialize members of base class and member variables diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H index d6c4916bdac..42bf32c4724 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H @@ -11,6 +11,7 @@ #include "SpectralFieldData_fwd.H" #include "SpectralKSpace.H" +#include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpX_Complex.H" #include @@ -59,8 +60,8 @@ class SpectralFieldIndex */ SpectralFieldIndex (bool update_with_rho, bool time_averaging, - int J_in_time, - int rho_in_time, + JInTime J_in_time, + RhoInTime rho_in_time, bool dive_cleaning, bool divb_cleaning, bool pml, diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp b/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp index 8e7b9ed9ae4..a20429eea68 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp @@ -35,8 +35,8 @@ using namespace amrex; SpectralFieldIndex::SpectralFieldIndex (const bool update_with_rho, const bool time_averaging, - const int J_in_time, - const int rho_in_time, + const JInTime J_in_time, + const RhoInTime rho_in_time, const bool dive_cleaning, const bool divb_cleaning, const bool pml, diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.H b/Source/FieldSolver/SpectralSolver/SpectralSolver.H index 26581900c02..1aa1e540711 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.H @@ -12,6 +12,8 @@ #include "SpectralAlgorithms/SpectralBaseAlgorithm.H" #include "SpectralFieldData.H" +#include "Utils/WarpXAlgorithmSelection.H" + #include #include @@ -86,9 +88,9 @@ class SpectralSolver bool periodic_single_box, bool update_with_rho, bool fft_do_time_averaging, - int psatd_solution_type, - int J_in_time, - int rho_in_time, + PSATDSolutionType psatd_solution_type, + JInTime J_in_time, + RhoInTime rho_in_time, bool dive_cleaning, bool divb_cleaning); diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp index c6192187064..fcd52597e07 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp @@ -37,9 +37,9 @@ SpectralSolver::SpectralSolver ( const bool pml, const bool periodic_single_box, const bool update_with_rho, const bool fft_do_time_averaging, - const int psatd_solution_type, - const int J_in_time, - const int rho_in_time, + const PSATDSolutionType psatd_solution_type, + const JInTime J_in_time, + const RhoInTime rho_in_time, const bool dive_cleaning, const bool divb_cleaning) { diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H 
b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H index 2e94fe95da2..004255e4d72 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H @@ -41,8 +41,8 @@ class SpectralSolverRZ bool with_pml, bool update_with_rho, bool fft_do_time_averaging, - int J_in_time, - int rho_in_time, + JInTime J_in_time, + RhoInTime rho_in_time, bool dive_cleaning, bool divb_cleaning); diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp index 3529247ef56..7eb3f2c3ae6 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp @@ -36,8 +36,8 @@ SpectralSolverRZ::SpectralSolverRZ (const int lev, bool const with_pml, bool const update_with_rho, const bool fft_do_time_averaging, - const int J_in_time, - const int rho_in_time, + const JInTime J_in_time, + const RhoInTime rho_in_time, const bool dive_cleaning, const bool divb_cleaning) : k_space(realspace_ba, dm, dx) diff --git a/Source/Parallelization/GuardCellManager.H b/Source/Parallelization/GuardCellManager.H index c70bd6d3a35..c9de3d8b250 100644 --- a/Source/Parallelization/GuardCellManager.H +++ b/Source/Parallelization/GuardCellManager.H @@ -7,6 +7,7 @@ #ifndef WARPX_GUARDCELLMANAGER_H_ #define WARPX_GUARDCELLMANAGER_H_ +#include #include #include @@ -63,7 +64,7 @@ public: int nox, int nox_fft, int noy_fft, int noz_fft, int nci_corr_stencil, - int electromagnetic_solver_id, + ElectromagneticSolverAlgo electromagnetic_solver_id, int max_level, const amrex::Vector& v_galilean, const amrex::Vector& v_comoving, diff --git a/Source/Parallelization/GuardCellManager.cpp b/Source/Parallelization/GuardCellManager.cpp index c8c0ff40acf..a36e39d9497 100644 --- a/Source/Parallelization/GuardCellManager.cpp +++ b/Source/Parallelization/GuardCellManager.cpp @@ -42,7 +42,7 @@ guardCellManager::Init ( const int nox, const int nox_fft, const int noy_fft, const int noz_fft, const int nci_corr_stencil, - const int electromagnetic_solver_id, + const ElectromagneticSolverAlgo electromagnetic_solver_id, const int max_level, const amrex::Vector& v_galilean, const amrex::Vector& v_comoving, diff --git a/Source/Particles/Gather/FieldGather.H b/Source/Particles/Gather/FieldGather.H index 4b4590b8642..95a010031ec 100644 --- a/Source/Particles/Gather/FieldGather.H +++ b/Source/Particles/Gather/FieldGather.H @@ -1714,9 +1714,9 @@ void doGatherShapeNImplicit ( const amrex::Dim3& lo, const int n_rz_azimuthal_modes, const int nox, - const int depos_type ) + const CurrentDepositionAlgo depos_type ) { - if (depos_type==0) { // CurrentDepositionAlgo::Esirkepov + if (depos_type == CurrentDepositionAlgo::Esirkepov) { if (nox == 1) { doGatherShapeNEsirkepovStencilImplicit<1>(xp_n, yp_n, zp_n, xp_nph, yp_nph, zp_nph, Exp, Eyp, Ezp, Bxp, Byp, Bzp, @@ -1743,7 +1743,7 @@ void doGatherShapeNImplicit ( dinv, xyzmin, lo, n_rz_azimuthal_modes); } } - else if (depos_type==3) { // CurrentDepositionAlgo::Villasenor + else if (depos_type == CurrentDepositionAlgo::Villasenor) { if (nox == 1) { doGatherPicnicShapeN<1>(xp_n, yp_n, zp_n, xp_nph, yp_nph, zp_nph, Exp, Eyp, Ezp, Bxp, Byp, Bzp, @@ -1770,7 +1770,7 @@ void doGatherShapeNImplicit ( dinv, xyzmin, lo, n_rz_azimuthal_modes); } } - else if (depos_type==1) { // CurrentDepositionAlgo::Direct + else if (depos_type == CurrentDepositionAlgo::Direct) { if (nox == 1) { doGatherShapeN<1,0>(xp_nph, yp_nph, zp_nph, Exp, Eyp, Ezp, Bxp, Byp, Bzp, ex_arr, 
ey_arr, ez_arr, bx_arr, by_arr, bz_arr, diff --git a/Source/Particles/ParticleIO.H b/Source/Particles/ParticleIO.H index d5fc68f4097..8d3516e6890 100644 --- a/Source/Particles/ParticleIO.H +++ b/Source/Particles/ParticleIO.H @@ -90,6 +90,6 @@ particlesConvertUnits (ConvertDirection convert_direction, T_ParticleContainer* */ void storePhiOnParticles ( PinnedMemoryParticleContainer& tmp, - int electrostatic_solver_id, bool is_full_diagnostic ); + ElectrostaticSolverAlgo electrostatic_solver_id, bool is_full_diagnostic ); #endif /* WARPX_PARTICLEIO_H_ */ diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index ec483c21bbc..94a65303cc5 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -3002,7 +3002,7 @@ PhysicalParticleContainer::ImplicitPushXP (WarpXParIter& pti, const Dim3 lo = lbound(box); - const int depos_type = WarpX::current_deposition_algo; + const auto depos_type = WarpX::current_deposition_algo; const int nox = WarpX::nox; const int n_rz_azimuthal_modes = WarpX::n_rz_azimuthal_modes; diff --git a/Source/Particles/Pusher/PushSelector.H b/Source/Particles/Pusher/PushSelector.H index 4a82e582bfb..d256a1a5e40 100644 --- a/Source/Particles/Pusher/PushSelector.H +++ b/Source/Particles/Pusher/PushSelector.H @@ -49,7 +49,7 @@ void doParticleMomentumPush(amrex::ParticleReal& ux, const int ion_lev, const amrex::ParticleReal m, const amrex::ParticleReal a_q, - const int pusher_algo, + const ParticlePusherAlgo pusher_algo, const int do_crr, #ifdef WARPX_QED const amrex::Real t_chi_max, diff --git a/Source/Utils/CMakeLists.txt b/Source/Utils/CMakeLists.txt index 3d804fe9cde..82053bfc88a 100644 --- a/Source/Utils/CMakeLists.txt +++ b/Source/Utils/CMakeLists.txt @@ -6,7 +6,6 @@ foreach(D IN LISTS WarpX_DIMS) ParticleUtils.cpp SpeciesUtils.cpp RelativeCellPosition.cpp - WarpXAlgorithmSelection.cpp WarpXMovingWindow.cpp WarpXTagging.cpp WarpXUtil.cpp diff --git a/Source/Utils/Make.package b/Source/Utils/Make.package index dd7e61ff4fa..dc1f1da5c4c 100644 --- a/Source/Utils/Make.package +++ b/Source/Utils/Make.package @@ -2,7 +2,6 @@ CEXE_sources += WarpXMovingWindow.cpp CEXE_sources += WarpXTagging.cpp CEXE_sources += WarpXUtil.cpp CEXE_sources += WarpXVersion.cpp -CEXE_sources += WarpXAlgorithmSelection.cpp CEXE_sources += Interpolate.cpp CEXE_sources += IntervalsParser.cpp CEXE_sources += RelativeCellPosition.cpp diff --git a/Source/Utils/WarpXAlgorithmSelection.H b/Source/Utils/WarpXAlgorithmSelection.H index b9105557462..f67aeddadd0 100644 --- a/Source/Utils/WarpXAlgorithmSelection.H +++ b/Source/Utils/WarpXAlgorithmSelection.H @@ -9,6 +9,7 @@ #define WARPX_UTILS_WARPXALGORITHMSELECTION_H_ #include +#include #include #include @@ -20,23 +21,19 @@ using namespace ablastr::utils::enums; // NOLINT(google-global-names-in-headers * \brief struct to determine the computational medium, i.e., vacuum or material/macroscopic default is vacuum. 
*/ -struct MediumForEM { - enum { - Vacuum = 0, - Macroscopic = 1 - }; -}; +AMREX_ENUM(MediumForEM, + Vacuum, + Macroscopic, + Default = Vacuum); /** * \brief struct to select the overall evolve scheme */ -struct EvolveScheme { - enum { - Explicit = 0, - ThetaImplicitEM = 1, - SemiImplicitEM = 2 - }; -}; +AMREX_ENUM(EvolveScheme, + Explicit, + ThetaImplicitEM, + SemiImplicitEM, + Default = Explicit); /** * \brief struct to select algorithm for macroscopic Maxwell solver @@ -44,158 +41,116 @@ struct EvolveScheme { Backward Euler (fully-implicit) represents sigma*E = sigma*E^(n+1) default is Backward Euler as it is more robust. */ -struct MacroscopicSolverAlgo { - enum { - BackwardEuler = 0, - LaxWendroff = 1 - }; -}; - -struct ElectromagneticSolverAlgo { - enum { - None = 0, - Yee = 1, - CKC = 2, - PSATD = 3, - ECT = 4, - HybridPIC = 5 - }; -}; - -struct ElectrostaticSolverAlgo { - enum { - None = 0, - Relativistic = 1, - LabFrameElectroMagnetostatic = 2, - LabFrame = 3 // Non relativistic - }; -}; - -struct PoissonSolverAlgo { - enum { - Multigrid = 1, - IntegratedGreenFunction = 2, - }; -}; - -struct ParticlePusherAlgo { - enum { - Boris = 0, - Vay = 1, - HigueraCary = 2 - }; -}; - -struct CurrentDepositionAlgo { - enum { - Esirkepov = 0, - Direct = 1, - Vay = 2, - Villasenor = 3 - }; -}; - -struct ChargeDepositionAlgo { - // Only the Standard algorithm is implemented - enum { - Standard = 0 - }; -}; - -struct GatheringAlgo { - enum { - EnergyConserving = 0, - MomentumConserving - }; -}; - -struct PSATDSolutionType { - enum { - FirstOrder = 0, - SecondOrder = 1 - }; -}; - -struct JInTime { - enum { - Constant = 0, - Linear = 1 - }; -}; - -struct RhoInTime { - enum { - Constant = 0, - Linear = 1 - }; -}; +AMREX_ENUM(MacroscopicSolverAlgo, + BackwardEuler, + LaxWendroff, + Default = BackwardEuler); + +AMREX_ENUM(ElectromagneticSolverAlgo, + None, + Yee, + CKC, + PSATD, + ECT, + HybridPIC, + hybrid = HybridPIC, + Default = Yee); + +AMREX_ENUM(ElectrostaticSolverAlgo, + None, + Relativistic, + LabFrameElectroMagnetostatic, + LabFrame, + Default = None); + +AMREX_ENUM(PoissonSolverAlgo, + Multigrid, + IntegratedGreenFunction, + fft = IntegratedGreenFunction, + Default = Multigrid); + +AMREX_ENUM(ParticlePusherAlgo, + Boris, + Vay, + HigueraCary, + higuera = HigueraCary, + Default = Boris); + +AMREX_ENUM(CurrentDepositionAlgo, + Esirkepov, + Direct, + Vay, + Villasenor, + Default = Esirkepov); + +AMREX_ENUM(ChargeDepositionAlgo, + Standard, + Default = Standard); + +AMREX_ENUM(GatheringAlgo, + EnergyConserving, + MomentumConserving, + Default = EnergyConserving); + +AMREX_ENUM(PSATDSolutionType, + FirstOrder, + SecondOrder, + Default = SecondOrder); + +AMREX_ENUM(JInTime, + Constant, + Linear, + Default = Constant); + +AMREX_ENUM(RhoInTime, + Constant, + Linear, + Default = Linear); /** Strategy to compute weights for use in load balance. 
*/ -struct LoadBalanceCostsUpdateAlgo { - enum { - Timers = 0, //!< load balance according to in-code timer-based weights (i.e., with `costs`) - Heuristic = 1 /**< load balance according to weights computed from number of cells - and number of particles per box (i.e., with `costs_heuristic`) */ - }; -}; +AMREX_ENUM(LoadBalanceCostsUpdateAlgo, + Timers, //!< load balance according to in-code timer-based weights (i.e., with `costs`) + Heuristic, /**< load balance according to weights computed from number of cells + and number of particles per box (i.e., with `costs_heuristic`) */ + Default = Timers); /** Field boundary conditions at the domain boundary */ -enum struct FieldBoundaryType { - PML = 0, - Periodic = 1, - PEC = 2, //!< perfect electric conductor (PEC) with E_tangential=0 - PMC = 3, //!< perfect magnetic conductor (PMC) with B_tangential=0 - Damped = 4, // Fields in the guard cells are damped for PSATD - //in the moving window direction - Absorbing_SilverMueller = 5, // Silver-Mueller boundary condition - Neumann = 6, // For electrostatic, the normal E is set to zero - None = 7, // The fields values at the boundary are not updated. This is - // useful for RZ simulations, at r=0. - Open = 8 // Used in the Integrated Green Function Poisson solver - // Note that the solver implicitely assumes open BCs: - // no need to enforce them separately -}; +AMREX_ENUM(FieldBoundaryType, + PML, + Periodic, + PEC, //!< perfect electric conductor (PEC) with E_tangential=0 + PMC, //!< perfect magnetic conductor (PMC) with B_tangential=0 + Damped, // Fields in the guard cells are damped for PSATD + //in the moving window direction + Absorbing_SilverMueller, // Silver-Mueller boundary condition + absorbingsilvermueller = Absorbing_SilverMueller, + Neumann, // For electrostatic, the normal E is set to zero + None, // The fields values at the boundary are not updated. This is + // useful for RZ simulations, at r=0. + Open, // Used in the Integrated Green Function Poisson solver + // Note that the solver implicitely assumes open BCs: + // no need to enforce them separately + Default = PML); /** Particle boundary conditions at the domain boundary */ -enum struct ParticleBoundaryType { - Absorbing = 0, //!< particles crossing domain boundary are removed - Open = 1, //!< particles cross domain boundary leave with damped j - Reflecting = 2, //!< particles are reflected - Periodic = 3, //!< particles are introduced from the periodic boundary - Thermal = 4, - None = 5 //!< For r=0 boundary with RZ simulations -}; +AMREX_ENUM(ParticleBoundaryType, + Absorbing, //!< particles crossing domain boundary are removed + Open, //!< particles cross domain boundary leave with damped j + Reflecting, //!< particles are reflected + Periodic, //!< particles are introduced from the periodic boundary + Thermal, + None, //!< For r=0 boundary with RZ simulations + Default = Absorbing); /** MPI reductions */ -struct ReductionType { - enum { - Maximum = 0, - Minimum = 1, - Sum = 2 - }; -}; - -int -GetAlgorithmInteger(const amrex::ParmParse& pp, const char* pp_search_key ); - -/** Select BC Type for fields, if field=true - * else select BCType for particles. - */ -FieldBoundaryType -GetFieldBCTypeInteger( std::string BCType ); - -/** Select BC Type for particles. 
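
For reference, since the deleted WarpXAlgorithmSelection.cpp below appears only as a raw diff: the AMREX_ENUM declarations above generate the string-to-enum conversion tables that the hand-written std::map lookups in that file used to provide. A minimal sketch of the resulting input-parsing flow, restating the ParticlePusherAlgo declaration from the hunk above; the signatures of query_enum_sloppy and getEnumNameString are assumed from their use in this patch series:

    #include <AMReX_Enum.H>
    #include <AMReX_ParmParse.H>
    #include <AMReX_Print.H>

    // Declares the enum *and* generates string<->enum conversions;
    // "higuera" is an accepted input alias, "Default" is the value used
    // when the parameter is absent from the inputs file.
    AMREX_ENUM(ParticlePusherAlgo,
               Boris,
               Vay,
               HigueraCary,
               higuera = HigueraCary,
               Default = Boris);

    void query_pusher ()
    {
        const amrex::ParmParse pp_algo("algo");
        auto pusher = ParticlePusherAlgo::Default;
        // "Sloppy" parsing: case-insensitive, with the characters "-_"
        // ignored, so "higuera-cary" and "Higuera_Cary" both match.
        pp_algo.query_enum_sloppy("particle_pusher", pusher, "-_");
        amrex::Print() << amrex::getEnumNameString(pusher) << "\n";
    }

This is why the GetAlgorithmInteger/GetFieldBCTypeString helpers and their dictionaries can be deleted wholesale in the hunks that follow.
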
- */ -ParticleBoundaryType -GetParticleBCTypeInteger( std::string BCType ); - -/** Find the name associated with a BC type - */ -std::string -GetFieldBCTypeString( FieldBoundaryType fb_type ); +AMREX_ENUM(ReductionType, + Maximum, + Minimum, + Sum, + Integral = Sum); #endif // WARPX_UTILS_WARPXALGORITHMSELECTION_H_ diff --git a/Source/Utils/WarpXAlgorithmSelection.cpp b/Source/Utils/WarpXAlgorithmSelection.cpp deleted file mode 100644 index edcf5991c71..00000000000 --- a/Source/Utils/WarpXAlgorithmSelection.cpp +++ /dev/null @@ -1,271 +0,0 @@ -/* Copyright 2019-2020 Axel Huebl, David Grote, Luca Fedeli - * Remi Lehe, Weiqun Zhang, Yinjian Zhao - * - * - * This file is part of WarpX. - * - * License: BSD-3-Clause-LBNL - */ -#include "WarpX.H" -#include "WarpXAlgorithmSelection.H" -#include "Utils/TextMsg.H" - -#include - -#include -#include - -#include -#include -#include -#include -#include - -// Define dictionary with correspondence between user-input strings, -// and corresponding integer for use inside the code - -const std::map evolve_scheme_to_int = { - {"explicit", EvolveScheme::Explicit }, - {"theta_implicit_em", EvolveScheme::ThetaImplicitEM }, - {"semi_implicit_em", EvolveScheme::SemiImplicitEM }, - {"default", EvolveScheme::Explicit } -}; - -const std::map grid_to_int = { - {"collocated", static_cast(ablastr::utils::enums::GridType::Collocated)}, - {"staggered", static_cast(ablastr::utils::enums::GridType::Staggered)}, - {"hybrid", static_cast(ablastr::utils::enums::GridType::Hybrid)}, - {"default", static_cast(ablastr::utils::enums::GridType::Staggered)} -}; - -const std::map electromagnetic_solver_algo_to_int = { - {"none", ElectromagneticSolverAlgo::None }, - {"yee", ElectromagneticSolverAlgo::Yee }, - {"ckc", ElectromagneticSolverAlgo::CKC }, - {"psatd", ElectromagneticSolverAlgo::PSATD }, - {"ect", ElectromagneticSolverAlgo::ECT }, - {"hybrid", ElectromagneticSolverAlgo::HybridPIC }, - {"default", ElectromagneticSolverAlgo::Yee } -}; - -const std::map electrostatic_solver_algo_to_int = { - {"none", ElectrostaticSolverAlgo::None }, - {"relativistic", ElectrostaticSolverAlgo::Relativistic}, - {"labframe-electromagnetostatic", ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic}, - {"labframe", ElectrostaticSolverAlgo::LabFrame}, - {"default", ElectrostaticSolverAlgo::None } -}; - -const std::map poisson_solver_algo_to_int = { - {"multigrid", PoissonSolverAlgo::Multigrid}, - {"fft", PoissonSolverAlgo::IntegratedGreenFunction}, - {"default", PoissonSolverAlgo::Multigrid } -}; - -const std::map particle_pusher_algo_to_int = { - {"boris", ParticlePusherAlgo::Boris }, - {"vay", ParticlePusherAlgo::Vay }, - {"higuera", ParticlePusherAlgo::HigueraCary }, - {"default", ParticlePusherAlgo::Boris } -}; - -const std::map current_deposition_algo_to_int = { - {"esirkepov", CurrentDepositionAlgo::Esirkepov }, - {"direct", CurrentDepositionAlgo::Direct }, - {"vay", CurrentDepositionAlgo::Vay }, - {"villasenor", CurrentDepositionAlgo::Villasenor }, - {"default", CurrentDepositionAlgo::Esirkepov } // NOTE: overwritten for PSATD and Hybrid-PIC below -}; - -const std::map charge_deposition_algo_to_int = { - {"standard", ChargeDepositionAlgo::Standard }, - {"default", ChargeDepositionAlgo::Standard } -}; - -const std::map gathering_algo_to_int = { - {"energy-conserving", GatheringAlgo::EnergyConserving }, - {"momentum-conserving", GatheringAlgo::MomentumConserving }, - {"default", GatheringAlgo::EnergyConserving } -}; - -const std::map psatd_solution_type_to_int = { - {"first-order", 
PSATDSolutionType::FirstOrder}, - {"second-order", PSATDSolutionType::SecondOrder}, - {"default", PSATDSolutionType::SecondOrder} -}; - -const std::map J_in_time_to_int = { - {"constant", JInTime::Constant}, - {"linear", JInTime::Linear}, - {"default", JInTime::Constant} -}; - -const std::map rho_in_time_to_int = { - {"constant", RhoInTime::Constant}, - {"linear", RhoInTime::Linear}, - {"default", RhoInTime::Linear} -}; - -const std::map load_balance_costs_update_algo_to_int = { - {"timers", LoadBalanceCostsUpdateAlgo::Timers }, - {"heuristic", LoadBalanceCostsUpdateAlgo::Heuristic }, - {"default", LoadBalanceCostsUpdateAlgo::Timers } -}; - -const std::map MaxwellSolver_medium_algo_to_int = { - {"vacuum", MediumForEM::Vacuum}, - {"macroscopic", MediumForEM::Macroscopic}, - {"default", MediumForEM::Vacuum} -}; - -const std::map MacroscopicSolver_algo_to_int = { - {"backwardeuler", MacroscopicSolverAlgo::BackwardEuler}, - {"laxwendroff", MacroscopicSolverAlgo::LaxWendroff}, - {"default", MacroscopicSolverAlgo::BackwardEuler} -}; - -const std::map FieldBCType_algo_to_enum = { - {"pml", FieldBoundaryType::PML}, - {"periodic", FieldBoundaryType::Periodic}, - {"pec", FieldBoundaryType::PEC}, - {"pmc", FieldBoundaryType::PMC}, - {"damped", FieldBoundaryType::Damped}, - {"absorbing_silver_mueller", FieldBoundaryType::Absorbing_SilverMueller}, - {"neumann", FieldBoundaryType::Neumann}, - {"open", FieldBoundaryType::Open}, - {"none", FieldBoundaryType::None}, - {"default", FieldBoundaryType::PML} -}; - -const std::map ParticleBCType_algo_to_enum = { - {"absorbing", ParticleBoundaryType::Absorbing}, - {"open", ParticleBoundaryType::Open}, - {"reflecting", ParticleBoundaryType::Reflecting}, - {"periodic", ParticleBoundaryType::Periodic}, - {"thermal", ParticleBoundaryType::Thermal}, - {"none", ParticleBoundaryType::None}, - {"default", ParticleBoundaryType::Absorbing} -}; - -const std::map ReductionType_algo_to_int = { - {"maximum", ReductionType::Maximum}, - {"minimum", ReductionType::Minimum}, - {"integral", ReductionType::Sum} -}; - -int -GetAlgorithmInteger (const amrex::ParmParse& pp, const char* pp_search_key ) -{ - // Read user input ; use "default" if it is not found - std::string algo = "default"; - pp.query( pp_search_key, algo ); - // Convert to lower case - std::transform(algo.begin(), algo.end(), algo.begin(), ::tolower); - - // Pick the right dictionary - std::map algo_to_int; - if (0 == std::strcmp(pp_search_key, "evolve_scheme")) { - algo_to_int = evolve_scheme_to_int; - } else if (0 == std::strcmp(pp_search_key, "maxwell_solver")) { - algo_to_int = electromagnetic_solver_algo_to_int; - } else if (0 == std::strcmp(pp_search_key, "grid_type")) { - algo_to_int = grid_to_int; - } else if (0 == std::strcmp(pp_search_key, "do_electrostatic")) { - algo_to_int = electrostatic_solver_algo_to_int; - } else if (0 == std::strcmp(pp_search_key, "poisson_solver")) { - algo_to_int = poisson_solver_algo_to_int; - } else if (0 == std::strcmp(pp_search_key, "particle_pusher")) { - algo_to_int = particle_pusher_algo_to_int; - } else if (0 == std::strcmp(pp_search_key, "current_deposition")) { - algo_to_int = current_deposition_algo_to_int; - if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD || - WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC || - WarpX::electrostatic_solver_id != ElectrostaticSolverAlgo::None) { - algo_to_int["default"] = CurrentDepositionAlgo::Direct; - } - } else if (0 == std::strcmp(pp_search_key, "charge_deposition")) { - algo_to_int 
= charge_deposition_algo_to_int; - } else if (0 == std::strcmp(pp_search_key, "field_gathering")) { - algo_to_int = gathering_algo_to_int; - } else if (0 == std::strcmp(pp_search_key, "solution_type")) { - algo_to_int = psatd_solution_type_to_int; - } else if (0 == std::strcmp(pp_search_key, "J_in_time")) { - algo_to_int = J_in_time_to_int; - } else if (0 == std::strcmp(pp_search_key, "rho_in_time")) { - algo_to_int = rho_in_time_to_int; - } else if (0 == std::strcmp(pp_search_key, "load_balance_costs_update")) { - algo_to_int = load_balance_costs_update_algo_to_int; - } else if (0 == std::strcmp(pp_search_key, "em_solver_medium")) { - algo_to_int = MaxwellSolver_medium_algo_to_int; - } else if (0 == std::strcmp(pp_search_key, "macroscopic_sigma_method")) { - algo_to_int = MacroscopicSolver_algo_to_int; - } else if (0 == std::strcmp(pp_search_key, "reduction_type")) { - algo_to_int = ReductionType_algo_to_int; - } else { - std::string const pp_search_string = pp_search_key; - WARPX_ABORT_WITH_MESSAGE("Unknown algorithm type: " + pp_search_string); - } - - // Check if the user-input is a valid key for the dictionary - if (algo_to_int.count(algo) == 0) { - // Not a valid key ; print error message - const std::string pp_search_string = pp_search_key; - std::string error_message = "Invalid string for algo." + pp_search_string - + ": " + algo + ".\nThe valid values are:\n"; - for ( const auto &valid_pair : algo_to_int ) { - if (valid_pair.first != "default"){ - error_message += " - " + valid_pair.first + "\n"; - } - } - WARPX_ABORT_WITH_MESSAGE(error_message); - } - - // If the input is a valid key, return the value - return algo_to_int[algo]; -} - -FieldBoundaryType -GetFieldBCTypeInteger( std::string BCType ){ - std::transform(BCType.begin(), BCType.end(), BCType.begin(), ::tolower); - - if (FieldBCType_algo_to_enum.count(BCType) == 0) { - std::string error_message = "Invalid string for field/particle BC. : " + BCType + "\nThe valid values are : \n"; - for (const auto &valid_pair : FieldBCType_algo_to_enum) { - if (valid_pair.first != "default"){ - error_message += " - " + valid_pair.first + "\n"; - } - } - WARPX_ABORT_WITH_MESSAGE(error_message); - } - // return FieldBCType_algo_to_enum[BCType]; // This operator cannot be used for a const map - return FieldBCType_algo_to_enum.at(BCType); -} - -ParticleBoundaryType -GetParticleBCTypeInteger( std::string BCType ){ - std::transform(BCType.begin(), BCType.end(), BCType.begin(), ::tolower); - - if (ParticleBCType_algo_to_enum.count(BCType) == 0) { - std::string error_message = "Invalid string for particle BC. 
: " + BCType + "\nThe valid values are : \n"; - for (const auto &valid_pair : ParticleBCType_algo_to_enum) { - if (valid_pair.first != "default"){ - error_message += " - " + valid_pair.first + "\n"; - } - } - WARPX_ABORT_WITH_MESSAGE(error_message); - } - // return ParticleBCType_algo_to_enum[BCType]; // This operator cannot be used for a const map - return ParticleBCType_algo_to_enum.at(BCType); -} - -std::string -GetFieldBCTypeString( FieldBoundaryType fb_type ) { - std::string boundary_name; - for (const auto &valid_pair : FieldBCType_algo_to_enum) { - if ((valid_pair.second == fb_type)&&(valid_pair.first != "default")){ - boundary_name = valid_pair.first; - break; - } - } - return boundary_name; -} diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index 2ef4ee55d6e..4556d64684f 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -76,7 +76,8 @@ void ParseGeometryInput() #ifdef WARPX_DIM_RZ const ParmParse pp_algo("algo"); - const int electromagnetic_solver_id = GetAlgorithmInteger(pp_algo, "maxwell_solver"); + auto electromagnetic_solver_id = ElectromagneticSolverAlgo::Default; + pp_algo.query_enum_sloppy("maxwell_solver", electromagnetic_solver_id, "-_"); if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE(prob_lo[0] == 0., @@ -310,7 +311,8 @@ void CheckGriddingForRZSpectral () CheckDims(); const ParmParse pp_algo("algo"); - const int electromagnetic_solver_id = GetAlgorithmInteger(pp_algo, "maxwell_solver"); + auto electromagnetic_solver_id = ElectromagneticSolverAlgo::Default; + pp_algo.query_enum_sloppy("maxwell_solver", electromagnetic_solver_id, "-_"); // only check for PSATD in RZ if (electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { @@ -395,16 +397,14 @@ void CheckGriddingForRZSpectral () void ReadBCParams () { - amrex::Vector field_BC_lo(AMREX_SPACEDIM,"default"); - amrex::Vector field_BC_hi(AMREX_SPACEDIM,"default"); - amrex::Vector particle_BC_lo(AMREX_SPACEDIM,"default"); - amrex::Vector particle_BC_hi(AMREX_SPACEDIM,"default"); amrex::Vector geom_periodicity(AMREX_SPACEDIM,0); ParmParse pp_geometry("geometry"); const ParmParse pp_warpx("warpx"); const ParmParse pp_algo("algo"); - const int electromagnetic_solver_id = GetAlgorithmInteger(pp_algo, "maxwell_solver"); - const int poisson_solver_id = GetAlgorithmInteger(pp_warpx, "poisson_solver"); + auto electromagnetic_solver_id = ElectromagneticSolverAlgo::Default; + pp_algo.query_enum_sloppy("maxwell_solver", electromagnetic_solver_id, "-_"); + auto poisson_solver_id = PoissonSolverAlgo::Default; + pp_warpx.query_enum_sloppy("poisson_solver", poisson_solver_id, "-_"); if (pp_geometry.queryarr("is_periodic", geom_periodicity)) { @@ -419,26 +419,21 @@ void ReadBCParams () // particle boundary may not be explicitly specified for some applications bool particle_boundary_specified = false; const ParmParse pp_boundary("boundary"); - pp_boundary.queryarr("field_lo", field_BC_lo, 0, AMREX_SPACEDIM); - pp_boundary.queryarr("field_hi", field_BC_hi, 0, AMREX_SPACEDIM); - if (pp_boundary.queryarr("particle_lo", particle_BC_lo, 0, AMREX_SPACEDIM)) { - particle_boundary_specified = true; - } - if (pp_boundary.queryarr("particle_hi", particle_BC_hi, 0, AMREX_SPACEDIM)) { - particle_boundary_specified = true; - } - AMREX_ALWAYS_ASSERT(field_BC_lo.size() == AMREX_SPACEDIM); - AMREX_ALWAYS_ASSERT(field_BC_hi.size() == AMREX_SPACEDIM); - AMREX_ALWAYS_ASSERT(particle_BC_lo.size() == AMREX_SPACEDIM); - 
AMREX_ALWAYS_ASSERT(particle_BC_hi.size() == AMREX_SPACEDIM); - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { // Get field boundary type - WarpX::field_boundary_lo[idim] = GetFieldBCTypeInteger(field_BC_lo[idim]); - WarpX::field_boundary_hi[idim] = GetFieldBCTypeInteger(field_BC_hi[idim]); + pp_boundary.query_enum_sloppy("field_lo", + WarpX::field_boundary_lo[idim], "-_", idim); + pp_boundary.query_enum_sloppy("field_hi", + WarpX::field_boundary_hi[idim], "-_", idim); // Get particle boundary type - WarpX::particle_boundary_lo[idim] = GetParticleBCTypeInteger(particle_BC_lo[idim]); - WarpX::particle_boundary_hi[idim] = GetParticleBCTypeInteger(particle_BC_hi[idim]); + if (pp_boundary.query_enum_sloppy("particle_lo", + WarpX::particle_boundary_lo[idim], "-_", idim)) { + particle_boundary_specified = true; + } + if (pp_boundary.query_enum_sloppy("particle_hi", + WarpX::particle_boundary_hi[idim], "-_", idim)) { + particle_boundary_specified = true; + } if (WarpX::field_boundary_lo[idim] == FieldBoundaryType::Periodic || WarpX::field_boundary_hi[idim] == FieldBoundaryType::Periodic || diff --git a/Source/WarpX.H b/Source/WarpX.H index c27807b4982..a89ffe20573 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -170,17 +170,17 @@ public: // Algorithms //! Integer that corresponds to the current deposition algorithm (Esirkepov, direct, Vay, Villasenor) - static short current_deposition_algo; + static inline auto current_deposition_algo = CurrentDepositionAlgo::Default; //! Integer that corresponds to the charge deposition algorithm (only standard deposition) - static short charge_deposition_algo; + static inline auto charge_deposition_algo = ChargeDepositionAlgo::Default; //! Integer that corresponds to the field gathering algorithm (energy-conserving, momentum-conserving) - static short field_gathering_algo; + static inline auto field_gathering_algo = GatheringAlgo::Default; //! Integer that corresponds to the particle push algorithm (Boris, Vay, Higuera-Cary) - static short particle_pusher_algo; + static inline auto particle_pusher_algo = ParticlePusherAlgo::Default; //! Integer that corresponds to the type of Maxwell solver (Yee, CKC, PSATD, ECT) - static short electromagnetic_solver_id; + static inline auto electromagnetic_solver_id = ElectromagneticSolverAlgo::Default; //! Integer that corresponds to the evolve scheme (explicit, semi_implicit_em, theta_implicit_em) - static short evolve_scheme; + static inline auto evolve_scheme = EvolveScheme::Default; //! Maximum iterations used for self-consistent particle update in implicit particle-suppressed evolve schemes static int max_particle_its_in_implicit_scheme; //! Relative tolerance used for self-consistent particle update in implicit particle-suppressed evolve schemes @@ -188,43 +188,55 @@ public: /** Records a number corresponding to the load balance cost update strategy * being used (0 or 1 corresponding to timers or heuristic). */ - static short load_balance_costs_update_algo; + static inline auto load_balance_costs_update_algo = LoadBalanceCostsUpdateAlgo::Default; //! 
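
The ReadBCParams rewrite above replaces queryarr-into-strings plus manual conversion with direct per-component enum queries. A small sketch of that pattern (the trailing idim argument is assumed, from its use above, to select which entry of the space-separated input list is parsed; unspecified entries keep FieldBoundaryType::Default, i.e. PML):

    #include "Utils/WarpXAlgorithmSelection.H"

    #include <AMReX_Array.H>
    #include <AMReX_ParmParse.H>

    void read_field_bcs (amrex::Array<FieldBoundaryType,AMREX_SPACEDIM>& lo,
                         amrex::Array<FieldBoundaryType,AMREX_SPACEDIM>& hi)
    {
        const amrex::ParmParse pp_boundary("boundary");
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            // e.g. "boundary.field_lo = pec periodic damped" in the inputs:
            // idim picks the matching entry of the list.
            pp_boundary.query_enum_sloppy("field_lo", lo[idim], "-_", idim);
            pp_boundary.query_enum_sloppy("field_hi", hi[idim], "-_", idim);
        }
    }
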
Integer that corresponds to electromagnetic Maxwell solver (vacuum - 0, macroscopic - 1)
-    static int em_solver_medium;
+    static inline auto em_solver_medium = MediumForEM::Default;
     /** Integer that correspond to macroscopic Maxwell solver algorithm
      * (BackwardEuler - 0, Lax-Wendroff - 1)
      */
-    static int macroscopic_solver_algo;
+    static inline auto macroscopic_solver_algo = MacroscopicSolverAlgo::Default;

     /** Integers that correspond to boundary condition applied to fields at the
      * lower domain boundaries
      * (0 to 6 correspond to PML, Periodic, PEC, PMC, Damped, Absorbing Silver-Mueller, None)
      */
-    static amrex::Vector<FieldBoundaryType> field_boundary_lo;
+    static inline amrex::Array<FieldBoundaryType,AMREX_SPACEDIM>
+        field_boundary_lo {AMREX_D_DECL(FieldBoundaryType::Default,
+                                        FieldBoundaryType::Default,
+                                        FieldBoundaryType::Default)};
     /** Integers that correspond to boundary condition applied to fields at the
      * upper domain boundaries
      * (0 to 6 correspond to PML, Periodic, PEC, PMC, Damped, Absorbing Silver-Mueller, None)
      */
-    static amrex::Vector<FieldBoundaryType> field_boundary_hi;
+    static inline amrex::Array<FieldBoundaryType,AMREX_SPACEDIM>
+        field_boundary_hi {AMREX_D_DECL(FieldBoundaryType::Default,
+                                        FieldBoundaryType::Default,
+                                        FieldBoundaryType::Default)};
     /** Integers that correspond to boundary condition applied to particles at the
      * lower domain boundaries
      * (0 to 4 correspond to Absorbing, Open, Reflecting, Periodic, Thermal)
      */
-    static amrex::Vector<ParticleBoundaryType> particle_boundary_lo;
+    static inline amrex::Array<ParticleBoundaryType,AMREX_SPACEDIM>
+        particle_boundary_lo {AMREX_D_DECL(ParticleBoundaryType::Default,
+                                           ParticleBoundaryType::Default,
+                                           ParticleBoundaryType::Default)};
     /** Integers that correspond to boundary condition applied to particles at the
      * upper domain boundaries
      * (0 to 4 correspond to Absorbing, Open, Reflecting, Periodic, Thermal)
      */
-    static amrex::Vector<ParticleBoundaryType> particle_boundary_hi;
+    static inline amrex::Array<ParticleBoundaryType,AMREX_SPACEDIM>
+        particle_boundary_hi {AMREX_D_DECL(ParticleBoundaryType::Default,
+                                           ParticleBoundaryType::Default,
+                                           ParticleBoundaryType::Default)};

     //! Integer that corresponds to the order of the PSATD solution
     //! (whether the PSATD equations are derived from first-order or
     //! second-order solution)
-    static short psatd_solution_type;
+    static inline auto psatd_solution_type = PSATDSolutionType::Default;

     //! Integers that correspond to the time dependency of J (constant, linear)
     //! and rho (linear, quadratic) for the PSATD algorithm
-    static short J_in_time;
-    static short rho_in_time;
+    static inline auto J_in_time = JInTime::Default;
+    static inline auto rho_in_time = RhoInTime::Default;
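
The WarpX.H hunks above and below rely on a C++17 feature: a static data member declared inline is itself a definition, so the matching out-of-line definitions in WarpX.cpp can be deleted wholesale (as the next hunks do). A toy illustration of the idiom, using a hypothetical Config class:

    // C++17 or later
    enum class Algo { A, B, Default = A };

    struct Config
    {
        // Pre-C++17 style: "static Algo algo;" here would be a declaration
        // only, and the program would rely on a single
        //     Algo Config::algo;   // out-of-line, in some .cpp file
        // definition, or fail to link.
        //
        // C++17 inline variable: declared, defined, and given its default
        // value in one place; no .cpp counterpart is required.
        static inline auto algo = Algo::Default;
    };

    int main () { return (Config::algo == Algo::Default) ? 0 : 1; }
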
     //! If true, the current is deposited on a nodal grid and then centered onto a staggered grid
     //! using finite centering of order given by #current_centering_nox, #current_centering_noy,
@@ -377,7 +389,7 @@ public:

     //! Integer that corresponds to the type of grid used in the simulation
     //! (collocated, staggered, hybrid)
-    static ablastr::utils::enums::GridType grid_type;
+    static inline auto grid_type = ablastr::utils::enums::GridType::Default;

     // Global rho nodal flag to know about rho index type when rho MultiFab is not allocated
     amrex::IntVect m_rho_nodal_flag;
@@ -942,8 +954,8 @@ public:
     static const amrex::iMultiFab* CurrentBufferMasks (int lev);
     static const amrex::iMultiFab* GatherBufferMasks (int lev);

-    static int electrostatic_solver_id;
-    static int poisson_solver_id;
+    static inline auto electrostatic_solver_id = ElectrostaticSolverAlgo::Default;
+    static inline auto poisson_solver_id = PoissonSolverAlgo::Default;

     // Parameters for lab frame electrostatic
     static amrex::Real self_fields_required_precision;
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index a705735a541..2e9befba992 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -110,23 +110,11 @@ bool WarpX::compute_max_step_from_btd = false;
 Real WarpX::zmax_plasma_to_compute_max_step = 0._rt;
 Real WarpX::zmin_domain_boost_step_0 = 0._rt;

-short WarpX::current_deposition_algo;
-short WarpX::charge_deposition_algo;
-short WarpX::field_gathering_algo;
-short WarpX::particle_pusher_algo;
-short WarpX::electromagnetic_solver_id;
-short WarpX::evolve_scheme;
 int WarpX::max_particle_its_in_implicit_scheme = 21;
 ParticleReal WarpX::particle_tol_in_implicit_scheme = 1.e-10;
-short WarpX::psatd_solution_type;
-short WarpX::J_in_time;
-short WarpX::rho_in_time;
-short WarpX::load_balance_costs_update_algo;
 bool WarpX::do_dive_cleaning = false;
 bool WarpX::do_divb_cleaning = false;
 bool WarpX::do_divb_cleaning_external = false;
-int WarpX::em_solver_medium;
-int WarpX::macroscopic_solver_algo;

 bool WarpX::do_single_precision_comms = false;

 bool WarpX::do_shared_mem_charge_deposition = false;
@@ -141,11 +129,6 @@ amrex::IntVect WarpX::shared_tilesize(AMREX_D_DECL(1,1,1));
 #endif
 int WarpX::shared_mem_current_tpb = 128;

-amrex::Vector<FieldBoundaryType> WarpX::field_boundary_lo(AMREX_SPACEDIM,FieldBoundaryType::PML);
-amrex::Vector<FieldBoundaryType> WarpX::field_boundary_hi(AMREX_SPACEDIM,FieldBoundaryType::PML);
-amrex::Vector<ParticleBoundaryType> WarpX::particle_boundary_lo(AMREX_SPACEDIM,ParticleBoundaryType::Absorbing);
-amrex::Vector<ParticleBoundaryType> WarpX::particle_boundary_hi(AMREX_SPACEDIM,ParticleBoundaryType::Absorbing);
-
 int WarpX::n_rz_azimuthal_modes = 1;
 int WarpX::ncomps = 1;
@@ -191,8 +174,6 @@ amrex::IntVect WarpX::sort_idx_type(AMREX_D_DECL(0,0,0));

 bool WarpX::do_dynamic_scheduling = true;

-int WarpX::electrostatic_solver_id;
-int WarpX::poisson_solver_id;
 Real WarpX::self_fields_required_precision = 1.e-11_rt;
 Real WarpX::self_fields_absolute_tolerance = 0.0_rt;
 int WarpX::self_fields_max_iters = 200;
@@ -211,7 +192,6 @@ IntVect WarpX::filter_npass_each_dir(1);

 int WarpX::n_field_gather_buffer = -1;
 int WarpX::n_current_deposition_buffer = -1;

-ablastr::utils::enums::GridType WarpX::grid_type;
 amrex::IntVect m_rho_nodal_flag;

 WarpX* WarpX::m_instance = nullptr;
@@ -540,8 +520,7 @@ WarpX::ReadParameters ()
     {
         const ParmParse pp_algo("algo");
-        electromagnetic_solver_id = static_cast<short>(GetAlgorithmInteger(pp_algo, "maxwell_solver"));
-
+        pp_algo.query_enum_sloppy("maxwell_solver", electromagnetic_solver_id, "-_");
         if (electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT && !EB::enabled()) {
             throw std::runtime_error("ECP Solver requires to enable embedded boundaries at runtime.");
         }
@@ -748,7 +727,7 @@ WarpX::ReadParameters ()
         maxlevel_extEMfield_init = maxLevel();
         pp_warpx.query("maxlevel_extEMfield_init", maxlevel_extEMfield_init);

-        electrostatic_solver_id =
GetAlgorithmInteger(pp_warpx, "do_electrostatic"); + pp_warpx.query_enum_sloppy("do_electrostatic", electrostatic_solver_id, "-_"); // if an electrostatic solver is used, set the Maxwell solver to None if (electrostatic_solver_id != ElectrostaticSolverAlgo::None) { electromagnetic_solver_id = ElectromagneticSolverAlgo::None; @@ -768,7 +747,7 @@ WarpX::ReadParameters () pp_warpx.query("self_fields_verbosity", self_fields_verbosity); } - poisson_solver_id = GetAlgorithmInteger(pp_warpx, "poisson_solver"); + pp_warpx.query_enum_sloppy("poisson_solver", poisson_solver_id, "-_"); #ifndef WARPX_DIM_3D WARPX_ALWAYS_ASSERT_WITH_MESSAGE( poisson_solver_id!=PoissonSolverAlgo::IntegratedGreenFunction, @@ -1089,7 +1068,7 @@ WarpX::ReadParameters () // Integer that corresponds to the type of grid used in the simulation // (collocated, staggered, hybrid) - grid_type = static_cast(GetAlgorithmInteger(pp_warpx, "grid_type")); + pp_warpx.query_enum_sloppy("grid_type", grid_type, "-_"); // Use same shape factors in all directions, for gathering if (grid_type == GridType::Collocated) { galerkin_interpolation = false; } @@ -1232,10 +1211,15 @@ WarpX::ReadParameters () // note: current_deposition must be set after maxwell_solver (electromagnetic_solver_id) or // do_electrostatic (electrostatic_solver_id) are already determined, // because its default depends on the solver selection - current_deposition_algo = static_cast(GetAlgorithmInteger(pp_algo, "current_deposition")); - charge_deposition_algo = static_cast(GetAlgorithmInteger(pp_algo, "charge_deposition")); - particle_pusher_algo = static_cast(GetAlgorithmInteger(pp_algo, "particle_pusher")); - evolve_scheme = static_cast(GetAlgorithmInteger(pp_algo, "evolve_scheme")); + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD || + electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC || + electrostatic_solver_id != ElectrostaticSolverAlgo::None) { + current_deposition_algo = CurrentDepositionAlgo::Direct; + } + pp_algo.query_enum_sloppy("current_deposition", current_deposition_algo, "-_"); + pp_algo.query_enum_sloppy("charge_deposition", charge_deposition_algo, "-_"); + pp_algo.query_enum_sloppy("particle_pusher", particle_pusher_algo, "-_"); + pp_algo.query_enum_sloppy("evolve_scheme", evolve_scheme, "-_"); // check for implicit evolve scheme if (evolve_scheme == EvolveScheme::SemiImplicitEM) { @@ -1296,7 +1280,7 @@ WarpX::ReadParameters () // Query algo.field_gathering from input, set field_gathering_algo to // "default" if not found (default defined in Utils/WarpXAlgorithmSelection.cpp) - field_gathering_algo = static_cast(GetAlgorithmInteger(pp_algo, "field_gathering")); + pp_algo.query_enum_sloppy("field_gathering", field_gathering_algo, "-_"); // Set default field gathering algorithm for hybrid grids (momentum-conserving) std::string tmp_algo; @@ -1353,9 +1337,10 @@ WarpX::ReadParameters () " combined with mesh refinement is currently not implemented"); } - em_solver_medium = GetAlgorithmInteger(pp_algo, "em_solver_medium"); + pp_algo.query_enum_sloppy("em_solver_medium", em_solver_medium, "-_"); if (em_solver_medium == MediumForEM::Macroscopic ) { - macroscopic_solver_algo = GetAlgorithmInteger(pp_algo,"macroscopic_sigma_method"); + pp_algo.query_enum_sloppy("macroscopic_sigma_method", + macroscopic_solver_algo, "-_"); } if (evolve_scheme == EvolveScheme::SemiImplicitEM || @@ -1394,7 +1379,7 @@ WarpX::ReadParameters () } utils::parser::queryWithParser(pp_algo, "load_balance_efficiency_ratio_threshold", 
load_balance_efficiency_ratio_threshold);
-    load_balance_costs_update_algo = static_cast<short>(GetAlgorithmInteger(pp_algo, "load_balance_costs_update"));
+    pp_algo.query_enum_sloppy("load_balance_costs_update", load_balance_costs_update_algo, "-_");
     if (WarpX::load_balance_costs_update_algo==LoadBalanceCostsUpdateAlgo::Heuristic) {
         utils::parser::queryWithParser(
             pp_algo, "costs_heuristic_cells_wt", costs_heuristic_cells_wt);
@@ -1565,12 +1550,12 @@ WarpX::ReadParameters ()
         // Integer that corresponds to the order of the PSATD solution
         // (whether the PSATD equations are derived from first-order or
         // second-order solution)
-        psatd_solution_type = static_cast<short>(GetAlgorithmInteger(pp_psatd, "solution_type"));
+        pp_psatd.query_enum_sloppy("solution_type", psatd_solution_type, "-_");

         // Integers that correspond to the time dependency of J (constant, linear)
         // and rho (linear, quadratic) for the PSATD algorithm
-        J_in_time = static_cast<short>(GetAlgorithmInteger(pp_psatd, "J_in_time"));
-        rho_in_time = static_cast<short>(GetAlgorithmInteger(pp_psatd, "rho_in_time"));
+        pp_psatd.query_enum_sloppy("J_in_time", J_in_time, "-_");
+        pp_psatd.query_enum_sloppy("rho_in_time", rho_in_time, "-_");

         if (psatd_solution_type != PSATDSolutionType::FirstOrder || !do_multi_J)
         {
diff --git a/Source/ablastr/utils/Enums.H b/Source/ablastr/utils/Enums.H
index 1f89bede9e4..7c7129cae77 100644
--- a/Source/ablastr/utils/Enums.H
+++ b/Source/ablastr/utils/Enums.H
@@ -8,17 +8,19 @@
 #ifndef ABLASTR_UTILS_ENUMS_H_
 #define ABLASTR_UTILS_ENUMS_H_

+#include <AMReX_Enum.H>
+
 namespace ablastr::utils::enums
 {
     /** Type of grids used in a simulation:
      *
      * Collocated at the same location (AMReX: all "NODAL"), staggered (Yee-style), or hybrid.
      */
-    enum struct GridType {
-        Collocated = 0,
-        Staggered = 1,
-        Hybrid = 2
-    };
+    AMREX_ENUM(GridType,
+               Collocated,
+               Staggered,
+               Hybrid,
+               Default = Staggered);

     /** Mesh-refinement patch
      *
diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake
index f65f5d36cce..d6b1707e527 100644
--- a/cmake/dependencies/AMReX.cmake
+++ b/cmake/dependencies/AMReX.cmake
@@ -279,7 +279,7 @@ set(WarpX_amrex_src ""
 set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git"
     CACHE STRING
     "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)")
-set(WarpX_amrex_branch "216ce6f37de4b65be57fc1006b3457b4fc318e03"
+set(WarpX_amrex_branch "4460afbbce250ac6b463ea2bee0d9930c5059d2f"
     CACHE STRING
     "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)")

From ece2c052b2a33a46beb36da2133e23c98f762d94 Mon Sep 17 00:00:00 2001
From: Olga Shapoval <30510597+oshapoval@users.noreply.github.com>
Date: Fri, 13 Sep 2024 09:52:19 -0700
Subject: [PATCH 39/91] Reduce time in CI tests (#5232)

* Removal of assertion which prevented using the averaged PSATD algorithms with PML BC
* Reduced resolution in CI tests: test_3d_beam_beam_collision and inputs_test_rz_multiJ_psatd.
* Changed final time iteration.
* Updated checksums.
* Reverted changes unrelated to this PR --- .../inputs_test_3d_beam_beam_collision | 2 +- .../Tests/nci_psatd_stability/CMakeLists.txt | 2 +- .../inputs_test_rz_multiJ_psatd | 6 +- .../test_3d_beam_beam_collision.json | 140 +++++++++--------- .../benchmarks_json/test_rz_multiJ_psatd.json | 64 ++++---- 5 files changed, 107 insertions(+), 107 deletions(-) diff --git a/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision index 488e997f895..fcbd8a202e3 100644 --- a/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision +++ b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision @@ -27,7 +27,7 @@ my_constants.Lz = 180.0*clight/omegab # for a full scale simulation use: nx, ny, nz = 512, 512, 1024 my_constants.nx = 64 my_constants.ny = 64 -my_constants.nz = 128 +my_constants.nz = 64 # TIME diff --git a/Examples/Tests/nci_psatd_stability/CMakeLists.txt b/Examples/Tests/nci_psatd_stability/CMakeLists.txt index 051f81b1784..f2b4ceae8ba 100644 --- a/Examples/Tests/nci_psatd_stability/CMakeLists.txt +++ b/Examples/Tests/nci_psatd_stability/CMakeLists.txt @@ -200,7 +200,7 @@ if(WarpX_FFT) 2 # nprocs inputs_test_rz_multiJ_psatd # inputs analysis_default_regression.py # analysis - diags/diag1000050 # output + diags/diag1000025 # output OFF # dependency ) label_warpx_test(test_rz_multiJ_psatd slow) diff --git a/Examples/Tests/nci_psatd_stability/inputs_test_rz_multiJ_psatd b/Examples/Tests/nci_psatd_stability/inputs_test_rz_multiJ_psatd index 5e263856256..6350b9aee51 100644 --- a/Examples/Tests/nci_psatd_stability/inputs_test_rz_multiJ_psatd +++ b/Examples/Tests/nci_psatd_stability/inputs_test_rz_multiJ_psatd @@ -1,8 +1,8 @@ # Iterations -max_step = 50 +max_step = 25 # Domain -amr.n_cell = 64 128 +amr.n_cell = 32 64 amr.max_level = 0 warpx.numprocs = 1 2 @@ -115,7 +115,7 @@ plasma_p.do_continuous_injection = 1 # Diagnostics diagnostics.diags_names = diag1 -diag1.intervals = 50 +diag1.intervals = 25 diag1.diag_type = Full diag1.fields_to_plot = Er Ez Bt jr jz rho rho_driver rho_plasma_e rho_plasma_p diag1.species = driver plasma_e plasma_p diff --git a/Regression/Checksum/benchmarks_json/test_3d_beam_beam_collision.json b/Regression/Checksum/benchmarks_json/test_3d_beam_beam_collision.json index 799692135ce..ad478e96e2f 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_beam_beam_collision.json +++ b/Regression/Checksum/benchmarks_json/test_3d_beam_beam_collision.json @@ -1,96 +1,96 @@ { "lev=0": { - "Bx": 958613893612.4355, - "By": 958606286084.2804, - "Bz": 50291310.73170665, - "Ex": 2.873993867215236e+20, - "Ey": 2.8740263458334176e+20, - "Ez": 1.3469564521662371e+17, - "rho_beam1": 7.96926761425663e+16, - "rho_beam2": 7.969234189119718e+16, - "rho_ele1": 325562788515568.7, - "rho_ele2": 301373974746269.7, - "rho_pos1": 314013278169396.75, - "rho_pos2": 311298336003435.56 + "Bx": 461189750202.8409, + "By": 461177227889.41614, + "Bz": 20074725.954957746, + "Ex": 1.3827102525768512e+20, + "Ey": 1.3827414851469086e+20, + "Ez": 3.5091001092648376e+16, + "rho_beam1": 3.829927705533972e+16, + "rho_beam2": 3.830015035966193e+16, + "rho_ele1": 162411820580232.94, + "rho_ele2": 150511095307150.62, + "rho_pos1": 159308102449286.12, + "rho_pos2": 156118194376152.7 }, "beam1": { - "particle_opticalDepthQSR": 104848.69019384333, - "particle_position_x": 0.0015001641452988207, - "particle_position_y": 0.0015001296473841738, - 
"particle_position_z": 0.004965480036291212, - "particle_momentum_x": 6.203657034942546e-15, - "particle_momentum_y": 6.161790111190829e-15, - "particle_momentum_z": 6.806194292286189e-12, - "particle_weight": 635868088.3867786 + "particle_opticalDepthQSR": 52563.18675345914, + "particle_position_x": 0.0007501135479816878, + "particle_position_y": 0.0007501095180907413, + "particle_position_z": 0.002483074586668512, + "particle_momentum_x": 3.0935661449588394e-15, + "particle_momentum_y": 3.070048977445414e-15, + "particle_momentum_z": 3.4017266957145416e-12, + "particle_weight": 636083515.3729652 }, "beam2": { - "particle_opticalDepthQSR": 104197.28107371755, - "particle_position_x": 0.0015001452558405398, - "particle_position_y": 0.0015001281739351966, - "particle_position_z": 0.0049656445643994716, - "particle_momentum_x": 6.202758467582172e-15, - "particle_momentum_y": 6.18910011814166e-15, - "particle_momentum_z": 6.7994521022372906e-12, - "particle_weight": 635874794.3085052 + "particle_opticalDepthQSR": 52275.42552501091, + "particle_position_x": 0.0007500428425956199, + "particle_position_y": 0.0007500867178448842, + "particle_position_z": 0.0024830812114940977, + "particle_momentum_x": 3.1124995623090863e-15, + "particle_momentum_y": 3.094827769550167e-15, + "particle_momentum_z": 3.4015150389915676e-12, + "particle_weight": 636114264.930704 }, "ele1": { - "particle_opticalDepthQSR": 398.7154177999122, - "particle_position_x": 5.2166787076833645e-06, - "particle_position_y": 5.005755590473258e-06, - "particle_position_z": 1.856829463647771e-05, - "particle_momentum_x": 6.0736878569270085e-18, - "particle_momentum_y": 5.735020185191748e-18, - "particle_momentum_z": 2.827581034346608e-15, - "particle_weight": 2602683.4209351614 + "particle_opticalDepthQSR": 156.022199846285, + "particle_position_x": 2.4401923319868757e-06, + "particle_position_y": 2.399150249907213e-06, + "particle_position_z": 8.791577071017722e-06, + "particle_momentum_x": 2.7299291171683895e-18, + "particle_momentum_y": 2.6551510668418435e-18, + "particle_momentum_z": 1.33445643731407e-15, + "particle_weight": 2656838.9769653436 }, "ele2": { - "particle_opticalDepthQSR": 328.6975869797729, - "particle_position_x": 4.984003903707884e-06, - "particle_position_y": 4.695016970410262e-06, - "particle_position_z": 1.606918799511055e-05, - "particle_momentum_x": 4.524294388810778e-18, - "particle_momentum_y": 4.193609622515901e-18, - "particle_momentum_z": 2.624217472737641e-15, - "particle_weight": 2432495.8168380223 + "particle_opticalDepthQSR": 163.79686010701988, + "particle_position_x": 2.724737203764692e-06, + "particle_position_y": 2.9829403746737846e-06, + "particle_position_z": 9.127382649103148e-06, + "particle_momentum_x": 2.1813342297510976e-18, + "particle_momentum_y": 2.7643067192718357e-18, + "particle_momentum_z": 1.259574872512064e-15, + "particle_weight": 2517356.358594387 }, "pho1": { - "particle_opticalDepthBW": 10028.214317531058, - "particle_position_x": 0.00014200324200040716, - "particle_position_y": 0.00014310262095706036, - "particle_position_z": 0.00047470309948487784, + "particle_opticalDepthBW": 5007.597539698783, + "particle_position_x": 7.214053121897416e-05, + "particle_position_y": 7.223804186317301e-05, + "particle_position_z": 0.00024115699590772453, "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 0.0, - "particle_weight": 61455533.15171491 + "particle_weight": 62860453.544321 }, "pho2": { - "particle_opticalDepthBW": 10261.48950301913, - 
"particle_position_x": 0.0001465092909391631, - "particle_position_y": 0.00014555115652303745, - "particle_position_z": 0.00048686081947093, + "particle_opticalDepthBW": 5113.753887045111, + "particle_position_x": 7.271625175781002e-05, + "particle_position_y": 7.311023374122331e-05, + "particle_position_z": 0.00024123464128276151, "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 0.0, - "particle_weight": 61924991.09906147 + "particle_weight": 59821371.52413007 }, "pos1": { - "particle_opticalDepthQSR": 380.4787933889546, - "particle_position_x": 5.59226140958729e-06, - "particle_position_y": 5.199149983019462e-06, - "particle_position_z": 1.7261766049926983e-05, - "particle_momentum_x": 5.182944941041321e-18, - "particle_momentum_y": 4.665394338329992e-18, - "particle_momentum_z": 2.565450485567441e-15, - "particle_weight": 2523696.1743423166 + "particle_opticalDepthQSR": 176.87865344045926, + "particle_position_x": 2.597218595285766e-06, + "particle_position_y": 2.5071002403671178e-06, + "particle_position_z": 8.190923176799435e-06, + "particle_momentum_x": 2.409544640420923e-18, + "particle_momentum_y": 2.5096320511498773e-18, + "particle_momentum_z": 1.3349884612525734e-15, + "particle_weight": 2604339.6419650833 }, "pos2": { - "particle_opticalDepthQSR": 378.7526306435402, - "particle_position_x": 4.812490588954386e-06, - "particle_position_y": 4.351750384371962e-06, - "particle_position_z": 1.7621416174292307e-05, - "particle_momentum_x": 4.979887438720444e-18, - "particle_momentum_y": 4.8215630209506066e-18, - "particle_momentum_z": 2.193964301475807e-15, - "particle_weight": 2513162.277112609 + "particle_opticalDepthQSR": 229.50925371797547, + "particle_position_x": 2.6205324097963396e-06, + "particle_position_y": 2.8134541282576216e-06, + "particle_position_z": 9.865542956073817e-06, + "particle_momentum_x": 2.536744632018102e-18, + "particle_momentum_y": 3.035517414633681e-18, + "particle_momentum_z": 1.3203905663185877e-15, + "particle_weight": 2570091.732557297 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_multiJ_psatd.json b/Regression/Checksum/benchmarks_json/test_rz_multiJ_psatd.json index f30e773e7e0..d9ca66cf0c3 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_multiJ_psatd.json +++ b/Regression/Checksum/benchmarks_json/test_rz_multiJ_psatd.json @@ -1,40 +1,40 @@ { "lev=0": { - "Bt": 24080.416715463354, - "Er": 4536303784778.672, - "Ez": 4298815071343.07, - "jr": 361182004529074.8, - "jz": 1802215706551553.5, - "rho": 4884623.957368025, - "rho_driver": 6288266.101815153, - "rho_plasma_e": 49568366.40537152, - "rho_plasma_p": 50769182.21072973 + "Bt": 5436.145827903481, + "Er": 1084033329144.5951, + "Ez": 1031727477244.4946, + "jr": 86689049352046.02, + "jz": 384358875752423.8, + "rho": 863406.9511533659, + "rho_driver": 1142390.635353391, + "rho_plasma_e": 12207428.74629265, + "rho_plasma_p": 12452342.269855343 }, "driver": { - "particle_momentum_x": 8.723405122353729e-16, - "particle_momentum_y": 8.719285567351437e-16, - "particle_momentum_z": 5.461771334692466e-13, - "particle_position_x": 6.269566322411488, - "particle_position_y": 17.934200805964075, - "particle_theta": 1570790.0436095877, - "particle_weight": 6241484108.424456 - }, - "plasma_p": { - "particle_momentum_x": 6.650539058831456e-20, - "particle_momentum_y": 6.776260514923786e-20, - "particle_momentum_z": 5.470216831432835e-20, - "particle_position_x": 1.1365201443471713, - "particle_position_y": 0.6152066517828133, - 
"particle_theta": 20286.92798337582, - "particle_weight": 1002457942911.3788 + "particle_momentum_x": 8.723365602723476e-16, + "particle_momentum_y": 8.719184082046568e-16, + "particle_momentum_z": 5.461727890375063e-13, + "particle_position_x": 6.269474429349755, + "particle_position_y": 17.933429487411857, + "particle_theta": 1570777.477238974, + "particle_weight": 6241434176.35186 }, "plasma_e": { - "particle_momentum_x": 6.655027717314839e-20, - "particle_momentum_y": 6.730480164464723e-20, - "particle_momentum_z": 2.8073811669581434e-20, - "particle_position_x": 1.1423427658689635, - "particle_position_y": 0.6140113094028803, - "particle_theta": 20188.939948727297, - "particle_weight": 1002457942911.3788 + "particle_momentum_x": 1.369719083664514e-20, + "particle_momentum_y": 1.5823095211640957e-20, + "particle_momentum_z": 7.189697105606772e-21, + "particle_position_x": 0.28510775565436686, + "particle_position_y": 0.1491507116912657, + "particle_theta": 5011.181913404598, + "particle_weight": 1001422925327.429 + }, + "plasma_p": { + "particle_momentum_x": 1.4836311665634252e-20, + "particle_momentum_y": 1.3653689459385548e-20, + "particle_momentum_z": 1.2505361981099512e-20, + "particle_position_x": 0.2838368160801851, + "particle_position_y": 0.14950442866368444, + "particle_theta": 4995.577541103406, + "particle_weight": 1001422925327.4291 } } \ No newline at end of file From a33ae3f6cc7c4e37f9db43d0fdd9fc41c8523378 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 13 Sep 2024 12:11:56 -0700 Subject: [PATCH 40/91] AMReX/pyAMReX/PICSAR: Weekly Update (#5262) * Update updater scripts * AMReX: Weekly Update * pyAMReX: Weekly Update --- .github/workflows/cuda.yml | 2 +- Tools/Release/updateAMReX.py | 42 -------------------------------- cmake/dependencies/AMReX.cmake | 2 +- cmake/dependencies/pyAMReX.cmake | 2 +- 4 files changed, 3 insertions(+), 45 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 8b1c99d917e..a1e7f5affda 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -131,7 +131,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 4460afbbce250ac6b463ea2bee0d9930c5059d2f && cd - + cd ../amrex && git checkout --detach 028638564f7be0694b9898f8d4088cdbf9a6f9f5 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/Tools/Release/updateAMReX.py b/Tools/Release/updateAMReX.py index 7ba3bca8357..beeb12e85ff 100755 --- a/Tools/Release/updateAMReX.py +++ b/Tools/Release/updateAMReX.py @@ -15,15 +15,6 @@ import requests -try: - from configupdater import ConfigUpdater -except ImportError: - print("Warning: Cannot update .ini files without 'configupdater'") - print("Consider running 'python -m pip install configupdater'") - ConfigUpdater = None - sys.exit(1) - - # Maintainer Inputs ########################################################### print("""Hi there, this is a WarpX maintainer tool to update the source @@ -110,22 +101,6 @@ # Updates ##################################################################### -# run_test.sh (used also for Azure Pipelines) -run_test_path = str(REPO_DIR.joinpath("run_test.sh")) -with open(run_test_path, encoding="utf-8") as f: - run_test_content = f.read() - # branch/commit/tag (git fetcher) version - # cd amrex && git checkout COMMIT_TAG_OR_BRANCH && cd - - run_test_content = re.sub( - r"(.*cd\s+amrex.+git checkout\s+--detach\s+)(.+)(\s+&&\s.*)", - r"\g<1>{}\g<3>".format(amrex_new_branch), - run_test_content, - flags=re.MULTILINE, - ) - -with open(run_test_path, "w", encoding="utf-8") as f: - f.write(run_test_content) - # CI: legacy build check in .github/workflows/cuda.yml ci_gnumake_path = str(REPO_DIR.joinpath(".github/workflows/cuda.yml")) with open(ci_gnumake_path, encoding="utf-8") as f: @@ -142,23 +117,6 @@ with open(ci_gnumake_path, "w", encoding="utf-8") as f: f.write(ci_gnumake_content) -if ConfigUpdater is not None: - # WarpX-tests.ini - tests_ini_path = str(REPO_DIR.joinpath("Regression/WarpX-tests.ini")) - cp = ConfigUpdater() - cp.optionxform = str - cp.read(tests_ini_path) - cp["AMReX"]["branch"].value = amrex_new_branch - cp.update_file() - - # WarpX-GPU-tests.ini - tests_gpu_ini_path = str(REPO_DIR.joinpath("Regression/WarpX-GPU-tests.ini")) - cp = ConfigUpdater() - cp.optionxform = str - cp.read(tests_gpu_ini_path) - cp["AMReX"]["branch"].value = amrex_new_branch - cp.update_file() - # WarpX references to AMReX: cmake/dependencies/AMReX.cmake with open(amrex_cmake_path, encoding="utf-8") as f: amrex_cmake_content = f.read() diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index d6b1707e527..e3682b69ff5 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -279,7 +279,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "4460afbbce250ac6b463ea2bee0d9930c5059d2f" +set(WarpX_amrex_branch "028638564f7be0694b9898f8d4088cdbf9a6f9f5" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 4c92ffa99ba..e93851443c0 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from 
if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "da2d5a000330395b3fcbcb43a519b3c8a318c584" +set(WarpX_pyamrex_branch "41c856b8a588c3c8b04bb35d2d05b56f6ce0dd7f" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From edee5e995732b144d750a482938a1d9cd4a81cfe Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Fri, 13 Sep 2024 12:26:03 -0700 Subject: [PATCH 41/91] CI: remove slow test for 1D laser acceleration with fluid (#5261) * Remove slow test for 1D laser acceleration with fluid * Fully remove test --- .../laser_acceleration/CMakeLists.txt | 10 - .../laser_acceleration/analysis_1d_fluid.py | 193 ------------------ .../inputs_test_1d_laser_acceleration_fluid | 72 ------- .../test_1d_laser_acceleration_fluid.json | 15 -- 4 files changed, 290 deletions(-) delete mode 100755 Examples/Physics_applications/laser_acceleration/analysis_1d_fluid.py delete mode 100644 Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration_fluid delete mode 100644 Regression/Checksum/benchmarks_json/test_1d_laser_acceleration_fluid.json diff --git a/Examples/Physics_applications/laser_acceleration/CMakeLists.txt b/Examples/Physics_applications/laser_acceleration/CMakeLists.txt index c26b06b380a..46e97a53d54 100644 --- a/Examples/Physics_applications/laser_acceleration/CMakeLists.txt +++ b/Examples/Physics_applications/laser_acceleration/CMakeLists.txt @@ -11,16 +11,6 @@ add_warpx_test( OFF # dependency ) -add_warpx_test( - test_1d_laser_acceleration_fluid # name - 1 # dims - 2 # nprocs - inputs_test_1d_laser_acceleration_fluid # inputs - analysis_1d_fluid.py # analysis - diags/diag1040000 # output - OFF # dependency -) - add_warpx_test( test_1d_laser_acceleration_fluid_boosted # name 1 # dims diff --git a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid.py b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid.py deleted file mode 100755 index 593036bc3f6..00000000000 --- a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid.py +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2019-2023 Grant Johnson, Remi Lehe -# -# -# This file is part of WarpX. -# -# License: BSD-3-Clause-LBNL -# -# This is a script that analyses the simulation results from -# the script `inputs_1d`. 
This simulates a 1D WFA with Pondermotive Envelope: -# REF: (Equations 20-23) https://journals.aps.org/rmp/pdf/10.1103/RevModPhys.81.1229 -import os -import sys - -import matplotlib - -matplotlib.use("Agg") -import matplotlib.pyplot as plt -import yt - -yt.funcs.mylog.setLevel(50) - -import numpy as np -from scipy.constants import c, e, epsilon_0, m_e - -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] - -# Parameters (these parameters must match the parameters in `inputs.multi.rt`) -n0 = 20.0e23 -# Plasma frequency -wp = np.sqrt((n0 * e**2) / (m_e * epsilon_0)) -kp = wp / c -tau = 15.0e-15 -a0 = 2.491668 -e = -e # Electrons -lambda_laser = 0.8e-6 - -zmin = -20e-6 -zmax = 100.0e-6 -Nz = 10240 - -# Compute the theory - -# Call the ode solver -from scipy.integrate import odeint - - -# ODE Function -def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): - phi1, phi2 = phi - a_sq = ( - a0**2 - * np.exp(-2 * (xi - xi_0) ** 2 / (c**2 * tau**2)) - * np.sin(2 * np.pi * (xi - xi_0) / lambda_laser) ** 2 - ) - dphi1_dxi = phi2 - dphi2_dxi = kp**2 * ((1 + a_sq) / (2 * (1 + phi1) ** 2) - 0.5) - return [dphi1_dxi, dphi2_dxi] - - -# Call odeint to solve the ODE -xi_span = [-20e-6, 100e-6] -xi_0 = 0e-6 -phi0 = [0.0, 0.0] -dxi = (zmax - zmin) / Nz -xi = zmin + dxi * (0.5 + np.arange(Nz)) -phi = odeint(odefcn, phi0, xi, args=(kp, a0, c, tau, xi_0, lambda_laser)) - -# Change array direction to match the simulations -xi = -xi[::-1] -phi = phi[::-1] -xi_0 = -0e-6 -phi2 = phi[:, 0] -Ez = -phi[:, 1] - -# Compute the derived quantities -a_sq = ( - a0**2 - * np.exp(-2 * (xi - xi_0) ** 2 / (c**2 * tau**2)) - * np.sin(2 * np.pi * (xi - xi_0) / lambda_laser) ** 2 -) -gamma_perp_sq = 1 + a_sq -n = n0 * (gamma_perp_sq + (1 + phi2) ** 2) / (2 * (1 + phi2) ** 2) -uz = (gamma_perp_sq - (1 + phi2) ** 2) / (2 * (1 + phi2)) -gamma = (gamma_perp_sq + (1 + phi2) ** 2) / (2 * (1 + phi2)) - -# Theory Components [convert to si] -uz *= c -J_th = np.multiply(np.divide(uz, gamma), n) -J_th *= e -rho_th = e * n -E_th = Ez -E_th *= (m_e * c * c) / e -V_th = np.divide(uz, gamma) -V_th /= c -# Remove the ions -rho_th = rho_th - e * n0 - -# Dictate which region to compare solutions over -# (Currently this is the full domain) -min_i = 0 -max_i = 10240 - -# Read the file -ds = yt.load(fn) -t0 = ds.current_time.to_value() -data = ds.covering_grid( - level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions -) -# Check the validity of the fields -error_rel = 0 -for field in ["Ez"]: - E_sim = data[("mesh", field)].to_ndarray()[:, 0, 0] - # E_th = get_theoretical_field(field, t0) - max_error = ( - abs(E_sim[min_i:max_i] - E_th[min_i:max_i]).max() / abs(E_th[min_i:max_i]).max() - ) - print("%s: Max error: %.2e" % (field, max_error)) - error_rel = max(error_rel, max_error) - -# Check the validity of the currents -for field in ["Jz"]: - J_sim = data[("mesh", field)].to_ndarray()[:, 0, 0] - # J_th = get_theoretical_J_field(field, t0) - max_error = ( - abs(J_sim[min_i:max_i] - J_th[min_i:max_i]).max() / abs(J_th[min_i:max_i]).max() - ) - print("%s: Max error: %.2e" % (field, max_error)) - error_rel = max(error_rel, max_error) - -# Check the validity of the charge -for field in ["rho"]: - rho_sim = data[("boxlib", field)].to_ndarray()[:, 0, 0] - # rho_th = get_theoretical_rho_field(field, t0) - max_error = ( - abs(rho_sim[min_i:max_i] - rho_th[min_i:max_i]).max() - / abs(rho_th[min_i:max_i]).max() - ) - print("%s: Max error: %.2e" % (field, 
max_error)) - error_rel = max(error_rel, max_error) - -V_sim = np.divide(J_sim, rho_sim) -V_sim /= c - -# Create a figure with 2 rows and 2 columns -fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 8)) - -# Titles and labels -titles = ["Ez", "rho", "Jz", "Vz/c"] -xlabel = r"Xi" -ylabel = ["Ez", "rho", "Jz", "Vz/c"] - -# Plotting loop -for i in range(3): - ax = axes[i // 2, i % 2] # Get the current subplot - - # Plot theoretical data - ax.plot(xi, [E_th, rho_th, J_th, V_th][i], label="Theoretical") - - # Plot simulated data - ax.plot(xi, [E_sim, rho_sim, J_sim, V_sim][i], label="Simulated") - - # Set titles and labels - ax.set_title(f"{titles[i]} vs Xi") - ax.set_xlabel(xlabel) - ax.set_ylabel(ylabel[i]) - - # Add legend - ax.legend() - -# Adjust subplot layout -plt.tight_layout() - -# Save the figure -plt.savefig("wfa_fluid_nonlinear_1d_analysis.png") - -plt.show() - - -tolerance_rel = 0.20 - -print("error_rel : " + str(error_rel)) -print("tolerance_rel: " + str(tolerance_rel)) - -assert error_rel < tolerance_rel - -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration_fluid b/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration_fluid deleted file mode 100644 index 73fa6b7283f..00000000000 --- a/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration_fluid +++ /dev/null @@ -1,72 +0,0 @@ -################################# -####### GENERAL PARAMETERS ###### -################################# -max_step = 40000 -amr.n_cell = 10240 -amr.max_grid_size = 512 # maximum size of each AMReX box, used to decompose the domain -amr.blocking_factor = 512 # minimum size of each AMReX box, used to decompose the domain -geometry.dims = 1 -geometry.prob_lo = -120.e-6 # physical domain -geometry.prob_hi = 0.e-6 -amr.max_level = 0 # Maximum level in hierarchy (1 might be unstable, >1 is not supported) - -################################# -####### Boundary condition ###### -################################# -boundary.field_lo = pec -boundary.field_hi = pec - -################################# -############ NUMERICS ########### -################################# -warpx.verbose = 1 -warpx.do_dive_cleaning = 0 -warpx.use_filter = 0 -warpx.cfl = 0.45 #Fluid CFL < 0.5 -warpx.do_moving_window = 1 -warpx.moving_window_dir = z -warpx.moving_window_v = 1.0 # units of speed of light -warpx.do_dynamic_scheduling = 0 -warpx.serialize_initial_conditions = 1 - -################################# -############ PLASMA ############# -################################# -fluids.species_names = electrons ions - -electrons.species_type = electron -electrons.profile = parse_density_function -electrons.density_function(x,y,z) = "1.0e10 + 20.e23*((z*5.e4 + -0.5)*(z>10.e-6)*(z<30.e-6)) + 20.e23*((z>30.e-6))" -electrons.momentum_distribution_type = "at_rest" - -ions.charge = q_e -ions.mass = m_p -ions.profile = parse_density_function -ions.density_function(x,y,z) = "1.0e10 + 20.e23*((z*5.e4 + -0.5)*(z>10.e-6)*(z<30.e-6)) + 20.e23*((z>30.e-6))" -ions.momentum_distribution_type = "at_rest" - -# Order of particle shape factors -algo.particle_shape = 3 - -################################# -############ LASER ############## -################################# -lasers.names = laser1 -laser1.profile = Gaussian -laser1.position = 0. 0. -11.e-6 # This point is on the laser plane -laser1.direction = 0. 0. 1. 
# The plane normal direction -laser1.polarization = 0. 1. 0. # The main polarization vector -laser1.e_max = 10.e12 # Maximum amplitude of the laser field (in V/m) -laser1.profile_waist = 5.e-6 # The waist of the laser (in m) -laser1.profile_duration = 15.e-15 # The duration of the laser (in s) -laser1.profile_t_peak = 30.e-15 # Time at which the laser reaches its peak (in s) -laser1.profile_focal_distance = 100.e-6 # Focal distance from the antenna (in m) -laser1.wavelength = 0.8e-6 # The wavelength of the laser (in m) - -# Diagnostics -diagnostics.diags_names = diag1 - -# LAB -diag1.intervals = 20000 -diag1.diag_type = Full -diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho diff --git a/Regression/Checksum/benchmarks_json/test_1d_laser_acceleration_fluid.json b/Regression/Checksum/benchmarks_json/test_1d_laser_acceleration_fluid.json deleted file mode 100644 index 2843e49ce22..00000000000 --- a/Regression/Checksum/benchmarks_json/test_1d_laser_acceleration_fluid.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "lev=0": { - "Bx": 14264658.38987597, - "By": 0.0, - "Bz": 0.0, - "Ex": 0.0, - "Ey": 4276056420659746.5, - "Ez": 762168740318568.1, - "jx": 0.0, - "jy": 7.47674123799233e+16, - "jz": 4.817762115932484e+17, - "rho": 1609691680.1267354 - } -} - From 878721f5d77bd37bba5a7b85c23612199033c831 Mon Sep 17 00:00:00 2001 From: Andrew Myers Date: Fri, 13 Sep 2024 12:44:52 -0700 Subject: [PATCH 42/91] Refactor AddPlasma and AddPlasmaFlux (#5231) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Axel Huebl --- Source/Particles/AddPlasmaUtilities.H | 210 +++++++++ Source/Particles/AddPlasmaUtilities.cpp | 158 +++++++ Source/Particles/CMakeLists.txt | 1 + Source/Particles/Make.package | 1 + Source/Particles/PhysicalParticleContainer.H | 8 + .../Particles/PhysicalParticleContainer.cpp | 436 +++++------------- 6 files changed, 481 insertions(+), 333 deletions(-) create mode 100644 Source/Particles/AddPlasmaUtilities.H create mode 100644 Source/Particles/AddPlasmaUtilities.cpp diff --git a/Source/Particles/AddPlasmaUtilities.H b/Source/Particles/AddPlasmaUtilities.H new file mode 100644 index 00000000000..8f0489e3921 --- /dev/null +++ b/Source/Particles/AddPlasmaUtilities.H @@ -0,0 +1,210 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + * Authors: Andrew Myers + */ +#ifndef WARPX_ADDPLASMAUTILITIES_H_ +#define WARPX_ADDPLASMAUTILITIES_H_ + +#include "Initialization/PlasmaInjector.H" + +#ifdef WARPX_QED +# include "Particles/ElementaryProcess/QEDInternals/BreitWheelerEngineWrapper.H" +# include "Particles/ElementaryProcess/QEDInternals/QuantumSyncEngineWrapper.H" +#endif + +#include +#include +#include +#include +#include +#include + +/* + Finds the overlap region between the given tile_realbox and part_realbox, returning true + if an overlap exists and false if otherwise. This also sets the parameters overlap_realbox, + overlap_box, and shifted to the appropriate values. + */ +bool find_overlap (const amrex::RealBox& tile_realbox, const amrex::RealBox& part_realbox, + const amrex::GpuArray& dx, + const amrex::GpuArray& prob_lo, + amrex::RealBox& overlap_realbox, amrex::Box& overlap_box, amrex::IntVect& shifted); + +/* + Finds the overlap region between the given tile_realbox, part_realbox and the surface used for + flux injection, returning true if an overlap exists and false if otherwise. 
This also sets the
+  parameters overlap_realbox, overlap_box, and shifted to the appropriate values.
+ */
+bool find_overlap_flux (const amrex::RealBox& tile_realbox, const amrex::RealBox& part_realbox,
+                        const amrex::GpuArray<amrex::Real, AMREX_SPACEDIM>& dx,
+                        const amrex::GpuArray<amrex::Real, AMREX_SPACEDIM>& prob_lo,
+                        const PlasmaInjector& plasma_injector,
+                        amrex::RealBox& overlap_realbox, amrex::Box& overlap_box, amrex::IntVect& shifted);
+
+/*
+  This computes the scale_fac (used for setting the particle weights) on a volumetric basis.
+ */
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+amrex::Real compute_scale_fac_volume (const amrex::GpuArray<amrex::Real, AMREX_SPACEDIM>& dx,
+                                      const amrex::Long pcount) {
+    using namespace amrex::literals;
+    return (pcount != 0) ? AMREX_D_TERM(dx[0],*dx[1],*dx[2])/pcount : 0.0_rt;
+}
+
+/*
+  Given a refinement ratio, this computes the total increase in resolution for a plane
+  defined by the normal_axis.
+ */
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+int compute_area_weights (const amrex::IntVect& iv, const int normal_axis) {
+    int r = AMREX_D_TERM(iv[0],*iv[1],*iv[2]);
+#if defined(WARPX_DIM_3D)
+    r /= iv[normal_axis];
+#elif defined(WARPX_DIM_RZ) || defined(WARPX_DIM_XZ)
+    if (normal_axis == 0) { r /= iv[0]; }
+    else if (normal_axis == 2) { r /= iv[1]; }
+#elif defined(WARPX_DIM_1D_Z)
+    if (normal_axis == 2) { r /= iv[0]; }
+#endif
+    return r;
+}
+
+/*
+  This computes the scale_fac (used for setting the particle weights) on an area basis
+  (used for flux injection).
+ */
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+amrex::Real compute_scale_fac_area (const amrex::GpuArray<amrex::Real, AMREX_SPACEDIM>& dx,
+                                    const amrex::Real num_ppc_real, const int flux_normal_axis) {
+    using namespace amrex::literals;
+    amrex::Real scale_fac = AMREX_D_TERM(dx[0],*dx[1],*dx[2])/num_ppc_real;
+    // Scale particle weight by the area of the emitting surface, within one cell
+#if defined(WARPX_DIM_3D)
+    scale_fac /= dx[flux_normal_axis];
+#elif defined(WARPX_DIM_RZ) || defined(WARPX_DIM_XZ)
+    // When emission is in the r direction, the emitting surface is a cylinder.
+    // The factor 2*pi*r is added later below.
+    if (flux_normal_axis == 0) { scale_fac /= dx[0]; }
+    // When emission is in the z direction, the emitting surface is an annulus
+    // The factor 2*pi*r is added later below.
+    if (flux_normal_axis == 2) { scale_fac /= dx[1]; }
+    // When emission is in the theta direction (flux_normal_axis == 1),
+    // the emitting surface is a rectangle, within the plane of the simulation
+#elif defined(WARPX_DIM_1D_Z)
+    if (flux_normal_axis == 2) { scale_fac /= dx[0]; }
+#endif
+    return scale_fac;
+}
+
+/*
+  These structs encapsulate several data structures needed for using the parser during plasma
+  injection.
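+
+  A rough usage sketch (not a verbatim excerpt; "soa" and "old_size" stand for the
+  tile's struct-of-arrays and its size before the new particles were appended).
+  The wrapper is built once per species, before the tile loop, and the helper once
+  per tile; the seven parser arguments are x, y, z, ux, uy, uz and t, matching the
+  call sites in PhysicalParticleContainer.cpp:
+
+    PlasmaParserWrapper wrapper(m_user_int_attribs.size(), m_user_real_attribs.size(),
+                                m_user_int_attrib_parser, m_user_real_attrib_parser);
+    PlasmaParserHelper helper(soa, old_size, m_user_int_attribs, m_user_real_attribs,
+                              particle_icomps, particle_comps, wrapper);
+    amrex::ParserExecutor<7> const* exec = helper.getUserIntParserExecData();
+    // inside the ParallelFor over new particles:
+    //   pa_user_int_data[ia][ip] = (int) exec[ia](pos.x, pos.y, pos.z, u.x, u.y, u.z, t);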
+ */
+struct PlasmaParserWrapper
+{
+    PlasmaParserWrapper (std::size_t a_num_user_int_attribs,
+                         std::size_t a_num_user_real_attribs,
+                         const amrex::Vector< std::unique_ptr<amrex::Parser> >& a_user_int_attrib_parser,
+                         const amrex::Vector< std::unique_ptr<amrex::Parser> >& a_user_real_attrib_parser);
+
+    amrex::Gpu::PinnedVector< amrex::ParserExecutor<7> > m_user_int_attrib_parserexec_pinned;
+    amrex::Gpu::PinnedVector< amrex::ParserExecutor<7> > m_user_real_attrib_parserexec_pinned;
+};
+
+struct PlasmaParserHelper
+{
+    template <typename SoAType>
+    PlasmaParserHelper (SoAType& a_soa, std::size_t old_size,
+                        const std::vector<std::string>& a_user_int_attribs,
+                        const std::vector<std::string>& a_user_real_attribs,
+                        std::map<std::string, int>& a_particle_icomps,
+                        std::map<std::string, int>& a_particle_comps,
+                        const PlasmaParserWrapper& wrapper) :
+        m_wrapper_ptr(&wrapper) {
+        m_pa_user_int_pinned.resize(a_user_int_attribs.size());
+        m_pa_user_real_pinned.resize(a_user_real_attribs.size());
+
+#ifdef AMREX_USE_GPU
+        m_d_pa_user_int.resize(a_user_int_attribs.size());
+        m_d_pa_user_real.resize(a_user_real_attribs.size());
+        m_d_user_int_attrib_parserexec.resize(a_user_int_attribs.size());
+        m_d_user_real_attrib_parserexec.resize(a_user_real_attribs.size());
+#endif
+
+        for (std::size_t ia = 0; ia < a_user_int_attribs.size(); ++ia) {
+            m_pa_user_int_pinned[ia] = a_soa.GetIntData(a_particle_icomps[a_user_int_attribs[ia]]).data() + old_size;
+        }
+        for (std::size_t ia = 0; ia < a_user_real_attribs.size(); ++ia) {
+            m_pa_user_real_pinned[ia] = a_soa.GetRealData(a_particle_comps[a_user_real_attribs[ia]]).data() + old_size;
+        }
+
+#ifdef AMREX_USE_GPU
+        amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, m_pa_user_int_pinned.begin(),
+                              m_pa_user_int_pinned.end(), m_d_pa_user_int.begin());
+        amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, m_pa_user_real_pinned.begin(),
+                              m_pa_user_real_pinned.end(), m_d_pa_user_real.begin());
+        amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, wrapper.m_user_int_attrib_parserexec_pinned.begin(),
+                              wrapper.m_user_int_attrib_parserexec_pinned.end(), m_d_user_int_attrib_parserexec.begin());
+        amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, wrapper.m_user_real_attrib_parserexec_pinned.begin(),
+                              wrapper.m_user_real_attrib_parserexec_pinned.end(), m_d_user_real_attrib_parserexec.begin());
+#endif
+    }
+
+    int** getUserIntDataPtrs ();
+    amrex::ParticleReal** getUserRealDataPtrs ();
+    [[nodiscard]] amrex::ParserExecutor<7> const* getUserIntParserExecData () const;
+    [[nodiscard]] amrex::ParserExecutor<7> const* getUserRealParserExecData () const;
+
+    amrex::Gpu::PinnedVector<int*> m_pa_user_int_pinned;
+    amrex::Gpu::PinnedVector<amrex::ParticleReal*> m_pa_user_real_pinned;
+
+#ifdef AMREX_USE_GPU
+    // To avoid using managed memory, we first define pinned memory vector, initialize on cpu,
+    // and then memcpy to device from host
+    amrex::Gpu::DeviceVector<int*> m_d_pa_user_int;
+    amrex::Gpu::DeviceVector<amrex::ParticleReal*> m_d_pa_user_real;
+    amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > m_d_user_int_attrib_parserexec;
+    amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > m_d_user_real_attrib_parserexec;
+#endif
+    const PlasmaParserWrapper* m_wrapper_ptr;
+};
+
+#ifdef WARPX_QED
+struct QEDHelper
+{
+    template <typename SoAType>
+    QEDHelper (SoAType& a_soa, std::size_t old_size,
+               std::map<std::string, int>& a_particle_comps,
+               bool a_has_quantum_sync, bool a_has_breit_wheeler,
+               const std::shared_ptr<QuantumSynchrotronEngine>& a_shr_p_qs_engine,
+               const std::shared_ptr<BreitWheelerEngine>& a_shr_p_bw_engine)
+        : has_quantum_sync(a_has_quantum_sync), has_breit_wheeler(a_has_breit_wheeler)
+    {
+        if(has_quantum_sync){
+            quantum_sync_get_opt =
+                a_shr_p_qs_engine->build_optical_depth_functor();
+            p_optical_depth_QSR
= a_soa.GetRealData( + a_particle_comps["opticalDepthQSR"]).data() + old_size; + } + if(has_breit_wheeler){ + breit_wheeler_get_opt = + a_shr_p_bw_engine->build_optical_depth_functor(); + p_optical_depth_BW = a_soa.GetRealData( + a_particle_comps["opticalDepthBW"]).data() + old_size; + } + } + + amrex::ParticleReal* p_optical_depth_QSR = nullptr; + amrex::ParticleReal* p_optical_depth_BW = nullptr; + + const bool has_quantum_sync; + const bool has_breit_wheeler; + + QuantumSynchrotronGetOpticalDepth quantum_sync_get_opt; + BreitWheelerGetOpticalDepth breit_wheeler_get_opt; +}; +#endif + +#endif /*WARPX_ADDPLASMAUTILITIES_H_*/ diff --git a/Source/Particles/AddPlasmaUtilities.cpp b/Source/Particles/AddPlasmaUtilities.cpp new file mode 100644 index 00000000000..31066516477 --- /dev/null +++ b/Source/Particles/AddPlasmaUtilities.cpp @@ -0,0 +1,158 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + * Authors: Andrew Myers + */ +#include "AddPlasmaUtilities.H" + +#include + +bool find_overlap (const amrex::RealBox& tile_realbox, const amrex::RealBox& part_realbox, + const amrex::GpuArray& dx, + const amrex::GpuArray& prob_lo, + amrex::RealBox& overlap_realbox, amrex::Box& overlap_box, amrex::IntVect& shifted) +{ + using namespace amrex::literals; + + bool no_overlap = false; + for (int dir=0; dir= part_realbox.lo(dir) ) { + const amrex::Real ncells_adjust = std::floor( (part_realbox.hi(dir) - tile_realbox.hi(dir))/dx[dir] ); + overlap_realbox.setHi( dir, part_realbox.hi(dir) - std::max(ncells_adjust, 0._rt) * dx[dir]); + } else { + no_overlap = true; break; + } + // Count the number of cells in this direction in overlap_realbox + overlap_box.setSmall( dir, 0 ); + overlap_box.setBig( dir, + int( std::round((overlap_realbox.hi(dir)-overlap_realbox.lo(dir)) + /dx[dir] )) - 1); + shifted[dir] = + static_cast(std::round((overlap_realbox.lo(dir)-prob_lo[dir])/dx[dir])); + // shifted is exact in non-moving-window direction. That's all we care. 
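+        // Worked example for one direction (illustrative numbers): with
+        // prob_lo = 0, dx = 0.5, part_realbox = [0, 10] and tile_realbox = [2, 4],
+        // the snapping above gives overlap_realbox = [2, 4]
+        // (lo: ncells_adjust = floor((2-0)/0.5) = 4, so lo = 0 + 4*0.5 = 2;
+        //  hi: ncells_adjust = floor((10-4)/0.5) = 12, so hi = 10 - 12*0.5 = 4),
+        // overlap_box = [0, round((4-2)/0.5)-1] = [0, 3] (4 cells), and
+        // shifted = round((2-0)/0.5) = 4, the global cell offset of the overlap.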
+ } + return no_overlap; +} + +bool find_overlap_flux (const amrex::RealBox& tile_realbox, const amrex::RealBox& part_realbox, + const amrex::GpuArray& dx, + const amrex::GpuArray& prob_lo, + const PlasmaInjector& plasma_injector, + amrex::RealBox& overlap_realbox, amrex::Box& overlap_box, amrex::IntVect& shifted) +{ + using namespace amrex::literals; + + bool no_overlap = false; + for (int dir=0; dir 0) { + if (plasma_injector.surface_flux_pos < tile_realbox.lo(dir) || + plasma_injector.surface_flux_pos >= tile_realbox.hi(dir)) { + no_overlap = true; + break; + } + } else { + if (plasma_injector.surface_flux_pos <= tile_realbox.lo(dir) || + plasma_injector.surface_flux_pos > tile_realbox.hi(dir)) { + no_overlap = true; + break; + } + } + overlap_realbox.setLo( dir, plasma_injector.surface_flux_pos ); + overlap_realbox.setHi( dir, plasma_injector.surface_flux_pos ); + overlap_box.setSmall( dir, 0 ); + overlap_box.setBig( dir, 0 ); + shifted[dir] = + static_cast(std::round((overlap_realbox.lo(dir)-prob_lo[dir])/dx[dir])); + } else { + if ( tile_realbox.lo(dir) <= part_realbox.hi(dir) ) { + const amrex::Real ncells_adjust = std::floor( (tile_realbox.lo(dir) - part_realbox.lo(dir))/dx[dir] ); + overlap_realbox.setLo( dir, part_realbox.lo(dir) + std::max(ncells_adjust, 0._rt) * dx[dir]); + } else { + no_overlap = true; break; + } + if ( tile_realbox.hi(dir) >= part_realbox.lo(dir) ) { + const amrex::Real ncells_adjust = std::floor( (part_realbox.hi(dir) - tile_realbox.hi(dir))/dx[dir] ); + overlap_realbox.setHi( dir, part_realbox.hi(dir) - std::max(ncells_adjust, 0._rt) * dx[dir]); + } else { + no_overlap = true; break; + } + // Count the number of cells in this direction in overlap_realbox + overlap_box.setSmall( dir, 0 ); + overlap_box.setBig( dir, + int( std::round((overlap_realbox.hi(dir)-overlap_realbox.lo(dir)) + /dx[dir] )) - 1); + shifted[dir] = + static_cast(std::round((overlap_realbox.lo(dir)-prob_lo[dir])/dx[dir])); + // shifted is exact in non-moving-window direction. That's all we care. 
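+            // Note: in the flux_normal_axis direction (handled in the branch above),
+            // the overlap collapses to the zero-width injection plane at
+            // surface_flux_pos, i.e. a single index in overlap_box, and the sign of
+            // flux_direction decides which of two abutting tiles owns a plane that
+            // lies exactly on their shared boundary.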
+ } + } + + return no_overlap; +} + +PlasmaParserWrapper::PlasmaParserWrapper (const std::size_t a_num_user_int_attribs, + const std::size_t a_num_user_real_attribs, + const amrex::Vector< std::unique_ptr >& a_user_int_attrib_parser, + const amrex::Vector< std::unique_ptr >& a_user_real_attrib_parser) + +{ + m_user_int_attrib_parserexec_pinned.resize(a_num_user_int_attribs); + m_user_real_attrib_parserexec_pinned.resize(a_num_user_real_attribs); + + for (std::size_t ia = 0; ia < a_num_user_int_attribs; ++ia) { + m_user_int_attrib_parserexec_pinned[ia] = a_user_int_attrib_parser[ia]->compile<7>(); + } + for (std::size_t ia = 0; ia < a_num_user_real_attribs; ++ia) { + m_user_real_attrib_parserexec_pinned[ia] = a_user_real_attrib_parser[ia]->compile<7>(); + } +} + +int** PlasmaParserHelper::getUserIntDataPtrs () { +#ifdef AMREX_USE_GPU + return m_d_pa_user_int.dataPtr(); +#else + return m_pa_user_int_pinned.dataPtr(); +#endif +} + +amrex::ParticleReal** PlasmaParserHelper::getUserRealDataPtrs () { +#ifdef AMREX_USE_GPU + return m_d_pa_user_real.dataPtr(); +#else + return m_pa_user_real_pinned.dataPtr(); +#endif +} + +amrex::ParserExecutor<7> const* PlasmaParserHelper::getUserIntParserExecData () const { +#ifdef AMREX_USE_GPU + return m_d_user_int_attrib_parserexec.dataPtr(); +#else + return m_wrapper_ptr->m_user_int_attrib_parserexec_pinned.dataPtr(); +#endif +} + +amrex::ParserExecutor<7> const* PlasmaParserHelper::getUserRealParserExecData () const { +#ifdef AMREX_USE_GPU + return m_d_user_real_attrib_parserexec.dataPtr(); +#else + return m_wrapper_ptr->m_user_real_attrib_parserexec_pinned.dataPtr(); +#endif +} diff --git a/Source/Particles/CMakeLists.txt b/Source/Particles/CMakeLists.txt index 67af14ef889..6b434c0a4e1 100644 --- a/Source/Particles/CMakeLists.txt +++ b/Source/Particles/CMakeLists.txt @@ -2,6 +2,7 @@ foreach(D IN LISTS WarpX_DIMS) warpx_set_suffix_dims(SD ${D}) target_sources(lib_${SD} PRIVATE + AddPlasmaUtilities.cpp MultiParticleContainer.cpp ParticleBoundaries.cpp PhotonParticleContainer.cpp diff --git a/Source/Particles/Make.package b/Source/Particles/Make.package index 58cbe11a980..69918f69940 100644 --- a/Source/Particles/Make.package +++ b/Source/Particles/Make.package @@ -1,3 +1,4 @@ +CEXE_sources += AddPlasmaUtilities.cpp CEXE_sources += MultiParticleContainer.cpp CEXE_sources += WarpXParticleContainer.cpp CEXE_sources += RigidInjectedParticleContainer.cpp diff --git a/Source/Particles/PhysicalParticleContainer.H b/Source/Particles/PhysicalParticleContainer.H index 5d9b41b8b75..8102fc96a91 100644 --- a/Source/Particles/PhysicalParticleContainer.H +++ b/Source/Particles/PhysicalParticleContainer.H @@ -392,6 +392,14 @@ public: } protected: + + /* + Finds the box defining the region where refine injection should be used, if that + option is enabled. Currently this only works for numLevels() == 2 and static mesh + refinement. 
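+
+      For example, with a refinement ratio of 2 in every direction, the level-1
+      box is coarsened by 2 so that it can be intersected with level-0 cell
+      indices; cells that fall inside it are then injected with r = prod(rrfac)
+      times more particles per cell (the r factor applied to num_ppc in
+      AddPlasma, or compute_area_weights in the flux-injection case).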
+ */ + bool findRefinedInjectionBox (amrex::Box& fine_injection_box, amrex::IntVect& rrfac); + std::string species_name; std::vector> plasma_injectors; diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 94a65303cc5..d1a19f06993 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -15,6 +15,7 @@ #include "Initialization/InjectorMomentum.H" #include "Initialization/InjectorPosition.H" #include "MultiParticleContainer.H" +#include "Particles/AddPlasmaUtilities.H" #ifdef WARPX_QED # include "Particles/ElementaryProcess/QEDInternals/BreitWheelerEngineWrapper.H" # include "Particles/ElementaryProcess/QEDInternals/QuantumSyncEngineWrapper.H" @@ -217,8 +218,7 @@ namespace const GpuArray& pa, long& ip, const bool& do_field_ionization, int* pi #ifdef WARPX_QED - ,const bool& has_quantum_sync, amrex::ParticleReal* AMREX_RESTRICT p_optical_depth_QSR - ,const bool& has_breit_wheeler, amrex::ParticleReal* AMREX_RESTRICT p_optical_depth_BW + ,const QEDHelper& qed_helper #endif ) noexcept { @@ -227,8 +227,8 @@ namespace } if (do_field_ionization) {pi[ip] = 0;} #ifdef WARPX_QED - if (has_quantum_sync) {p_optical_depth_QSR[ip] = 0._rt;} - if (has_breit_wheeler) {p_optical_depth_BW[ip] = 0._rt;} + if (qed_helper.has_quantum_sync) {qed_helper.p_optical_depth_QSR[ip] = 0._rt;} + if (qed_helper.has_breit_wheeler) {qed_helper.p_optical_depth_BW[ip] = 0._rt;} #endif idcpu[ip] = amrex::ParticleIdCpus::Invalid; @@ -964,22 +964,9 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int amrex::LayoutData* cost = WarpX::getCosts(lev); - const int nlevs = numLevels(); - static bool refine_injection = false; - static Box fine_injection_box; - static amrex::IntVect rrfac(AMREX_D_DECL(1,1,1)); - // This does not work if the mesh is dynamic. But in that case, we should - // not use refined injected either. We also assume there is only one fine level. 
- if (WarpX::moving_window_active(WarpX::GetInstance().getistep(0)+1) and WarpX::refine_plasma - and do_continuous_injection and nlevs == 2) - { - refine_injection = true; - fine_injection_box = ParticleBoxArray(1).minimalBox(); - fine_injection_box.setSmall(WarpX::moving_window_dir, std::numeric_limits::lowest()/2); - fine_injection_box.setBig(WarpX::moving_window_dir, std::numeric_limits::max()/2); - rrfac = m_gdb->refRatio(0); - fine_injection_box.coarsen(rrfac); - } + Box fine_injection_box; + amrex::IntVect rrfac(AMREX_D_DECL(1,1,1)); + const bool refine_injection = findRefinedInjectionBox(fine_injection_box, rrfac); InjectorPosition* inj_pos = plasma_injector.getInjectorPosition(); InjectorDensity* inj_rho = plasma_injector.getInjectorDensity(); @@ -995,18 +982,12 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int const bool radially_weighted = plasma_injector.radially_weighted; #endif - - // User-defined integer and real attributes: prepare parsers - const auto n_user_int_attribs = static_cast(m_user_int_attribs.size()); - const auto n_user_real_attribs = static_cast(m_user_real_attribs.size()); - amrex::Gpu::PinnedVector< amrex::ParserExecutor<7> > user_int_attrib_parserexec_pinned(n_user_int_attribs); - amrex::Gpu::PinnedVector< amrex::ParserExecutor<7> > user_real_attrib_parserexec_pinned(n_user_real_attribs); - for (int ia = 0; ia < n_user_int_attribs; ++ia) { - user_int_attrib_parserexec_pinned[ia] = m_user_int_attrib_parser[ia]->compile<7>(); - } - for (int ia = 0; ia < n_user_real_attribs; ++ia) { - user_real_attrib_parserexec_pinned[ia] = m_user_real_attrib_parser[ia]->compile<7>(); - } + auto n_user_int_attribs = static_cast(m_user_int_attribs.size()); + auto n_user_real_attribs = static_cast(m_user_real_attribs.size()); + const PlasmaParserWrapper plasma_parser_wrapper (m_user_int_attribs.size(), + m_user_real_attribs.size(), + m_user_int_attrib_parser, + m_user_real_attrib_parser); MFItInfo info; if (do_tiling && Gpu::notInLaunchRegion()) { @@ -1032,30 +1013,7 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int RealBox overlap_realbox; Box overlap_box; IntVect shifted; - bool no_overlap = false; - - for (int dir=0; dir= part_realbox.lo(dir) ) { - const Real ncells_adjust = std::floor( (part_realbox.hi(dir) - tile_realbox.hi(dir))/dx[dir] ); - overlap_realbox.setHi( dir, part_realbox.hi(dir) - std::max(ncells_adjust, 0._rt) * dx[dir]); - } else { - no_overlap = true; break; - } - // Count the number of cells in this direction in overlap_realbox - overlap_box.setSmall( dir, 0 ); - overlap_box.setBig( dir, - int( std::round((overlap_realbox.hi(dir)-overlap_realbox.lo(dir)) - /dx[dir] )) - 1); - shifted[dir] = - static_cast(std::round((overlap_realbox.lo(dir)-problo[dir])/dx[dir])); - // shifted is exact in non-moving-window direction. That's all we care. - } + const bool no_overlap = find_overlap(tile_realbox, part_realbox, dx, problo, overlap_realbox, overlap_box, shifted); if (no_overlap) { continue; // Go to the next tile } @@ -1110,19 +1068,9 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int } return 0; }; - const int flag_pcount = checker(); - if (flag_pcount == 1) { - pcounts[index] = num_ppc*r; - } else { - pcounts[index] = 0; - } + pcounts[index] = checker() ? num_ppc*r : 0; } -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ignore_unused(k); -#endif -#if defined(WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif }); // Max number of new particles. 
All of them are created, @@ -1160,40 +1108,12 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int pa[ia] = soa.GetRealData(ia).data() + old_size; } uint64_t * AMREX_RESTRICT pa_idcpu = soa.GetIdCPUData().data() + old_size; - // user-defined integer and real attributes - amrex::Gpu::PinnedVector pa_user_int_pinned(n_user_int_attribs); - amrex::Gpu::PinnedVector pa_user_real_pinned(n_user_real_attribs); - for (int ia = 0; ia < n_user_int_attribs; ++ia) { - pa_user_int_pinned[ia] = soa.GetIntData(particle_icomps[m_user_int_attribs[ia]]).data() + old_size; - } - for (int ia = 0; ia < n_user_real_attribs; ++ia) { - pa_user_real_pinned[ia] = soa.GetRealData(particle_comps[m_user_real_attribs[ia]]).data() + old_size; - } -#ifdef AMREX_USE_GPU - // To avoid using managed memory, we first define pinned memory vector, initialize on cpu, - // and them memcpy to device from host - amrex::Gpu::DeviceVector d_pa_user_int(n_user_int_attribs); - amrex::Gpu::DeviceVector d_pa_user_real(n_user_real_attribs); - amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > d_user_int_attrib_parserexec(n_user_int_attribs); - amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > d_user_real_attrib_parserexec(n_user_real_attribs); - amrex::Gpu::copyAsync(Gpu::hostToDevice, pa_user_int_pinned.begin(), - pa_user_int_pinned.end(), d_pa_user_int.begin()); - amrex::Gpu::copyAsync(Gpu::hostToDevice, pa_user_real_pinned.begin(), - pa_user_real_pinned.end(), d_pa_user_real.begin()); - amrex::Gpu::copyAsync(Gpu::hostToDevice, user_int_attrib_parserexec_pinned.begin(), - user_int_attrib_parserexec_pinned.end(), d_user_int_attrib_parserexec.begin()); - amrex::Gpu::copyAsync(Gpu::hostToDevice, user_real_attrib_parserexec_pinned.begin(), - user_real_attrib_parserexec_pinned.end(), d_user_real_attrib_parserexec.begin()); - int** pa_user_int_data = d_pa_user_int.dataPtr(); - ParticleReal** pa_user_real_data = d_pa_user_real.dataPtr(); - amrex::ParserExecutor<7> const* user_int_parserexec_data = d_user_int_attrib_parserexec.dataPtr(); - amrex::ParserExecutor<7> const* user_real_parserexec_data = d_user_real_attrib_parserexec.dataPtr(); -#else - int** pa_user_int_data = pa_user_int_pinned.dataPtr(); - ParticleReal** pa_user_real_data = pa_user_real_pinned.dataPtr(); - amrex::ParserExecutor<7> const* user_int_parserexec_data = user_int_attrib_parserexec_pinned.dataPtr(); - amrex::ParserExecutor<7> const* user_real_parserexec_data = user_real_attrib_parserexec_pinned.dataPtr(); -#endif + + PlasmaParserHelper plasma_parser_helper (soa, old_size, m_user_int_attribs, m_user_real_attribs, particle_icomps, particle_comps, plasma_parser_wrapper); + int** pa_user_int_data = plasma_parser_helper.getUserIntDataPtrs(); + ParticleReal** pa_user_real_data = plasma_parser_helper.getUserRealDataPtrs(); + amrex::ParserExecutor<7> const* user_int_parserexec_data = plasma_parser_helper.getUserIntParserExecData(); + amrex::ParserExecutor<7> const* user_real_parserexec_data = plasma_parser_helper.getUserRealParserExecData(); int* pi = nullptr; if (do_field_ionization) { @@ -1201,34 +1121,9 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int } #ifdef WARPX_QED - //Pointer to the optical depth component - amrex::ParticleReal* p_optical_depth_QSR = nullptr; - amrex::ParticleReal* p_optical_depth_BW = nullptr; - - // If a QED effect is enabled, the corresponding optical depth - // has to be initialized - const bool loc_has_quantum_sync = has_quantum_sync(); - const bool loc_has_breit_wheeler = 
has_breit_wheeler(); - if (loc_has_quantum_sync) { - p_optical_depth_QSR = soa.GetRealData( - particle_comps["opticalDepthQSR"]).data() + old_size; - } - if(loc_has_breit_wheeler) { - p_optical_depth_BW = soa.GetRealData( - particle_comps["opticalDepthBW"]).data() + old_size; - } - - //If needed, get the appropriate functors from the engines - QuantumSynchrotronGetOpticalDepth quantum_sync_get_opt; - BreitWheelerGetOpticalDepth breit_wheeler_get_opt; - if(loc_has_quantum_sync){ - quantum_sync_get_opt = - m_shr_p_qs_engine->build_optical_depth_functor(); - } - if(loc_has_breit_wheeler){ - breit_wheeler_get_opt = - m_shr_p_bw_engine->build_optical_depth_functor(); - } + const QEDHelper qed_helper(soa, old_size, particle_comps, + has_quantum_sync(), has_breit_wheeler(), + m_shr_p_qs_engine, m_shr_p_bw_engine); #endif const bool loc_do_field_ionization = do_field_ionization; @@ -1246,18 +1141,14 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int [=] AMREX_GPU_DEVICE (int i, int j, int k, amrex::RandomEngine const& engine) noexcept { const IntVect iv = IntVect(AMREX_D_DECL(i, j, k)); + amrex::ignore_unused(j,k); const auto index = overlap_box.index(iv); #ifdef WARPX_DIM_RZ Real theta_offset = 0._rt; if (rz_random_theta) { theta_offset = amrex::Random(engine) * 2._rt * MathConst::pi; } #endif - Real scale_fac = 0.0_rt; - if( pcounts[index] != 0) { - amrex::Real const dV = AMREX_D_TERM(dx[0], *dx[1], *dx[2]); - scale_fac = dV/pcounts[index]; - } - + const Real scale_fac = compute_scale_fac_volume(dx, pcounts[index]); for (int i_part = 0; i_part < pcounts[index]; ++i_part) { long ip = poffset[index] + i_part; @@ -1281,8 +1172,7 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int if (!box_contains) { ZeroInitializeAndSetNegativeID(pa_idcpu, pa, ip, loc_do_field_ionization, pi #ifdef WARPX_QED - ,loc_has_quantum_sync, p_optical_depth_QSR - ,loc_has_breit_wheeler, p_optical_depth_BW + ,qed_helper #endif ); continue; @@ -1319,8 +1209,7 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int if (!inj_pos->insideBounds(xb, yb, z0)) { ZeroInitializeAndSetNegativeID(pa_idcpu, pa, ip, loc_do_field_ionization, pi #ifdef WARPX_QED - ,loc_has_quantum_sync, p_optical_depth_QSR - ,loc_has_breit_wheeler, p_optical_depth_BW + ,qed_helper #endif ); continue; @@ -1333,8 +1222,7 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int if ( dens < density_min ){ ZeroInitializeAndSetNegativeID(pa_idcpu, pa, ip, loc_do_field_ionization, pi #ifdef WARPX_QED - ,loc_has_quantum_sync, p_optical_depth_QSR - ,loc_has_breit_wheeler, p_optical_depth_BW + ,qed_helper #endif ); continue; @@ -1351,8 +1239,7 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int if (!inj_pos->insideBounds(xb, yb, z0_lab)) { ZeroInitializeAndSetNegativeID(pa_idcpu, pa, ip, loc_do_field_ionization, pi #ifdef WARPX_QED - ,loc_has_quantum_sync, p_optical_depth_QSR - ,loc_has_breit_wheeler, p_optical_depth_BW + ,qed_helper #endif ); continue; @@ -1363,8 +1250,7 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int if ( dens < density_min ){ ZeroInitializeAndSetNegativeID(pa_idcpu, pa, ip, loc_do_field_ionization, pi #ifdef WARPX_QED - ,loc_has_quantum_sync, p_optical_depth_QSR - ,loc_has_breit_wheeler, p_optical_depth_BW + ,qed_helper #endif ); continue; @@ -1388,12 +1274,12 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int } #ifdef WARPX_QED 
- if(loc_has_quantum_sync){ - p_optical_depth_QSR[ip] = quantum_sync_get_opt(engine); + if(qed_helper.has_quantum_sync){ + qed_helper.p_optical_depth_QSR[ip] = qed_helper.quantum_sync_get_opt(engine); } - if(loc_has_breit_wheeler){ - p_optical_depth_BW[ip] = breit_wheeler_get_opt(engine); + if(qed_helper.has_breit_wheeler){ + qed_helper.p_optical_depth_BW[ip] = qed_helper.breit_wheeler_get_opt(engine); } #endif // Initialize user-defined integers with user-defined parser @@ -1481,25 +1367,6 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, const auto dx = geom.CellSizeArray(); const auto problo = geom.ProbLoArray(); - Real scale_fac = 0._rt; - // Scale particle weight by the area of the emitting surface, within one cell -#if defined(WARPX_DIM_3D) - scale_fac = dx[0]*dx[1]*dx[2]/dx[plasma_injector.flux_normal_axis]/num_ppc_real; -#elif defined(WARPX_DIM_RZ) || defined(WARPX_DIM_XZ) - scale_fac = dx[0]*dx[1]/num_ppc_real; - // When emission is in the r direction, the emitting surface is a cylinder. - // The factor 2*pi*r is added later below. - if (plasma_injector.flux_normal_axis == 0) { scale_fac /= dx[0]; } - // When emission is in the z direction, the emitting surface is an annulus - // The factor 2*pi*r is added later below. - if (plasma_injector.flux_normal_axis == 2) { scale_fac /= dx[1]; } - // When emission is in the theta direction (flux_normal_axis == 1), - // the emitting surface is a rectangle, within the plane of the simulation -#elif defined(WARPX_DIM_1D_Z) - scale_fac = dx[0]/num_ppc_real; - if (plasma_injector.flux_normal_axis == 2) { scale_fac /= dx[0]; } -#endif - amrex::LayoutData* cost = WarpX::getCosts(0); // Create temporary particle container to which particles will be added; @@ -1510,19 +1377,9 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, for (int ic = 0; ic < NumRuntimeIntComps(); ++ic) { tmp_pc.AddIntComp(false); } tmp_pc.defineAllParticleTiles(); - const int nlevs = numLevels(); - static bool refine_injection = false; - static Box fine_injection_box; - static amrex::IntVect rrfac(AMREX_D_DECL(1,1,1)); - // This does not work if the mesh is dynamic. But in that case, we should - // not use refined injected either. We also assume there is only one fine level. 
- if (WarpX::refine_plasma && nlevs == 2) - { - refine_injection = true; - fine_injection_box = ParticleBoxArray(1).minimalBox(); - rrfac = m_gdb->refRatio(0); - fine_injection_box.coarsen(rrfac); - } + Box fine_injection_box; + amrex::IntVect rrfac(AMREX_D_DECL(1,1,1)); + const bool refine_injection = findRefinedInjectionBox(fine_injection_box, rrfac); InjectorPosition* flux_pos = plasma_injector.getInjectorFluxPosition(); InjectorFlux* inj_flux = plasma_injector.getInjectorFlux(); @@ -1536,6 +1393,13 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, const bool radially_weighted = plasma_injector.radially_weighted; #endif + auto n_user_int_attribs = static_cast(m_user_int_attribs.size()); + auto n_user_real_attribs = static_cast(m_user_real_attribs.size()); + const PlasmaParserWrapper plasma_parser_wrapper (m_user_int_attribs.size(), + m_user_real_attribs.size(), + m_user_int_attrib_parser, + m_user_real_attrib_parser); + MFItInfo info; if (do_tiling && Gpu::notInLaunchRegion()) { info.EnableTiling(tile_size); @@ -1560,61 +1424,7 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, RealBox overlap_realbox; Box overlap_box; IntVect shifted; - bool no_overlap = false; - - for (int dir=0; dir 0) { - if (plasma_injector.surface_flux_pos < tile_realbox.lo(dir) || - plasma_injector.surface_flux_pos >= tile_realbox.hi(dir)) { - no_overlap = true; - break; - } - } else { - if (plasma_injector.surface_flux_pos <= tile_realbox.lo(dir) || - plasma_injector.surface_flux_pos > tile_realbox.hi(dir)) { - no_overlap = true; - break; - } - } - overlap_realbox.setLo( dir, plasma_injector.surface_flux_pos ); - overlap_realbox.setHi( dir, plasma_injector.surface_flux_pos ); - overlap_box.setSmall( dir, 0 ); - overlap_box.setBig( dir, 0 ); - shifted[dir] = - static_cast(std::round((overlap_realbox.lo(dir)-problo[dir])/dx[dir])); - } else { - if ( tile_realbox.lo(dir) <= part_realbox.hi(dir) ) { - const Real ncells_adjust = std::floor( (tile_realbox.lo(dir) - part_realbox.lo(dir))/dx[dir] ); - overlap_realbox.setLo( dir, part_realbox.lo(dir) + std::max(ncells_adjust, 0._rt) * dx[dir]); - } else { - no_overlap = true; break; - } - if ( tile_realbox.hi(dir) >= part_realbox.lo(dir) ) { - const Real ncells_adjust = std::floor( (part_realbox.hi(dir) - tile_realbox.hi(dir))/dx[dir] ); - overlap_realbox.setHi( dir, part_realbox.hi(dir) - std::max(ncells_adjust, 0._rt) * dx[dir]); - } else { - no_overlap = true; break; - } - // Count the number of cells in this direction in overlap_realbox - overlap_box.setSmall( dir, 0 ); - overlap_box.setBig( dir, - int( std::round((overlap_realbox.hi(dir)-overlap_realbox.lo(dir)) - /dx[dir] )) - 1); - shifted[dir] = - static_cast(std::round((overlap_realbox.lo(dir)-problo[dir])/dx[dir])); - // shifted is exact in non-moving-window direction. That's all we care. - } - } + const bool no_overlap = find_overlap_flux(tile_realbox, part_realbox, dx, problo, plasma_injector, overlap_realbox, overlap_box, shifted); if (no_overlap) { continue; // Go to the next tile } @@ -1632,6 +1442,7 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, Gpu::DeviceVector offset(overlap_box.numPts()); auto *pcounts = counts.data(); const amrex::IntVect lrrfac = rrfac; + const int flux_normal_axis = plasma_injector.flux_normal_axis; Box fine_overlap_box; // default Box is NOT ok(). 
if (refine_injection) { fine_overlap_box = overlap_box & amrex::shift(fine_injection_box, -shifted); @@ -1649,17 +1460,13 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, auto index = overlap_box.index(iv); int r; if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { - r = AMREX_D_TERM(lrrfac[0],*lrrfac[1],*lrrfac[2]); + r = compute_area_weights(lrrfac, flux_normal_axis); } else { r = 1; } pcounts[index] = num_ppc_int*r; } -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ignore_unused(k); -#elif defined(WARPX_DIM_1D_Z) amrex::ignore_unused(j,k); -#endif }); // Max number of new particles. All of them are created, @@ -1694,46 +1501,11 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, } uint64_t * AMREX_RESTRICT pa_idcpu = soa.GetIdCPUData().data() + old_size; - // user-defined integer and real attributes - const auto n_user_int_attribs = static_cast(m_user_int_attribs.size()); - const auto n_user_real_attribs = static_cast(m_user_real_attribs.size()); - amrex::Gpu::PinnedVector pa_user_int_pinned(n_user_int_attribs); - amrex::Gpu::PinnedVector pa_user_real_pinned(n_user_real_attribs); - amrex::Gpu::PinnedVector< amrex::ParserExecutor<7> > user_int_attrib_parserexec_pinned(n_user_int_attribs); - amrex::Gpu::PinnedVector< amrex::ParserExecutor<7> > user_real_attrib_parserexec_pinned(n_user_real_attribs); - for (int ia = 0; ia < n_user_int_attribs; ++ia) { - pa_user_int_pinned[ia] = soa.GetIntData(particle_icomps[m_user_int_attribs[ia]]).data() + old_size; - user_int_attrib_parserexec_pinned[ia] = m_user_int_attrib_parser[ia]->compile<7>(); - } - for (int ia = 0; ia < n_user_real_attribs; ++ia) { - pa_user_real_pinned[ia] = soa.GetRealData(particle_comps[m_user_real_attribs[ia]]).data() + old_size; - user_real_attrib_parserexec_pinned[ia] = m_user_real_attrib_parser[ia]->compile<7>(); - } -#ifdef AMREX_USE_GPU - // To avoid using managed memory, we first define pinned memory vector, initialize on cpu, - // and them memcpy to device from host - amrex::Gpu::DeviceVector d_pa_user_int(n_user_int_attribs); - amrex::Gpu::DeviceVector d_pa_user_real(n_user_real_attribs); - amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > d_user_int_attrib_parserexec(n_user_int_attribs); - amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > d_user_real_attrib_parserexec(n_user_real_attribs); - amrex::Gpu::copyAsync(Gpu::hostToDevice, pa_user_int_pinned.begin(), - pa_user_int_pinned.end(), d_pa_user_int.begin()); - amrex::Gpu::copyAsync(Gpu::hostToDevice, pa_user_real_pinned.begin(), - pa_user_real_pinned.end(), d_pa_user_real.begin()); - amrex::Gpu::copyAsync(Gpu::hostToDevice, user_int_attrib_parserexec_pinned.begin(), - user_int_attrib_parserexec_pinned.end(), d_user_int_attrib_parserexec.begin()); - amrex::Gpu::copyAsync(Gpu::hostToDevice, user_real_attrib_parserexec_pinned.begin(), - user_real_attrib_parserexec_pinned.end(), d_user_real_attrib_parserexec.begin()); - int** pa_user_int_data = d_pa_user_int.dataPtr(); - ParticleReal** pa_user_real_data = d_pa_user_real.dataPtr(); - amrex::ParserExecutor<7> const* user_int_parserexec_data = d_user_int_attrib_parserexec.dataPtr(); - amrex::ParserExecutor<7> const* user_real_parserexec_data = d_user_real_attrib_parserexec.dataPtr(); -#else - int** pa_user_int_data = pa_user_int_pinned.dataPtr(); - ParticleReal** pa_user_real_data = pa_user_real_pinned.dataPtr(); - amrex::ParserExecutor<7> const* user_int_parserexec_data = user_int_attrib_parserexec_pinned.dataPtr(); - 
amrex::ParserExecutor<7> const* user_real_parserexec_data = user_real_attrib_parserexec_pinned.dataPtr(); -#endif + PlasmaParserHelper plasma_parser_helper (soa, old_size, m_user_int_attribs, m_user_real_attribs, particle_icomps, particle_comps, plasma_parser_wrapper); + int** pa_user_int_data = plasma_parser_helper.getUserIntDataPtrs(); + ParticleReal** pa_user_real_data = plasma_parser_helper.getUserRealDataPtrs(); + amrex::ParserExecutor<7> const* user_int_parserexec_data = plasma_parser_helper.getUserIntParserExecData(); + amrex::ParserExecutor<7> const* user_real_parserexec_data = plasma_parser_helper.getUserRealParserExecData(); int* p_ion_level = nullptr; if (do_field_ionization) { @@ -1741,34 +1513,9 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, } #ifdef WARPX_QED - //Pointer to the optical depth component - amrex::ParticleReal* p_optical_depth_QSR = nullptr; - amrex::ParticleReal* p_optical_depth_BW = nullptr; - - // If a QED effect is enabled, the corresponding optical depth - // has to be initialized - const bool loc_has_quantum_sync = has_quantum_sync(); - const bool loc_has_breit_wheeler = has_breit_wheeler(); - if (loc_has_quantum_sync) { - p_optical_depth_QSR = soa.GetRealData( - particle_comps["opticalDepthQSR"]).data() + old_size; - } - if(loc_has_breit_wheeler) { - p_optical_depth_BW = soa.GetRealData( - particle_comps["opticalDepthBW"]).data() + old_size; - } - - //If needed, get the appropriate functors from the engines - QuantumSynchrotronGetOpticalDepth quantum_sync_get_opt; - BreitWheelerGetOpticalDepth breit_wheeler_get_opt; - if(loc_has_quantum_sync){ - quantum_sync_get_opt = - m_shr_p_qs_engine->build_optical_depth_functor(); - } - if(loc_has_breit_wheeler){ - breit_wheeler_get_opt = - m_shr_p_bw_engine->build_optical_depth_functor(); - } + const QEDHelper qed_helper(soa, old_size, particle_comps, + has_quantum_sync(), has_breit_wheeler(), + m_shr_p_qs_engine, m_shr_p_bw_engine); #endif const bool loc_do_field_ionization = do_field_ionization; @@ -1787,6 +1534,24 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, { const IntVect iv = IntVect(AMREX_D_DECL(i, j, k)); const auto index = overlap_box.index(iv); + + Real scale_fac = compute_scale_fac_area(dx, num_ppc_real, flux_normal_axis); + + auto lo = getCellCoords(overlap_corner, dx, {0._rt, 0._rt, 0._rt}, iv); + auto hi = getCellCoords(overlap_corner, dx, {1._rt, 1._rt, 1._rt}, iv); + + if (flux_pos->overlapsWith(lo, hi)) + { + int r; + if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { + r = compute_area_weights(lrrfac, flux_normal_axis); + } else { + r = 1; + } + scale_fac /= r; + } + amrex::ignore_unused(j,k); + for (int i_part = 0; i_part < pcounts[index]; ++i_part) { const long ip = poffset[index] + i_part; @@ -1878,14 +1643,15 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, } #ifdef WARPX_QED - if (loc_has_quantum_sync) { - p_optical_depth_QSR[ip] = quantum_sync_get_opt(engine); + if(qed_helper.has_quantum_sync){ + qed_helper.p_optical_depth_QSR[ip] = qed_helper.quantum_sync_get_opt(engine); } - if(loc_has_breit_wheeler){ - p_optical_depth_BW[ip] = breit_wheeler_get_opt(engine); + if(qed_helper.has_breit_wheeler){ + qed_helper.p_optical_depth_BW[ip] = qed_helper.breit_wheeler_get_opt(engine); } #endif + // Initialize user-defined integers with user-defined parser for (int ia = 0; ia < n_user_int_attribs; ++ia) { pa_user_int_data[ia][ip] = static_cast(user_int_parserexec_data[ia](pos.x, pos.y, 
pos.z, u.x, u.y, u.z, t)); @@ -1967,26 +1733,8 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, // are in the right tile.) tmp_pc.Redistribute(); - // Add the particles to the current container, tile by tile - for (int lev=0; levaddParticles(tmp_pc, true); } void @@ -3412,6 +3160,28 @@ void PhysicalParticleContainer::resample (const int timestep, const bool verbose WARPX_PROFILE_VAR_STOP(blp_resample_actual); } +bool +PhysicalParticleContainer::findRefinedInjectionBox (amrex::Box& a_fine_injection_box, amrex::IntVect& a_rrfac) +{ + WARPX_PROFILE("PhysicalParticleContainer::findRefinedInjectionBox"); + + // This does not work if the mesh is dynamic. But in that case, we should + // not use refined injected either. We also assume there is only one fine level. + static bool refine_injection = false; + static Box fine_injection_box; + static amrex::IntVect rrfac(AMREX_D_DECL(1,1,1)); + if (!refine_injection and WarpX::moving_window_active(WarpX::GetInstance().getistep(0)+1) and WarpX::refine_plasma and do_continuous_injection and numLevels() == 2) { + refine_injection = true; + fine_injection_box = ParticleBoxArray(1).minimalBox(); + fine_injection_box.setSmall(WarpX::moving_window_dir, std::numeric_limits::lowest()/2); + fine_injection_box.setBig(WarpX::moving_window_dir, std::numeric_limits::max()/2); + rrfac = m_gdb->refRatio(0); + fine_injection_box.coarsen(rrfac); + } + a_fine_injection_box = fine_injection_box; + a_rrfac = rrfac; + return refine_injection; +} #ifdef WARPX_QED From b474e861d4177e013033ecf777c28a1e94ab643c Mon Sep 17 00:00:00 2001 From: David Grote Date: Fri, 13 Sep 2024 13:15:28 -0700 Subject: [PATCH 43/91] [Hackathon] Update Source/Parallelization/WarpXComm_K.H (#5246) * Updage Source/Parallelization/WarpXComm_K.H * Add comment "Unused dimensions are considered nodal" --- Source/Parallelization/WarpXComm_K.H | 210 ++++++--------------------- 1 file changed, 43 insertions(+), 167 deletions(-) diff --git a/Source/Parallelization/WarpXComm_K.H b/Source/Parallelization/WarpXComm_K.H index c3362087ad9..79f2b34fba0 100644 --- a/Source/Parallelization/WarpXComm_K.H +++ b/Source/Parallelization/WarpXComm_K.H @@ -44,18 +44,19 @@ void warpx_interp (int j, int k, int l, // Refinement ratio const int rj = rr[0]; - const int rk = (AMREX_SPACEDIM == 1) ? 1 : rr[1]; - const int rl = (AMREX_SPACEDIM <= 2) ? 1 : rr[2]; + const int rk = (AMREX_SPACEDIM > 1) ? rr[1] : 1; + const int rl = (AMREX_SPACEDIM > 2) ? rr[2] : 1; // Staggering (0: cell-centered; 1: nodal) + // Unused dimensions are considered nodal. const int sj = arr_stag[0]; - const int sk = (AMREX_SPACEDIM == 1) ? 0 : arr_stag[1]; - const int sl = (AMREX_SPACEDIM <= 2) ? 0 : arr_stag[2]; + const int sk = (AMREX_SPACEDIM > 1) ? arr_stag[1] : 1; + const int sl = (AMREX_SPACEDIM > 2) ? arr_stag[2] : 1; // Number of points used for interpolation from coarse grid to fine grid const int nj = 2; - const int nk = 2; - const int nl = 2; + const int nk = (AMREX_SPACEDIM > 1) ? 2 : 1; + const int nl = (AMREX_SPACEDIM > 2) ? 2 : 1; const int jc = (sj == 0) ? amrex::coarsen(j - rj/2, rj) : amrex::coarsen(j, rj); const int kc = (sk == 0) ? 
amrex::coarsen(k - rk/2, rk) : amrex::coarsen(k, rk); @@ -133,38 +134,20 @@ void warpx_interp (int j, int k, int l, // Refinement ratio const int rj = rr[0]; -#if defined(WARPX_DIM_1D_Z) - constexpr int rk = 1; - constexpr int rl = 1; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const int rk = rr[1]; - constexpr int rl = 1; -#else - const int rk = rr[1]; - const int rl = rr[2]; -#endif + const int rk = (AMREX_SPACEDIM > 1) ? rr[1] : 1; + const int rl = (AMREX_SPACEDIM > 2) ? rr[2] : 1; // Staggering of fine array (0: cell-centered; 1: nodal) + // Unused dimensions are considered nodal. const int sj_fp = arr_fine_stag[0]; -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const int sk_fp = arr_fine_stag[1]; -#elif defined(WARPX_DIM_3D) - const int sk_fp = arr_fine_stag[1]; - const int sl_fp = arr_fine_stag[2]; -#endif + const int sk_fp = (AMREX_SPACEDIM > 1) ? arr_fine_stag[1] : 1; + const int sl_fp = (AMREX_SPACEDIM > 2) ? arr_fine_stag[2] : 1; // Staggering of coarse array (0: cell-centered; 1: nodal) + // Unused dimensions are considered nodal. const int sj_cp = arr_coarse_stag[0]; -#if defined(WARPX_DIM_1D_Z) - constexpr int sk_cp = 0; - constexpr int sl_cp = 0; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const int sk_cp = arr_coarse_stag[1]; - constexpr int sl_cp = 0; -#else - const int sk_cp = arr_coarse_stag[1]; - const int sl_cp = arr_coarse_stag[2]; -#endif + const int sk_cp = (AMREX_SPACEDIM > 1) ? arr_coarse_stag[1] : 1; + const int sl_cp = (AMREX_SPACEDIM > 2) ? arr_coarse_stag[2] : 1; // Number of points used for interpolation from coarse grid to fine grid int nj; @@ -182,27 +165,19 @@ void warpx_interp (int j, int k, int l, // 1) Interpolation from coarse nodal to fine nodal nj = 2; -#if defined(WARPX_DIM_1D_Z) - nk = 1; - nl = 1; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - nk = 2; - nl = 1; -#else - nk = 2; - nl = 2; -#endif + nk = (AMREX_SPACEDIM > 1) ? 2 : 1; + nl = (AMREX_SPACEDIM > 2) ? 2 : 1; for (int jj = 0; jj < nj; jj++) { for (int kk = 0; kk < nk; kk++) { for (int ll = 0; ll < nl; ll++) { auto c = arr_tmp_zeropad(jc+jj,kc+kk,lc+ll); c *= (rj - amrex::Math::abs(j - (jc + jj) * rj)) / static_cast(rj); -#if (AMREX_SPACEDIM >= 2) +#if (AMREX_SPACEDIM > 1) c *= (rk - amrex::Math::abs(k - (kc + kk) * rk)) / static_cast(rk); -#endif -#if (AMREX_SPACEDIM == 3) +#if (AMREX_SPACEDIM > 2) c *= (rl - amrex::Math::abs(l - (lc + ll) * rl)) / static_cast(rl); +#endif #endif tmp += c; } @@ -212,16 +187,8 @@ void warpx_interp (int j, int k, int l, // 2) Interpolation from coarse staggered to fine nodal nj = 2; -#if defined(WARPX_DIM_1D_Z) - nk = 1; - nl = 1; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - nk = 2; - nl = 1; -#else - nk = 2; - nl = 2; -#endif + nk = (AMREX_SPACEDIM > 1) ? 2 : 1; + nl = (AMREX_SPACEDIM > 2) ? 2 : 1; const int jn = (sj_cp == 1) ? j : j - rj / 2; const int kn = (sk_cp == 1) ? k : k - rk / 2; @@ -236,11 +203,11 @@ void warpx_interp (int j, int k, int l, for (int ll = 0; ll < nl; ll++) { auto c = arr_coarse_zeropad(jc+jj,kc+kk,lc+ll); c *= (rj - amrex::Math::abs(jn - (jc + jj) * rj)) / static_cast(rj); -#if (AMREX_SPACEDIM >= 2) +#if (AMREX_SPACEDIM > 1) c *= (rk - amrex::Math::abs(kn - (kc + kk) * rk)) / static_cast(rk); -#endif -#if (AMREX_SPACEDIM == 3) +#if (AMREX_SPACEDIM > 2) c *= (rl - amrex::Math::abs(ln - (lc + ll) * rl)) / static_cast(rl); +#endif #endif coarse += c; } @@ -250,28 +217,12 @@ void warpx_interp (int j, int k, int l, // 3) Interpolation from fine staggered to fine nodal nj = (sj_fp == 0) ? 
2 : 1; -#if defined(WARPX_DIM_1D_Z) - nk = 1; - nl = 1; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - nk = (sk_fp == 0) ? 2 : 1; - nl = 1; -#else nk = (sk_fp == 0) ? 2 : 1; nl = (sl_fp == 0) ? 2 : 1; -#endif const int jm = (sj_fp == 0) ? j-1 : j; -#if defined(WARPX_DIM_1D_Z) - const int km = k; - const int lm = l; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const int km = (sk_fp == 0) ? k-1 : k; - const int lm = l; -#else const int km = (sk_fp == 0) ? k-1 : k; const int lm = (sl_fp == 0) ? l-1 : l; -#endif for (int jj = 0; jj < nj; jj++) { for (int kk = 0; kk < nk; kk++) { @@ -285,6 +236,7 @@ void warpx_interp (int j, int k, int l, // Final result arr_aux(j,k,l) = tmp + (fine - coarse); } + /** * \brief Interpolation function called within WarpX::UpdateAuxilaryDataStagToNodal * to interpolate data from the coarse and fine grids to the fine aux grid, @@ -320,13 +272,10 @@ void warpx_interp (int j, int k, int l, // - (x,y,z) in 3D // Staggering of fine array (0: cell-centered; 1: nodal) + // Unused dimensions are considered nodal. const int sj_fp = arr_fine_stag[0]; -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const int sk_fp = arr_fine_stag[1]; -#elif defined(WARPX_DIM_3D) - const int sk_fp = arr_fine_stag[1]; - const int sl_fp = arr_fine_stag[2]; -#endif + const int sk_fp = (AMREX_SPACEDIM > 1) ? arr_fine_stag[1] : 1; + const int sl_fp = (AMREX_SPACEDIM > 2) ? arr_fine_stag[2] : 1; // Number of points used for interpolation from coarse grid to fine grid int nj; @@ -338,28 +287,12 @@ void warpx_interp (int j, int k, int l, // 3) Interpolation from fine staggered to fine nodal nj = (sj_fp == 0) ? 2 : 1; -#if defined(WARPX_DIM_1D_Z) - nk = 1; - nl = 1; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - nk = (sk_fp == 0) ? 2 : 1; - nl = 1; -#else nk = (sk_fp == 0) ? 2 : 1; nl = (sl_fp == 0) ? 2 : 1; -#endif - const int jm = (sj_fp == 0) ? j-1 : j; -#if defined(WARPX_DIM_1D_Z) - const int km = k; - const int lm = l; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const int km = (sk_fp == 0) ? k-1 : k; - const int lm = l; -#else - const int km = (sk_fp == 0) ? k-1 : k; - const int lm = (sl_fp == 0) ? l-1 : l; -#endif + int const jm = (sj_fp == 0) ? j-1 : j; + int const km = (sk_fp == 0) ? k-1 : k; + int const lm = (sl_fp == 0) ? l-1 : l; for (int jj = 0; jj < nj; jj++) { for (int kk = 0; kk < nk; kk++) { @@ -418,11 +351,7 @@ void warpx_interp (const int j, }; // Avoid compiler warnings -#if defined(WARPX_DIM_1D_Z) amrex::ignore_unused(nox, noy, stencil_coeffs_x, stencil_coeffs_y); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ignore_unused(noy, stencil_coeffs_y); -#endif // If dst_nodal = true , we are centering from a staggered grid to a nodal grid // If dst_nodal = false, we are centering from a nodal grid to a staggered grid @@ -432,70 +361,32 @@ void warpx_interp (const int j, const int shift = (dst_nodal) ? 0 : 1; // Staggering (s = 0 if cell-centered, s = 1 if nodal) + // Unused dimensions are considered nodal. const int sj = (dst_nodal) ? src_stag[0] : dst_stag[0]; -#if (AMREX_SPACEDIM >= 2) - const int sk = (dst_nodal) ? src_stag[1] : dst_stag[1]; -#endif -#if defined(WARPX_DIM_3D) - const int sl = (dst_nodal) ? src_stag[2] : dst_stag[2]; -#endif + const int sk = (AMREX_SPACEDIM > 1) ? ((dst_nodal) ? src_stag[1] : dst_stag[1]) : 1; + const int sl = (AMREX_SPACEDIM > 2) ? ((dst_nodal) ? 
src_stag[2] : dst_stag[2]) : 1; // Interpolate along j,k,l only if source MultiFab is staggered along j,k,l const bool interp_j = (sj == 0); -#if (AMREX_SPACEDIM >= 2) const bool interp_k = (sk == 0); -#endif -#if defined(WARPX_DIM_3D) const bool interp_l = (sl == 0); -#endif -#if defined(WARPX_DIM_1D_Z) - const int noj = noz; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const int noj = nox; - const int nok = noz; -#elif defined(WARPX_DIM_3D) - const int noj = nox; - const int nok = noy; - const int nol = noz; -#endif + const int noj = AMREX_D_PICK(noz, nox, nox); + const int nok = AMREX_D_PICK(0 , noz, noy); + const int nol = AMREX_D_PICK(0 , 0 , noz); // Additional normalization factor const amrex::Real wj = (interp_j) ? 0.5_rt : 1.0_rt; -#if defined(WARPX_DIM_1D_Z) - constexpr amrex::Real wk = 1.0_rt; - constexpr amrex::Real wl = 1.0_rt; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const amrex::Real wk = (interp_k) ? 0.5_rt : 1.0_rt; - constexpr amrex::Real wl = 1.0_rt; -#elif defined(WARPX_DIM_3D) const amrex::Real wk = (interp_k) ? 0.5_rt : 1.0_rt; const amrex::Real wl = (interp_l) ? 0.5_rt : 1.0_rt; -#endif - // Min and max for interpolation loop along j + // Min and max for interpolation loop const int jmin = (interp_j) ? j - noj/2 + shift : j; const int jmax = (interp_j) ? j + noj/2 + shift - 1 : j; - - // Min and max for interpolation loop along k -#if defined(WARPX_DIM_1D_Z) - // k = 0 always - const int kmin = k; - const int kmax = k; -#else const int kmin = (interp_k) ? k - nok/2 + shift : k; const int kmax = (interp_k) ? k + nok/2 + shift - 1 : k; -#endif - - // Min and max for interpolation loop along l -#if (AMREX_SPACEDIM <= 2) - // l = 0 always - const int lmin = l; - const int lmax = l; -#elif defined(WARPX_DIM_3D) const int lmin = (interp_l) ? l - nol/2 + shift : l; const int lmax = (interp_l) ? l + nol/2 + shift - 1 : l; -#endif // Number of interpolation points const int nj = jmax - jmin; @@ -543,31 +434,16 @@ void warpx_interp (const int j, amrex::Real res = 0.0_rt; -#if defined(WARPX_DIM_1D_Z) - amrex::Real const* scj = stencil_coeffs_z; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::Real const* scj = stencil_coeffs_x; - amrex::Real const* sck = stencil_coeffs_z; -#elif defined(WARPX_DIM_3D) - amrex::Real const* scj = stencil_coeffs_x; - amrex::Real const* sck = stencil_coeffs_y; - amrex::Real const* scl = stencil_coeffs_z; -#endif + amrex::Real const* scj = AMREX_D_PICK(stencil_coeffs_z, stencil_coeffs_x, stencil_coeffs_x); + amrex::Real const* sck = AMREX_D_PICK(nullptr , stencil_coeffs_z, stencil_coeffs_y); + amrex::Real const* scl = AMREX_D_PICK(nullptr , nullptr , stencil_coeffs_z); for (int ll = 0; ll <= nl; ll++) { -#if defined(WARPX_DIM_3D) - const amrex::Real cl = (interp_l)? scl[ll] : 1.0_rt; -#else - const amrex::Real cl = 1.0_rt; -#endif + const amrex::Real cl = (interp_l)? scl[ll] : 1.0_rt; for (int kk = 0; kk <= nk; kk++) { -#if (AMREX_SPACEDIM >= 2) const amrex::Real ck = (interp_k)? sck[kk] : 1.0_rt; -#else - const amrex::Real ck = 1.0_rt; -#endif for (int jj = 0; jj <= nj; jj++) { const amrex::Real cj = (interp_j)? scj[jj] : 1.0_rt; From 396cc5aded805c69faf064d04ed834ccaa5ad428 Mon Sep 17 00:00:00 2001 From: Marco Garten Date: Fri, 13 Sep 2024 13:30:48 -0700 Subject: [PATCH 44/91] Add ionization documentation (#5251) * Add initial ionization documentation For WIP pull request * Added ionization test file documentation * Add implementation details Document the equations very briefly. 
* added fiure and changed includes * added figure and changed includes * some LaTeX fixes * Align ADK factor calculation * Move test documentation to the test directory - move field ionization test docs the test directory - rename `ionization` test to `field_ionization` to be more precise * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Included time dilation in docs for ionization rate in lab frame --------- Co-authored-by: Johannes Van de Wetering Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/insitu.yml | 4 +- Docs/source/latex_theory/allbibs.bib | 42 ++++++++++++ .../source/theory/multiphysics/ionization.rst | 67 ++++++++++++++++++- Docs/source/usage/examples.rst | 5 ++ Docs/source/usage/examples/field_ionization | 1 + Examples/Tests/CMakeLists.txt | 2 +- .../CMakeLists.txt | 0 Examples/Tests/field_ionization/README.rst | 59 ++++++++++++++++ .../analysis.py | 0 .../catalyst_pipeline.py | 0 .../inputs_test_2d_ionization_boost | 0 .../inputs_test_2d_ionization_lab | 0 .../inputs_test_2d_ionization_picmi.py | 0 13 files changed, 176 insertions(+), 4 deletions(-) create mode 120000 Docs/source/usage/examples/field_ionization rename Examples/Tests/{ionization => field_ionization}/CMakeLists.txt (100%) create mode 100644 Examples/Tests/field_ionization/README.rst rename Examples/Tests/{ionization => field_ionization}/analysis.py (100%) rename Examples/Tests/{ionization => field_ionization}/catalyst_pipeline.py (100%) rename Examples/Tests/{ionization => field_ionization}/inputs_test_2d_ionization_boost (100%) rename Examples/Tests/{ionization => field_ionization}/inputs_test_2d_ionization_lab (100%) rename Examples/Tests/{ionization => field_ionization}/inputs_test_2d_ionization_picmi.py (100%) diff --git a/.github/workflows/insitu.yml b/.github/workflows/insitu.yml index be93dfb9beb..35f16842935 100644 --- a/.github/workflows/insitu.yml +++ b/.github/workflows/insitu.yml @@ -101,8 +101,8 @@ jobs: cmake --build build -j 10 - name: 2D Test run: | - cp Examples/Tests/ionization/inputs_test_2d_ionization_lab . - cp Examples/Tests/ionization/catalyst_pipeline.py . + cp Examples/Tests/field_ionization/inputs_test_2d_ionization_lab . + cp Examples/Tests/field_ionization/catalyst_pipeline.py . mpiexec -n 2 ./build/bin/warpx.2d \ inputs_test_2d_ionization_lab \ catalyst.script_paths = catalyst_pipeline.py\ diff --git a/Docs/source/latex_theory/allbibs.bib b/Docs/source/latex_theory/allbibs.bib index b3475c5a81b..62810ca5d6a 100644 --- a/Docs/source/latex_theory/allbibs.bib +++ b/Docs/source/latex_theory/allbibs.bib @@ -2187,3 +2187,45 @@ @book{godfrey1985iprop title = {{The IPROP Three-Dimensional Beam Propagation Code}}, year = {1985} } + +@article{Ammosov1986, +title = {Tunnel ionization of complex atoms and of atomic ions in an alternating electromagnetic field}, +volume = {64}, +issn = {0044-4510}, +doi = {10.1117/12.938695}, +number = {December 1986}, +journal = {Sov. Phys. JETP}, +author = {Ammosov, M. V. and Delone, N. B. and Krainov, V. 
P.},
+year = {1986},
+pmid = {22232002},
+note = {ISBN: 0892526998},
+pages = {1191--1194},
+}
+
+
+@article{zhang_empirical_2014,
+title = {Empirical formula for over-barrier strong-field ionization},
+volume = {90},
+issn = {1050-2947, 1094-1622},
+doi = {10.1103/PhysRevA.90.043410},
+language = {en},
+number = {4},
+journal = {Physical Review A},
+author = {Zhang, Qingbin and Lan, Pengfei and Lu, Peixiang},
+month = oct,
+year = {2014},
+pages = {043410},
+}
+
+
+@book{Mulser2010,
+title = {High {Power} {Laser}-{Matter} {Interaction}},
+volume = {238},
+isbn = {978-3-540-50669-0},
+publisher = {Springer Berlin Heidelberg},
+author = {Mulser, Peter and Bauer, Dieter},
+year = {2010},
+pmid = {25246403},
+doi = {10.1007/978-3-540-46065-7},
+note = {Series Title: Springer Tracts in Modern Physics},
+}
diff --git a/Docs/source/theory/multiphysics/ionization.rst b/Docs/source/theory/multiphysics/ionization.rst
index d93781603d9..11abea386c8 100644
--- a/Docs/source/theory/multiphysics/ionization.rst
+++ b/Docs/source/theory/multiphysics/ionization.rst
@@ -3,6 +3,71 @@ Ionization
 ==========

+Field Ionization
+----------------
+
+Under the influence of a sufficiently strong external electric field, atoms become ionized.
+In particular, the dynamics of interactions between ultra-high-intensity laser pulses and matter, e.g., Laser-Plasma Acceleration (LPA) with ionization injection, or Laser-Plasma Interaction (LPI) with solid-density targets, can depend on field ionization dynamics.
+
+WarpX models field ionization based on a description of the Ammosov-Delone-Krainov model :cite:p:`mpion-Ammosov1986` following :cite:t:`mpion-ChenPRSTAB13`.
+
+Implementation Details and Assumptions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
 .. note::
-    This section will be added soon!
+    The current implementation makes the following assumptions:
+
+    * Energy for ionization processes is not removed from the electromagnetic fields
+    * Only one single-level ionization process can occur per macroparticle and time step
+    * Ionization happens at the beginning of the PIC loop, before the field solve
+    * Angular momentum quantum number :math:`l = 0` and magnetic quantum number :math:`m = 0`
+
+The model implements the following equations (the assumptions on :math:`l` and :math:`m` have already been applied).
+
+The electric field amplitude is calculated in the particle's frame of reference.
+
+.. math::
+
+    \begin{aligned}
+        \vec{E}_\mathrm{dc} &= \sqrt{ - \frac{1}{\mathrm{c}^2} \left( \vec{u} \cdot \vec{E} \right)^2
+        + \left( \gamma \vec{E} + \vec{u} \times \vec{B} \right)^2 }
+        \\
+        \gamma &= \sqrt{1 + \frac{\vec{u}^2}{\mathrm{c}^2}}
+    \end{aligned}
+
+Here, :math:`\vec{u} = (u_x, u_y, u_z)` is the momentum normalized to the particle mass, :math:`u_i = (\beta \gamma)_i \mathrm{c}`.
+:math:`E_\mathrm{dc} = |\vec{E}_\mathrm{dc}|` is the DC field in the frame of the particle.
+
+..
math::
+
+    \begin{aligned}
+        P &= 1 - \mathrm{e}^{-W\mathrm{d}\tau/\gamma}
+        \\
+        W &= \omega_\mathrm{a} \mathcal{C}^2_{n^* l^*} \frac{U_\mathrm{ion}}{2 U_\mathrm{H}}
+        \left[ 2 \frac{E_\mathrm{a}}{E_\mathrm{dc}} \left( \frac{U_\mathrm{ion}}{U_\mathrm{H}} \right)^{3/2} \right]^{2n^*-1}
+        \times \exp\left[ - \frac{2}{3} \frac{E_\mathrm{a}}{E_\mathrm{dc}} \left( \frac{U_\mathrm{ion}}{U_\mathrm{H}} \right)^{3/2} \right]
+        \\
+        \mathcal{C}^2_{n^* l^*} &= \frac{2^{2n^*}}{n^* \Gamma(n^* + l^* + 1) \Gamma(n^* - l^*)}
+    \end{aligned}
+
+where :math:`\mathrm{d}\tau` is the simulation timestep, which is divided by the particle :math:`\gamma` to account for time dilation. The quantities are: :math:`\omega_\mathrm{a}`, the atomic unit frequency; :math:`U_\mathrm{ion}`, the ionization potential; :math:`U_\mathrm{H}`, the hydrogen ground-state ionization potential; :math:`E_\mathrm{a}`, the atomic unit electric field; :math:`n^* = Z \sqrt{U_\mathrm{H}/U_\mathrm{ion}}`, the effective principal quantum number (*Attention!* :math:`Z` is the ionization state *after ionization*.); and :math:`l^* = n_0^* - 1`, the effective orbital quantum number.
+
+Empirical Extension to Over-the-Barrier Regime for Hydrogen
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For hydrogen, WarpX offers the modified empirical ADK extension to the Over-the-Barrier (OTB) regime published in :cite:t:`mpion-zhang_empirical_2014`, Eq. (8).
+
+.. math::
+
+    W_\mathrm{M} = \exp\left[ -\left( a_1 \frac{E^2}{E_\mathrm{b}} + a_2 \frac{E}{E_\mathrm{b}} + a_3 \right) \right] W_\mathrm{ADK}
+
+The parameters :math:`a_1` through :math:`a_3` are independent of :math:`E` and can be found in the same reference. :math:`E_\mathrm{b}` is the classical Barrier Suppression Ionization (BSI) field strength :math:`E_\mathrm{b} = U_\mathrm{ion}^2 / (4 Z)`, given here in atomic units (AU). For a detailed description of conversion between unit systems, consider the book by :cite:t:`mpion-Mulser2010`.
+
+Testing
+^^^^^^^
+
+* `Testing the field ionization module <../../../../Examples/Tests/field_ionization/README.rst>`_.
+
+.. bibliography::
+    :keyprefix: mpion-
diff --git a/Docs/source/usage/examples.rst b/Docs/source/usage/examples.rst
index 244fbda6f75..f1bd2ec4266 100644
--- a/Docs/source/usage/examples.rst
+++ b/Docs/source/usage/examples.rst
@@ -137,6 +137,11 @@ An example of initializing the fields by accessing their data through Python, ad
 Many Further Examples, Demos and Tests
 --------------------------------------

+.. toctree::
+   :maxdepth: 1
+
+   examples/field_ionization/README.rst
+
 WarpX runs over 200 integration tests on a variety of modeling cases, which validate and demonstrate its functionality. Please see the `Examples/Tests/ `__ directory for many more examples.
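As a quick numerical cross-check of the ADK expressions above, the sketch below evaluates the rate :math:`W` and the per-step ionization probability :math:`P = 1 - \mathrm{e}^{-W\mathrm{d}\tau/\gamma}` for hydrogen. It only mirrors the documented formulas, not the WarpX implementation; the function name ``adk_rate`` and the sample field and timestep values are illustrative.

.. code-block:: python

    import math

    from scipy.constants import physical_constants

    # Atomic units and the hydrogen ground-state ionization potential
    E_a = physical_constants["atomic unit of electric field"][0]  # ~5.14e11 V/m
    omega_a = 1.0 / physical_constants["atomic unit of time"][0]  # ~4.13e16 1/s
    U_H = 13.605693122994  # eV

    def adk_rate(E_dc, U_ion, Z):
        """ADK rate W [1/s] for l = m = 0, following the equations above.

        E_dc is the DC field in the particle frame [V/m], U_ion the
        ionization potential of the state being ionized [eV], and Z the
        charge state *after* ionization.
        """
        n_star = Z * math.sqrt(U_H / U_ion)
        l_star = n_star - 1.0
        C2 = 2.0 ** (2.0 * n_star) / (
            n_star * math.gamma(n_star + l_star + 1.0) * math.gamma(n_star - l_star)
        )
        r = (E_a / E_dc) * (U_ion / U_H) ** 1.5
        w = C2 * U_ion / (2.0 * U_H) * (2.0 * r) ** (2.0 * n_star - 1.0)
        return omega_a * w * math.exp(-2.0 * r / 3.0)

    # Per-step ionization probability, including the 1/gamma time dilation
    W = adk_rate(E_dc=2.0e11, U_ion=U_H, Z=1)
    dtau, gamma = 1.0e-17, 1.0
    P = 1.0 - math.exp(-W * dtau / gamma)

For hydrogen (:math:`Z = 1`, :math:`U_\mathrm{ion} = U_\mathrm{H}`), :math:`n^* = 1` and :math:`\mathcal{C}^2_{n^* l^*} = 4`, which the ``math.gamma`` calls above reproduce.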
diff --git a/Docs/source/usage/examples/field_ionization b/Docs/source/usage/examples/field_ionization new file mode 120000 index 00000000000..b1c3e38dab2 --- /dev/null +++ b/Docs/source/usage/examples/field_ionization @@ -0,0 +1 @@ +../../../../Examples/Tests/field_ionization/ \ No newline at end of file diff --git a/Examples/Tests/CMakeLists.txt b/Examples/Tests/CMakeLists.txt index 108a28a6539..6fea9368e78 100644 --- a/Examples/Tests/CMakeLists.txt +++ b/Examples/Tests/CMakeLists.txt @@ -25,7 +25,7 @@ add_subdirectory(gaussian_beam) add_subdirectory(implicit) add_subdirectory(initial_distribution) add_subdirectory(initial_plasma_profile) -add_subdirectory(ionization) +add_subdirectory(field_ionization) add_subdirectory(ion_stopping) add_subdirectory(langmuir) add_subdirectory(langmuir_fluids) diff --git a/Examples/Tests/ionization/CMakeLists.txt b/Examples/Tests/field_ionization/CMakeLists.txt similarity index 100% rename from Examples/Tests/ionization/CMakeLists.txt rename to Examples/Tests/field_ionization/CMakeLists.txt diff --git a/Examples/Tests/field_ionization/README.rst b/Examples/Tests/field_ionization/README.rst new file mode 100644 index 00000000000..c2f11ed1a40 --- /dev/null +++ b/Examples/Tests/field_ionization/README.rst @@ -0,0 +1,59 @@ +.. _examples-tests-field_ionization: + +Field Ionization +================ + +Run Test +-------- + +For `MPI-parallel `__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system. + +.. tab-set:: + + .. tab-item:: lab frame + + This example can be run **either** as: + + * **Python** script: ``python3 inputs_test_2d_ionization_picmi.py`` or + * WarpX **executable** using an input file: ``warpx.2d inputs_test_2d_ionization_lab max_step=1600`` + + .. tab-set:: + + .. tab-item:: Python: Script + + .. literalinclude:: inputs_test_2d_ionization_picmi.py + :language: python3 + :caption: You can copy this file from ``Examples/Tests/field_ionization/inputs_test_2d_ionization_picmi.py``. + + .. tab-item:: Executable: Input File + + .. literalinclude:: inputs_test_2d_ionization_lab + :language: ini + :caption: You can copy this file from ``Examples/Tests/field_ionization/inputs_test_2d_ionization_lab``. + + .. tab-item:: boosted frame + + This example can be run as: + + * WarpX **executable** using an input file: ``warpx.2d inputs_test_2d_ionization_boost max_step=420`` + + .. literalinclude:: inputs_test_2d_ionization_boost + :language: ini + :caption: You can copy this file from ``Examples/Tests/field_ionization/inputs_test_2d_ionization_boost``. + +Analyze +------- + +.. dropdown:: Script ``analysis.py`` + + .. literalinclude:: analysis.py + :language: python3 + :caption: You can copy this file from ``Examples/Tests/field_ionization/analysis.py``. + +Visualize +--------- + +.. figure:: https://gist.githubusercontent.com/johvandewetering/48d092c003915f1d1689b507caa2865b/raw/29f5d12ed77831047ca12f456a07dbf3b99770d5/image_ionization.png + :alt: Electric field of the laser pulse with (top) ions with ionization levels and (bottom) ionized electrons. + + Electric field of the laser pulse with (top) ions with ionization levels and (bottom) ionized electrons. 
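Relatedly, the classical barrier-suppression threshold quoted in the theory section, :math:`E_\mathrm{b} = U_\mathrm{ion}^2/(4Z)` in atomic units, can be evaluated in a couple of lines. A minimal sketch for hydrogen; the variable names and printed check are illustrative only.

.. code-block:: python

    from scipy.constants import physical_constants

    # One atomic unit of electric field, in V/m
    E_au = physical_constants["atomic unit of electric field"][0]

    # E_b = U_ion^2 / (4 Z) in atomic units; hydrogen: U_ion = 0.5 Hartree, Z = 1
    U_ion_au, Z = 0.5, 1
    E_b_au = U_ion_au**2 / (4 * Z)  # = 1/16 atomic units
    print(E_b_au * E_au)  # ~3.2e10 V/m

Fields above this value ionize hydrogen essentially instantaneously, which is the regime the empirical OTB correction addresses.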
diff --git a/Examples/Tests/ionization/analysis.py b/Examples/Tests/field_ionization/analysis.py
similarity index 100%
rename from Examples/Tests/ionization/analysis.py
rename to Examples/Tests/field_ionization/analysis.py
diff --git a/Examples/Tests/ionization/catalyst_pipeline.py b/Examples/Tests/field_ionization/catalyst_pipeline.py
similarity index 100%
rename from Examples/Tests/ionization/catalyst_pipeline.py
rename to Examples/Tests/field_ionization/catalyst_pipeline.py
diff --git a/Examples/Tests/ionization/inputs_test_2d_ionization_boost b/Examples/Tests/field_ionization/inputs_test_2d_ionization_boost
similarity index 100%
rename from Examples/Tests/ionization/inputs_test_2d_ionization_boost
rename to Examples/Tests/field_ionization/inputs_test_2d_ionization_boost
diff --git a/Examples/Tests/ionization/inputs_test_2d_ionization_lab b/Examples/Tests/field_ionization/inputs_test_2d_ionization_lab
similarity index 100%
rename from Examples/Tests/ionization/inputs_test_2d_ionization_lab
rename to Examples/Tests/field_ionization/inputs_test_2d_ionization_lab
diff --git a/Examples/Tests/ionization/inputs_test_2d_ionization_picmi.py b/Examples/Tests/field_ionization/inputs_test_2d_ionization_picmi.py
similarity index 100%
rename from Examples/Tests/ionization/inputs_test_2d_ionization_picmi.py
rename to Examples/Tests/field_ionization/inputs_test_2d_ionization_picmi.py
From 0ab57d3c183972a17e2f1dbd43b471bcbea38e4b Mon Sep 17 00:00:00 2001
From: Arianna Formenti
Date: Fri, 13 Sep 2024 13:58:51 -0700
Subject: [PATCH 45/91] Reduce time in beam-beam CI test - follow up (#5263)

* fixed longitudinal resolution, updated image with new test

* removed white line

---
 Examples/Physics_applications/beam_beam_collision/README.rst | 4 ++--
 .../beam_beam_collision/inputs_test_3d_beam_beam_collision | 1 -
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/Examples/Physics_applications/beam_beam_collision/README.rst b/Examples/Physics_applications/beam_beam_collision/README.rst
index 4f89365c8f0..a7a06521218 100644
--- a/Examples/Physics_applications/beam_beam_collision/README.rst
+++ b/Examples/Physics_applications/beam_beam_collision/README.rst
@@ -39,7 +39,7 @@ We compare different results:
 * (blue) large-scale WarpX simulation (high resolution and ad hoc generated tables);
 * (black) literature results from :cite:t:`ex-Yakimenko2019`.

-The small-scale simulation has been performed with a resolution of ``nx = 64, ny = 64, nz = 128`` grid cells, while the large-scale one has a much higher resolution of ``nx = 512, ny = 512, nz = 1024``. Moreover, the large-scale simulation uses dedicated QED lookup tables instead of the builtin tables. To generate the tables within WarpX, the code must be compiled with the flag ``-DWarpX_QED_TABLE_GEN=ON``. For the large-scale simulation we have used the following options:
+The small-scale simulation has been performed with a resolution of ``nx = 64, ny = 64, nz = 64`` grid cells, while the large-scale one has a much higher resolution of ``nx = 512, ny = 512, nz = 1024``. Moreover, the large-scale simulation uses dedicated QED lookup tables instead of the builtin tables. To generate the tables within WarpX, the code must be compiled with the flag ``-DWarpX_QED_TABLE_GEN=ON``. For the large-scale simulation we have used the following options:

 .. code-block:: ini

@@ -63,7 +63,7 @@ The small-scale simulation has been performed with a resolution of ``nx = 64, ny

     qed_bw.tab_pair_frac_how_many=512
     qed_bw.save_table_in=my_bw_table.txt

-..
figure:: https://user-images.githubusercontent.com/17280419/291749626-aa61fff2-e6d2-45a3-80ee-84b2851ea0bf.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTEiLCJleHAiOjE3MDMwMzQzNTEsIm5iZiI6MTcwMzAzNDA1MSwicGF0aCI6Ii8xNzI4MDQxOS8yOTE3NDk2MjYtYWE2MWZmZjItZTZkMi00NWEzLTgwZWUtODRiMjg1MWVhMGJmLnBuZz9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFJV05KWUFYNENTVkVINTNBJTJGMjAyMzEyMjAlMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjMxMjIwVDAxMDA1MVomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWFiYzY2MGQyYzIyZGIzYzUxOWI3MzNjZTk5ZDM1YzgyNmY4ZDYxOGRlZjAyZTIwNTAyMTc3NTgwN2Q0YjEwNGMmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.I96LQpjqmFXirPDVnBlFQIkCuenR6IuOSY0OIIQvtCo +.. figure:: https://gist.github.com/user-attachments/assets/2dd43782-d039-4faa-9d27-e3cf8fb17352 :alt: Beam-beam collision benchmark against :cite:t:`ex-Yakimenko2019`. :width: 100% diff --git a/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision index fcbd8a202e3..e856a078003 100644 --- a/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision +++ b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision @@ -29,7 +29,6 @@ my_constants.nx = 64 my_constants.ny = 64 my_constants.nz = 64 - # TIME my_constants.T = 0.7*Lz/clight my_constants.dt = sigmaz/clight/10. From 3974fac65086ea7e4f62caa749efa21515a477dc Mon Sep 17 00:00:00 2001 From: Christos Tsolakis <6725596+ChristosT@users.noreply.github.com> Date: Fri, 13 Sep 2024 19:07:57 -0400 Subject: [PATCH 46/91] docs: improve build and usage instructions of catalyst (#5264) --- Docs/source/dataanalysis/catalyst.rst | 136 +++++++----------- .../catalyst/catalyst_simple_pipeline.py | 101 +++++++++++++ 2 files changed, 153 insertions(+), 84 deletions(-) create mode 100644 Docs/source/dataanalysis/catalyst/catalyst_simple_pipeline.py diff --git a/Docs/source/dataanalysis/catalyst.rst b/Docs/source/dataanalysis/catalyst.rst index 5a9f1432695..97e634c5c6a 100644 --- a/Docs/source/dataanalysis/catalyst.rst +++ b/Docs/source/dataanalysis/catalyst.rst @@ -9,28 +9,25 @@ visualization and analysis capabilities, which is what this document will focus Enabling Catalyst ----------------- -In order to use Catalyst with WarpX, you must `build Catalyst 2 `_ and `build `__ or `install `__ ParaView 5.9+. Afterward, AMReX must be built with ``AMReX_CONDUIT=TRUE``, -``AMReX_CATALYST=TRUE``, ``Conduit_DIR=/path/to/conduit``, and ``Catalyst_DIR=/path/to/catalyst`` (``/path/to/catalyst`` should be the directory containing ``catalyst-config.cmake``, not the path to the implementation). - -Once AMReX is appropriately built, WarpX can be built with the following options: - -.. code-block:: cmake - - WarpX_amrex_internal=FALSE - AMReX_DIR="/path/to/amrex/build" - -If they cannot be found, ``Conduit_DIR`` and ``Catalyst_DIR`` will have to be set again. Ensure that AMReX is built with all required options, some common ones being: - -.. code-block:: cmake - - AMReX_MPI=TRUE - AMReX_MPI_THREAD_MULTIPLE=TRUE - AMReX_LINEAR_SOLVERS=TRUE - AMReX_PARTICLES=TRUE - AMReX_PARTICLES_PRECISION=DOUBLE - AMReX_PIC=TRUE - AMReX_TINY_PROFILE=TRUE - +In order to use Catalyst with WarpX, we need to ensure that we will be using the same version of +conduit across all libraries i.e Catalyst, AMReX and ParaView. 
One way to achieve this is to +build conduit externally and use it for compiling all the above packages. +This ensures compatibility when passing conduit nodes between WarpX and ParaView. + +First, we build +`Conduit `_ and then +build `Catalyst 2 `_ +using the conduit library created in the previous step. +The latter can be achieved by adding the installation path of conduit to the environmental +variable `CMAKE_PREFIX_PATH` and setting `CATALYST_WITH_EXTERNAL_CONDUIT=ON` during the configuration step of Catalyst. + +Then we build ParaView master (on a commit after 2024.07.01, tested on ``4ef351a54ff747ef7169e2e52e77d9703a9dfa77``) following the developer instructions provided +`here `__ . +A representative set of options for a headless ParaView installation is provided +`here `__ +Afterward, WarpX must be built with ``WarpX_CATALYST=ON``. +Also, make sure to provide the installed paths of Conduit and Catalyst via +`CMAKE_PREFIX_PATH` before configuring WarpX. Inputs File Configuration ------------------------- @@ -43,6 +40,10 @@ In addition to configuring the diagnostics, the following parameters must be inc * ``catalyst.implementation`` (default ``paraview``): The name of the implementation being used (case sensitive). * ``catalyst.implementation_search_paths``: The locations to search for the given implementation. The specific file being searched for will be ``catalyst_{implementation}.so``. +The latter two can also be given via the environmental variables +`CATALYST_IMPLEMENTATION_NAME` and `CATALYST_IMPLEMENTATION_PATHS` +respectively. + Because the scripts and implementations are global, Catalyst does not benefit from nor differentiate between multiple diagnostics. @@ -53,66 +54,10 @@ Catalyst uses the files specified in ``catalyst.script_paths`` to run all analys The following script, :code:`simple_catalyst_pipeline.py`, automatically detects the type of data for both the mesh and particles, then creates an extractor for them. In most cases, these will be saved as ``.VTPC`` files which can be read with the ``XML Partitioned Dataset Collection Reader``. -.. code-block:: python - - from paraview.simple import * - from paraview import catalyst - - # Helper function - def create_extractor(data_node, filename="Dataset"): - VTK_TYPES = ["vtkImageData", "vtkRectilinearGrid", "vtkStructuredGrid", "vtkPolyData", "vtkUnstructuredGrid", "vtkUniformGridAMR", "vtkMultiBlockDataSet", "vtkPartitionedDataSet", "vtkPartitionedDataSetCollection", "vtkHyperTreeGrid"] - FILE_ASSOCIATIONS = ["VTI", "VTR", "VTS", "VTP", "VTU", "VTH", "VTM", "VTPD", "VTPC", "HTG"] - clientside_data = data_node.GetClientSideObject().GetOutputDataObject(0) # Gets the dataobject from the default output port - - # Loop is required because .IsA() detects valid classes that inherit from the VTK_TYPES - for i, vtk_type in enumerate(VTK_TYPES): - if (clientside_data.IsA(vtk_type)): - filetype = FILE_ASSOCIATIONS[i] - extractor = CreateExtractor(filetype, data_node, registrationName=f"_{filetype}") - extractor.Writer.FileName = filename + "_{timestep:}" + f".{filetype}" - return extractor - - raise RuntimeError(f"Unsupported data type: {clientside_data.GetClassName()}") +.. literalinclude:: catalyst/catalyst_simple_pipeline.py + :language: python + :caption: You can copy this file from ``Docs/source/dataanalysis/catalyst/catalyst_simple_pipeline.py``. 
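Putting the pieces above together, one way to wire up a run is to pass the implementation through the environment and the pipeline script through the input file. A minimal launcher sketch, assuming a 2D executable on two ranks; the input-file name, pipeline-script name, and ParaView install path below are placeholders, not values from this patch.

.. code-block:: python

    import os
    import subprocess

    env = dict(os.environ)
    # Equivalent to catalyst.implementation / catalyst.implementation_search_paths
    env["CATALYST_IMPLEMENTATION_NAME"] = "paraview"
    env["CATALYST_IMPLEMENTATION_PATHS"] = "/path/to/paraview/lib/catalyst"

    subprocess.run(
        ["mpiexec", "-n", "2", "./warpx.2d", "inputs_2d",
         "catalyst.script_paths=simple_catalyst_pipeline.py"],
        env=env,
        check=True,
    )

Exporting the same variables from the shell before ``mpiexec`` works equally well; the script route only keeps the whole launch reproducible in one place.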
- # Camera settings - paraview.simple._DisableFirstRenderCameraReset() # Prevents the camera from being shown - - # Options - options = catalyst.Options() - - options.CatalystLiveTrigger = "TimeStep" # "Python", "TimeStep", "TimeValue" - options.EnableCatalystLive = 0 # 0 (disabled), 1 (enabled) - if (options.EnableCatalystLive == 1): - options.CatalystLiveURL = "localhost:22222" # localhost:22222 is default - - options.ExtractsOutputDirectory = "datasets" # Base for where all files are saved - options.GenerateCinemaSpecification = 0 # 0 (disabled), 1 (enabled), generates additional descriptor files for cinema exports - options.GlobalTrigger = "TimeStep" # "Python", "TimeStep", "TimeValue" - - meshSource = PVTrivialProducer(registrationName="mesh") # "mesh" is the node where the mesh data is stored - create_extractor(meshSource, filename="meshdata") - particleSource = PVTrivialProducer(registrationName="particles") # "particles" is the node where particle data is stored - create_extractor(particleSource, filename="particledata") - - # Called on catalyst initialize (after Cxx side initialize) - def catalyst_initialize(): - return - - # Called on catalyst execute (after Cxx side update) - def catalyst_execute(info): - print(f"Time: {info.time}, Timestep: {info.timestep}, Cycle: {info.cycle}") - return - - # Callback if global trigger is set to "Python" - def is_activated(controller): - return True - - # Called on catalyst finalize (after Cxx side finalize) - def catalyst_finalize(): - return - - if __name__ == '__main__': - paraview.simple.SaveExtractsUsingCatalystOptions(options) For the case of ParaView Catalyst, pipelines are run with ParaView's included ``pvbatch`` executable and use the ``paraview`` library to modify the data. While pipeline scripts @@ -159,9 +104,32 @@ Steps one is advised so that proper scaling and framing can be done, however in Replay ------ +Catalyst 2.0 supports generating binary data dumps for the conduit nodes passed to each ``catalyst_`` call at each iteration. This allows to debug/adapt catalyst scripts without having to rerun the simulation each time. + +To generate the data dumps one must first set the environmental variable ``CATALYST_DATA_DUMP_DIRECTORY`` to the path where the dumps should be saved. Then, run the simulation as normal but replace ``catalyst.implementation=stub`` either in the calling script of WarpX or as an additional argument. + +This will run the simulation and write the conduit nodes under ``CATALYST_DATA_DUMP_DIRECTORY``. + +Afterward, one can replay the generated nodes by setting up the `CATALYST_IMPLEMENTATION_*` variables for the `catalyst_replay` executable (which can be found in the catalyst build directory) appropriately. For example: + +.. code-block:: bash -Catalyst 2 supports replay capabilities, which can be read about `here `_. + # dump conduit nodes + export CATALYST_DATA_DUMP_DIRECTORY=./raw_data + mpiexec -n N /bin/warpx.2d ./inputs_2d catalyst.script_paths=catalyst_pipeline.py catalyst.implementation="stub" + # validate that files have been written + ls ./raw_data/ + ... many files of the format XXXX.conduit_bin.Y.Z -.. 
note::
-
-    * TODO: Add more extensive documentation on replay
+For more information, see the documentation for catalyst replay `here `__ .
diff --git a/Docs/source/dataanalysis/catalyst/catalyst_simple_pipeline.py b/Docs/source/dataanalysis/catalyst/catalyst_simple_pipeline.py
new file mode 100644
index 00000000000..c74b5d3206d
--- /dev/null
+++ b/Docs/source/dataanalysis/catalyst/catalyst_simple_pipeline.py
@@ -0,0 +1,101 @@
+from paraview import catalyst
+from paraview.simple import *  # noqa: F403
+
+
+# Helper function
+def create_data_extractor(data_node, filename="Dataset"):
+    """Creates a data extractor that saves `data_node` to a datafile named `filename`.
+    The filetype is chosen based on the type of `data_node`.
+
+    Note: no rendering is performed by such an extractor. The data are
+    written directly to a file via VTK.
+    """
+    VTK_TYPES = [
+        "vtkImageData",
+        "vtkRectilinearGrid",
+        "vtkStructuredGrid",
+        "vtkPolyData",
+        "vtkUnstructuredGrid",
+        "vtkUniformGridAMR",
+        "vtkMultiBlockDataSet",
+        "vtkPartitionedDataSet",
+        "vtkPartitionedDataSetCollection",
+        "vtkHyperTreeGrid",
+    ]
+    FILE_ASSOCIATIONS = [
+        "VTI",
+        "VTR",
+        "VTS",
+        "VTP",
+        "VTU",
+        "VTH",
+        "VTM",
+        "VTPD",
+        "VTPC",
+        "HTG",
+    ]
+    clientside_data = data_node.GetClientSideObject().GetOutputDataObject(
+        0
+    )  # Gets the dataobject from the default output port
+
+    # Loop is required because .IsA() detects valid classes that inherit from the VTK_TYPES
+    for i, vtk_type in enumerate(VTK_TYPES):
+        if clientside_data.IsA(vtk_type):
+            filetype = FILE_ASSOCIATIONS[i]
+            extractor = CreateExtractor(
+                filetype, data_node, registrationName=f"_{filetype}"
+            )
+            extractor.Writer.FileName = filename + "_{timestep:}" + f".{filetype}"
+            return extractor
+
+    raise RuntimeError(f"Unsupported data type: {clientside_data.GetClassName()}")
+
+
+# Camera settings
+paraview.simple._DisableFirstRenderCameraReset()  # Prevents the camera from being shown
+
+# Options
+options = catalyst.Options()
+
+options.CatalystLiveTrigger = "TimeStep"  # "Python", "TimeStep", "TimeValue"
+options.EnableCatalystLive = 0  # 0 (disabled), 1 (enabled)
+if options.EnableCatalystLive == 1:
+    options.CatalystLiveURL = "localhost:22222"  # localhost:22222 is default
+
+options.ExtractsOutputDirectory = "datasets"  # Base for where all files are saved
+options.GenerateCinemaSpecification = 0  # 0 (disabled), 1 (enabled), generates additional descriptor files for cinema exports
+options.GlobalTrigger = "TimeStep"  # "Python", "TimeStep", "TimeValue"
+
+meshSource = PVTrivialProducer(
+    registrationName="mesh"
+)  # "mesh" is the node where the mesh data is stored
+create_data_extractor(meshSource, filename="meshdata")
+particleSource = PVTrivialProducer(
+    registrationName="particles"
+)  # "particles" is the node where particle data is stored
+create_data_extractor(particleSource, filename="particledata")
+
+
+# Called on catalyst initialize (after Cxx side initialize)
+def catalyst_initialize():
+    return
+
+
+# Called on catalyst execute (after Cxx side update)
+def
catalyst_execute(info): + print(f"Time: {info.time}, Timestep: {info.timestep}, Cycle: {info.cycle}") + return + + +# Callback if global trigger is set to "Python" +def is_activated(controller): + return True + + +# Called on catalyst finalize (after Cxx side finalize) +def catalyst_finalize(): + return + + +if __name__ == "__main__": + paraview.simple.SaveExtractsUsingCatalystOptions(options) From 5c1603ea2fe87b8fd2ea61ca0bafa220a3efccab Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Sat, 14 Sep 2024 01:13:44 +0200 Subject: [PATCH 47/91] Replace "std::endl" with "\n" (except for error messages) (#5183) * replace std::endl with \n * revert to std::endl when flushing the buffer seems intentional * Update Source/Diagnostics/BTDiagnostics.cpp Co-authored-by: Axel Huebl * Update Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp Co-authored-by: Axel Huebl * Update Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp Co-authored-by: Axel Huebl * Update Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp Co-authored-by: Axel Huebl * Update Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp Co-authored-by: Axel Huebl * Update Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp Co-authored-by: Axel Huebl * Update Source/Particles/PhysicalParticleContainer.cpp Co-authored-by: Axel Huebl * Update Tools/QedTablesUtils/Source/ArgParser/QedTablesArgParser.cpp Co-authored-by: Axel Huebl * Update Tools/QedTablesUtils/Source/QedTableGenerator.cpp Co-authored-by: Axel Huebl * Update Tools/QedTablesUtils/Source/QedTableReader.cpp Co-authored-by: Axel Huebl --------- Co-authored-by: Axel Huebl --- Source/Diagnostics/BTDiagnostics.cpp | 6 ++-- .../FlushFormats/FlushFormatCatalyst.cpp | 2 +- .../Diagnostics/ReducedDiags/BeamRelevant.cpp | 6 ++-- .../Diagnostics/ReducedDiags/ChargeOnEB.cpp | 3 +- .../ReducedDiags/ColliderRelevant.cpp | 2 +- .../ReducedDiags/DifferentialLuminosity.cpp | 2 +- .../Diagnostics/ReducedDiags/FieldEnergy.cpp | 2 +- .../Diagnostics/ReducedDiags/FieldMaximum.cpp | 2 +- .../ReducedDiags/FieldMomentum.cpp | 2 +- .../Diagnostics/ReducedDiags/FieldProbe.cpp | 4 +-- .../ReducedDiags/FieldReduction.cpp | 2 +- .../ReducedDiags/LoadBalanceCosts.cpp | 6 ++-- .../ReducedDiags/LoadBalanceEfficiency.cpp | 2 +- .../ReducedDiags/ParticleEnergy.cpp | 2 +- .../ReducedDiags/ParticleExtrema.cpp | 2 +- .../ReducedDiags/ParticleHistogram.cpp | 2 +- .../ReducedDiags/ParticleMomentum.cpp | 2 +- .../ReducedDiags/ParticleNumber.cpp | 2 +- .../Diagnostics/ReducedDiags/ReducedDiags.cpp | 2 +- .../Diagnostics/ReducedDiags/RhoMaximum.cpp | 2 +- Source/Diagnostics/WarpXOpenPMD.cpp | 2 +- .../ImplicitSolvers/SemiImplicitEM.cpp | 19 ++++++------- .../ImplicitSolvers/ThetaImplicitEM.cpp | 21 +++++++------- Source/Initialization/WarpXInitData.cpp | 2 +- Source/NonlinearSolvers/NewtonSolver.H | 28 +++++++++---------- Source/NonlinearSolvers/PicardSolver.H | 14 +++++----- .../Particles/PhysicalParticleContainer.cpp | 6 ++-- .../Source/ArgParser/QedTablesArgParser.cpp | 4 +-- Tools/QedTablesUtils/Source/QedTableCommons.H | 2 +- .../Source/QedTableGenerator.cpp | 2 +- .../QedTablesUtils/Source/QedTableReader.cpp | 2 +- 31 files changed, 77 insertions(+), 80 deletions(-) diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 1cee9909226..6fdb605f8dc 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -170,18 +170,18 @@ void BTDiagnostics::DerivedInitData () if (final_snapshot_fill_iteration > warpx.maxStep()) { 
warpx.updateMaxStep(final_snapshot_fill_iteration); amrex::Print()<<"max_step insufficient to fill all BTD snapshots. Automatically increased to: " - << final_snapshot_fill_iteration << std::endl; + << final_snapshot_fill_iteration << "\n"; } if (final_snapshot_fill_time > warpx.stopTime()) { warpx.updateStopTime(final_snapshot_fill_time); amrex::Print()<<"stop_time insufficient to fill all BTD snapshots. Automatically increased to: " - << final_snapshot_fill_time << std::endl; + << final_snapshot_fill_time << "\n"; } if (warpx.maxStep() == std::numeric_limits::max() && warpx.stopTime() == std::numeric_limits::max()) { amrex::Print()<<"max_step unspecified and stop time unspecified. Setting max step to " - <Verbose()) { return; } - amrex::Print() << std::endl; - amrex::Print() << "-----------------------------------------------------------" << std::endl; - amrex::Print() << "----------- SEMI IMPLICIT EM SOLVER PARAMETERS ------------" << std::endl; - amrex::Print() << "-----------------------------------------------------------" << std::endl; - amrex::Print() << "max particle iterations: " << m_max_particle_iterations << std::endl; - amrex::Print() << "particle tolerance: " << m_particle_tolerance << std::endl; + amrex::Print() << "\n"; + amrex::Print() << "-----------------------------------------------------------\n"; + amrex::Print() << "----------- SEMI IMPLICIT EM SOLVER PARAMETERS ------------\n"; + amrex::Print() << "-----------------------------------------------------------\n"; + amrex::Print() << "max particle iterations: " << m_max_particle_iterations << "\n"; + amrex::Print() << "particle tolerance: " << m_particle_tolerance << "\n"; if (m_nlsolver_type==NonlinearSolverType::Picard) { - amrex::Print() << "Nonlinear solver type: Picard" << std::endl; + amrex::Print() << "Nonlinear solver type: Picard\n"; } else if (m_nlsolver_type==NonlinearSolverType::Newton) { - amrex::Print() << "Nonlinear solver type: Newton" << std::endl; + amrex::Print() << "Nonlinear solver type: Newton\n"; } m_nlsolver->PrintParams(); - amrex::Print() << "-----------------------------------------------------------" << std::endl; - amrex::Print() << std::endl; + amrex::Print() << "-----------------------------------------------------------\n\n"; } void SemiImplicitEM::OneStep ( amrex::Real a_time, diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp index 4c86389797f..3d74ddfde69 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp @@ -56,22 +56,21 @@ void ThetaImplicitEM::Define ( WarpX* const a_WarpX ) void ThetaImplicitEM::PrintParameters () const { if (!m_WarpX->Verbose()) { return; } - amrex::Print() << std::endl; - amrex::Print() << "-----------------------------------------------------------" << std::endl; - amrex::Print() << "----------- THETA IMPLICIT EM SOLVER PARAMETERS -----------" << std::endl; - amrex::Print() << "-----------------------------------------------------------" << std::endl; - amrex::Print() << "Time-bias parameter theta: " << m_theta << std::endl; - amrex::Print() << "max particle iterations: " << m_max_particle_iterations << std::endl; - amrex::Print() << "particle tolerance: " << m_particle_tolerance << std::endl; + amrex::Print() << "\n"; + amrex::Print() << "-----------------------------------------------------------\n"; + amrex::Print() << "----------- THETA IMPLICIT EM SOLVER PARAMETERS -----------\n"; + amrex::Print() << 
"-----------------------------------------------------------\n"; + amrex::Print() << "Time-bias parameter theta: " << m_theta << "\n"; + amrex::Print() << "max particle iterations: " << m_max_particle_iterations << "\n"; + amrex::Print() << "particle tolerance: " << m_particle_tolerance << "\n"; if (m_nlsolver_type==NonlinearSolverType::Picard) { - amrex::Print() << "Nonlinear solver type: Picard" << std::endl; + amrex::Print() << "Nonlinear solver type: Picard\n"; } else if (m_nlsolver_type==NonlinearSolverType::Newton) { - amrex::Print() << "Nonlinear solver type: Newton" << std::endl; + amrex::Print() << "Nonlinear solver type: Newton\n"; } m_nlsolver->PrintParams(); - amrex::Print() << "-----------------------------------------------------------" << std::endl; - amrex::Print() << std::endl; + amrex::Print() << "-----------------------------------------------------------\n\n"; } void ThetaImplicitEM::OneStep ( const amrex::Real a_time, diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index de763831d98..49b0d439c50 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -830,7 +830,7 @@ WarpX::computeMaxStepBoostAccelerator() { static_cast(interaction_time_boost/dt[maxLevel()]); max_step = computed_max_step; Print()<<"max_step computed in computeMaxStepBoostAccelerator: " - <m_verbose?"true":"false") << std::endl; - amrex::Print() << "Newton max iterations: " << m_maxits << std::endl; - amrex::Print() << "Newton relative tolerance: " << m_rtol << std::endl; - amrex::Print() << "Newton absolute tolerance: " << m_atol << std::endl; - amrex::Print() << "Newton require convergence: " << (m_require_convergence?"true":"false") << std::endl; - amrex::Print() << "GMRES verbose: " << m_gmres_verbose_int << std::endl; - amrex::Print() << "GMRES restart length: " << m_gmres_restart_length << std::endl; - amrex::Print() << "GMRES max iterations: " << m_gmres_maxits << std::endl; - amrex::Print() << "GMRES relative tolerance: " << m_gmres_rtol << std::endl; - amrex::Print() << "GMRES absolute tolerance: " << m_gmres_atol << std::endl; + amrex::Print() << "Newton verbose: " << (this->m_verbose?"true":"false") << "\n"; + amrex::Print() << "Newton max iterations: " << m_maxits << "\n"; + amrex::Print() << "Newton relative tolerance: " << m_rtol << "\n"; + amrex::Print() << "Newton absolute tolerance: " << m_atol << "\n"; + amrex::Print() << "Newton require convergence: " << (m_require_convergence?"true":"false") << "\n"; + amrex::Print() << "GMRES verbose: " << m_gmres_verbose_int << "\n"; + amrex::Print() << "GMRES restart length: " << m_gmres_restart_length << "\n"; + amrex::Print() << "GMRES max iterations: " << m_gmres_maxits << "\n"; + amrex::Print() << "GMRES relative tolerance: " << m_gmres_rtol << "\n"; + amrex::Print() << "GMRES absolute tolerance: " << m_gmres_atol << "\n"; } private: @@ -261,19 +261,19 @@ void NewtonSolver::Solve ( Vec& a_U, if (norm_abs < m_rtol) { amrex::Print() << "Newton: exiting at iteration = " << std::setw(3) << iter - << ". Satisfied absolute tolerance " << m_atol << std::endl; + << ". Satisfied absolute tolerance " << m_atol << "\n"; break; } if (norm_rel < m_rtol) { amrex::Print() << "Newton: exiting at iteration = " << std::setw(3) << iter - << ". Satisfied relative tolerance " << m_rtol << std::endl; + << ". 
Satisfied relative tolerance " << m_rtol << "\n"; break; } if (norm_abs > 100._rt*norm0) { amrex::Print() << "Newton: exiting at iteration = " << std::setw(3) << iter - << ". SOLVER DIVERGED! relative tolerance = " << m_rtol << std::endl; + << ". SOLVER DIVERGED! relative tolerance = " << m_rtol << "\n"; std::stringstream convergenceMsg; convergenceMsg << "Newton: exiting at iteration " << std::setw(3) << iter << ". SOLVER DIVERGED! absolute norm = " << norm_abs << @@ -291,7 +291,7 @@ void NewtonSolver::Solve ( Vec& a_U, iter++; if (iter >= m_maxits) { amrex::Print() << "Newton: exiting at iter = " << std::setw(3) << iter - << ". Maximum iteration reached: iter = " << m_maxits << std::endl; + << ". Maximum iteration reached: iter = " << m_maxits << "\n"; break; } diff --git a/Source/NonlinearSolvers/PicardSolver.H b/Source/NonlinearSolvers/PicardSolver.H index f05b9a106e6..4eed4d6c2e0 100644 --- a/Source/NonlinearSolvers/PicardSolver.H +++ b/Source/NonlinearSolvers/PicardSolver.H @@ -55,10 +55,10 @@ public: void PrintParams () const override { - amrex::Print() << "Picard max iterations: " << m_maxits << std::endl; - amrex::Print() << "Picard relative tolerance: " << m_rtol << std::endl; - amrex::Print() << "Picard absolute tolerance: " << m_atol << std::endl; - amrex::Print() << "Picard require convergence: " << (m_require_convergence?"true":"false") << std::endl; + amrex::Print() << "Picard max iterations: " << m_maxits << "\n"; + amrex::Print() << "Picard relative tolerance: " << m_rtol << "\n"; + amrex::Print() << "Picard absolute tolerance: " << m_atol << "\n"; + amrex::Print() << "Picard require convergence: " << (m_require_convergence?"true":"false") << "\n"; } private: @@ -179,19 +179,19 @@ void PicardSolver::Solve ( Vec& a_U, if (norm_abs < m_atol) { amrex::Print() << "Picard: exiting at iter = " << std::setw(3) << iter - << ". Satisfied absolute tolerance " << m_atol << std::endl; + << ". Satisfied absolute tolerance " << m_atol << "\n"; break; } if (norm_rel < m_rtol) { amrex::Print() << "Picard: exiting at iter = " << std::setw(3) << iter - << ". Satisfied relative tolerance " << m_rtol << std::endl; + << ". Satisfied relative tolerance " << m_rtol << "\n"; break; } if (iter >= m_maxits) { amrex::Print() << "Picard: exiting at iter = " << std::setw(3) << iter - << ". Maximum iteration reached: iter = " << m_maxits << std::endl; + << ". Maximum iteration reached: iter = " << m_maxits << "\n"; break; } diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index d1a19f06993..0617b36a273 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -2976,10 +2976,10 @@ PhysicalParticleContainer::ImplicitPushXP (WarpXParIter& pti, #if !defined(AMREX_USE_GPU) std::stringstream convergenceMsg; convergenceMsg << "Picard solver for particle failed to converge after " << - iter << " iterations. 
" << std::endl; + iter << " iterations.\n"; convergenceMsg << "Position step norm is " << step_norm << - " and the tolerance is " << particle_tolerance << std::endl; - convergenceMsg << " ux = " << ux[ip] << ", uy = " << uy[ip] << ", uz = " << uz[ip] << std::endl; + " and the tolerance is " << particle_tolerance << "\n"; + convergenceMsg << " ux = " << ux[ip] << ", uy = " << uy[ip] << ", uz = " << uz[ip] << "\n"; convergenceMsg << " xp = " << xp << ", yp = " << yp << ", zp = " << zp; ablastr::warn_manager::WMRecordWarning("ImplicitPushXP", convergenceMsg.str()); #endif diff --git a/Tools/QedTablesUtils/Source/ArgParser/QedTablesArgParser.cpp b/Tools/QedTablesUtils/Source/ArgParser/QedTablesArgParser.cpp index 41d27477dcf..a92f191037d 100644 --- a/Tools/QedTablesUtils/Source/ArgParser/QedTablesArgParser.cpp +++ b/Tools/QedTablesUtils/Source/ArgParser/QedTablesArgParser.cpp @@ -85,7 +85,7 @@ ArgParser::ParseArgs (const std::vector& keys, const int argc, char const* void ArgParser::PrintHelp (const vector& cmd_list) { - cout << "Command line options: " << endl; + cout << "Command line options:\n"; for (const auto& el : cmd_list){ const auto type = get<1>(el); @@ -102,7 +102,7 @@ ArgParser::PrintHelp (const vector& cmd_list) cout << get<0>(el) << " " << stype << " " << get<2>(el) - << endl; + << "\n"; } } diff --git a/Tools/QedTablesUtils/Source/QedTableCommons.H b/Tools/QedTablesUtils/Source/QedTableCommons.H index 40551b9e13c..2233513bc97 100644 --- a/Tools/QedTablesUtils/Source/QedTableCommons.H +++ b/Tools/QedTablesUtils/Source/QedTableCommons.H @@ -19,7 +19,7 @@ void AbortWithMessage(const std::string& msg) void SuccessExit() { - std::cout << "___________________________" << std::endl; + std::cout << "___________________________\n"; exit(0); } diff --git a/Tools/QedTablesUtils/Source/QedTableGenerator.cpp b/Tools/QedTablesUtils/Source/QedTableGenerator.cpp index 1ea62b5c6ed..7bfa5787ec8 100644 --- a/Tools/QedTablesUtils/Source/QedTableGenerator.cpp +++ b/Tools/QedTablesUtils/Source/QedTableGenerator.cpp @@ -49,7 +49,7 @@ void GenerateTableQS (const ParsedArgs& args, const string& outfile_name); int main (int argc, char** argv) { - cout << "### QED Table Generator ###" << endl; + cout << "### QED Table Generator ###\n"; const auto args_map = ParseArgs(line_commands, argc, argv); if (args_map.empty() || Contains(args_map, "-h")){ diff --git a/Tools/QedTablesUtils/Source/QedTableReader.cpp b/Tools/QedTablesUtils/Source/QedTableReader.cpp index ba9d58775f2..27b284f34c2 100644 --- a/Tools/QedTablesUtils/Source/QedTableReader.cpp +++ b/Tools/QedTablesUtils/Source/QedTableReader.cpp @@ -57,7 +57,7 @@ class qs_photon_emission_table_wrapper : int main (int argc, char** argv) { - cout << "### QED Table Reader ###" << endl; + cout << "### QED Table Reader ###\n"; const auto args_map = ParseArgs(line_commands, argc, argv); if (args_map.empty() || Contains(args_map, "-h")){ From 55dd436f8e198193f499c3ef1840938bb8a8249c Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Sat, 14 Sep 2024 01:36:13 +0200 Subject: [PATCH 48/91] fix C++20 issue: template-id not allowed for constructor/destructor in C++20 (#5268) --- Source/NonlinearSolvers/JacobianFunctionMF.H | 4 ++-- Source/NonlinearSolvers/NewtonSolver.H | 4 ++-- Source/NonlinearSolvers/NonlinearSolver.H | 4 ++-- Source/NonlinearSolvers/PicardSolver.H | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Source/NonlinearSolvers/JacobianFunctionMF.H b/Source/NonlinearSolvers/JacobianFunctionMF.H index 823523df23c..d5c2b6cbac9 100644 --- 
a/Source/NonlinearSolvers/JacobianFunctionMF.H +++ b/Source/NonlinearSolvers/JacobianFunctionMF.H @@ -21,8 +21,8 @@ class JacobianFunctionMF using RT = typename T::value_type; - JacobianFunctionMF() = default; - ~JacobianFunctionMF() = default; + JacobianFunctionMF() = default; + ~JacobianFunctionMF() = default; // Default move and copy operations JacobianFunctionMF(const JacobianFunctionMF&) = default; diff --git a/Source/NonlinearSolvers/NewtonSolver.H b/Source/NonlinearSolvers/NewtonSolver.H index 814ef2b8dc3..742e139a5f5 100644 --- a/Source/NonlinearSolvers/NewtonSolver.H +++ b/Source/NonlinearSolvers/NewtonSolver.H @@ -28,9 +28,9 @@ class NewtonSolver : public NonlinearSolver { public: - NewtonSolver() = default; + NewtonSolver() = default; - ~NewtonSolver() override = default; + ~NewtonSolver() override = default; // Prohibit Move and Copy operations NewtonSolver(const NewtonSolver&) = delete; diff --git a/Source/NonlinearSolvers/NonlinearSolver.H b/Source/NonlinearSolvers/NonlinearSolver.H index 5587826474c..6e64f1eb113 100644 --- a/Source/NonlinearSolvers/NonlinearSolver.H +++ b/Source/NonlinearSolvers/NonlinearSolver.H @@ -28,9 +28,9 @@ class NonlinearSolver { public: - NonlinearSolver() = default; + NonlinearSolver() = default; - virtual ~NonlinearSolver() = default; + virtual ~NonlinearSolver() = default; // Prohibit Move and Copy operations NonlinearSolver(const NonlinearSolver&) = delete; diff --git a/Source/NonlinearSolvers/PicardSolver.H b/Source/NonlinearSolvers/PicardSolver.H index 4eed4d6c2e0..f6c47c4f4bc 100644 --- a/Source/NonlinearSolvers/PicardSolver.H +++ b/Source/NonlinearSolvers/PicardSolver.H @@ -26,9 +26,9 @@ class PicardSolver : public NonlinearSolver { public: - PicardSolver() = default; + PicardSolver() = default; - ~PicardSolver() override = default; + ~PicardSolver() override = default; // Prohibit Move and Copy operations PicardSolver(const PicardSolver&) = delete; From 0fc5fc1a4832cc8daabf78b4908de28732ce6ed3 Mon Sep 17 00:00:00 2001 From: Olga Shapoval <30510597+oshapoval@users.noreply.github.com> Date: Fri, 13 Sep 2024 17:02:45 -0700 Subject: [PATCH 49/91] Docs: add documentation on PSATD-JRhom (#5247) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Edoardo Zoni --- Docs/source/latex_theory/allbibs.bib | 15 ++++++ Docs/source/theory/pic.rst | 77 ++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+) diff --git a/Docs/source/latex_theory/allbibs.bib b/Docs/source/latex_theory/allbibs.bib index 62810ca5d6a..e44ab5cf112 100644 --- a/Docs/source/latex_theory/allbibs.bib +++ b/Docs/source/latex_theory/allbibs.bib @@ -2188,6 +2188,21 @@ @book{godfrey1985iprop year = {1985} } +@article{shapovalPRE2024, +author = {Shapoval, Olga and Zoni, Edoardo and Lehe, Remi and Thevenet, Maxence and Vay, Jean-Luc}, +doi = {10.1103/PhysRevE.110.025206}, +issue = {2}, +journal = {Phys. Rev. 
E},
+month = {Aug},
+numpages = {19},
+pages = {025206},
+publisher = {American Physical Society},
+title = {{Pseudospectral particle-in-cell formulation with arbitrary charge and current-density time dependencies for the modeling of relativistic plasmas}},
+url = {https://link.aps.org/doi/10.1103/PhysRevE.110.025206},
+volume = {110},
+year = {2024}
+}
+
 @article{Ammosov1986,
 title = {Tunnel ionization of complex atoms and of atomic ions in an alternating electromagnetic field},
 volume = {64},
diff --git a/Docs/source/theory/pic.rst b/Docs/source/theory/pic.rst
index 820cdba50e6..8356ba9f0f8 100644
--- a/Docs/source/theory/pic.rst
+++ b/Docs/source/theory/pic.rst
@@ -434,6 +434,83 @@ of this model can be found in the section

 .. _current_deposition:

+Pseudo-Spectral Analytical Time Domain with arbitrary charge and current-density time dependencies (PSATD-JRhom)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In :cite:`pt-shapovalPRE2024` we introduce a formulation of the particle-in-cell (PIC) method for the modeling of relativistic plasmas, which leverages the ability of the pseudo-spectral analytical time-domain solver (PSATD) to handle arbitrary time dependencies of the charge and current densities during one PIC cycle (up to second-order polynomial dependencies here).
+The formulation is applied to a modified set of Maxwell's equations, which in Fourier space reads
+
+.. math::
+
+   \begin{align}
+   \frac{\partial\boldsymbol{\widetilde{E}}}{\partial t} & = ic^2\boldsymbol{k}\times\boldsymbol{\widetilde{B}} - \frac{\boldsymbol{\widetilde{J}}}{\varepsilon_0} + ic^2\boldsymbol{k}{\widetilde{F}} \,, \\
+   \frac{\partial\boldsymbol{\widetilde{B}}}{\partial t} & = -i\boldsymbol{k}\times\boldsymbol{\widetilde{E}} \,, \\
+   \frac{\partial{\widetilde{F}}}{\partial t} & = i\boldsymbol{k}\cdot\boldsymbol{\widetilde{E}} - \frac{\widetilde{\rho}}{\varepsilon_0} \,.
+   \end{align}
+
+Here, in addition to the usual Maxwell-Faraday and Ampere-Maxwell equations, the system contains an extra equation for the scalar field :math:`\widetilde{F}`, which propagates deviations to Gauss' law (if Gauss' law is satisfied in the PIC simulation, :math:`\widetilde{F}=0` and the modified Maxwell's equations reduce to the standard Maxwell's equations).
+These additional terms were introduced in :cite:p:`pt-Vayfed1996,pt-Munzjcp2000` from the potential formulation in the Lorenz gauge and used as a propagative divergence-cleaning procedure, as an alternative to the diffusive Langdon-Marder :cite:p:`pt-Langdoncpc92,pt-Marderjcp87` procedures.
+The above-mentioned earlier works :cite:p:`pt-Vayfed1996,pt-Munzjcp2000` considered this formulation in the context of the standard PIC method using FDTD discretization, while the PSATD-JRhom method introduced in :cite:`pt-shapovalPRE2024` exploits the PSATD discretization of the modified Maxwell's equations.
+In contrast to the standard PSATD algorithm :cite:p:`pt-VayJCP2013`, where :math:`\boldsymbol{\widetilde{J}}` is assumed to be constant and :math:`\widetilde{\rho}` linear in time within a given time step :math:`\Delta t`, PSATD-JRhom allows for more general time dependencies of :math:`\boldsymbol{\widetilde{J}}` and :math:`\widetilde{\rho}` within one time step, which can be divided into :math:`m` subintervals of equal size :math:`\delta t = \Delta t/m`.
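+
+In WarpX, these time dependencies and the number of subintervals are selected at runtime.
+As a rough sketch only (the parameter names below follow the WarpX inputs documentation,
+but the exact names and the set of supported values should be checked against the version
+in use), the LL2 variant of the diagram below might be requested with:
+
+.. code-block:: text
+
+   psatd.J_in_time = linear               # time dependency of J within a step
+   psatd.rho_in_time = linear             # time dependency of rho within a step
+   warpx.do_multi_J = 1                   # sub-divide each PIC step
+   warpx.do_multi_J_n_depositions = 2     # number of subintervals m
+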
+During these subintervals, :math:`\boldsymbol{\widetilde{J}}` and :math:`\widetilde{\rho}` are considered to be either **piecewise constant** (macroparticles deposit their density in the middle of each time subinterval), **piecewise linear**, or **piecewise quadratic** (in the latter two cases, macroparticles deposit their density at the edges of each time subinterval) in time.
+
+.. _fig-psatd_jrhom:
+
+.. figure:: https://gist.githubusercontent.com/oshapoval/88a73cada764364ad4ffce13563cedf1/raw/697ce1897cde0416bebdde8f1c1e8fcf859cb419/psatd_jrhom.png
+   :alt: figure not found, caption only
+
+   Diagrams illustrating various time dependencies of the current density :math:`\boldsymbol{\widetilde{J}}` and charge density :math:`\widetilde{\rho}`: constant :math:`\boldsymbol{\widetilde{J}}` with linear :math:`\widetilde{\rho}` (CL), both constant (CC), both linear (LL), and both quadratic (QQ), with :math:`m` subintervals: (first column) :math:`m=1`, (second) :math:`m=2` and (third) :math:`m=4`. CL1 corresponds to the standard PSATD PIC method. The triangle and circle glyphs represent the times at which the macroparticles deposit :math:`\boldsymbol{\widetilde{J}}` and :math:`\widetilde{\rho}` on the grid, respectively. The dashed and solid lines represent the assumed time dependency of :math:`\boldsymbol{\widetilde{J}}` and :math:`\widetilde{\rho}` within one time step, when integrating the Maxwell equations analytically.
+
+Using the piecewise definition of :math:`\widetilde{\rho}` and :math:`\boldsymbol{\widetilde{J}}`, the modified Maxwell's equations can be integrated analytically over one time step :math:`\Delta t`, i.e., from :math:`t=n\Delta t` to :math:`t=(n+1)\Delta t`.
+In practice, this is done by sequentially integrating these equations over each subinterval :math:`\ell \in [0,m-1]`.
+The final discretized equations read:
+
+.. math::
+
+   \begin{align}
+   \begin{split}
+   \boldsymbol{\widetilde{E}}^{n+(\ell+1)/m} & = C{\boldsymbol{\widetilde{E}}}^{n+\ell/m}+ic^2\frac{S}{ck}\boldsymbol{k}\times{\boldsymbol{\widetilde{B}}}^{n+\ell/m}+ic^2\frac{S}{ck}\widetilde{F}^{n+\ell/m}\boldsymbol{k} \\
+   &\quad + \frac{1}{\varepsilon_0 ck}\left(Y_3\boldsymbol{a_J} + Y_2\boldsymbol{b_J} - S\boldsymbol{c_J}\right)
+   + \frac{ic^2}{\varepsilon_0 c^2k^2}\left({Y_1}a_{\rho}-Y_{5}b_{\rho}-Y_{4}c_{\rho}\right)\boldsymbol{k},
+   \end{split}
+   \\[4pt]
+   \begin{split}
+   \boldsymbol{\widetilde{B}}^{n+(\ell+1)/m} & = C {\boldsymbol{\widetilde{B}}}^{n+\ell/m}-i\frac{S}{ck}\boldsymbol{k}\times{\boldsymbol{\widetilde{E}}}^{n+\ell/m} - \frac{i}{\varepsilon_0 c^2k^2}\boldsymbol{k}\times\left(Y_1\boldsymbol{a_J} -Y_5\boldsymbol{b_J} -Y_4\boldsymbol{c_J} \right),
+   \end{split}
+   \\[4pt]
+   \begin{split}
+   \widetilde{F}^{n+(\ell+1)/m} & = C \widetilde{F}^{n+\ell/m}+i\frac{S}{ck}\boldsymbol{k} \cdot {\boldsymbol{\widetilde{E}}}^{n+\ell/m}+\frac{i}{\varepsilon_0 c^2k^2}\boldsymbol{k}\cdot\left(Y_1\boldsymbol{a_J}-Y_5\boldsymbol{b_J}-Y_4\boldsymbol{c_J}\right) \\
+   &\quad + \frac{1}{\varepsilon_0 ck}\left({Y_3}a_{\rho}+{Y_2}b_{\rho}-Sc_{\rho}\right),
+   \end{split}
+   \end{align}
+
+where
+
+.. math::
+
+   \begin{aligned}
+   C &= \cos(ck\delta t), \ S = \sin(ck\delta t),
+   \\
+   Y_1 & = \frac{(1-C)(8-c^2k^2\delta t^2)-4Sck\delta t}{2 c^2 k^2 \delta t^2},
+   \\
+   Y_2 & = \frac{2(C-1)+ S ck\delta t }{2 ck\delta t},
+   \\
+   Y_3 & = \frac{S(8- c^2k^2\delta t^2 ) - 4ck\delta t(1+C)}{2c^2 k^2 \delta t^2},
+   \\
+   Y_4 &= (1-C), \ Y_5 = \frac{(1+C) ck\delta t - 2S}{2ck \delta t}.
+ \end{aligned} + +Here, :math:`\boldsymbol{a_J}, \boldsymbol{b_J}, \boldsymbol{c_J}, a_{\rho}, b_{\rho}, c_{\rho}` are polynomial coefficients based on the time dependencies of the current and charge densities, as shown in the following table: + +.. _fig-j_rho_table: + +.. figure:: + https://gist.githubusercontent.com/oshapoval/88a73cada764364ad4ffce13563cedf1/raw/ebc249f8e875a952c65a5319fd523821baccfd5a/j_rho_table.png + :alt: figure not found, caption only + + Polynomial coefficients based on the time dependency of the current and charge densities :math:`{\boldsymbol{\widetilde{J}}}(t)` and :math:`\widetilde{\rho}(t)` over one time subinterval, :math:`\delta t = \Delta t/m`. + +Detailed analysis and tests revealed that, under certain conditions, the formulation can expand the range of numerical parameters under which PIC simulations are stable and accurate when modeling relativistic plasmas, such as, e.g., plasma-based particle accelerators. + Current deposition ------------------ From 01a6d1891f1c94e935e2c8ff4481abd6edc78f96 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Fri, 13 Sep 2024 17:43:35 -0700 Subject: [PATCH 50/91] ES solver cleanup - 2024 hackathon (#5241) * initial commit with start of new BoundaryHandler class * renaming BoundaryHandler to PoissonBoundaryHandler, defining new base class ES and derived ExplicitES * snake case for poissonboundaryhandler * adding files for existing ES solvers * add `m_electrostatic_solver` to WarpX.cpp * create base class for ElectrostaticSolvers * continue refining electrostatic base class * more refinements on the base electrostatic solver * add relativistic ES solver * remove constr and readparam * moving functions to lab frame ES * cmakelist and make package * various bug fixes * more bug fixes * fixing compilation errors * warpx object for dmap * add getMaxNormRho function * more bug fixes * moved calculation of beta, check if fft and check if 3d out of loop over levels * swap calculation of beta, and check if fft and check if 3d * dx_igf becomes dx_scaled, remove duplicate code to calculate dx_scaled * more progress on bug fixing * more bug fixes * getting close... 
* adds interpolatePhiBetweenLevels * yeeaaassss * a few warnings * fix various warnings * fix max_norm_b * more warnings * remove comments * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * more fixes * move `ReadParameters` to base class for electrostatic solvers * force `ComputeSpaceCharge` to be implemented in child classes * remove trampoline class * fix condition for mfl access * reuse finest_level and ng * remove namespace ablastr::constant::SI * pybuild fixes * fix for initial ES solve * fix bugs * add back bool to check if any potentials are set * second try for fixing potential specified check * comments for base class * include in make packag * bug fixes with number of levels in ES solvers * another fix for `lev <= num_levels` * more doxygen comments * adding defaults back for toleance, max iters, verbosity * add doxygen to PoissonSolver.H * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * oops values were defined, still amrex namespace was complaining in ci and const for required precision * removing default for beta as well * call `DefinePhiBCs` when ES solver is relativistic * fix build failures * always run solve when ES relativistic is used * fix double counting of E-field in relativistic solves * removing tmp file * pass geom ref * attempt to fix clang-tidy errors * second attempt to fix clang-tidy issue * minor clean-ups Signed-off-by: roelof-groenewald * remove var not used anymore * clean up definition * cosmetic indentation * Update Source/ablastr/fields/PoissonSolver.H Remi's suggestion Co-authored-by: Remi Lehe * Update Source/ablastr/fields/PoissonSolver.H Co-authored-by: Remi Lehe * clean and remove empty functions and calls to these functions --------- Signed-off-by: roelof-groenewald Co-authored-by: RevathiJambunathan Co-authored-by: Arianna Formenti Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Co-authored-by: Remi Lehe --- Source/FieldSolver/CMakeLists.txt | 3 +- Source/FieldSolver/ElectrostaticSolver.H | 125 -- Source/FieldSolver/ElectrostaticSolver.cpp | 1147 ----------------- .../ElectrostaticSolvers/CMakeLists.txt | 10 + .../ElectrostaticSolver.H | 163 +++ .../ElectrostaticSolver.cpp | 533 ++++++++ .../ElectrostaticSolver_fwd.H | 15 + .../ElectrostaticSolvers/LabFrameExplicitES.H | 42 + .../LabFrameExplicitES.cpp | 256 ++++ .../ElectrostaticSolvers/Make.package | 6 + .../PoissonBoundaryHandler.H | 142 ++ .../PoissonBoundaryHandler.cpp | 187 +++ .../RelativisticExplicitES.H | 84 ++ .../RelativisticExplicitES.cpp | 168 +++ .../MagnetostaticSolver.cpp | 7 +- Source/FieldSolver/Make.package | 2 + Source/FieldSolver/WarpXSolveFieldsES.cpp | 30 + Source/Initialization/WarpXInitData.cpp | 3 + Source/Python/WarpX.cpp | 18 +- Source/WarpX.H | 37 +- Source/WarpX.cpp | 67 +- Source/ablastr/fields/PoissonSolver.H | 225 ++-- 22 files changed, 1814 insertions(+), 1456 deletions(-) delete mode 100644 Source/FieldSolver/ElectrostaticSolver.H delete mode 100644 Source/FieldSolver/ElectrostaticSolver.cpp create mode 100644 Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt create mode 100644 Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H create mode 100644 Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp create mode 100644 Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver_fwd.H create mode 
100644 Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H create mode 100644 Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp create mode 100644 Source/FieldSolver/ElectrostaticSolvers/Make.package create mode 100644 Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.H create mode 100644 Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.cpp create mode 100644 Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H create mode 100644 Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp create mode 100644 Source/FieldSolver/WarpXSolveFieldsES.cpp diff --git a/Source/FieldSolver/CMakeLists.txt b/Source/FieldSolver/CMakeLists.txt index 859117eb214..896b3f04fc2 100644 --- a/Source/FieldSolver/CMakeLists.txt +++ b/Source/FieldSolver/CMakeLists.txt @@ -2,13 +2,14 @@ foreach(D IN LISTS WarpX_DIMS) warpx_set_suffix_dims(SD ${D}) target_sources(lib_${SD} PRIVATE - ElectrostaticSolver.cpp WarpXPushFieldsEM.cpp WarpXPushFieldsHybridPIC.cpp WarpX_QED_Field_Pushers.cpp + WarpXSolveFieldsES.cpp ) endforeach() +add_subdirectory(ElectrostaticSolvers) add_subdirectory(FiniteDifferenceSolver) add_subdirectory(MagnetostaticSolver) add_subdirectory(ImplicitSolvers) diff --git a/Source/FieldSolver/ElectrostaticSolver.H b/Source/FieldSolver/ElectrostaticSolver.H deleted file mode 100644 index c5c2ce8b6eb..00000000000 --- a/Source/FieldSolver/ElectrostaticSolver.H +++ /dev/null @@ -1,125 +0,0 @@ -/* Copyright 2021 Modern Electron - * - * This file is part of WarpX. - * - * License: BSD-3-Clause-LBNL - */ -#ifndef WARPX_ELECTROSTATICSOLVER_H_ -#define WARPX_ELECTROSTATICSOLVER_H_ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - - -namespace ElectrostaticSolver { - - struct PhiCalculatorEB { - - amrex::Real t; - amrex::ParserExecutor<4> potential_eb; - - AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE - amrex::Real operator()(const amrex::Real x, const amrex::Real z) const noexcept { - using namespace amrex::literals; - return potential_eb(x, 0.0_rt, z, t); - } - - AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE - amrex::Real operator()(const amrex::Real x, const amrex::Real y, const amrex::Real z) const noexcept { - return potential_eb(x, y, z, t); - } - }; - - class PoissonBoundaryHandler { - public: - - amrex::Array lobc, hibc; - bool bcs_set = false; - std::array dirichlet_flag; - bool has_non_periodic = false; - bool phi_EB_only_t = true; - - void definePhiBCs (const amrex::Geometry& geom); - - void buildParsers (); - void buildParsersEB (); - - /* \brief Sets the EB potential string and updates the function parser - * - * \param [in] potential The string value of the potential - */ - void setPotentialEB(const std::string& potential) { - potential_eb_str = potential; - buildParsersEB(); - } - - [[nodiscard]] PhiCalculatorEB - getPhiEB(amrex::Real t) const noexcept - { - return PhiCalculatorEB{t, potential_eb}; - } - - // set default potentials to zero in order for current tests to pass - // but forcing the user to specify a potential might be better - std::string potential_xlo_str = "0"; - std::string potential_xhi_str = "0"; - std::string potential_ylo_str = "0"; - std::string potential_yhi_str = "0"; - std::string potential_zlo_str = "0"; - std::string potential_zhi_str = "0"; - std::string potential_eb_str = "0"; - - amrex::ParserExecutor<1> potential_xlo; - amrex::ParserExecutor<1> potential_xhi; - amrex::ParserExecutor<1> potential_ylo; - amrex::ParserExecutor<1> potential_yhi; - 
amrex::ParserExecutor<1> potential_zlo; - amrex::ParserExecutor<1> potential_zhi; - amrex::ParserExecutor<1> potential_eb_t; - amrex::ParserExecutor<4> potential_eb; - - private: - - amrex::Parser potential_xlo_parser; - amrex::Parser potential_xhi_parser; - amrex::Parser potential_ylo_parser; - amrex::Parser potential_yhi_parser; - amrex::Parser potential_zlo_parser; - amrex::Parser potential_zhi_parser; - amrex::Parser potential_eb_parser; - }; - - /** use amrex to directly calculate the electric field since with EB's the - * - * simple finite difference scheme in WarpX::computeE sometimes fails - */ - class EBCalcEfromPhiPerLevel { - private: - amrex::Vector< - amrex::Array - > m_e_field; - - public: - EBCalcEfromPhiPerLevel(amrex::Vector > e_field) - : m_e_field(std::move(e_field)) {} - - void operator()(amrex::MLMG & mlmg, int const lev) { - using namespace amrex::literals; - - mlmg.getGradSolution({m_e_field[lev]}); - for (auto &field: m_e_field[lev]) { - field->mult(-1._rt); - } - } - }; -} // namespace ElectrostaticSolver - -#endif // WARPX_ELECTROSTATICSOLVER_H_ diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp deleted file mode 100644 index 22d1c663a53..00000000000 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ /dev/null @@ -1,1147 +0,0 @@ -/* Copyright 2019 Remi Lehe - * - * This file is part of WarpX. - * - * License: BSD-3-Clause-LBNL - */ -#include "WarpX.H" - -#include "FieldSolver/ElectrostaticSolver.H" -#include "EmbeddedBoundary/Enabled.H" -#include "Fluids/MultiFluidContainer.H" -#include "Fluids/WarpXFluidContainer.H" -#include "Parallelization/GuardCellManager.H" -#include "Particles/MultiParticleContainer.H" -#include "Particles/WarpXParticleContainer.H" -#include "Python/callbacks.H" -#include "Utils/Parser/ParserUtils.H" -#include "Utils/WarpXAlgorithmSelection.H" -#include "Utils/WarpXConst.H" -#include "Utils/TextMsg.H" -#include "Utils/WarpXProfilerWrapper.H" - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef AMREX_USE_EB -# include -#endif - -#include -#include -#include - -using namespace amrex; -using namespace warpx::fields; - -void -WarpX::ComputeSpaceChargeField (bool const reset_fields) -{ - WARPX_PROFILE("WarpX::ComputeSpaceChargeField"); - if (reset_fields) { - // Reset all E and B fields to 0, before calculating space-charge fields - WARPX_PROFILE("WarpX::ComputeSpaceChargeField::reset_fields"); - for (int lev = 0; lev <= max_level; lev++) { - for (int comp=0; comp<3; comp++) { - Efield_fp[lev][comp]->setVal(0); - Bfield_fp[lev][comp]->setVal(0); - } - } - } - - if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || - electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) { - AddSpaceChargeFieldLabFrame(); - } - else { - // Loop over the species and add their space-charge contribution to E and B. 
- // Note that the fields calculated here does not include the E field - // due to simulation boundary potentials - for (int ispecies=0; ispeciesnSpecies(); ispecies++){ - WarpXParticleContainer& species = mypc->GetParticleContainer(ispecies); - if (species.initialize_self_fields || - (electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic)) { - AddSpaceChargeField(species); - } - } - - // Add the field due to the boundary potentials - if (m_boundary_potential_specified || - (electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic)){ - AddBoundaryField(); - } - } -} - -/* Compute the potential `phi` by solving the Poisson equation with the - simulation specific boundary conditions and boundary values, then add the - E field due to that `phi` to `Efield_fp`. -*/ -void -WarpX::AddBoundaryField () -{ - WARPX_PROFILE("WarpX::AddBoundaryField"); - - // Store the boundary conditions for the field solver if they haven't been - // stored yet - if (!m_poisson_boundary_handler.bcs_set) { - m_poisson_boundary_handler.definePhiBCs(Geom(0)); - } - - // Allocate fields for charge and potential - const int num_levels = max_level + 1; - Vector > rho(num_levels); - Vector > phi(num_levels); - // Use number of guard cells used for local deposition of rho - const amrex::IntVect ng = guard_cells.ng_depos_rho; - for (int lev = 0; lev <= max_level; lev++) { - BoxArray nba = boxArray(lev); - nba.surroundingNodes(); - rho[lev] = std::make_unique(nba, DistributionMap(lev), 1, ng); - rho[lev]->setVal(0.); - phi[lev] = std::make_unique(nba, DistributionMap(lev), 1, 1); - phi[lev]->setVal(0.); - } - - // Set the boundary potentials appropriately - setPhiBC(phi); - - // beta is zero for boundaries - const std::array beta = {0._rt}; - - // Compute the potential phi, by solving the Poisson equation - computePhi( rho, phi, beta, self_fields_required_precision, - self_fields_absolute_tolerance, self_fields_max_iters, - self_fields_verbosity ); - - // Compute the corresponding electric and magnetic field, from the potential phi. 
- computeE( Efield_fp, phi, beta ); - computeB( Bfield_fp, phi, beta ); -} - -void -WarpX::AddSpaceChargeField (WarpXParticleContainer& pc) -{ - WARPX_PROFILE("WarpX::AddSpaceChargeField"); - - if (pc.getCharge() == 0) { - return; - } - - // Store the boundary conditions for the field solver if they haven't been - // stored yet - if (!m_poisson_boundary_handler.bcs_set) { - m_poisson_boundary_handler.definePhiBCs(Geom(0)); - } - -#ifdef WARPX_DIM_RZ - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(n_rz_azimuthal_modes == 1, - "Error: RZ electrostatic only implemented for a single mode"); -#endif - - // Allocate fields for charge and potential - const int num_levels = max_level + 1; - Vector > rho(num_levels); - Vector > rho_coarse(num_levels); // Used in order to interpolate between levels - Vector > phi(num_levels); - // Use number of guard cells used for local deposition of rho - const amrex::IntVect ng = guard_cells.ng_depos_rho; - for (int lev = 0; lev <= max_level; lev++) { - BoxArray nba = boxArray(lev); - nba.surroundingNodes(); - rho[lev] = std::make_unique(nba, DistributionMap(lev), 1, ng); - rho[lev]->setVal(0.); - phi[lev] = std::make_unique(nba, DistributionMap(lev), 1, 1); - phi[lev]->setVal(0.); - if (lev > 0) { - // For MR levels: allocated the coarsened version of rho - BoxArray cba = nba; - cba.coarsen(refRatio(lev-1)); - rho_coarse[lev] = std::make_unique(cba, DistributionMap(lev), 1, ng); - rho_coarse[lev]->setVal(0.); - } - } - - // Deposit particle charge density (source of Poisson solver) - // The options below are identical to those in MultiParticleContainer::DepositCharge - bool const local = true; - bool const reset = false; - bool const apply_boundary_and_scale_volume = true; - bool const interpolate_across_levels = false; - if ( !pc.do_not_deposit) { - pc.DepositCharge(rho, local, reset, apply_boundary_and_scale_volume, - interpolate_across_levels); - } - for (int lev = 0; lev <= max_level; lev++) { - if (lev > 0) { - if (charge_buf[lev]) { - charge_buf[lev]->setVal(0.); - } - } - } - SyncRho(rho, rho_coarse, charge_buf); // Apply filter, perform MPI exchange, interpolate across levels - - // Get the particle beta vector - bool const local_average = false; // Average across all MPI ranks - std::array beta_pr = pc.meanParticleVelocity(local_average); - std::array beta; - for (int i=0 ; i < static_cast(beta.size()) ; i++) { - beta[i] = beta_pr[i]/PhysConst::c; // Normalize - } - - // Compute the potential phi, by solving the Poisson equation - computePhi( rho, phi, beta, pc.self_fields_required_precision, - pc.self_fields_absolute_tolerance, pc.self_fields_max_iters, - pc.self_fields_verbosity ); - - // Compute the corresponding electric and magnetic field, from the potential phi - computeE( Efield_fp, phi, beta ); - computeB( Bfield_fp, phi, beta ); - -} - -void -WarpX::AddSpaceChargeFieldLabFrame () -{ - WARPX_PROFILE("WarpX::AddSpaceChargeFieldLabFrame"); - - // Store the boundary conditions for the field solver if they haven't been - // stored yet - if (!m_poisson_boundary_handler.bcs_set) { - m_poisson_boundary_handler.definePhiBCs(Geom(0)); - } - -#ifdef WARPX_DIM_RZ - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(n_rz_azimuthal_modes == 1, - "Error: RZ electrostatic only implemented for a single mode"); -#endif - - // Deposit particle charge density (source of Poisson solver) - mypc->DepositCharge(rho_fp, 0.0_rt); - if (do_fluid_species) { - int const lev = 0; - myfl->DepositCharge( lev, *rho_fp[lev] ); - } - for (int lev = 0; lev <= max_level; lev++) { - if (lev > 0) { - if 
(charge_buf[lev]) { - charge_buf[lev]->setVal(0.); - } - } - } - SyncRho(rho_fp, rho_cp, charge_buf); // Apply filter, perform MPI exchange, interpolate across levels -#ifndef WARPX_DIM_RZ - for (int lev = 0; lev <= finestLevel(); lev++) { - // Reflect density over PEC boundaries, if needed. - ApplyRhofieldBoundary(lev, rho_fp[lev].get(), PatchType::fine); - } -#endif - - // beta is zero in lab frame - // Todo: use simpler finite difference form with beta=0 - const std::array beta = {0._rt}; - - // set the boundary potentials appropriately - setPhiBC(phi_fp); - - // Compute the potential phi, by solving the Poisson equation - if (IsPythonCallbackInstalled("poissonsolver")) { - - // Use the Python level solver (user specified) - ExecutePythonCallback("poissonsolver"); - - } else { - -#if defined(WARPX_DIM_1D_Z) - // Use the tridiag solver with 1D - computePhiTriDiagonal(rho_fp, phi_fp); -#else - // Use the AMREX MLMG or the FFT (IGF) solver otherwise - computePhi(rho_fp, phi_fp, beta, self_fields_required_precision, - self_fields_absolute_tolerance, self_fields_max_iters, - self_fields_verbosity); -#endif - - } - - // Compute the electric field. Note that if an EB is used the electric - // field will be calculated in the computePhi call. - if (!EB::enabled()) { computeE( Efield_fp, phi_fp, beta ); } - else { - if (IsPythonCallbackInstalled("poissonsolver")) { computeE(Efield_fp, phi_fp, beta); } - } - - // Compute the magnetic field - computeB( Bfield_fp, phi_fp, beta ); -} - -/* Compute the potential `phi` by solving the Poisson equation with `rho` as - a source, assuming that the source moves at a constant speed \f$\vec{\beta}\f$. - This uses the amrex solver. - - More specifically, this solves the equation - \f[ - \vec{\nabla}^2 r \phi - (\vec{\beta}\cdot\vec{\nabla})^2 r \phi = -\frac{r \rho}{\epsilon_0} - \f] - - \param[in] rho The charge density a given species - \param[out] phi The potential to be computed by this function - \param[in] beta Represents the velocity of the source of `phi` - \param[in] required_precision The relative convergence threshold for the MLMG solver - \param[in] absolute_tolerance The absolute convergence threshold for the MLMG solver - \param[in] max_iters The maximum number of iterations allowed for the MLMG solver - \param[in] verbosity The verbosity setting for the MLMG solver -*/ -void -WarpX::computePhi (const amrex::Vector >& rho, - amrex::Vector >& phi, - std::array const beta, - Real const required_precision, - Real absolute_tolerance, - int const max_iters, - int const verbosity) const { - // create a vector to our fields, sorted by level - amrex::Vector sorted_rho; - amrex::Vector sorted_phi; - for (int lev = 0; lev <= finest_level; ++lev) { - sorted_rho.emplace_back(rho[lev].get()); - sorted_phi.emplace_back(phi[lev].get()); - } - - std::optional post_phi_calculation; -#ifdef AMREX_USE_EB - std::optional > eb_farray_box_factory; -#else - std::optional > const eb_farray_box_factory; -#endif - if (EB::enabled()) - { - // EB: use AMReX to directly calculate the electric field since with EB's the - // simple finite difference scheme in WarpX::computeE sometimes fails - if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || - electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) - { - // TODO: maybe make this a helper function or pass Efield_fp directly - amrex::Vector< - amrex::Array - > e_field; - for (int lev = 0; lev <= finest_level; ++lev) { - e_field.push_back( -# if defined(WARPX_DIM_1D_Z) - amrex::Array{ - 
getFieldPointer(FieldType::Efield_fp, lev, 2) - } -# elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::Array{ - getFieldPointer(FieldType::Efield_fp, lev, 0), - getFieldPointer(FieldType::Efield_fp, lev, 2) - } -# elif defined(WARPX_DIM_3D) - amrex::Array{ - getFieldPointer(FieldType::Efield_fp, lev, 0), - getFieldPointer(FieldType::Efield_fp, lev, 1), - getFieldPointer(FieldType::Efield_fp, lev, 2) - } -# endif - ); - } - post_phi_calculation = ElectrostaticSolver::EBCalcEfromPhiPerLevel(e_field); - } - -#ifdef AMREX_USE_EB - amrex::Vector< - amrex::EBFArrayBoxFactory const * - > factories; - for (int lev = 0; lev <= finest_level; ++lev) { - factories.push_back(&WarpX::fieldEBFactory(lev)); - } - eb_farray_box_factory = factories; -#endif - } - - bool const is_solver_igf_on_lev0 = - WarpX::poisson_solver_id == PoissonSolverAlgo::IntegratedGreenFunction; - - ablastr::fields::computePhi( - sorted_rho, - sorted_phi, - beta, - required_precision, - absolute_tolerance, - max_iters, - verbosity, - this->geom, - this->dmap, - this->grids, - WarpX::grid_type, - this->m_poisson_boundary_handler, - is_solver_igf_on_lev0, - EB::enabled(), - WarpX::do_single_precision_comms, - this->ref_ratio, - post_phi_calculation, - gett_new(0), - eb_farray_box_factory - ); - -} - - -/* \brief Set Dirichlet boundary conditions for the electrostatic solver. - - The given potential's values are fixed on the boundaries of the given - dimension according to the desired values from the simulation input file, - boundary.potential_lo and boundary.potential_hi. - - \param[inout] phi The electrostatic potential - \param[in] idim The dimension for which the Dirichlet boundary condition is set -*/ -void -WarpX::setPhiBC ( amrex::Vector>& phi ) const -{ - // check if any dimension has non-periodic boundary conditions - if (!m_poisson_boundary_handler.has_non_periodic) { return; } - - // get the boundary potentials at the current time - amrex::Array phi_bc_values_lo; - amrex::Array phi_bc_values_hi; - phi_bc_values_lo[WARPX_ZINDEX] = m_poisson_boundary_handler.potential_zlo(gett_new(0)); - phi_bc_values_hi[WARPX_ZINDEX] = m_poisson_boundary_handler.potential_zhi(gett_new(0)); -#ifndef WARPX_DIM_1D_Z - phi_bc_values_lo[0] = m_poisson_boundary_handler.potential_xlo(gett_new(0)); - phi_bc_values_hi[0] = m_poisson_boundary_handler.potential_xhi(gett_new(0)); -#endif -#if defined(WARPX_DIM_3D) - phi_bc_values_lo[1] = m_poisson_boundary_handler.potential_ylo(gett_new(0)); - phi_bc_values_hi[1] = m_poisson_boundary_handler.potential_yhi(gett_new(0)); -#endif - - auto dirichlet_flag = m_poisson_boundary_handler.dirichlet_flag; - - // loop over all mesh refinement levels and set the boundary values - for (int lev=0; lev <= max_level; lev++) { - - amrex::Box domain = Geom(lev).Domain(); - domain.surroundingNodes(); - -#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif - for ( MFIter mfi(*phi[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { - // Extract the potential - auto phi_arr = phi[lev]->array(mfi); - // Extract tileboxes for which to loop - const Box& tb = mfi.tilebox( phi[lev]->ixType().toIntVect() ); - - // loop over dimensions - for (int idim=0; idim, 3> >& E, - const amrex::Vector >& phi, - std::array const beta ) const -{ - for (int lev = 0; lev <= max_level; lev++) { - - const Real* dx = Geom(lev).CellSize(); - -#ifdef AMREX_USE_OMP -# pragma omp parallel if (Gpu::notInLaunchRegion()) -#endif - for ( MFIter mfi(*phi[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi ) - { -#if 
defined(WARPX_DIM_3D) - const Real inv_dx = 1._rt/dx[0]; - const Real inv_dy = 1._rt/dx[1]; - const Real inv_dz = 1._rt/dx[2]; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const Real inv_dx = 1._rt/dx[0]; - const Real inv_dz = 1._rt/dx[1]; -#else - const Real inv_dz = 1._rt/dx[0]; -#endif - const amrex::IntVect ex_type = E[lev][0]->ixType().toIntVect(); - const amrex::IntVect ey_type = E[lev][1]->ixType().toIntVect(); - const amrex::IntVect ez_type = E[lev][2]->ixType().toIntVect(); - - const amrex::Box& tbx = mfi.tilebox(ex_type); - const amrex::Box& tby = mfi.tilebox(ey_type); - const amrex::Box& tbz = mfi.tilebox(ez_type); - - const auto& phi_arr = phi[lev]->array(mfi); - const auto& Ex_arr = (*E[lev][0])[mfi].array(); - const auto& Ey_arr = (*E[lev][1])[mfi].array(); - const auto& Ez_arr = (*E[lev][2])[mfi].array(); - - const Real beta_x = beta[0]; - const Real beta_y = beta[1]; - const Real beta_z = beta[2]; - - // Calculate the electric field - // Use discretized derivative that matches the staggering of the grid. - // Nodal solver - if (ex_type == amrex::IntVect::TheNodeVector() && - ey_type == amrex::IntVect::TheNodeVector() && - ez_type == amrex::IntVect::TheNodeVector()) - { -#if defined(WARPX_DIM_3D) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ex_arr(i,j,k) += - +(beta_x*beta_x-1._rt)*0.5_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k )) - + beta_x*beta_y *0.5_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k )) - + beta_x*beta_z *0.5_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ey_arr(i,j,k) += - + beta_y*beta_x *0.5_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k )) - +(beta_y*beta_y-1._rt)*0.5_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k )) - + beta_y*beta_z *0.5_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1)); }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ez_arr(i,j,k) += - + beta_z*beta_x *0.5_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k )) - + beta_z*beta_y *0.5_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k )) - +(beta_z*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1)); - } - ); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ex_arr(i,j,k) += - +(beta_x*beta_x-1._rt)*0.5_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) - + beta_x*beta_z *0.5_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ey_arr(i,j,k) += - +beta_x*beta_y*0.5_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) - +beta_y*beta_z*0.5_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ez_arr(i,j,k) += - + beta_z*beta_x *0.5_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) - +(beta_z*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)); - } - ); -#else - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ex_arr(i,j,k) += - +(beta_x*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ey_arr(i,j,k) += - +beta_y*beta_z*0.5_rt*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ez_arr(i,j,k) += - +(beta_z*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k)); - } - ); -#endif - } - else // Staggered solver - { -#if defined(WARPX_DIM_3D) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - 
Ex_arr(i,j,k) += - +(beta_x*beta_x-1._rt) *inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i ,j ,k )) - + beta_x*beta_y*0.25_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k ) - + phi_arr(i+1,j+1,k )-phi_arr(i+1,j-1,k )) - + beta_x*beta_z*0.25_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1) - + phi_arr(i+1,j ,k+1)-phi_arr(i+1,j ,k-1)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ey_arr(i,j,k) += - + beta_y*beta_x*0.25_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k ) - + phi_arr(i+1,j+1,k )-phi_arr(i-1,j+1,k )) - +(beta_y*beta_y-1._rt) *inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j ,k )) - + beta_y*beta_z*0.25_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1) - + phi_arr(i ,j+1,k+1)-phi_arr(i ,j+1,k-1)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ez_arr(i,j,k) += - + beta_z*beta_x*0.25_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k ) - + phi_arr(i+1,j ,k+1)-phi_arr(i-1,j ,k+1)) - + beta_z*beta_y*0.25_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k ) - + phi_arr(i ,j+1,k+1)-phi_arr(i ,j-1,k+1)) - +(beta_z*beta_z-1._rt) *inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k )); - } - ); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ParallelFor( tbx, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ex_arr(i,j,k) += - +(beta_x*beta_x-1._rt)*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i ,j ,k)) - +beta_x*beta_z*0.25_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k) - + phi_arr(i+1,j+1,k)-phi_arr(i+1,j-1,k)); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ez_arr(i,j,k) += - +beta_z*beta_x*0.25_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k) - + phi_arr(i+1,j+1,k)-phi_arr(i-1,j+1,k)) - +(beta_z*beta_z-1._rt)*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j ,k)); - } - ); - ignore_unused(beta_y); -#else - amrex::ParallelFor( tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Ez_arr(i,j,k) += - +(beta_z*beta_z-1._rt)*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k)); - } - ); - ignore_unused(beta_x,beta_y); -#endif - } - } - } -} - - -/* \brief Compute the magnetic field that corresponds to `phi`, and - add it to the set of MultiFab `B`. 
- - The magnetic field is calculated by assuming that the source that - produces the `phi` potential is moving with a constant speed \f$\vec{\beta}\f$: - \f[ - \vec{B} = -\frac{1}{c}\vec{\beta}\times\vec{\nabla}\phi - \f] - (this represents the term \f$\vec{\nabla} \times \vec{A}\f$, in the case of a moving source) - - \param[inout] E Electric field on the grid - \param[in] phi The potential from which to compute the electric field - \param[in] beta Represents the velocity of the source of `phi` -*/ -void -WarpX::computeB (amrex::Vector, 3> >& B, - const amrex::Vector >& phi, - std::array const beta ) const -{ - // return early if beta is 0 since there will be no B-field - if ((beta[0] == 0._rt) && (beta[1] == 0._rt) && (beta[2] == 0._rt)) { return; } - - for (int lev = 0; lev <= max_level; lev++) { - - const Real* dx = Geom(lev).CellSize(); - -#ifdef AMREX_USE_OMP -# pragma omp parallel if (Gpu::notInLaunchRegion()) -#endif - for ( MFIter mfi(*phi[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi ) - { -#if defined(WARPX_DIM_3D) - const Real inv_dx = 1._rt/dx[0]; - const Real inv_dy = 1._rt/dx[1]; - const Real inv_dz = 1._rt/dx[2]; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const Real inv_dx = 1._rt/dx[0]; - const Real inv_dz = 1._rt/dx[1]; -#else - const Real inv_dz = 1._rt/dx[0]; -#endif - const amrex::IntVect bx_type = B[lev][0]->ixType().toIntVect(); - const amrex::IntVect by_type = B[lev][1]->ixType().toIntVect(); - const amrex::IntVect bz_type = B[lev][2]->ixType().toIntVect(); - - const amrex::Box& tbx = mfi.tilebox(bx_type); - const amrex::Box& tby = mfi.tilebox(by_type); - const amrex::Box& tbz = mfi.tilebox(bz_type); - - const auto& phi_arr = phi[lev]->array(mfi); - const auto& Bx_arr = (*B[lev][0])[mfi].array(); - const auto& By_arr = (*B[lev][1])[mfi].array(); - const auto& Bz_arr = (*B[lev][2])[mfi].array(); - - const Real beta_x = beta[0]; - const Real beta_y = beta[1]; - const Real beta_z = beta[2]; - - constexpr Real inv_c = 1._rt/PhysConst::c; - - // Calculate the magnetic field - // Use discretized derivative that matches the staggering of the grid. 
- // Nodal solver - if (bx_type == amrex::IntVect::TheNodeVector() && - by_type == amrex::IntVect::TheNodeVector() && - bz_type == amrex::IntVect::TheNodeVector()) - { -#if defined(WARPX_DIM_3D) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bx_arr(i,j,k) += inv_c * ( - -beta_y*inv_dz*0.5_rt*(phi_arr(i,j ,k+1)-phi_arr(i,j ,k-1)) - +beta_z*inv_dy*0.5_rt*(phi_arr(i,j+1,k )-phi_arr(i,j-1,k ))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - By_arr(i,j,k) += inv_c * ( - -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j,k )-phi_arr(i-1,j,k )) - +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j,k+1)-phi_arr(i ,j,k-1))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bz_arr(i,j,k) += inv_c * ( - -beta_x*inv_dy*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)) - +beta_y*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k))); - } - ); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bx_arr(i,j,k) += inv_c * ( - -beta_y*inv_dz*0.5_rt*(phi_arr(i,j+1,k)-phi_arr(i,j-1,k))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - By_arr(i,j,k) += inv_c * ( - -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) - +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bz_arr(i,j,k) += inv_c * ( - +beta_y*inv_dx*0.5_rt*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k))); - } - ); -#else - amrex::ParallelFor( tbx, tby, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bx_arr(i,j,k) += inv_c * ( - -beta_y*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - By_arr(i,j,k) += inv_c * ( - +beta_x*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); - } - ); - ignore_unused(beta_z,tbz,Bz_arr); -#endif - } - else // Staggered solver - { -#if defined(WARPX_DIM_3D) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bx_arr(i,j,k) += inv_c * ( - -beta_y*inv_dz*0.5_rt*(phi_arr(i,j ,k+1)-phi_arr(i,j ,k ) - + phi_arr(i,j+1,k+1)-phi_arr(i,j+1,k )) - +beta_z*inv_dy*0.5_rt*(phi_arr(i,j+1,k )-phi_arr(i,j ,k ) - + phi_arr(i,j+1,k+1)-phi_arr(i,j ,k+1))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - By_arr(i,j,k) += inv_c * ( - -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j,k )-phi_arr(i ,j,k ) - + phi_arr(i+1,j,k+1)-phi_arr(i ,j,k+1)) - +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j,k+1)-phi_arr(i ,j,k ) - + phi_arr(i+1,j,k+1)-phi_arr(i+1,j,k ))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bz_arr(i,j,k) += inv_c * ( - -beta_x*inv_dy*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j ,k) - + phi_arr(i+1,j+1,k)-phi_arr(i+1,j ,k)) - +beta_y*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i ,j ,k) - + phi_arr(i+1,j+1,k)-phi_arr(i ,j+1,k))); - } - ); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ParallelFor( tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bx_arr(i,j,k) += inv_c * ( - -beta_y*inv_dz*(phi_arr(i,j+1,k)-phi_arr(i,j,k))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - By_arr(i,j,k) += inv_c * ( - -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i ,j ,k) - + phi_arr(i+1,j+1,k)-phi_arr(i ,j+1,k)) - +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j ,k) - + phi_arr(i+1,j+1,k)-phi_arr(i+1,j ,k))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bz_arr(i,j,k) += inv_c * ( - +beta_y*inv_dx*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); - } - ); -#else - amrex::ParallelFor( tbx, tby, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - Bx_arr(i,j,k) += inv_c * ( - 
-beta_y*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - By_arr(i,j,k) += inv_c * ( - +beta_x*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); - } - ); - ignore_unused(beta_z,tbz,Bz_arr); -#endif - } - } - } -} - -/* \brief Compute the potential by solving Poisson's equation with - a 1D tridiagonal solve. - - \param[in] rho The charge density a given species - \param[out] phi The potential to be computed by this function -*/ -void -WarpX::computePhiTriDiagonal (const amrex::Vector >& rho, - amrex::Vector >& phi) const -{ - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(max_level == 0, - "The tridiagonal solver cannot be used with mesh refinement"); - - const int lev = 0; - - const amrex::Real* dx = Geom(lev).CellSize(); - const amrex::Real xmin = Geom(lev).ProbLo(0); - const amrex::Real xmax = Geom(lev).ProbHi(0); - const int nx_full_domain = static_cast( (xmax - xmin)/dx[0] + 0.5_rt ); - - int nx_solve_min = 1; - int nx_solve_max = nx_full_domain - 1; - - auto field_boundary_lo0 = WarpX::field_boundary_lo[0]; - auto field_boundary_hi0 = WarpX::field_boundary_hi[0]; - if (field_boundary_lo0 == FieldBoundaryType::Neumann || field_boundary_lo0 == FieldBoundaryType::Periodic) { - // Neumann or periodic boundary condition - // Solve for the point on the lower boundary - nx_solve_min = 0; - } - if (field_boundary_hi0 == FieldBoundaryType::Neumann || field_boundary_hi0 == FieldBoundaryType::Periodic) { - // Neumann or periodic boundary condition - // Solve for the point on the upper boundary - nx_solve_max = nx_full_domain; - } - - // Create a 1-D MultiFab that covers all of x. - // The tridiag solve will be done in this MultiFab and then copied out afterwards. - const amrex::IntVect lo_full_domain(AMREX_D_DECL(0,0,0)); - const amrex::IntVect hi_full_domain(AMREX_D_DECL(nx_full_domain,0,0)); - const amrex::Box box_full_domain_node(lo_full_domain, hi_full_domain, amrex::IntVect::TheNodeVector()); - const BoxArray ba_full_domain_node(box_full_domain_node); - const amrex::Vector pmap = {0}; // The data will only be on processor 0 - const amrex::DistributionMapping dm_full_domain(pmap); - - // Put the data in the pinned arena since the tridiag solver will be done on the CPU, but have - // the data readily accessible from the GPU. - auto phi1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, 1, 0, MFInfo().SetArena(The_Pinned_Arena())); - auto zwork1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, 1, 0, MFInfo().SetArena(The_Pinned_Arena())); - auto rho1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, 1, 0, MFInfo().SetArena(The_Pinned_Arena())); - - if (field_boundary_lo0 == FieldBoundaryType::PEC || field_boundary_hi0 == FieldBoundaryType::PEC) { - // Copy from phi to get the boundary values - phi1d_mf.ParallelCopy(*phi[lev], 0, 0, 1); - } - rho1d_mf.ParallelCopy(*rho[lev], 0, 0, 1); - - // Multiplier on the charge density - const amrex::Real norm = dx[0]*dx[0]/PhysConst::ep0; - rho1d_mf.mult(norm); - - // Use the MFIter loop since when parallel, only process zero has a FAB. - // This skips the loop on all other processors. 
- for (MFIter mfi(phi1d_mf); mfi.isValid(); ++mfi) { - - const auto& phi1d_arr = phi1d_mf[mfi].array(); - const auto& zwork1d_arr = zwork1d_mf[mfi].array(); - const auto& rho1d_arr = rho1d_mf[mfi].array(); - - // The loops are always performed on the CPU - - amrex::Real diag = 2._rt; - - // The initial values depend on the boundary condition - if (field_boundary_lo0 == FieldBoundaryType::PEC) { - - phi1d_arr(1,0,0) = (phi1d_arr(0,0,0) + rho1d_arr(1,0,0))/diag; - - } else if (field_boundary_lo0 == FieldBoundaryType::Neumann) { - - // Neumann boundary condition - phi1d_arr(0,0,0) = rho1d_arr(0,0,0)/diag; - - zwork1d_arr(1,0,0) = 2._rt/diag; - diag = 2._rt - zwork1d_arr(1,0,0); - phi1d_arr(1,0,0) = (rho1d_arr(1,0,0) - (-1._rt)*phi1d_arr(1-1,0,0))/diag; - - } else if (field_boundary_lo0 == FieldBoundaryType::Periodic) { - - phi1d_arr(0,0,0) = rho1d_arr(0,0,0)/diag; - - zwork1d_arr(1,0,0) = 1._rt/diag; - diag = 2._rt - zwork1d_arr(1,0,0); - phi1d_arr(1,0,0) = (rho1d_arr(1,0,0) - (-1._rt)*phi1d_arr(1-1,0,0))/diag; - - } - - // Loop upward, calculating the Gaussian elimination multipliers and right hand sides - for (int i_up = 2 ; i_up < nx_solve_max ; i_up++) { - - zwork1d_arr(i_up,0,0) = 1._rt/diag; - diag = 2._rt - zwork1d_arr(i_up,0,0); - phi1d_arr(i_up,0,0) = (rho1d_arr(i_up,0,0) - (-1._rt)*phi1d_arr(i_up-1,0,0))/diag; - - } - - // The last value depend on the boundary condition - amrex::Real zwork_product = 1.; // Needed for parallel boundaries - if (field_boundary_hi0 == FieldBoundaryType::PEC) { - - int const nxm1 = nx_full_domain - 1; - zwork1d_arr(nxm1,0,0) = 1._rt/diag; - diag = 2._rt - zwork1d_arr(nxm1,0,0); - phi1d_arr(nxm1,0,0) = (phi1d_arr(nxm1+1,0,0) + rho1d_arr(nxm1,0,0) - (-1._rt)*phi1d_arr(nxm1-1,0,0))/diag; - - } else if (field_boundary_hi0 == FieldBoundaryType::Neumann) { - - // Neumann boundary condition - zwork1d_arr(nx_full_domain,0,0) = 1._rt/diag; - diag = 2._rt - 2._rt*zwork1d_arr(nx_full_domain,0,0); - if (diag == 0._rt) { - // This happens if the lower boundary is also Neumann. - // It this case, the potential is relative to an arbitrary constant, - // so set the upper boundary to zero to force a value. - phi1d_arr(nx_full_domain,0,0) = 0.; - } else { - phi1d_arr(nx_full_domain,0,0) = (rho1d_arr(nx_full_domain,0,0) - (-1._rt)*phi1d_arr(nx_full_domain-1,0,0))/diag; - } - - } else if (field_boundary_hi0 == FieldBoundaryType::Periodic) { - - zwork1d_arr(nx_full_domain,0,0) = 1._rt/diag; - - for (int i = 1 ; i <= nx_full_domain ; i++) { - zwork_product *= zwork1d_arr(i,0,0); - } - - diag = 2._rt - zwork1d_arr(nx_full_domain,0,0) - zwork_product; - // Note that rho1d_arr(0,0,0) is used to ensure that the same value is used - // on both boundaries. 
- phi1d_arr(nx_full_domain,0,0) = (rho1d_arr(0,0,0) - (-1._rt)*phi1d_arr(nx_full_domain-1,0,0))/diag; - - } - - // Loop downward to calculate the phi - if (field_boundary_lo0 == FieldBoundaryType::Periodic) { - - // With periodic, the right hand column adds an extra term for all rows - for (int i_down = nx_full_domain-1 ; i_down >= 0 ; i_down--) { - zwork_product /= zwork1d_arr(i_down+1,0,0); - phi1d_arr(i_down,0,0) = phi1d_arr(i_down,0,0) + zwork1d_arr(i_down+1,0,0)*phi1d_arr(i_down+1,0,0) + zwork_product*phi1d_arr(nx_full_domain,0,0); - } - - } else { - - for (int i_down = nx_solve_max-1 ; i_down >= nx_solve_min ; i_down--) { - phi1d_arr(i_down,0,0) = phi1d_arr(i_down,0,0) + zwork1d_arr(i_down+1,0,0)*phi1d_arr(i_down+1,0,0); - } - - } - - } - - // Copy phi1d to phi - phi[lev]->ParallelCopy(phi1d_mf, 0, 0, 1); -} - -void ElectrostaticSolver::PoissonBoundaryHandler::definePhiBCs (const amrex::Geometry& geom) -{ -#ifdef WARPX_DIM_RZ - if (geom.ProbLo(0) == 0){ - lobc[0] = LinOpBCType::Neumann; - dirichlet_flag[0] = false; - - // handle the r_max boundary explicitly - if (WarpX::field_boundary_hi[0] == FieldBoundaryType::PEC) { - hibc[0] = LinOpBCType::Dirichlet; - dirichlet_flag[1] = true; - } - else if (WarpX::field_boundary_hi[0] == FieldBoundaryType::Neumann) { - hibc[0] = LinOpBCType::Neumann; - dirichlet_flag[1] = false; - } - else { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(false, - "Field boundary condition at the outer radius must be either PEC or neumann " - "when using the electrostatic solver" - ); - } - } - const int dim_start = 1; -#else - const int dim_start = 0; - amrex::ignore_unused(geom); -#endif - for (int idim=dim_start; idim(); - potential_xhi = potential_xhi_parser.compile<1>(); - potential_ylo = potential_ylo_parser.compile<1>(); - potential_yhi = potential_yhi_parser.compile<1>(); - potential_zlo = potential_zlo_parser.compile<1>(); - potential_zhi = potential_zhi_parser.compile<1>(); - - buildParsersEB(); -} - -void ElectrostaticSolver::PoissonBoundaryHandler::buildParsersEB () -{ - potential_eb_parser = utils::parser::makeParser(potential_eb_str, {"x", "y", "z", "t"}); - - // check if the EB potential is a function of space or only of time - const std::set eb_symbols = potential_eb_parser.symbols(); - if ((eb_symbols.count("x") != 0) || (eb_symbols.count("y") != 0) - || (eb_symbols.count("z") != 0)) { - potential_eb = potential_eb_parser.compile<4>(); - phi_EB_only_t = false; - } - else { - potential_eb_parser = utils::parser::makeParser(potential_eb_str, {"t"}); - potential_eb_t = potential_eb_parser.compile<1>(); - } -} diff --git a/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt b/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt new file mode 100644 index 00000000000..39c4478c110 --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt @@ -0,0 +1,10 @@ +foreach(D IN LISTS WarpX_DIMS) + warpx_set_suffix_dims(SD ${D}) + target_sources(lib_${SD} + PRIVATE + ElectrostaticSolver.cpp + LabFrameExplicitES.cpp + PoissonBoundaryHandler.cpp + RelativisticExplicitES.cpp + ) +endforeach() diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H new file mode 100644 index 00000000000..8d23088799f --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H @@ -0,0 +1,163 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. 
+ * + * Authors: Roelof Groenewald, Arianna Formenti, Revathi Jambunathan + * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_ELECTROSTATICSOLVER_H_ +#define WARPX_ELECTROSTATICSOLVER_H_ + +#include "PoissonBoundaryHandler.H" +#include "Fluids/MultiFluidContainer.H" +#include "Particles/MultiParticleContainer.H" +#include "Utils/WarpXProfilerWrapper.H" +#include "WarpX.H" + +#include + + +/** + * \brief Base class for Electrostatic Solver + * + */ +class ElectrostaticSolver +{ +public: + ElectrostaticSolver() = default; + ElectrostaticSolver( int nlevs_max ); + + virtual ~ElectrostaticSolver(); + + // Prohibit Move and Copy operations + ElectrostaticSolver(const ElectrostaticSolver&) = delete; + ElectrostaticSolver& operator=(const ElectrostaticSolver&) = delete; + ElectrostaticSolver(ElectrostaticSolver&&) = delete; + ElectrostaticSolver& operator=(ElectrostaticSolver&&) = delete; + + void ReadParameters (); + + virtual void InitData () {} + + /** + * \brief Computes charge density, rho, and solves Poisson's equation + * to obtain the associated electrostatic potential, phi. + * Using the electrostatic potential, the electric field is computed + * in lab frame, and if relativistic, then the electric and magnetic + * fields are computed using potential, phi, and + * velocity of source for potential, beta. + * This function must be defined in the derived classes. + */ + virtual void ComputeSpaceChargeField ( + [[maybe_unused]] amrex::Vector< std::unique_ptr >& rho_fp, + [[maybe_unused]] amrex::Vector< std::unique_ptr >& rho_cp, + [[maybe_unused]] amrex::Vector< std::unique_ptr >& charge_buf, + [[maybe_unused]] amrex::Vector< std::unique_ptr >& phi_fp, + [[maybe_unused]] MultiParticleContainer& mpc, + [[maybe_unused]] MultiFluidContainer* mfl, + [[maybe_unused]] amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, + [[maybe_unused]] amrex::Vector< std::array< std::unique_ptr, 3> >& Bfield_fp + ) = 0; + + /** + * \brief Set Dirichlet boundary conditions for the electrostatic solver. + * The given potential's values are fixed on the boundaries of the given + * dimension according to the desired values from the simulation input file, + * boundary.potential_lo and boundary.potential_hi. + * \param[inout] phi The electrostatic potential + * \param[in] idim The dimension for which the Dirichlet boundary condition is set + */ + void setPhiBC ( + amrex::Vector>& phi, + amrex::Real t + ) const; + + /** + * Compute the potential `phi` by solving the Poisson equation with `rho` as + * a source, assuming that the source moves at a constant speed \f$\vec{\beta}\f$. + * This uses the amrex solver. 
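+     * (When the source is at rest, as in the lab-frame solver, `beta` is zero and
+     * the equation below reduces to the standard Poisson equation.)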
+ * More specifically, this solves the equation + * \f[ + * \vec{\nabla}^2 r \phi - (\vec{\beta}\cdot\vec{\nabla})^2 r \phi = -\frac{r \rho}{\epsilon_0} + * \f] + * \param[out] phi The potential to be computed by this function + * \param[in] rho The charge density for a given species (relativistic solver) + * or total charge density (labframe solver) + * \param[in] beta Represents the velocity of the source of `phi` + * \param[in] required_precision The relative convergence threshold for the MLMG solver + * \param[in] absolute_tolerance The absolute convergence threshold for the MLMG solver + * \param[in] max_iters The maximum number of iterations allowed for the MLMG solver + * \param[in] verbosity The verbosity setting for the MLMG solver + */ + void computePhi ( + const amrex::Vector >& rho, + amrex::Vector >& phi, + std::array beta, + amrex::Real required_precision, + amrex::Real absolute_tolerance, + int max_iters, + int verbosity + ) const; + + /** + * \brief Compute the electric field that corresponds to `phi`, and + * add it to the set of MultiFab `E`. + * The electric field is calculated by assuming that the source that + * produces the `phi` potential is moving with a constant speed \f$\vec{\beta}\f$: + * \f[ + * \vec{E} = -\vec{\nabla}\phi + \vec{\beta}(\vec{\beta} \cdot \vec{\nabla}\phi) + * \f] + * (where the second term represent the term \f$\partial_t \vec{A}\f$, in + * the case of a moving source) + * + * \param[inout] E Electric field on the grid + * \param[in] phi The potential from which to compute the electric field + * \param[in] beta Represents the velocity of the source of `phi` + */ + void computeE ( + amrex::Vector, 3> >& E, + const amrex::Vector >& phi, + std::array beta + ) const; + + /** + * \brief Compute the magnetic field that corresponds to `phi`, and + * add it to the set of MultiFab `B`. + *The magnetic field is calculated by assuming that the source that + *produces the `phi` potential is moving with a constant speed \f$\vec{\beta}\f$: + *\f[ + * \vec{B} = -\frac{1}{c}\vec{\beta}\times\vec{\nabla}\phi + *\f] + *(this represents the term \f$\vec{\nabla} \times \vec{A}\f$, in the case of a moving source) + * + *\param[inout] B Electric field on the grid + *\param[in] phi The potential from which to compute the electric field + *\param[in] beta Represents the velocity of the source of `phi` + */ + void computeB ( + amrex::Vector, 3> >& B, + const amrex::Vector >& phi, + std::array beta + ) const; + + /** Maximum levels for the electrostatic solver grid */ + int num_levels; + + /** Boundary handler object to set potential for EB and on the domain boundary */ + std::unique_ptr m_poisson_boundary_handler; + + /** Parameters for MLMG Poisson solve */ + amrex::Real self_fields_required_precision = 1e-11; + amrex::Real self_fields_absolute_tolerance = 0.0; + /** Limit on number of MLMG iterations */ + int self_fields_max_iters = 200; + /** Verbosity for the MLMG solver. + * 0 : no verbosity + * 1 : timing and convergence at the end of MLMG + * 2 : convergence progress at every MLMG iteration + */ + int self_fields_verbosity = 2; +}; + +#endif // WARPX_ELECTROSTATICSOLVER_H_ diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp new file mode 100644 index 00000000000..895615a5b21 --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp @@ -0,0 +1,533 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. 
+ *
+ * Authors: Roelof Groenewald, Arianna Formenti, Revathi Jambunathan
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#include "ElectrostaticSolver.H"
+#include
+#include "EmbeddedBoundary/Enabled.H"
+
+using namespace amrex;
+
+ElectrostaticSolver::ElectrostaticSolver (int nlevs_max) : num_levels{nlevs_max}
+{
+ // Create an instance of the boundary handler to properly set boundary
+ // conditions
+ m_poisson_boundary_handler = std::make_unique<PoissonBoundaryHandler>();
+}
+
+ElectrostaticSolver::~ElectrostaticSolver () = default;
+
+void ElectrostaticSolver::ReadParameters () {
+
+ ParmParse const pp_warpx("warpx");
+
+ // Note that with the relativistic version, these parameters would be
+ // input for each species.
+ utils::parser::queryWithParser(
+ pp_warpx, "self_fields_required_precision", self_fields_required_precision);
+ utils::parser::queryWithParser(
+ pp_warpx, "self_fields_absolute_tolerance", self_fields_absolute_tolerance);
+ utils::parser::queryWithParser(
+ pp_warpx, "self_fields_max_iters", self_fields_max_iters);
+ pp_warpx.query("self_fields_verbosity", self_fields_verbosity);
+}
+
+void
+ElectrostaticSolver::setPhiBC (
+ amrex::Vector<std::unique_ptr<amrex::MultiFab>>& phi,
+ amrex::Real t
+) const
+{
+ // check if any dimension has non-periodic boundary conditions
+ if (!m_poisson_boundary_handler->has_non_periodic) { return; }
+
+ // get the boundary potentials at the current time
+ amrex::Array<amrex::Real, AMREX_SPACEDIM> phi_bc_values_lo;
+ amrex::Array<amrex::Real, AMREX_SPACEDIM> phi_bc_values_hi;
+ phi_bc_values_lo[WARPX_ZINDEX] = m_poisson_boundary_handler->potential_zlo(t);
+ phi_bc_values_hi[WARPX_ZINDEX] = m_poisson_boundary_handler->potential_zhi(t);
+#ifndef WARPX_DIM_1D_Z
+ phi_bc_values_lo[0] = m_poisson_boundary_handler->potential_xlo(t);
+ phi_bc_values_hi[0] = m_poisson_boundary_handler->potential_xhi(t);
+#endif
+#if defined(WARPX_DIM_3D)
+ phi_bc_values_lo[1] = m_poisson_boundary_handler->potential_ylo(t);
+ phi_bc_values_hi[1] = m_poisson_boundary_handler->potential_yhi(t);
+#endif
+
+ auto dirichlet_flag = m_poisson_boundary_handler->dirichlet_flag;
+
+ auto & warpx = WarpX::GetInstance();
+
+ // loop over all mesh refinement levels and set the boundary values
+ for (int lev=0; lev < num_levels; lev++) {
+
+ amrex::Box domain = warpx.Geom(lev).Domain();
+ domain.surroundingNodes();
+
+#ifdef AMREX_USE_OMP
+#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
+#endif
+ for ( MFIter mfi(*phi[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi ) {
+ // Extract the potential
+ auto phi_arr = phi[lev]->array(mfi);
+ // Extract tileboxes for which to loop
+ const Box& tb = mfi.tilebox( phi[lev]->ixType().toIntVect() );
+
+ // loop over dimensions
+ for (int idim=0; idim<AMREX_SPACEDIM; idim++){
+ // skip this dimension if neither of its boundaries is Dirichlet
+ if (!(dirichlet_flag[2*idim] || dirichlet_flag[2*idim+1])) { continue; }
+
+ if (!domain.strictly_contains(tb)) {
+ amrex::ParallelFor( tb,
+ [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+ IntVect iv(AMREX_D_DECL(i,j,k));
+ if (dirichlet_flag[2*idim] && iv[idim] == domain.smallEnd(idim)) {
+ phi_arr(i,j,k) = phi_bc_values_lo[idim];
+ }
+ if (dirichlet_flag[2*idim+1] && iv[idim] == domain.bigEnd(idim)) {
+ phi_arr(i,j,k) = phi_bc_values_hi[idim];
+ }
+ }
+ );
+ }
+ } // idim
+ } // MFIter
+ } // lev
+}
+
+void
+ElectrostaticSolver::computePhi (
+ const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi,
+ std::array<Real, 3> const beta,
+ Real const required_precision,
+ Real absolute_tolerance,
+ int const max_iters,
+ int const verbosity) const {
+ // create a vector of pointers to our fields, sorted by level
+ amrex::Vector<amrex::MultiFab *> sorted_rho;
+ amrex::Vector<amrex::MultiFab *> sorted_phi;
+ for (int lev = 0; lev < num_levels; ++lev) {
+ sorted_rho.emplace_back(rho[lev].get());
+ sorted_phi.emplace_back(phi[lev].get());
+ }
+
+ std::optional<EBCalcEfromPhiPerLevel> post_phi_calculation;
+#ifdef AMREX_USE_EB
+ // TODO: double check no overhead occurs on "m_eb_enabled == false"
+ std::optional<amrex::Vector<amrex::EBFArrayBoxFactory const *> > eb_farray_box_factory;
+#else
+ std::optional<amrex::Vector<amrex::FArrayBoxFactory const *> > const eb_farray_box_factory;
+#endif
+ auto & warpx = WarpX::GetInstance();
+ if (EB::enabled())
+ {
+ if (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame ||
+ WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic)
+ {
+ // EB: use AMReX to directly calculate the electric field, since with EBs
+ // the simple finite difference scheme in WarpX::computeE sometimes fails
+
+ // TODO: maybe make this a helper function or pass Efield_fp directly
+ amrex::Vector<
+ amrex::Array<amrex::MultiFab *, AMREX_SPACEDIM>
+ > e_field;
+ for (int lev = 0; lev < num_levels; ++lev) {
+ e_field.push_back(
+#if defined(WARPX_DIM_1D_Z)
+ amrex::Array<amrex::MultiFab *, AMREX_SPACEDIM>{
+ warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 2)
+ }
+#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+ amrex::Array<amrex::MultiFab *, AMREX_SPACEDIM>{
+ warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 0),
+ warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 2)
+ }
+#elif defined(WARPX_DIM_3D)
+ amrex::Array<amrex::MultiFab *, AMREX_SPACEDIM>{
+ warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 0),
+ warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 1),
+ warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 2)
+ }
+#endif
+ );
+ }
+ post_phi_calculation = EBCalcEfromPhiPerLevel(e_field);
+ }
+#ifdef AMREX_USE_EB
+ amrex::Vector<
+ amrex::EBFArrayBoxFactory const *
+ > factories;
+ for (int lev = 0; lev < num_levels; ++lev) {
+ factories.push_back(&warpx.fieldEBFactory(lev));
+ }
+ eb_farray_box_factory = factories;
+#endif
+ }
+
+ bool const is_solver_igf_on_lev0 =
+ WarpX::poisson_solver_id == PoissonSolverAlgo::IntegratedGreenFunction;
+
+ ablastr::fields::computePhi(
+ sorted_rho,
+ sorted_phi,
+ beta,
+ required_precision,
+ absolute_tolerance,
+ max_iters,
+ verbosity,
+ warpx.Geom(),
+ warpx.DistributionMap(),
+ warpx.boxArray(),
+ WarpX::grid_type,
+ *m_poisson_boundary_handler,
+ is_solver_igf_on_lev0,
+ EB::enabled(),
+ WarpX::do_single_precision_comms,
+ warpx.refRatio(),
+ post_phi_calculation,
+ warpx.gett_new(0),
+ eb_farray_box_factory
+ );
+
+}
+
+void
+ElectrostaticSolver::computeE (amrex::Vector< std::array<std::unique_ptr<amrex::MultiFab>, 3> >& E,
+ const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi,
+ std::array<amrex::Real, 3> const beta ) const
+{
+ auto & warpx = WarpX::GetInstance();
+ for (int lev = 0; lev < num_levels; lev++) {
+
+ const Real* dx = warpx.Geom(lev).CellSize();
+
+#ifdef AMREX_USE_OMP
+# pragma omp parallel if (Gpu::notInLaunchRegion())
+#endif
+ for ( MFIter mfi(*phi[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi )
+ {
+#if defined(WARPX_DIM_3D)
+ const Real inv_dx = 1._rt/dx[0];
+ const Real inv_dy = 1._rt/dx[1];
+ const Real inv_dz = 1._rt/dx[2];
+#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+ const Real inv_dx = 1._rt/dx[0];
+ const Real inv_dz = 1._rt/dx[1];
+#else
+ const Real inv_dz = 1._rt/dx[0];
+#endif
+ const amrex::IntVect ex_type = E[lev][0]->ixType().toIntVect();
+ const amrex::IntVect ey_type = E[lev][1]->ixType().toIntVect();
+ const amrex::IntVect ez_type = E[lev][2]->ixType().toIntVect();
+
+ const amrex::Box& tbx = mfi.tilebox(ex_type);
+ const amrex::Box& tby = mfi.tilebox(ey_type);
+ const amrex::Box& tbz = mfi.tilebox(ez_type);
+
+ const auto& phi_arr = phi[lev]->array(mfi);
+ const auto& Ex_arr = (*E[lev][0])[mfi].array();
+ const auto& Ey_arr = (*E[lev][1])[mfi].array();
+ const auto& Ez_arr = (*E[lev][2])[mfi].array();
+
+ const Real beta_x = beta[0];
+ const Real beta_y = beta[1];
+ const Real beta_z = beta[2];
+
+ // Calculate the electric field
+ // Use discretized derivative that matches the staggering of the grid.
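+ // The kernels below implement E += (beta beta^T - Id) grad(phi), component
+ // by component, e.g. Ex += (beta_x*beta_x - 1)*dphi/dx
+ // + beta_x*beta_y*dphi/dy + beta_x*beta_z*dphi/dz.
+ // On a nodal grid, each derivative is a centered difference over two cells
+ // (hence the 0.5 factors). On a staggered grid, the derivative along the
+ // component's own direction is the one-sided difference between the two
+ // nodes adjacent to that edge, while the transverse derivatives are centered
+ // differences averaged over the two adjacent node planes (the 0.25 factors).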
+ // Nodal solver + if (ex_type == amrex::IntVect::TheNodeVector() && + ey_type == amrex::IntVect::TheNodeVector() && + ez_type == amrex::IntVect::TheNodeVector()) + { +#if defined(WARPX_DIM_3D) + amrex::ParallelFor( tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ex_arr(i,j,k) += + +(beta_x*beta_x-1._rt)*0.5_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k )) + + beta_x*beta_y *0.5_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k )) + + beta_x*beta_z *0.5_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1)); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ey_arr(i,j,k) += + + beta_y*beta_x *0.5_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k )) + +(beta_y*beta_y-1._rt)*0.5_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k )) + + beta_y*beta_z *0.5_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1)); }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ez_arr(i,j,k) += + + beta_z*beta_x *0.5_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k )) + + beta_z*beta_y *0.5_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k )) + +(beta_z*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1)); + } + ); +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::ParallelFor( tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ex_arr(i,j,k) += + +(beta_x*beta_x-1._rt)*0.5_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) + + beta_x*beta_z *0.5_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ey_arr(i,j,k) += + +beta_x*beta_y*0.5_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) + +beta_y*beta_z*0.5_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ez_arr(i,j,k) += + + beta_z*beta_x *0.5_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) + +(beta_z*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)); + } + ); +#else + amrex::ParallelFor( tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ex_arr(i,j,k) += + +(beta_x*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k)); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ey_arr(i,j,k) += + +beta_y*beta_z*0.5_rt*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k)); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ez_arr(i,j,k) += + +(beta_z*beta_z-1._rt)*0.5_rt*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k)); + } + ); +#endif + } + else // Staggered solver + { +#if defined(WARPX_DIM_3D) + amrex::ParallelFor( tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ex_arr(i,j,k) += + +(beta_x*beta_x-1._rt) *inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i ,j ,k )) + + beta_x*beta_y*0.25_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k ) + + phi_arr(i+1,j+1,k )-phi_arr(i+1,j-1,k )) + + beta_x*beta_z*0.25_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1) + + phi_arr(i+1,j ,k+1)-phi_arr(i+1,j ,k-1)); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ey_arr(i,j,k) += + + beta_y*beta_x*0.25_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k ) + + phi_arr(i+1,j+1,k )-phi_arr(i-1,j+1,k )) + +(beta_y*beta_y-1._rt) *inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j ,k )) + + beta_y*beta_z*0.25_rt*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k-1) + + phi_arr(i ,j+1,k+1)-phi_arr(i ,j+1,k-1)); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Ez_arr(i,j,k) += + + beta_z*beta_x*0.25_rt*inv_dx*(phi_arr(i+1,j ,k )-phi_arr(i-1,j ,k ) + + phi_arr(i+1,j ,k+1)-phi_arr(i-1,j ,k+1)) + + beta_z*beta_y*0.25_rt*inv_dy*(phi_arr(i ,j+1,k )-phi_arr(i ,j-1,k ) + + phi_arr(i ,j+1,k+1)-phi_arr(i ,j-1,k+1)) + +(beta_z*beta_z-1._rt) 
*inv_dz*(phi_arr(i ,j ,k+1)-phi_arr(i ,j ,k ));
+ }
+ );
+#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+ amrex::ParallelFor( tbx, tbz,
+ [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+ Ex_arr(i,j,k) +=
+ +(beta_x*beta_x-1._rt)*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i ,j ,k))
+ +beta_x*beta_z*0.25_rt*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)
+ + phi_arr(i+1,j+1,k)-phi_arr(i+1,j-1,k));
+ },
+ [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+ Ez_arr(i,j,k) +=
+ +beta_z*beta_x*0.25_rt*inv_dx*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)
+ + phi_arr(i+1,j+1,k)-phi_arr(i-1,j+1,k))
+ +(beta_z*beta_z-1._rt)*inv_dz*(phi_arr(i ,j+1,k)-phi_arr(i ,j ,k));
+ }
+ );
+ ignore_unused(beta_y);
+#else
+ amrex::ParallelFor( tbz,
+ [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+ Ez_arr(i,j,k) +=
+ +(beta_z*beta_z-1._rt)*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k));
+ }
+ );
+ ignore_unused(beta_x,beta_y);
+#endif
+ }
+ }
+ }
+}
+
+
+void ElectrostaticSolver::computeB (amrex::Vector< std::array<std::unique_ptr<amrex::MultiFab>, 3> >& B,
+ const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi,
+ std::array<amrex::Real, 3> const beta ) const
+{
+ // return early if beta is 0 since there will be no B-field
+ if ((beta[0] == 0._rt) && (beta[1] == 0._rt) && (beta[2] == 0._rt)) { return; }
+
+ auto & warpx = WarpX::GetInstance();
+ for (int lev = 0; lev < num_levels; lev++) {
+
+ const Real* dx = warpx.Geom(lev).CellSize();
+
+#ifdef AMREX_USE_OMP
+# pragma omp parallel if (Gpu::notInLaunchRegion())
+#endif
+ for ( MFIter mfi(*phi[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi )
+ {
+#if defined(WARPX_DIM_3D)
+ const Real inv_dx = 1._rt/dx[0];
+ const Real inv_dy = 1._rt/dx[1];
+ const Real inv_dz = 1._rt/dx[2];
+#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+ const Real inv_dx = 1._rt/dx[0];
+ const Real inv_dz = 1._rt/dx[1];
+#else
+ const Real inv_dz = 1._rt/dx[0];
+#endif
+ const amrex::IntVect bx_type = B[lev][0]->ixType().toIntVect();
+ const amrex::IntVect by_type = B[lev][1]->ixType().toIntVect();
+ const amrex::IntVect bz_type = B[lev][2]->ixType().toIntVect();
+
+ const amrex::Box& tbx = mfi.tilebox(bx_type);
+ const amrex::Box& tby = mfi.tilebox(by_type);
+ const amrex::Box& tbz = mfi.tilebox(bz_type);
+
+ const auto& phi_arr = phi[lev]->array(mfi);
+ const auto& Bx_arr = (*B[lev][0])[mfi].array();
+ const auto& By_arr = (*B[lev][1])[mfi].array();
+ const auto& Bz_arr = (*B[lev][2])[mfi].array();
+
+ const Real beta_x = beta[0];
+ const Real beta_y = beta[1];
+ const Real beta_z = beta[2];
+
+ constexpr Real inv_c = 1._rt/PhysConst::c;
+
+ // Calculate the magnetic field
+ // Use discretized derivative that matches the staggering of the grid.
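+ // The kernels below implement B += -(1/c) beta x grad(phi), component by
+ // component, e.g. Bx += -(1/c)*(beta_y*dphi/dz - beta_z*dphi/dy).
+ // As in computeE above, centered differences are used on a nodal grid and
+ // plane-averaged one-sided differences on a staggered grid.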
+ // Nodal solver + if (bx_type == amrex::IntVect::TheNodeVector() && + by_type == amrex::IntVect::TheNodeVector() && + bz_type == amrex::IntVect::TheNodeVector()) + { +#if defined(WARPX_DIM_3D) + amrex::ParallelFor( tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bx_arr(i,j,k) += inv_c * ( + -beta_y*inv_dz*0.5_rt*(phi_arr(i,j ,k+1)-phi_arr(i,j ,k-1)) + +beta_z*inv_dy*0.5_rt*(phi_arr(i,j+1,k )-phi_arr(i,j-1,k ))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + By_arr(i,j,k) += inv_c * ( + -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j,k )-phi_arr(i-1,j,k )) + +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j,k+1)-phi_arr(i ,j,k-1))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bz_arr(i,j,k) += inv_c * ( + -beta_x*inv_dy*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k)) + +beta_y*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k))); + } + ); +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::ParallelFor( tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bx_arr(i,j,k) += inv_c * ( + -beta_y*inv_dz*0.5_rt*(phi_arr(i,j+1,k)-phi_arr(i,j-1,k))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + By_arr(i,j,k) += inv_c * ( + -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i-1,j ,k)) + +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j-1,k))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bz_arr(i,j,k) += inv_c * ( + +beta_y*inv_dx*0.5_rt*(phi_arr(i+1,j,k)-phi_arr(i-1,j,k))); + } + ); +#else + amrex::ParallelFor( tbx, tby, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bx_arr(i,j,k) += inv_c * ( + -beta_y*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + By_arr(i,j,k) += inv_c * ( + +beta_x*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); + } + ); + ignore_unused(beta_z,tbz,Bz_arr); +#endif + } + else // Staggered solver + { +#if defined(WARPX_DIM_3D) + amrex::ParallelFor( tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bx_arr(i,j,k) += inv_c * ( + -beta_y*inv_dz*0.5_rt*(phi_arr(i,j ,k+1)-phi_arr(i,j ,k ) + + phi_arr(i,j+1,k+1)-phi_arr(i,j+1,k )) + +beta_z*inv_dy*0.5_rt*(phi_arr(i,j+1,k )-phi_arr(i,j ,k ) + + phi_arr(i,j+1,k+1)-phi_arr(i,j ,k+1))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + By_arr(i,j,k) += inv_c * ( + -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j,k )-phi_arr(i ,j,k ) + + phi_arr(i+1,j,k+1)-phi_arr(i ,j,k+1)) + +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j,k+1)-phi_arr(i ,j,k ) + + phi_arr(i+1,j,k+1)-phi_arr(i+1,j,k ))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bz_arr(i,j,k) += inv_c * ( + -beta_x*inv_dy*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j ,k) + + phi_arr(i+1,j+1,k)-phi_arr(i+1,j ,k)) + +beta_y*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i ,j ,k) + + phi_arr(i+1,j+1,k)-phi_arr(i ,j+1,k))); + } + ); +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::ParallelFor( tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bx_arr(i,j,k) += inv_c * ( + -beta_y*inv_dz*(phi_arr(i,j+1,k)-phi_arr(i,j,k))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + By_arr(i,j,k) += inv_c * ( + -beta_z*inv_dx*0.5_rt*(phi_arr(i+1,j ,k)-phi_arr(i ,j ,k) + + phi_arr(i+1,j+1,k)-phi_arr(i ,j+1,k)) + +beta_x*inv_dz*0.5_rt*(phi_arr(i ,j+1,k)-phi_arr(i ,j ,k) + + phi_arr(i+1,j+1,k)-phi_arr(i+1,j ,k))); + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bz_arr(i,j,k) += inv_c * ( + +beta_y*inv_dx*(phi_arr(i+1,j,k)-phi_arr(i,j,k))); + } + ); +#else + amrex::ParallelFor( tbx, tby, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + Bx_arr(i,j,k) += inv_c * ( + 
-beta_y*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k)));
+ },
+ [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+ By_arr(i,j,k) += inv_c * (
+ +beta_x*inv_dz*(phi_arr(i+1,j,k)-phi_arr(i,j,k)));
+ }
+ );
+ ignore_unused(beta_z,tbz,Bz_arr);
+#endif
+ }
+ }
+ }
+}
diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver_fwd.H b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver_fwd.H
new file mode 100644
index 00000000000..00cd061e975
--- /dev/null
+++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver_fwd.H
@@ -0,0 +1,15 @@
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: Roelof Groenewald, Arianna Formenti, Revathi Jambunathan
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#ifndef WARPX_ELECTROSTATICSOLVER_FWD_H
+#define WARPX_ELECTROSTATICSOLVER_FWD_H
+
+class ElectrostaticSolver;
+
+#endif /* WARPX_ELECTROSTATICSOLVER_FWD_H */
diff --git a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H
new file mode 100644
index 00000000000..7dc41f0a056
--- /dev/null
+++ b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H
@@ -0,0 +1,42 @@
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: Roelof Groenewald, Arianna Formenti, Revathi Jambunathan
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+#ifndef WARPX_LABFRAMEEXPLICITES_H_
+#define WARPX_LABFRAMEEXPLICITES_H_
+
+#include "ElectrostaticSolver.H"
+
+class LabFrameExplicitES final : public ElectrostaticSolver
+{
+public:
+
+ LabFrameExplicitES (int nlevs_max) : ElectrostaticSolver (nlevs_max) {
+ ReadParameters();
+ }
+
+ void InitData () override;
+
+ void ComputeSpaceChargeField (
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho_fp,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho_cp,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& charge_buf,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi_fp,
+ MultiParticleContainer& mpc,
+ MultiFluidContainer* mfl,
+ amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& Efield_fp,
+ amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& Bfield_fp
+ ) override;
+
+ void computePhiTriDiagonal (
+ const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi
+ );
+
+};
+
+#endif // WARPX_LABFRAMEEXPLICITES_H_
diff --git a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp
new file mode 100644
index 00000000000..d14abd1848a
--- /dev/null
+++ b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp
@@ -0,0 +1,256 @@
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: Roelof Groenewald, Arianna Formenti, Revathi Jambunathan
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+#include "LabFrameExplicitES.H"
+#include "Fluids/MultiFluidContainer_fwd.H"
+#include "EmbeddedBoundary/Enabled.H"
+#include "Particles/MultiParticleContainer_fwd.H"
+#include "Python/callbacks.H"
+#include "WarpX.H"
+
+using namespace amrex;
+
+void LabFrameExplicitES::InitData() {
+ auto & warpx = WarpX::GetInstance();
+ m_poisson_boundary_handler->DefinePhiBCs(warpx.Geom(0));
+}
+
+void LabFrameExplicitES::ComputeSpaceChargeField (
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho_fp,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho_cp,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& charge_buf,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi_fp,
+ MultiParticleContainer& mpc,
+ MultiFluidContainer* mfl,
+ amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& Efield_fp,
+ amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& /*Bfield_fp*/
+) {
+ mpc.DepositCharge(rho_fp, 0.0_rt);
+ if (mfl) {
+ const int lev = 0;
+ mfl->DepositCharge(lev, *rho_fp[lev]);
+ }
+
+ auto & warpx = WarpX::GetInstance();
+ for (int lev = 0; lev < num_levels; lev++) {
+ if (lev > 0) {
+ if (charge_buf[lev]) {
+ charge_buf[lev]->setVal(0.);
+ }
+ }
+ }
+ warpx.SyncRho(rho_fp, rho_cp, charge_buf); // Apply filter, perform MPI exchange, interpolate across levels
+
+#ifndef WARPX_DIM_RZ
+ for (int lev = 0; lev < num_levels; lev++) {
+ // Reflect density over PEC boundaries, if needed.
+ warpx.ApplyRhofieldBoundary(lev, rho_fp[lev].get(), PatchType::fine);
+ }
+#endif
+ // beta is zero in lab frame
+ // Todo: use simpler finite difference form with beta=0
+ const std::array<Real, 3> beta = {0._rt};
+
+ // set the boundary potentials appropriately
+ setPhiBC(phi_fp, warpx.gett_new(0));
+
+ // Compute the potential phi, by solving the Poisson equation
+ if (IsPythonCallbackInstalled("poissonsolver")) {
+
+ // Use the Python level solver (user specified)
+ ExecutePythonCallback("poissonsolver");
+
+ } else {
+
+#if defined(WARPX_DIM_1D_Z)
+ // Use the tridiag solver in 1D
+ computePhiTriDiagonal(rho_fp, phi_fp);
+#else
+ // Use the AMReX MLMG or the FFT (IGF) solver otherwise
+ computePhi(rho_fp, phi_fp, beta, self_fields_required_precision,
+ self_fields_absolute_tolerance, self_fields_max_iters,
+ self_fields_verbosity);
+#endif
+
+ }
+
+ // Compute the electric field. Note that if an EB is used the electric
+ // field will be calculated in the computePhi call.
+ if (!EB::enabled()) { computeE( Efield_fp, phi_fp, beta ); }
+ else {
+ if (IsPythonCallbackInstalled("poissonsolver")) { computeE(Efield_fp, phi_fp, beta); }
+ }
+}
+
+/* \brief Compute the potential by solving Poisson's equation with
+ a 1D tridiagonal solve.
+
+ \param[in] rho The charge density of a given species
+ \param[out] phi The potential to be computed by this function
+*/
+void LabFrameExplicitES::computePhiTriDiagonal (
+ const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi)
+{
+ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(num_levels == 1,
+ "The tridiagonal solver cannot be used with mesh refinement");
+
+ const int lev = 0;
+ auto & warpx = WarpX::GetInstance();
+
+ const amrex::Real* dx = warpx.Geom(lev).CellSize();
+ const amrex::Real xmin = warpx.Geom(lev).ProbLo(0);
+ const amrex::Real xmax = warpx.Geom(lev).ProbHi(0);
+ const int nx_full_domain = static_cast<int>( (xmax - xmin)/dx[0] + 0.5_rt );
+
+ int nx_solve_min = 1;
+ int nx_solve_max = nx_full_domain - 1;
+
+ auto field_boundary_lo0 = WarpX::field_boundary_lo[0];
+ auto field_boundary_hi0 = WarpX::field_boundary_hi[0];
+ if (field_boundary_lo0 == FieldBoundaryType::Neumann || field_boundary_lo0 == FieldBoundaryType::Periodic) {
+ // Neumann or periodic boundary condition
+ // Solve for the point on the lower boundary
+ nx_solve_min = 0;
+ }
+ if (field_boundary_hi0 == FieldBoundaryType::Neumann || field_boundary_hi0 == FieldBoundaryType::Periodic) {
+ // Neumann or periodic boundary condition
+ // Solve for the point on the upper boundary
+ nx_solve_max = nx_full_domain;
+ }
+
+ // Create a 1-D MultiFab that covers all of x.
+ // The tridiag solve will be done in this MultiFab and then copied out afterwards.
+ const amrex::IntVect lo_full_domain(AMREX_D_DECL(0,0,0));
+ const amrex::IntVect hi_full_domain(AMREX_D_DECL(nx_full_domain,0,0));
+ const amrex::Box box_full_domain_node(lo_full_domain, hi_full_domain, amrex::IntVect::TheNodeVector());
+ const BoxArray ba_full_domain_node(box_full_domain_node);
+ const amrex::Vector<int> pmap = {0}; // The data will only be on processor 0
+ const amrex::DistributionMapping dm_full_domain(pmap);
+
+ // Put the data in the pinned arena since the tridiag solver will be done on the CPU, but have
+ // the data readily accessible from the GPU.
+ auto phi1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, 1, 0, MFInfo().SetArena(The_Pinned_Arena()));
+ auto zwork1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, 1, 0, MFInfo().SetArena(The_Pinned_Arena()));
+ auto rho1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, 1, 0, MFInfo().SetArena(The_Pinned_Arena()));
+
+ if (field_boundary_lo0 == FieldBoundaryType::PEC || field_boundary_hi0 == FieldBoundaryType::PEC) {
+ // Copy from phi to get the boundary values
+ phi1d_mf.ParallelCopy(*phi[lev], 0, 0, 1);
+ }
+ rho1d_mf.ParallelCopy(*rho[lev], 0, 0, 1);
+
+ // Multiplier on the charge density
+ const amrex::Real norm = dx[0]*dx[0]/PhysConst::ep0;
+ rho1d_mf.mult(norm);
+
+ // Use the MFIter loop since when parallel, only process zero has a FAB.
+ // This skips the loop on all other processors.
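+ // The system solved below is the 1D finite-difference Poisson equation,
+ // -phi(i-1) + 2*phi(i) - phi(i+1) = dx*dx*rho(i)/ep0 (the right-hand side
+ // was folded into rho1d by the multiplication above), solved with the
+ // Thomas algorithm: the upward loop is the forward-elimination sweep,
+ // storing the elimination multipliers in zwork1d, and the downward loop
+ // is the back substitution that produces phi.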
+ for (MFIter mfi(phi1d_mf); mfi.isValid(); ++mfi) {
+
+ const auto& phi1d_arr = phi1d_mf[mfi].array();
+ const auto& zwork1d_arr = zwork1d_mf[mfi].array();
+ const auto& rho1d_arr = rho1d_mf[mfi].array();
+
+ // The loops are always performed on the CPU
+
+ amrex::Real diag = 2._rt;
+
+ // The initial values depend on the boundary condition
+ if (field_boundary_lo0 == FieldBoundaryType::PEC) {
+
+ phi1d_arr(1,0,0) = (phi1d_arr(0,0,0) + rho1d_arr(1,0,0))/diag;
+
+ } else if (field_boundary_lo0 == FieldBoundaryType::Neumann) {
+
+ // Neumann boundary condition
+ phi1d_arr(0,0,0) = rho1d_arr(0,0,0)/diag;
+
+ zwork1d_arr(1,0,0) = 2._rt/diag;
+ diag = 2._rt - zwork1d_arr(1,0,0);
+ phi1d_arr(1,0,0) = (rho1d_arr(1,0,0) - (-1._rt)*phi1d_arr(1-1,0,0))/diag;
+
+ } else if (field_boundary_lo0 == FieldBoundaryType::Periodic) {
+
+ phi1d_arr(0,0,0) = rho1d_arr(0,0,0)/diag;
+
+ zwork1d_arr(1,0,0) = 1._rt/diag;
+ diag = 2._rt - zwork1d_arr(1,0,0);
+ phi1d_arr(1,0,0) = (rho1d_arr(1,0,0) - (-1._rt)*phi1d_arr(1-1,0,0))/diag;
+
+ }
+
+ // Loop upward, calculating the Gaussian elimination multipliers and right hand sides
+ for (int i_up = 2 ; i_up < nx_solve_max ; i_up++) {
+
+ zwork1d_arr(i_up,0,0) = 1._rt/diag;
+ diag = 2._rt - zwork1d_arr(i_up,0,0);
+ phi1d_arr(i_up,0,0) = (rho1d_arr(i_up,0,0) - (-1._rt)*phi1d_arr(i_up-1,0,0))/diag;
+
+ }
+
+ // The last value depends on the boundary condition
+ amrex::Real zwork_product = 1.; // Needed for periodic boundaries
+ if (field_boundary_hi0 == FieldBoundaryType::PEC) {
+
+ int const nxm1 = nx_full_domain - 1;
+ zwork1d_arr(nxm1,0,0) = 1._rt/diag;
+ diag = 2._rt - zwork1d_arr(nxm1,0,0);
+ phi1d_arr(nxm1,0,0) = (phi1d_arr(nxm1+1,0,0) + rho1d_arr(nxm1,0,0) - (-1._rt)*phi1d_arr(nxm1-1,0,0))/diag;
+
+ } else if (field_boundary_hi0 == FieldBoundaryType::Neumann) {
+
+ // Neumann boundary condition
+ zwork1d_arr(nx_full_domain,0,0) = 1._rt/diag;
+ diag = 2._rt - 2._rt*zwork1d_arr(nx_full_domain,0,0);
+ if (diag == 0._rt) {
+ // This happens if the lower boundary is also Neumann.
+ // In this case, the potential is relative to an arbitrary constant,
+ // so set the upper boundary to zero to force a value.
+ phi1d_arr(nx_full_domain,0,0) = 0.;
+ } else {
+ phi1d_arr(nx_full_domain,0,0) = (rho1d_arr(nx_full_domain,0,0) - (-1._rt)*phi1d_arr(nx_full_domain-1,0,0))/diag;
+ }
+
+ } else if (field_boundary_hi0 == FieldBoundaryType::Periodic) {
+
+ zwork1d_arr(nx_full_domain,0,0) = 1._rt/diag;
+
+ for (int i = 1 ; i <= nx_full_domain ; i++) {
+ zwork_product *= zwork1d_arr(i,0,0);
+ }
+
+ diag = 2._rt - zwork1d_arr(nx_full_domain,0,0) - zwork_product;
+ // Note that rho1d_arr(0,0,0) is used to ensure that the same value is used
+ // on both boundaries.
+ phi1d_arr(nx_full_domain,0,0) = (rho1d_arr(0,0,0) - (-1._rt)*phi1d_arr(nx_full_domain-1,0,0))/diag; + + } + + // Loop downward to calculate the phi + if (field_boundary_lo0 == FieldBoundaryType::Periodic) { + + // With periodic, the right hand column adds an extra term for all rows + for (int i_down = nx_full_domain-1 ; i_down >= 0 ; i_down--) { + zwork_product /= zwork1d_arr(i_down+1,0,0); + phi1d_arr(i_down,0,0) = phi1d_arr(i_down,0,0) + zwork1d_arr(i_down+1,0,0)*phi1d_arr(i_down+1,0,0) + zwork_product*phi1d_arr(nx_full_domain,0,0); + } + + } else { + + for (int i_down = nx_solve_max-1 ; i_down >= nx_solve_min ; i_down--) { + phi1d_arr(i_down,0,0) = phi1d_arr(i_down,0,0) + zwork1d_arr(i_down+1,0,0)*phi1d_arr(i_down+1,0,0); + } + + } + + } + + // Copy phi1d to phi + phi[lev]->ParallelCopy(phi1d_mf, 0, 0, 1); +} diff --git a/Source/FieldSolver/ElectrostaticSolvers/Make.package b/Source/FieldSolver/ElectrostaticSolvers/Make.package new file mode 100644 index 00000000000..a1d2d78dbb0 --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/Make.package @@ -0,0 +1,6 @@ +CEXE_sources += PoissonBoundaryHandler.cpp +CEXE_sources += LabFrameExplicitES.cpp +CEXE_sources += RelativisticExplicitES.cpp +CEXE_sources += ElectrostaticSolver.cpp + +VPATH_LOCATIONS += $(WARPX_HOME)/Source/FieldSolver/ElectrostaticSolvers diff --git a/Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.H b/Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.H new file mode 100644 index 00000000000..04e427d7122 --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.H @@ -0,0 +1,142 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Roelof Groenewald (TAE Technologies) + * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_BOUNDARYHANDLER_H_ +#define WARPX_BOUNDARYHANDLER_H_ + +#include "Utils/Parser/ParserUtils.H" +#include "WarpX.H" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +class PoissonBoundaryHandler +{ +public: + PoissonBoundaryHandler (); // constructor + + /** Read runtime parameters. Called in constructor. */ + void ReadParameters (); + + /** + * \brief Read the input settings and set the boundary conditions used + * on each domain boundary for the Poisson solver. + */ + void DefinePhiBCs (const amrex::Geometry& geom); + + /** + * \brief Initialize amrex::Parser objects to get the boundary potential + * values at specified times. + */ + void BuildParsers (); + void BuildParsersEB (); + + /** + * \brief Sets the EB potential string and updates the function parser. 
+ *
+ * \param [in] potential The string value of the potential
+ */
+ void setPotentialEB(const std::string& potential) {
+ potential_eb_str = potential;
+ BuildParsersEB();
+ }
+
+ struct PhiCalculatorEB {
+
+ amrex::Real t;
+ amrex::ParserExecutor<4> potential_eb;
+
+ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+ amrex::Real operator()(const amrex::Real x, const amrex::Real z) const noexcept {
+ using namespace amrex::literals;
+ return potential_eb(x, 0.0_rt, z, t);
+ }
+
+ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+ amrex::Real operator()(const amrex::Real x, const amrex::Real y, const amrex::Real z) const noexcept {
+ return potential_eb(x, y, z, t);
+ }
+ };
+
+ [[nodiscard]] PhiCalculatorEB
+ getPhiEB(amrex::Real t) const noexcept
+ {
+ return PhiCalculatorEB{t, potential_eb};
+ }
+
+ bool m_boundary_potential_specified = false;
+
+ // set default potentials to zero in order for current tests to pass
+ // but forcing the user to specify a potential might be better
+ std::string potential_xlo_str = "0";
+ std::string potential_xhi_str = "0";
+ std::string potential_ylo_str = "0";
+ std::string potential_yhi_str = "0";
+ std::string potential_zlo_str = "0";
+ std::string potential_zhi_str = "0";
+ std::string potential_eb_str = "0";
+
+ amrex::ParserExecutor<1> potential_xlo;
+ amrex::ParserExecutor<1> potential_xhi;
+ amrex::ParserExecutor<1> potential_ylo;
+ amrex::ParserExecutor<1> potential_yhi;
+ amrex::ParserExecutor<1> potential_zlo;
+ amrex::ParserExecutor<1> potential_zhi;
+ amrex::ParserExecutor<1> potential_eb_t;
+ amrex::ParserExecutor<4> potential_eb;
+
+ amrex::Array<amrex::LinOpBCType, AMREX_SPACEDIM> lobc, hibc;
+ std::array<bool, AMREX_SPACEDIM*2> dirichlet_flag;
+ bool has_non_periodic = false;
+ bool phi_EB_only_t = true;
+
+private:
+
+ amrex::Parser potential_xlo_parser;
+ amrex::Parser potential_xhi_parser;
+ amrex::Parser potential_ylo_parser;
+ amrex::Parser potential_yhi_parser;
+ amrex::Parser potential_zlo_parser;
+ amrex::Parser potential_zhi_parser;
+ amrex::Parser potential_eb_parser;
+};
+
+/** use amrex to directly calculate the electric field since with EB's the
+ * simple finite difference scheme in WarpX::computeE sometimes fails
+ */
+class EBCalcEfromPhiPerLevel {
+ private:
+ amrex::Vector<
+ amrex::Array<amrex::MultiFab *, AMREX_SPACEDIM>
+ > m_e_field;
+
+ public:
+ EBCalcEfromPhiPerLevel(amrex::Vector< amrex::Array<amrex::MultiFab *, AMREX_SPACEDIM> > e_field)
+ : m_e_field(std::move(e_field)) {}
+
+ void operator()(amrex::MLMG & mlmg, int const lev) {
+ using namespace amrex::literals;
+
+ mlmg.getGradSolution({m_e_field[lev]});
+ for (auto &field: m_e_field[lev]) {
+ field->mult(-1._rt);
+ }
+ }
+};
+
+#endif // WARPX_BOUNDARYHANDLER_H_
diff --git a/Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.cpp b/Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.cpp
new file mode 100644
index 00000000000..8afaf4c0587
--- /dev/null
+++ b/Source/FieldSolver/ElectrostaticSolvers/PoissonBoundaryHandler.cpp
@@ -0,0 +1,187 @@
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: Roelof Groenewald (TAE Technologies)
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#include "PoissonBoundaryHandler.H"
+
+using namespace amrex;
+
+PoissonBoundaryHandler::PoissonBoundaryHandler ()
+{
+ ReadParameters();
+ BuildParsers();
+}
+
+void PoissonBoundaryHandler::ReadParameters()
+{
+ // Parse the input file for domain boundary potentials
+ const ParmParse pp_boundary("boundary");
+
+ // Read potentials from input file
+ m_boundary_potential_specified |= pp_boundary.query("potential_lo_x", potential_xlo_str);
+ m_boundary_potential_specified |= pp_boundary.query("potential_hi_x", potential_xhi_str);
+ m_boundary_potential_specified |= pp_boundary.query("potential_lo_y", potential_ylo_str);
+ m_boundary_potential_specified |= pp_boundary.query("potential_hi_y", potential_yhi_str);
+ m_boundary_potential_specified |= pp_boundary.query("potential_lo_z", potential_zlo_str);
+ m_boundary_potential_specified |= pp_boundary.query("potential_hi_z", potential_zhi_str);
+
+ const ParmParse pp_warpx("warpx");
+ m_boundary_potential_specified |= pp_warpx.query("eb_potential(x,y,z,t)", potential_eb_str);
+
+ if (m_boundary_potential_specified & (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC)) {
+ ablastr::warn_manager::WMRecordWarning(
+ "Algorithms",
+ "The input script specifies the electric potential (phi) at the boundary, but \
+ also uses the hybrid PIC solver based on Ohm’s law. When using this solver, the \
+ electric potential does not have any impact on the simulation.",
+ ablastr::warn_manager::WarnPriority::low);
+ }
+ else if (m_boundary_potential_specified & (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::None)) {
+ ablastr::warn_manager::WMRecordWarning(
+ "Algorithms",
+ "The input script specifies the electric potential (phi) at the boundary so \
+ an initial Poisson solve will be performed.",
+ ablastr::warn_manager::WarnPriority::low);
+ }
+}
+
+void PoissonBoundaryHandler::DefinePhiBCs (const amrex::Geometry& geom)
+{
+#ifdef WARPX_DIM_RZ
+ if (geom.ProbLo(0) == 0){
+ lobc[0] = LinOpBCType::Neumann;
+ dirichlet_flag[0] = false;
+
+ // handle the r_max boundary explicitly
+ if (WarpX::field_boundary_hi[0] == FieldBoundaryType::PEC) {
+ hibc[0] = LinOpBCType::Dirichlet;
+ dirichlet_flag[1] = true;
+ }
+ else if (WarpX::field_boundary_hi[0] == FieldBoundaryType::Neumann) {
+ hibc[0] = LinOpBCType::Neumann;
+ dirichlet_flag[1] = false;
+ }
+ else {
+ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(false,
+ "Field boundary condition at the outer radius must be either PEC or neumann "
+ "when using the electrostatic solver"
+ );
+ }
+ }
+ const int dim_start = 1;
+#else
+ const int dim_start = 0;
+ amrex::ignore_unused(geom);
+#endif
+ for (int idim=dim_start; idim<AMREX_SPACEDIM; idim++){
+ if ( WarpX::field_boundary_lo[idim] == FieldBoundaryType::Periodic
+ && WarpX::field_boundary_hi[idim] == FieldBoundaryType::Periodic ) {
+ lobc[idim] = LinOpBCType::Periodic;
+ hibc[idim] = LinOpBCType::Periodic;
+ dirichlet_flag[idim*2] = false;
+ dirichlet_flag[idim*2+1] = false;
+ }
+ else {
+ has_non_periodic = true;
+
+ // handle the lower boundary
+ if ( WarpX::field_boundary_lo[idim] == FieldBoundaryType::PEC ) {
+ lobc[idim] = LinOpBCType::Dirichlet;
+ dirichlet_flag[idim*2] = true;
+ }
+ else if ( WarpX::field_boundary_lo[idim] == FieldBoundaryType::Neumann ) {
+ lobc[idim] = LinOpBCType::Neumann;
+ dirichlet_flag[idim*2] = false;
+ }
+ else {
+ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(false,
+ "Field boundary conditions must be either periodic, PEC or neumann "
+ "when using the electrostatic solver"
+ );
+ }
+
+ // handle the upper boundary
+ if ( WarpX::field_boundary_hi[idim] == FieldBoundaryType::PEC ) {
+ hibc[idim] = LinOpBCType::Dirichlet;
+ dirichlet_flag[idim*2+1] = true;
+ }
+ else if ( WarpX::field_boundary_hi[idim] == FieldBoundaryType::Neumann ) {
+ hibc[idim] = LinOpBCType::Neumann;
+ dirichlet_flag[idim*2+1] = false;
+ }
+ else {
+ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(false,
+ "Field boundary conditions must be either periodic, PEC or neumann "
+ "when using the electrostatic solver"
+ );
+ }
+ }
+ }
+}
+
+void PoissonBoundaryHandler::BuildParsers ()
+{
+ potential_xlo_parser = utils::parser::makeParser(potential_xlo_str, {"t"});
+ potential_xhi_parser = utils::parser::makeParser(potential_xhi_str, {"t"});
+ potential_ylo_parser = utils::parser::makeParser(potential_ylo_str, {"t"});
+ potential_yhi_parser = utils::parser::makeParser(potential_yhi_str, {"t"});
+ potential_zlo_parser = utils::parser::makeParser(potential_zlo_str, {"t"});
+ potential_zhi_parser = utils::parser::makeParser(potential_zhi_str, {"t"});
+
+ potential_xlo = potential_xlo_parser.compile<1>();
+ potential_xhi = potential_xhi_parser.compile<1>();
+ potential_ylo = potential_ylo_parser.compile<1>();
+ potential_yhi = potential_yhi_parser.compile<1>();
+ potential_zlo = potential_zlo_parser.compile<1>();
+ potential_zhi = potential_zhi_parser.compile<1>();
+
+ BuildParsersEB();
+}
+
+void PoissonBoundaryHandler::BuildParsersEB ()
+{
+ potential_eb_parser = utils::parser::makeParser(potential_eb_str, {"x", "y", "z", "t"});
+
+ // check if the EB potential is a function of space or only of time
+ const std::set<std::string> eb_symbols = potential_eb_parser.symbols();
+ if ((eb_symbols.count("x") != 0) || (eb_symbols.count("y") != 0)
+ || (eb_symbols.count("z") != 0)) {
+ potential_eb = potential_eb_parser.compile<4>();
+ phi_EB_only_t = false;
+ }
+ else {
+ potential_eb_parser = utils::parser::makeParser(potential_eb_str, {"t"});
+ potential_eb_t = potential_eb_parser.compile<1>();
+ }
+}
diff --git a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H
new file mode 100644
index 00000000000..70382d7ced5
--- /dev/null
+++ b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H
@@ -0,0 +1,84 @@
+
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: Remi Lehe, Roelof Groenewald, Arianna Formenti, Revathi Jambunathan
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+#ifndef WARPX_RELATIVISTICEXPLICITES_H_
+#define WARPX_RELATIVISTICEXPLICITES_H_
+
+#include "ElectrostaticSolver.H"
+#include "Particles/WarpXParticleContainer.H"
+
+
+class RelativisticExplicitES final : public ElectrostaticSolver
+{
+public:
+
+ RelativisticExplicitES (int nlevs_max) : ElectrostaticSolver (nlevs_max) {
+ ReadParameters();
+ }
+
+ void InitData () override;
+
+ /**
+ * \brief Computes electrostatic fields for species
+ * that have initialize self fields turned on.
+ * A loop over all the species is performed and for each species (with self fields)
+ * the function ``AddSpaceChargeField`` is called.
+ * This function computes the electrostatic potential with the species charge density as source,
+ * and then the electric and magnetic fields are updated to include the
+ * corresponding fields from the electrostatic potential.
+ * Then the electric and magnetic fields are updated to include the potential variation
+ * due to boundary conditions, using the function ``AddBoundaryField``
+ *
+ * \param[in,unused] rho_fp A temporary multifab is used for species charge density
+ * \param[in,unused] rho_cp A temporary multifab is used to store species charge density on coarse patch
+ * \param[in] charge_buf Buffer region to synchronize charge density from fine and coarse patch
+ * \param[in,unused] phi_fp A temporary multifab is used to compute the electrostatic potential for each species
+ * \param[in] mpc Reference to the multi-particle container, to access species data
+ * \param[in,out] Efield_fp Field contribution from phi computed from each species' charge density is added
+ * \param[in,out] Bfield_fp Field contribution from phi computed from each species' charge density is added
+ */
+ void ComputeSpaceChargeField (
+ [[maybe_unused]] amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho_fp,
+ [[maybe_unused]] amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho_cp,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& charge_buf,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi_fp,
+ MultiParticleContainer& mpc,
+ [[maybe_unused]] MultiFluidContainer* mfl,
+ amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& Efield_fp,
+ amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& Bfield_fp
+ ) override;
+
+ /**
+ * Compute the charge density of the species particle container, pc,
+ * and obtain the corresponding electrostatic potential to
+ * update the electric and magnetic fields.
+ * \param[in] charge_buf Multifab that stores buffer region to
+ * synchronize charge density on fine and coarse patch
+ * \param[in] pc particle container for the selected species
+ * \param[in,out] Efield Efield updated to include potential computed for selected species charge density as source
+ * \param[in,out] Bfield Bfield updated to include potential computed for selected species charge density as source
+ */
+ void AddSpaceChargeField (
+ amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_buf,
+ WarpXParticleContainer& pc,
+ amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Efield,
+ amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Bfield
+ );
+
+ /** Compute the potential `phi` by solving the Poisson equation with the
+ simulation specific boundary conditions and boundary values, then add the
+ E field due to that `phi` to `Efield_fp`.
+ * \param[in,out] Efield Efield updated to include potential gradient from boundary condition
+ */
+ void AddBoundaryField (
+ amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Efield
+ );
+};
+
+#endif // WARPX_RELATIVISTICEXPLICITES_H_
diff --git a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp
new file mode 100644
index 00000000000..1660efd48c2
--- /dev/null
+++ b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp
@@ -0,0 +1,168 @@
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: Remi Lehe, Roelof Groenewald, Arianna Formenti, Revathi Jambunathan
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+#include "WarpX.H"
+
+#include "RelativisticExplicitES.H"
+
+#include "Particles/MultiParticleContainer.H"
+#include "Particles/WarpXParticleContainer.H"
+
+using namespace amrex;
+
+void RelativisticExplicitES::InitData () {
+ auto & warpx = WarpX::GetInstance();
+ bool prepare_field_solve = (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic);
+ // check if any of the particle containers have initialize_self_fields = True
+ for (auto const& species : warpx.GetPartContainer()) {
+ prepare_field_solve |= species->initialize_self_fields;
+ }
+ prepare_field_solve |= m_poisson_boundary_handler->m_boundary_potential_specified;
+
+ if (prepare_field_solve) {
+ m_poisson_boundary_handler->DefinePhiBCs(warpx.Geom(0));
+ }
+}
+
+void RelativisticExplicitES::ComputeSpaceChargeField (
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho_fp,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho_cp,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& charge_buf,
+ amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi_fp,
+ MultiParticleContainer& mpc,
+ MultiFluidContainer* mfl,
+ amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& Efield_fp,
+ amrex::Vector< std::array< std::unique_ptr<amrex::MultiFab>, 3> >& Bfield_fp
+) {
+ WARPX_PROFILE("RelativisticExplicitES::ComputeSpaceChargeField");
+ amrex::ignore_unused(rho_fp, rho_cp, phi_fp, mfl);
+
+ const bool always_run_solve = (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic);
+
+ // Loop over the species and add their space-charge contribution to E and B.
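+ // Each such species triggers its own Poisson solve, with beta set to the
+ // mean velocity of that species (see AddSpaceChargeField below).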
+ // Note that the fields calculated here do not include the E field
+ // due to simulation boundary potentials
+ for (auto const& species : mpc) {
+ if (always_run_solve || (species->initialize_self_fields)) {
+ AddSpaceChargeField(charge_buf, *species, Efield_fp, Bfield_fp);
+ }
+ }
+
+ // Add the field due to the boundary potentials
+ if (always_run_solve || (m_poisson_boundary_handler->m_boundary_potential_specified))
+ {
+ AddBoundaryField(Efield_fp);
+ }
+}
+
+void RelativisticExplicitES::AddSpaceChargeField (
+ amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_buf,
+ WarpXParticleContainer& pc,
+ amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Efield_fp,
+ amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Bfield_fp)
+{
+ WARPX_PROFILE("RelativisticExplicitES::AddSpaceChargeField");
+
+ if (pc.getCharge() == 0) { return; }
+
+#ifdef WARPX_DIM_RZ
+ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(WarpX::n_rz_azimuthal_modes == 1,
+ "Error: RZ electrostatic only implemented for a single mode");
+#endif
+
+ auto & warpx = WarpX::GetInstance();
+
+ // Allocate fields for charge and potential
+ Vector<std::unique_ptr<MultiFab>> rho(num_levels);
+ Vector<std::unique_ptr<MultiFab>> rho_coarse(num_levels); // Used in order to interpolate between levels
+ Vector<std::unique_ptr<MultiFab>> phi(num_levels);
+ // Use number of guard cells used for local deposition of rho
+ const amrex::IntVect ng = warpx.get_ng_depos_rho();
+ for (int lev = 0; lev < num_levels; lev++) {
+ BoxArray nba = warpx.boxArray(lev);
+ nba.surroundingNodes();
+ rho[lev] = std::make_unique<MultiFab>(nba, warpx.DistributionMap(lev), 1, ng);
+ rho[lev]->setVal(0.);
+ phi[lev] = std::make_unique<MultiFab>(nba, warpx.DistributionMap(lev), 1, 1);
+ phi[lev]->setVal(0.);
+ if (lev > 0) {
+ // For MR levels: allocate the coarsened version of rho
+ BoxArray cba = nba;
+ cba.coarsen(warpx.refRatio(lev-1));
+ rho_coarse[lev] = std::make_unique<MultiFab>(cba, warpx.DistributionMap(lev), 1, ng);
+ rho_coarse[lev]->setVal(0.);
+ if (charge_buf[lev]) {
+ charge_buf[lev]->setVal(0.);
+ }
+ }
+ }
+ // Deposit particle charge density (source of Poisson solver)
+ // The options below are identical to those in MultiParticleContainer::DepositCharge
+ bool const local = true;
+ bool const reset = false;
+ bool const apply_boundary_and_scale_volume = true;
+ bool const interpolate_across_levels = false;
+ if ( !pc.do_not_deposit) {
+ pc.DepositCharge(rho, local, reset, apply_boundary_and_scale_volume,
+ interpolate_across_levels);
+ }
+ warpx.SyncRho(rho, rho_coarse, charge_buf); // Apply filter, perform MPI exchange, interpolate across levels
+
+ // Get the particle beta vector
+ bool const local_average = false; // Average across all MPI ranks
+ std::array<ParticleReal, 3> beta_pr = pc.meanParticleVelocity(local_average);
+ std::array<Real, 3> beta;
+ for (int i=0 ; i < static_cast<int>(beta.size()) ; i++) {
+ beta[i] = beta_pr[i]/PhysConst::c; // Normalize
+ }
+
+ // Compute the potential phi, by solving the Poisson equation
+ computePhi( rho, phi, beta, pc.self_fields_required_precision,
+ pc.self_fields_absolute_tolerance, pc.self_fields_max_iters,
+ pc.self_fields_verbosity );
+
+ // Compute the corresponding electric and magnetic field, from the potential phi
+ computeE( Efield_fp, phi, beta );
+ computeB( Bfield_fp, phi, beta );
+
+}
+
+void RelativisticExplicitES::AddBoundaryField (amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Efield_fp)
+{
+ WARPX_PROFILE("RelativisticExplicitES::AddBoundaryField");
+
+ auto & warpx = WarpX::GetInstance();
+
+ // Allocate fields for charge and potential
+ amrex::Vector<std::unique_ptr<amrex::MultiFab>> rho(num_levels);
+ amrex::Vector<std::unique_ptr<amrex::MultiFab>> phi(num_levels);
+ // Use number of guard cells used for local deposition of rho
+ const amrex::IntVect ng = warpx.get_ng_depos_rho();
+ for (int lev = 0; lev < num_levels; lev++) {
+ BoxArray nba = warpx.boxArray(lev);
+ nba.surroundingNodes();
+ rho[lev] = std::make_unique<MultiFab>(nba, warpx.DistributionMap(lev), 1, ng);
+ rho[lev]->setVal(0.);
+ phi[lev] = std::make_unique<MultiFab>(nba, warpx.DistributionMap(lev), 1, 1);
+ phi[lev]->setVal(0.);
+ }
+
+ // Set the boundary potentials appropriately
+ setPhiBC(phi, warpx.gett_new(0));
+
+ // beta is zero for boundaries
+ const std::array<Real, 3> beta = {0._rt};
+
+ // Compute the potential phi, by solving the Poisson equation
+ computePhi( rho, phi, beta, self_fields_required_precision,
+ self_fields_absolute_tolerance, self_fields_max_iters,
+ self_fields_verbosity );
+
+ // Compute the corresponding electric field, from the potential phi.
+ computeE( Efield_fp, phi, beta );
+}
diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp
index 4ae988b9d10..031bc915afc 100644
--- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp
+++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp
@@ -116,7 +116,12 @@ WarpX::AddMagnetostaticFieldLabFrame()
 WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !IsPythonCallbackInstalled("poissonsolver"),
 "Python Level Poisson Solve not supported for Magnetostatic implementation.");
 
- const amrex::Real magnetostatic_absolute_tolerance = self_fields_absolute_tolerance*PhysConst::c;
+ // const amrex::Real magnetostatic_absolute_tolerance = self_fields_absolute_tolerance*PhysConst::c;
+ // temporary fix!!!
+ const amrex::Real magnetostatic_absolute_tolerance = 0.0;
+ const amrex::Real self_fields_required_precision = 1e-12;
+ const int self_fields_max_iters = 200;
+ const int self_fields_verbosity = 2;
 
 computeVectorPotential( current_fp, vector_potential_fp_nodal, self_fields_required_precision,
 magnetostatic_absolute_tolerance, self_fields_max_iters,
diff --git a/Source/FieldSolver/Make.package b/Source/FieldSolver/Make.package
index a8af4c2de97..4c7c41f6f8e 100644
--- a/Source/FieldSolver/Make.package
+++ b/Source/FieldSolver/Make.package
@@ -2,9 +2,11 @@ CEXE_sources += WarpXPushFieldsEM.cpp
 CEXE_sources += WarpXPushFieldsHybridPIC.cpp
 CEXE_sources += ElectrostaticSolver.cpp
 CEXE_sources += WarpX_QED_Field_Pushers.cpp
+CEXE_sources += WarpXSolveFieldsES.cpp
 ifeq ($(USE_FFT),TRUE)
 include $(WARPX_HOME)/Source/FieldSolver/SpectralSolver/Make.package
 endif
+include $(WARPX_HOME)/Source/FieldSolver/ElectrostaticSolvers/Make.package
 include $(WARPX_HOME)/Source/FieldSolver/FiniteDifferenceSolver/Make.package
 include $(WARPX_HOME)/Source/FieldSolver/MagnetostaticSolver/Make.package
 include $(WARPX_HOME)/Source/FieldSolver/ImplicitSolvers/Make.package
diff --git a/Source/FieldSolver/WarpXSolveFieldsES.cpp b/Source/FieldSolver/WarpXSolveFieldsES.cpp
new file mode 100644
index 00000000000..42a537b5c2a
--- /dev/null
+++ b/Source/FieldSolver/WarpXSolveFieldsES.cpp
@@ -0,0 +1,30 @@
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ * + * Authors: Remi Lehe, Roelof Groenewald, Arianna Formenti, Revathi Jambunathan + * + * License: BSD-3-Clause-LBNL + */ +#include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" +#include "Utils/WarpXProfilerWrapper.H" +#include "WarpX.H" + +void WarpX::ComputeSpaceChargeField (bool const reset_fields) +{ + WARPX_PROFILE("WarpX::ComputeSpaceChargeField"); + if (reset_fields) { + // Reset all E and B fields to 0, before calculating space-charge fields + WARPX_PROFILE("WarpX::ComputeSpaceChargeField::reset_fields"); + for (int lev = 0; lev <= max_level; lev++) { + for (int comp=0; comp<3; comp++) { + Efield_fp[lev][comp]->setVal(0); + Bfield_fp[lev][comp]->setVal(0); + } + } + } + + m_electrostatic_solver->ComputeSpaceChargeField( + rho_fp, rho_cp, charge_buf, phi_fp, *mypc, myfl.get(), Efield_fp, Bfield_fp + ); +} diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 49b0d439c50..0cf9496e63e 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -18,6 +18,7 @@ #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "EmbeddedBoundary/Enabled.H" #include "FieldSolver/Fields.H" +#include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" #include "Filter/BilinearFilter.H" @@ -551,6 +552,8 @@ WarpX::InitData () ); } + m_electrostatic_solver->InitData(); + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { m_hybrid_pic_model->InitData(); } diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index e583d42c49c..2689b3115fa 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -36,7 +36,7 @@ #include #include #include - +#include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" #include #include #include @@ -176,13 +176,13 @@ The physical fields in WarpX have the following naming: std::string potential_lo_y, std::string potential_hi_y, std::string potential_lo_z, std::string potential_hi_z) { - if (potential_lo_x != "") wx.m_poisson_boundary_handler.potential_xlo_str = potential_lo_x; - if (potential_hi_x != "") wx.m_poisson_boundary_handler.potential_xhi_str = potential_hi_x; - if (potential_lo_y != "") wx.m_poisson_boundary_handler.potential_ylo_str = potential_lo_y; - if (potential_hi_y != "") wx.m_poisson_boundary_handler.potential_yhi_str = potential_hi_y; - if (potential_lo_z != "") wx.m_poisson_boundary_handler.potential_zlo_str = potential_lo_z; - if (potential_hi_z != "") wx.m_poisson_boundary_handler.potential_zhi_str = potential_hi_z; - wx.m_poisson_boundary_handler.buildParsers(); + if (potential_lo_x != "") wx.GetElectrostaticSolver().m_poisson_boundary_handler->potential_xlo_str = potential_lo_x; + if (potential_hi_x != "") wx.GetElectrostaticSolver().m_poisson_boundary_handler->potential_xhi_str = potential_hi_x; + if (potential_lo_y != "") wx.GetElectrostaticSolver().m_poisson_boundary_handler->potential_ylo_str = potential_lo_y; + if (potential_hi_y != "") wx.GetElectrostaticSolver().m_poisson_boundary_handler->potential_yhi_str = potential_hi_y; + if (potential_lo_z != "") wx.GetElectrostaticSolver().m_poisson_boundary_handler->potential_zlo_str = potential_lo_z; + if (potential_hi_z != "") wx.GetElectrostaticSolver().m_poisson_boundary_handler->potential_zhi_str = potential_hi_z; + 
wx.GetElectrostaticSolver().m_poisson_boundary_handler->BuildParsers();
 },
 py::arg("potential_lo_x") = "",
 py::arg("potential_hi_x") = "",
@@ -194,7 +194,7 @@ The physical fields in WarpX have the following naming:
 )
 .def("set_potential_on_eb",
 [](WarpX& wx, std::string potential) {
- wx.m_poisson_boundary_handler.setPotentialEB(potential);
+ wx.GetElectrostaticSolver().m_poisson_boundary_handler->setPotentialEB(potential);
 },
 py::arg("potential"),
 "Sets the EB potential string and updates the function parser."
diff --git a/Source/WarpX.H b/Source/WarpX.H
index a89ffe20573..28bb6215a45 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -16,6 +16,7 @@
 #include "Diagnostics/MultiDiagnostics_fwd.H"
 #include "Diagnostics/ReducedDiags/MultiReducedDiags_fwd.H"
 #include "EmbeddedBoundary/WarpXFaceInfoBox_fwd.H"
+#include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver_fwd.H"
 #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver_fwd.H"
 #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties_fwd.H"
 #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel_fwd.H"
@@ -39,7 +40,6 @@
 #include "Evolve/WarpXDtType.H"
 #include "Evolve/WarpXPushType.H"
 #include "FieldSolver/Fields.H"
-#include "FieldSolver/ElectrostaticSolver.H"
 #include "FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H"
 #include "FieldSolver/ImplicitSolvers/ImplicitSolver.H"
 #include "FieldSolver/ImplicitSolvers/WarpXSolverVec.H"
@@ -142,6 +142,7 @@ public:
 MultiParticleContainer& GetPartContainer () { return *mypc; }
 MultiFluidContainer& GetFluidContainer () { return *myfl; }
 MacroscopicProperties& GetMacroscopicProperties () { return *m_macroscopic_properties; }
+ ElectrostaticSolver& GetElectrostaticSolver () {return *m_electrostatic_solver;}
 HybridPICModel& GetHybridPICModel () { return *m_hybrid_pic_model; }
 [[nodiscard]] HybridPICModel * get_pointer_HybridPICModel () const { return m_hybrid_pic_model.get(); }
 MultiDiagnostics& GetMultiDiags () {return *multi_diags;}
@@ -957,12 +958,6 @@ public:
 static inline auto electrostatic_solver_id = ElectrostaticSolverAlgo::Default;
 static inline auto poisson_solver_id = PoissonSolverAlgo::Default;
 
- // Parameters for lab frame electrostatic
- static amrex::Real self_fields_required_precision;
- static amrex::Real self_fields_absolute_tolerance;
- static int self_fields_max_iters;
- static int self_fields_verbosity;
-
 static int do_moving_window; // boolean
 static int start_moving_window_step; // the first step to move window
 static int end_moving_window_step; // the last step to move window
@@ -1009,31 +1004,8 @@ public:
 */
 [[nodiscard]] amrex::IntVect get_numprocs() const {return numprocs;}
 
- /** Enable embedded boundaries */
- bool m_boundary_potential_specified = false;
- ElectrostaticSolver::PoissonBoundaryHandler m_poisson_boundary_handler;
+ /** Electrostatic solve call */
 void ComputeSpaceChargeField (bool reset_fields);
- void AddBoundaryField ();
- void AddSpaceChargeField (WarpXParticleContainer& pc);
- void AddSpaceChargeFieldLabFrame ();
- void computePhi (const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho,
- amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi,
- std::array<amrex::Real, 3> beta = {{0,0,0}},
- amrex::Real required_precision=amrex::Real(1.e-11),
- amrex::Real absolute_tolerance=amrex::Real(0.0),
- int max_iters=200,
- int verbosity=2) const;
-
- void setPhiBC (amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi ) const;
-
- void computeE (amrex::Vector< std::array<std::unique_ptr<amrex::MultiFab>, 3> >& E,
- const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi,
- std::array<amrex::Real, 3> beta = {{0,0,0}} ) const;
- void computeB (amrex::Vector< std::array<std::unique_ptr<amrex::MultiFab>, 3> >& B,
- const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi,
- std::array<amrex::Real, 3> beta = {{0,0,0}} ) const;
- void computePhiTriDiagonal (const amrex::Vector< std::unique_ptr<amrex::MultiFab> >& rho,
- amrex::Vector< std::unique_ptr<amrex::MultiFab> >& phi) const;
 
 // Magnetostatic Solver Interface
 MagnetostaticSolver::VectorPoissonBoundaryHandler m_vector_poisson_boundary_handler;
@@ -1676,6 +1648,9 @@ private:
 // Macroscopic properties
 std::unique_ptr<MacroscopicProperties> m_macroscopic_properties;
 
+ // Electrostatic solver
+ std::unique_ptr<ElectrostaticSolver> m_electrostatic_solver;
+
 // Hybrid PIC algorithm parameters
 std::unique_ptr<HybridPICModel> m_hybrid_pic_model;
 
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 2e9befba992..ef1668de4c0 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -16,6 +16,9 @@
 #include "Diagnostics/ReducedDiags/MultiReducedDiags.H"
 #include "EmbeddedBoundary/Enabled.H"
 #include "EmbeddedBoundary/WarpXFaceInfoBox.H"
+#include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H"
+#include "FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H"
+#include "FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H"
 #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H"
 #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H"
 #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H"
@@ -174,11 +177,6 @@ amrex::IntVect WarpX::sort_idx_type(AMREX_D_DECL(0,0,0));
 
 bool WarpX::do_dynamic_scheduling = true;
 
-Real WarpX::self_fields_required_precision = 1.e-11_rt;
-Real WarpX::self_fields_absolute_tolerance = 0.0_rt;
-int WarpX::self_fields_max_iters = 200;
-int WarpX::self_fields_verbosity = 2;
-
 bool WarpX::do_subcycling = false;
 bool WarpX::do_multi_J = false;
 int WarpX::do_multi_J_n_depositions;
@@ -356,6 +354,17 @@ WarpX::WarpX ()
 current_fp_vay.resize(nlevs_max);
 }
 
+ // Create the electrostatic solver object
+ if ((WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame)
+ || (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic))
+ {
+ m_electrostatic_solver = std::make_unique<LabFrameExplicitES>(nlevs_max);
+ }
+ else
+ {
+ m_electrostatic_solver = std::make_unique<RelativisticExplicitES>(nlevs_max);
+ }
+
 if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC)
 {
 // Create hybrid-PIC model object if needed
@@ -733,20 +742,6 @@ WarpX::ReadParameters ()
 electromagnetic_solver_id = ElectromagneticSolverAlgo::None;
 }
 
- if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame ||
- electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic)
- {
- // Note that with the relativistic version, these parameters would be
- // input for each species.
- utils::parser::queryWithParser(
- pp_warpx, "self_fields_required_precision", self_fields_required_precision);
- utils::parser::queryWithParser(
- pp_warpx, "self_fields_absolute_tolerance", self_fields_absolute_tolerance);
- utils::parser::queryWithParser(
- pp_warpx, "self_fields_max_iters", self_fields_max_iters);
- pp_warpx.query("self_fields_verbosity", self_fields_verbosity);
- }
-
 pp_warpx.query_enum_sloppy("poisson_solver", poisson_solver_id, "-_");
#ifndef WARPX_DIM_3D
 WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
@@ -767,7 +762,7 @@ WarpX::ReadParameters ()
 "The FFT Poisson solver is not implemented in labframe-electromagnetostatic mode yet."
); - bool const eb_enabled = EB::enabled(); + [[maybe_unused]] bool const eb_enabled = EB::enabled(); #if !defined(AMREX_USE_EB) WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !eb_enabled, @@ -775,38 +770,8 @@ WarpX::ReadParameters () ); #endif - // Parse the input file for domain boundary potentials - const ParmParse pp_boundary("boundary"); - bool potential_specified = false; - // When reading the potential at the boundary from the input file, set this flag to true if any of the potential is specified - potential_specified |= pp_boundary.query("potential_lo_x", m_poisson_boundary_handler.potential_xlo_str); - potential_specified |= pp_boundary.query("potential_hi_x", m_poisson_boundary_handler.potential_xhi_str); - potential_specified |= pp_boundary.query("potential_lo_y", m_poisson_boundary_handler.potential_ylo_str); - potential_specified |= pp_boundary.query("potential_hi_y", m_poisson_boundary_handler.potential_yhi_str); - potential_specified |= pp_boundary.query("potential_lo_z", m_poisson_boundary_handler.potential_zlo_str); - potential_specified |= pp_boundary.query("potential_hi_z", m_poisson_boundary_handler.potential_zhi_str); - if (eb_enabled) { - potential_specified |= pp_warpx.query("eb_potential(x,y,z,t)", m_poisson_boundary_handler.potential_eb_str); - } - m_boundary_potential_specified = potential_specified; - if (potential_specified & (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC)) { - ablastr::warn_manager::WMRecordWarning( - "Algorithms", - "The input script specifies the electric potential (phi) at the boundary, but \ - also uses the hybrid PIC solver based on Ohm’s law. When using this solver, the \ - electric potential does not have any impact on the simulation.", - ablastr::warn_manager::WarnPriority::low); - } - else if (potential_specified & (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::None)) { - ablastr::warn_manager::WMRecordWarning( - "Algorithms", - "The input script specifies the electric potential (phi) at the boundary so \ - an initial Poisson solve will be performed.", - ablastr::warn_manager::WarnPriority::low); - } - - m_poisson_boundary_handler.buildParsers(); #ifdef WARPX_DIM_RZ + const ParmParse pp_boundary("boundary"); pp_boundary.query("verboncoeur_axis_correction", verboncoeur_axis_correction); #endif diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index fbd8e5f14be..c36c83bc336 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -57,6 +57,101 @@ namespace ablastr::fields { +/** Compute the L-infinity norm of the charge density `rho` across all MR levels + * to determine if `rho` is zero everywhere + * + * \param[in] rho The charge density a given species + * \param[in] finest_level The most refined mesh refinement level + * \param[in] absolute_tolerance The absolute convergence threshold for the MLMG solver + * \param[out] max_norm_rho The maximum L-infinity norm of `rho` across all levels + */ +inline amrex::Real getMaxNormRho ( + amrex::Vector const & rho, + int finest_level, + amrex::Real & absolute_tolerance) +{ + amrex::Real max_norm_rho = 0.0; + for (int lev=0; lev<=finest_level; lev++) { + max_norm_rho = amrex::max(max_norm_rho, rho[lev]->norm0()); + } + amrex::ParallelDescriptor::ReduceRealMax(max_norm_rho); + + if (max_norm_rho == 0) { + if (absolute_tolerance == 0.0) { absolute_tolerance = amrex::Real(1e-6); } + ablastr::warn_manager::WMRecordWarning( + "ElectrostaticSolver", + "Max norm of rho is 0", + 
ablastr::warn_manager::WarnPriority::low + ); + } + return max_norm_rho; +} + +/** Interpolate the potential `phi` from level `lev` to `lev+1` + * + * Needed to solve Poisson equation on `lev+1` + * The coarser level `lev` provides both + * the boundary values and initial guess for `phi` + * on the finer level `lev+1` + * + * \param[in] phi_lev The potential on a given mesh refinement level `lev` + * \param[in] phi_lev_plus_one The potential on the next level `lev+1` + * \param[in] geom_lev The geometry of level `lev` + * \param[in] do_single_precision_comms perform communications in single precision + * \param[in] refratio mesh refinement ratio between level `lev` and `lev+1` + * \param[in] ncomp Number of components of the multifab (1) + * \param[in] ng Number of ghost cells (1 if collocated, 0 otherwise) + */ +inline void interpolatePhiBetweenLevels ( + amrex::MultiFab const* phi_lev, + amrex::MultiFab* phi_lev_plus_one, + amrex::Geometry const & geom_lev, + bool do_single_precision_comms, + const amrex::IntVect& refratio, + const int ncomp, + const int ng) +{ + using namespace amrex::literals; + + // Allocate phi_cp for lev+1 + amrex::BoxArray ba = phi_lev_plus_one->boxArray(); + ba.coarsen(refratio); + amrex::MultiFab phi_cp(ba, phi_lev_plus_one->DistributionMap(), ncomp, ng); + if (ng > 0) { + // Set all values outside the domain to zero + phi_cp.setDomainBndry(0.0_rt, geom_lev); + } + + // Copy from phi[lev] to phi_cp (in parallel) + const amrex::Periodicity& crse_period = geom_lev.periodicity(); + + ablastr::utils::communication::ParallelCopy( + phi_cp, + *phi_lev, + 0, + 0, + 1, + amrex::IntVect(0), + amrex::IntVect(ng), + do_single_precision_comms, + crse_period + ); + + // Local interpolation from phi_cp to phi[lev+1] +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for (amrex::MFIter mfi(*phi_lev_plus_one, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { + amrex::Array4 const phi_fp_arr = phi_lev_plus_one->array(mfi); + amrex::Array4 const phi_cp_arr = phi_cp.array(mfi); + + details::PoissonInterpCPtoFP const interp(phi_fp_arr, phi_cp_arr, refratio); + + amrex::Box const& b = mfi.growntilebox(ng); + amrex::ParallelFor(b, interp); + } +} + /** Compute the potential `phi` by solving the Poisson equation * * Uses `rho` as a source, assuming that the source moves at a @@ -121,8 +216,10 @@ computePhi (amrex::Vector const & rho, ABLASTR_PROFILE("computePhi"); + auto const finest_level = static_cast(rho.size() - 1); + if (!rel_ref_ratio.has_value()) { - ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(rho.size() == 1u, + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(finest_level == 0u, "rel_ref_ratio must be set if mesh-refinement is used"); rel_ref_ratio = amrex::Vector{{amrex::IntVect(AMREX_D_DECL(1, 1, 1))}}; } @@ -132,30 +229,18 @@ computePhi (amrex::Vector const & rho, "Embedded boundary solve requested but not compiled in"); #endif - auto const finest_level = static_cast(rho.size() - 1); - - // determine if rho is zero everywhere - amrex::Real max_norm_b = 0.0; - for (int lev=0; lev<=finest_level; lev++) { - max_norm_b = amrex::max(max_norm_b, rho[lev]->norm0()); - } - amrex::ParallelDescriptor::ReduceRealMax(max_norm_b); - - const bool always_use_bnorm = (max_norm_b > 0); - if (!always_use_bnorm) { - if (absolute_tolerance == 0.0) { absolute_tolerance = amrex::Real(1e-6); } - ablastr::warn_manager::WMRecordWarning( - "ElectrostaticSolver", - "Max norm of rho is 0", - ablastr::warn_manager::WarnPriority::low - ); - } +#if !defined(ABLASTR_USE_FFT) + 
ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, + "Must compile with FFT support to use the IGF solver!"); +#endif - amrex::LPInfo info; +#if !defined(WARPX_DIM_3D) + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, + "The FFT Poisson solver is currently only implemented for 3D!"); +#endif - for (int lev=0; lev<=finest_level; lev++) { - // Set the value of beta - amrex::Array beta_solver = + // Set the value of beta + amrex::Array beta_solver = #if defined(WARPX_DIM_1D_Z) {{ beta[2] }}; // beta_x and beta_z #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) @@ -164,36 +249,31 @@ computePhi (amrex::Vector const & rho, {{ beta[0], beta[1], beta[2] }}; #endif -#if !defined(ABLASTR_USE_FFT) - ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, - "Must compile with FFT support to use the IGF solver!"); -#endif + // determine if rho is zero everywhere + const amrex::Real max_norm_b = getMaxNormRho(rho, finest_level, absolute_tolerance); -#if !defined(WARPX_DIM_3D) - ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, - "The FFT Poisson solver is currently only implemented for 3D!"); -#endif + amrex::LPInfo info; + + for (int lev=0; lev<=finest_level; lev++) { + amrex::Array const dx_scaled + {AMREX_D_DECL(geom[lev].CellSize(0)/std::sqrt(1._rt-beta_solver[0]*beta_solver[0]), + geom[lev].CellSize(1)/std::sqrt(1._rt-beta_solver[1]*beta_solver[1]), + geom[lev].CellSize(2)/std::sqrt(1._rt-beta_solver[2]*beta_solver[2]))}; #if (defined(ABLASTR_USE_FFT) && defined(WARPX_DIM_3D)) // Use the Integrated Green Function solver (FFT) on the coarsest level if it was selected if(is_solver_igf_on_lev0 && lev==0){ - amrex::Array const dx_igf - {AMREX_D_DECL(geom[lev].CellSize(0)/std::sqrt(1._rt-beta_solver[0]*beta_solver[0]), - geom[lev].CellSize(1)/std::sqrt(1._rt-beta_solver[1]*beta_solver[1]), - geom[lev].CellSize(2)/std::sqrt(1._rt-beta_solver[2]*beta_solver[2]))}; if ( max_norm_b == 0 ) { phi[lev]->setVal(0); } else { - computePhiIGF( *rho[lev], *phi[lev], dx_igf, grids[lev] ); + computePhiIGF( *rho[lev], *phi[lev], dx_scaled, grids[lev] ); } continue; } #endif - // Use the Multigrid (MLMG) solver if selected or on refined patches // but first scale rho appropriately - using namespace ablastr::constant::SI; - rho[lev]->mult(-1._rt / ep0); // TODO: when do we "un-multiply" this? We need to document this side-effect! + rho[lev]->mult(-1._rt / ablastr::constant::SI::ep0); // TODO: when do we "un-multiply" this? We need to document this side-effect! 
#ifdef WARPX_DIM_RZ constexpr bool is_rz = true; @@ -203,10 +283,6 @@ computePhi (amrex::Vector const & rho, if (!eb_enabled && !is_rz) { // Determine whether to use semi-coarsening - amrex::Array dx_scaled - {AMREX_D_DECL(geom[lev].CellSize(0) / std::sqrt(1._rt - beta_solver[0] * beta_solver[0]), - geom[lev].CellSize(1) / std::sqrt(1._rt - beta_solver[1] * beta_solver[1]), - geom[lev].CellSize(2) / std::sqrt(1._rt - beta_solver[2] * beta_solver[2]))}; int max_semicoarsening_level = 0; int semicoarsening_direction = -1; const auto min_dir = static_cast(std::distance(dx_scaled.begin(), @@ -263,7 +339,6 @@ computePhi (amrex::Vector const & rho, 1._rt-beta_solver[1]*beta_solver[1], 1._rt-beta_solver[2]*beta_solver[2])}); #endif - #if defined(AMREX_USE_EB) if (eb_enabled) { // if the EB potential only depends on time, the potential can be passed @@ -295,8 +370,10 @@ computePhi (amrex::Vector const & rho, amrex::MLMG mlmg(*linop); // actual solver defined here mlmg.setVerbose(verbosity); mlmg.setMaxIter(max_iters); - mlmg.setAlwaysUseBNorm(always_use_bnorm); - if (grid_type == utils::enums::GridType::Collocated) { + mlmg.setAlwaysUseBNorm((max_norm_b > 0)); + + const int ng = int(grid_type == utils::enums::GridType::Collocated); // ghost cells + if (ng) { // In this case, computeE needs to use ghost nodes data. So we // ask MLMG to fill BC for us after it solves the problem. mlmg.setFinalFillBC(true); @@ -306,54 +383,22 @@ computePhi (amrex::Vector const & rho, mlmg.solve( {phi[lev]}, {rho[lev]}, relative_tolerance, absolute_tolerance ); + const amrex::IntVect& refratio = rel_ref_ratio.value()[lev]; + const int ncomp = linop->getNComp(); + // needed for solving the levels by levels: // - coarser level is initial guess for finer level // - coarser level provides boundary values for finer level patch // Interpolation from phi[lev] to phi[lev+1] // (This provides both the boundary conditions and initial guess for phi[lev+1]) if (lev < finest_level) { - - // Allocate phi_cp for lev+1 - amrex::BoxArray ba = phi[lev+1]->boxArray(); - const amrex::IntVect& refratio = rel_ref_ratio.value()[lev]; - ba.coarsen(refratio); - const int ncomp = linop->getNComp(); - const int ng = (grid_type == utils::enums::GridType::Collocated) ? 
1 : 0; - amrex::MultiFab phi_cp(ba, phi[lev+1]->DistributionMap(), ncomp, ng); - if (ng > 0) { - // Set all values outside the domain to zero - phi_cp.setDomainBndry(0.0_rt, geom[lev]); - } - - // Copy from phi[lev] to phi_cp (in parallel) - const amrex::Periodicity& crse_period = geom[lev].periodicity(); - - ablastr::utils::communication::ParallelCopy( - phi_cp, - *phi[lev], - 0, - 0, - 1, - amrex::IntVect(0), - amrex::IntVect(ng), - do_single_precision_comms, - crse_period - ); - - // Local interpolation from phi_cp to phi[lev+1] -#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif - for (amrex::MFIter mfi(*phi[lev + 1], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { - amrex::Array4 const phi_fp_arr = phi[lev + 1]->array(mfi); - amrex::Array4 const phi_cp_arr = phi_cp.array(mfi); - - details::PoissonInterpCPtoFP const interp(phi_fp_arr, phi_cp_arr, refratio); - - amrex::Box const& b = mfi.growntilebox(ng); - amrex::ParallelFor(b, interp); - } - + interpolatePhiBetweenLevels(phi[lev], + phi[lev+1], + geom[lev], + do_single_precision_comms, + refratio, + ncomp, + ng); } // Run additional operations, such as calculation of the E field for embedded boundaries @@ -362,10 +407,8 @@ computePhi (amrex::Vector const & rho, post_phi_calculation.value()(mlmg, lev); } } - } // loop over lev(els) -} - +} // computePhi } // namespace ablastr::fields #endif // ABLASTR_POISSON_SOLVER_H From 3c5ebd332bca524839cf6924aa4c7ac3edc33319 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 13 Sep 2024 20:28:26 -0700 Subject: [PATCH 51/91] Add base 2D Langmuir test, fix broken docs link (#5271) --- Examples/Tests/langmuir/CMakeLists.txt | 10 +++++++ .../langmuir/inputs_test_2d_langmuir_multi | 7 +++++ .../test_2d_langmuir_multi.json | 29 +++++++++++++++++++ 3 files changed, 46 insertions(+) create mode 100644 Examples/Tests/langmuir/inputs_test_2d_langmuir_multi create mode 100644 Regression/Checksum/benchmarks_json/test_2d_langmuir_multi.json diff --git a/Examples/Tests/langmuir/CMakeLists.txt b/Examples/Tests/langmuir/CMakeLists.txt index bd0cea79c7a..b259083c695 100644 --- a/Examples/Tests/langmuir/CMakeLists.txt +++ b/Examples/Tests/langmuir/CMakeLists.txt @@ -11,6 +11,16 @@ add_warpx_test( OFF # dependency ) +add_warpx_test( + test_2d_langmuir_multi # name + 2 # dims + 2 # nprocs + inputs_test_2d_langmuir_multi # inputs + analysis_2d.py # analysis + diags/diag1000080 # output + OFF # dependency +) + add_warpx_test( test_2d_langmuir_multi_mr # name 2 # dims diff --git a/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi new file mode 100644 index 00000000000..df0189acd91 --- /dev/null +++ b/Examples/Tests/langmuir/inputs_test_2d_langmuir_multi @@ -0,0 +1,7 @@ +# base input parameters +FILE = inputs_base_2d + +# test input parameters +algo.current_deposition = direct +diag1.electrons.variables = x z w ux uy uz +diag1.positrons.variables = x z w ux uy uz diff --git a/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi.json b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi.json new file mode 100644 index 00000000000..899352c45ba --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_langmuir_multi.json @@ -0,0 +1,29 @@ +{ + "lev=0": { + "Bx": 0.0, + "By": 5.726296856755232, + "Bz": 0.0, + "Ex": 3751589134191.326, + "Ey": 0.0, + "Ez": 3751589134191.332, + "jx": 1.0100623329922576e+16, + "jy": 0.0, + "jz": 1.0100623329922578e+16 + }, + 
"electrons": { + "particle_momentum_x": 5.668407513430198e-20, + "particle_momentum_y": 0.0, + "particle_momentum_z": 5.668407513430198e-20, + "particle_position_x": 0.6553599999999999, + "particle_position_y": 0.65536, + "particle_weight": 3200000000000000.5 + }, + "positrons": { + "particle_momentum_x": 5.668407513430198e-20, + "particle_momentum_y": 0.0, + "particle_momentum_z": 5.668407513430198e-20, + "particle_position_x": 0.6553599999999999, + "particle_position_y": 0.65536, + "particle_weight": 3200000000000000.5 + } +} From b3f759b6e811fdc248343a8b36f2a8fedbdcdabe Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 16 Sep 2024 16:19:15 -0700 Subject: [PATCH 52/91] For flux injection, improve the calculation of number of particles per cell (#5272) * Improve the calculation of number of particles per cell * Up test_rz_flux_injection benchmark This is needed since the change alters the calling of the random number --- .../test_rz_flux_injection.json | 23 ++++++++++--------- .../Particles/PhysicalParticleContainer.cpp | 5 ++-- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/test_rz_flux_injection.json b/Regression/Checksum/benchmarks_json/test_rz_flux_injection.json index 5a80590891c..2ba80d4fb0e 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_flux_injection.json +++ b/Regression/Checksum/benchmarks_json/test_rz_flux_injection.json @@ -1,14 +1,15 @@ { - "electron": { - "particle_momentum_x": 7.168456345337534e-18, - "particle_momentum_y": 7.02290351254873e-18, - "particle_momentum_z": 9.565641373942318e-42, - "particle_position_x": 6962.988311042427, - "particle_position_y": 2034.5301680154264, - "particle_theta": 6397.068924320389, - "particle_weight": 3.215011942598676e-08 - }, "lev=0": { - "Bz": 9.526664429810971e-24 + "Bz": 9.524453851623612e-24 + }, + "electron": { + "particle_momentum_x": 7.146168286112378e-18, + "particle_momentum_y": 7.073108431229069e-18, + "particle_momentum_z": 9.282175511339672e-42, + "particle_position_x": 6978.157994231982, + "particle_position_y": 2044.6981840260364, + "particle_theta": 6298.956888689097, + "particle_weight": 3.2236798669537214e-08 } -} \ No newline at end of file +} + diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 0617b36a273..cfd0eb0b500 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -1453,8 +1453,6 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, auto lo = getCellCoords(overlap_corner, dx, {0._rt, 0._rt, 0._rt}, iv); auto hi = getCellCoords(overlap_corner, dx, {1._rt, 1._rt, 1._rt}, iv); - const int num_ppc_int = static_cast(num_ppc_real + amrex::Random(engine)); - if (flux_pos->overlapsWith(lo, hi)) { auto index = overlap_box.index(iv); @@ -1464,7 +1462,8 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, } else { r = 1; } - pcounts[index] = num_ppc_int*r; + const int num_ppc_int = static_cast(num_ppc_real*r + amrex::Random(engine)); + pcounts[index] = num_ppc_int; } amrex::ignore_unused(j,k); }); From 12bf06904e5fbefdf5ed1b6cc9b146ba386c5c17 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 16:20:37 -0700 Subject: [PATCH 53/91] [pre-commit.ci] pre-commit autoupdate (#5276) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - 
[github.com/astral-sh/ruff-pre-commit: v0.6.4 → v0.6.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.4...v0.6.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c581183703a..1b668d5931e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.4 + rev: v0.6.5 hooks: # Run the linter - id: ruff From 2c7a9bee1968027773c09ec32365431aadc04b11 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 16 Sep 2024 17:01:33 -0700 Subject: [PATCH 54/91] Doc: Link to heFFTe RST Rendering in RST did not resolve, missing closing `>`. --- Docs/source/install/dependencies.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index f46dc3d1640..72c599ae2bd 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -28,7 +28,7 @@ Optional dependencies include: - `FFTW3 `__: for spectral solver (PSATD or IGF) support when running on CPU or SYCL - also needs the ``pkg-config`` tool on Unix -- `heFFTe 2.4.0+ `__: for multi-node spectral solver (IGF) support - `BLAS++ `__ and `LAPACK++ `__: for spectral solver (PSATD) support in RZ geometry - `Boost 1.66.0+ `__: for QED lookup tables generation support - `openPMD-api 0.15.1+ `__: we automatically download and compile a copy of openPMD-api for openPMD I/O support From f7dd6a9bc8bb1cfe863409fcf0451b020e99509e Mon Sep 17 00:00:00 2001 From: Johannes van de Wetering <92386744+johvandewetering@users.noreply.github.com> Date: Mon, 16 Sep 2024 19:21:59 -0700 Subject: [PATCH 55/91] [Hackathon] Ionization docs fixes (#5270) * Fixed typos in generalized Ohm's law * Fixed Testing link * Removed minus sign from BTO equation (Zhang eq 8 is wrong) * Clarify comment about difference to published equation. Co-authored-by: Axel Huebl --------- Co-authored-by: Johannes Van de Wetering Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Co-authored-by: Axel Huebl --- Docs/source/theory/kinetic_fluid_hybrid_model.rst | 4 ++-- Docs/source/theory/multiphysics/ionization.rst | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Docs/source/theory/kinetic_fluid_hybrid_model.rst b/Docs/source/theory/kinetic_fluid_hybrid_model.rst index b4f494d8382..f764ce4e02b 100644 --- a/Docs/source/theory/kinetic_fluid_hybrid_model.rst +++ b/Docs/source/theory/kinetic_fluid_hybrid_model.rst @@ -46,7 +46,7 @@ integrating over velocity), also called the generalized Ohm's law, is given by: .. 
math:: - en_e\vec{E} = \frac{m}{e}\frac{\partial \vec{J}_e}{\partial t} + \frac{m}{e^2}\left( \vec{U}_e\cdot\nabla \right) \vec{J}_e - \nabla\cdot {\overleftrightarrow P}_e - \vec{J}_e\times\vec{B}+\vec{R}_e + en_e\vec{E} = \frac{m}{e}\frac{\partial \vec{J}_e}{\partial t} + \frac{m}{e}\left( \vec{U}_e\cdot\nabla \right) \vec{J}_e - \nabla\cdot {\overleftrightarrow P}_e - \vec{J}_e\times\vec{B}+\vec{R}_e where :math:`\vec{U}_e = \vec{J}_e/(en_e)` is the electron fluid velocity, :math:`{\overleftrightarrow P}_e` is the electron pressure tensor and @@ -64,7 +64,7 @@ Plugging this back into the generalized Ohm' law gives: \left(en_e +\frac{m}{e\mu_0}\nabla\times\nabla\times\right)\vec{E} =& - \frac{m}{e}\left( \frac{\partial\vec{J}_{ext}}{\partial t} + \sum_{s\neq e}\frac{\partial\vec{J}_s}{\partial t} \right) \\ - &+ \frac{m}{e^2}\left( \vec{U}_e\cdot\nabla \right) \vec{J}_e - \nabla\cdot {\overleftrightarrow P}_e - \vec{J}_e\times\vec{B}+\vec{R}_e. + &+ \frac{m}{e}\left( \vec{U}_e\cdot\nabla \right) \vec{J}_e - \nabla\cdot {\overleftrightarrow P}_e - \vec{J}_e\times\vec{B}+\vec{R}_e. If we now further assume electrons are inertialess (i.e. :math:`m=0`), the above equation simplifies to, diff --git a/Docs/source/theory/multiphysics/ionization.rst b/Docs/source/theory/multiphysics/ionization.rst index 11abea386c8..5003872b1a1 100644 --- a/Docs/source/theory/multiphysics/ionization.rst +++ b/Docs/source/theory/multiphysics/ionization.rst @@ -56,18 +56,18 @@ where :math:`\mathrm{d}\tau` is the simulation timestep, which is divided by the Empirical Extension to Over-the-Barrier Regime for Hydrogen ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -For hydrogen, WarpX offers the modified empirical ADK extension to the Over-the-Barrier (OTB) published in :cite:t:`mpion-zhang_empirical_2014` Eq. (8). +For hydrogen, WarpX offers the modified empirical ADK extension to the Over-the-Barrier (OTB) published in :cite:t:`mpion-zhang_empirical_2014` Eq. (8) (note there is a typo in the paper and there should not be a minus sign in Eq. 8). .. math:: - W_\mathrm{M} = \exp\left[ -\left( a_1 \frac{E^2}{E_\mathrm{b}} + a_2 \frac{E}{E_\mathrm{b}} + a_3 \right) \right] W_\mathrm{ADK} + W_\mathrm{M} = \exp\left[ a_1 \frac{E^2}{E_\mathrm{b}} + a_2 \frac{E}{E_\mathrm{b}} + a_3 \right] W_\mathrm{ADK} The parameters :math:`a_1` through :math:`a_3` are independent of :math:`E` and can be found in the same reference. :math:`E_\mathrm{b}` is the classical Barrier Suppresion Ionization (BSI) field strength :math:`E_\mathrm{b} = U_\mathrm{ion}^2 / (4 Z)` given here in atomic units (AU). For a detailed description of conversion between unit systems consider the book by :cite:t:`mpion-Mulser2010`. Testing ^^^^^^^ -* `Testing the field ionization module <../../../../Examples/Tests/field_ionization/README.rst>`_. +* `Testing the field ionization module <../../../../en/latest/usage/examples/field_ionization/README.html>`_. .. 
bibliography:: :keyprefix: mpion- From 55e86ded534bbadc77af7e635415004106cc750c Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Tue, 17 Sep 2024 07:37:56 -0700 Subject: [PATCH 56/91] Minor refactoring of refined injection for AddPlasmaFlux (#5274) * Minor refactoring of refined injection for AddPlasmaFlux * Additional simplification * Minor fixes * Remove unneeded variable lrrfac --- .../Particles/PhysicalParticleContainer.cpp | 37 ++++++------------- 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index cfd0eb0b500..23af57b9206 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -1030,7 +1030,6 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int Gpu::DeviceVector counts(overlap_box.numPts(), 0); Gpu::DeviceVector offset(overlap_box.numPts()); auto *pcounts = counts.data(); - const amrex::IntVect lrrfac = rrfac; Box fine_overlap_box; // default Box is NOT ok(). if (refine_injection) { fine_overlap_box = overlap_box & amrex::shift(fine_injection_box, -shifted); @@ -1048,7 +1047,7 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int { auto index = overlap_box.index(iv); const amrex::Long r = (fine_overlap_box.ok() && fine_overlap_box.contains(iv))? - (AMREX_D_TERM(lrrfac[0],*lrrfac[1],*lrrfac[2])) : (1); + (AMREX_D_TERM(rrfac[0],*rrfac[1],*rrfac[2])) : (1); pcounts[index] = num_ppc*r; // update pcount by checking if cell-corners or cell-center // has non-zero density @@ -1154,8 +1153,8 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int long ip = poffset[index] + i_part; pa_idcpu[ip] = amrex::SetParticleIDandCPU(pid+ip, cpuid); const XDim3 r = (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) ? - // In the refined injection region: use refinement ratio `lrrfac` - inj_pos->getPositionUnitBox(i_part, lrrfac, engine) : + // In the refined injection region: use refinement ratio `rrfac` + inj_pos->getPositionUnitBox(i_part, rrfac, engine) : // Otherwise: use 1 as the refinement ratio inj_pos->getPositionUnitBox(i_part, amrex::IntVect::TheUnitVector(), engine); auto pos = getCellCoords(overlap_corner, dx, r, iv); @@ -1441,7 +1440,6 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, Gpu::DeviceVector counts(overlap_box.numPts(), 0); Gpu::DeviceVector offset(overlap_box.numPts()); auto *pcounts = counts.data(); - const amrex::IntVect lrrfac = rrfac; const int flux_normal_axis = plasma_injector.flux_normal_axis; Box fine_overlap_box; // default Box is NOT ok(). 
if (refine_injection) { @@ -1450,22 +1448,21 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, amrex::ParallelForRNG(overlap_box, [=] AMREX_GPU_DEVICE (int i, int j, int k, amrex::RandomEngine const& engine) noexcept { const IntVect iv(AMREX_D_DECL(i, j, k)); + amrex::ignore_unused(j,k); + auto lo = getCellCoords(overlap_corner, dx, {0._rt, 0._rt, 0._rt}, iv); auto hi = getCellCoords(overlap_corner, dx, {1._rt, 1._rt, 1._rt}, iv); if (flux_pos->overlapsWith(lo, hi)) { auto index = overlap_box.index(iv); - int r; + int r = 1; if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { - r = compute_area_weights(lrrfac, flux_normal_axis); - } else { - r = 1; + r = compute_area_weights(rrfac, flux_normal_axis); } const int num_ppc_int = static_cast(num_ppc_real*r + amrex::Random(engine)); pcounts[index] = num_ppc_int; } - amrex::ignore_unused(j,k); }); // Max number of new particles. All of them are created, @@ -1532,24 +1529,14 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, [=] AMREX_GPU_DEVICE (int i, int j, int k, amrex::RandomEngine const& engine) noexcept { const IntVect iv = IntVect(AMREX_D_DECL(i, j, k)); + amrex::ignore_unused(j,k); const auto index = overlap_box.index(iv); Real scale_fac = compute_scale_fac_area(dx, num_ppc_real, flux_normal_axis); - auto lo = getCellCoords(overlap_corner, dx, {0._rt, 0._rt, 0._rt}, iv); - auto hi = getCellCoords(overlap_corner, dx, {1._rt, 1._rt, 1._rt}, iv); - - if (flux_pos->overlapsWith(lo, hi)) - { - int r; - if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { - r = compute_area_weights(lrrfac, flux_normal_axis); - } else { - r = 1; - } - scale_fac /= r; + if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { + scale_fac /= compute_area_weights(rrfac, flux_normal_axis); } - amrex::ignore_unused(j,k); for (int i_part = 0; i_part < pcounts[index]; ++i_part) { @@ -1558,8 +1545,8 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, // This assumes the flux_pos is of type InjectorPositionRandomPlane const XDim3 r = (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) ? 
- // In the refined injection region: use refinement ratio `lrrfac` - flux_pos->getPositionUnitBox(i_part, lrrfac, engine) : + // In the refined injection region: use refinement ratio `rrfac` + flux_pos->getPositionUnitBox(i_part, rrfac, engine) : // Otherwise: use 1 as the refinement ratio flux_pos->getPositionUnitBox(i_part, amrex::IntVect::TheUnitVector(), engine); auto pos = getCellCoords(overlap_corner, dx, r, iv); From 6463b1f84878e9e0fc957b0ef9ccde53efde0859 Mon Sep 17 00:00:00 2001 From: Alfred Mishi <140518333+Haavaan@users.noreply.github.com> Date: Tue, 17 Sep 2024 21:12:02 +0200 Subject: [PATCH 57/91] Updated visualization scripts for beam-beam collision example (#4797) * add viz scripts to beam-beam collision example * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update plot_reduced.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Modified pull request * Modified README.rst * Modified README.rst * updated vizs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * restore plot fields * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * rm EOL white spaces * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add missing text back. * Change figure link --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Arianna Formenti Co-authored-by: Remi Lehe --- .../beam_beam_collision/README.rst | 49 +++++- .../inputs_test_3d_beam_beam_collision | 2 +- .../beam_beam_collision/plot_fields.py | 139 ++++++++++++++++++ .../beam_beam_collision/plot_reduced.py | 48 ++++++ 4 files changed, 233 insertions(+), 5 deletions(-) create mode 100644 Examples/Physics_applications/beam_beam_collision/plot_fields.py create mode 100644 Examples/Physics_applications/beam_beam_collision/plot_reduced.py diff --git a/Examples/Physics_applications/beam_beam_collision/README.rst b/Examples/Physics_applications/beam_beam_collision/README.rst index a7a06521218..28fdc1ee70e 100644 --- a/Examples/Physics_applications/beam_beam_collision/README.rst +++ b/Examples/Physics_applications/beam_beam_collision/README.rst @@ -11,7 +11,8 @@ We turn on the Quantum Synchrotron QED module for photon emission (also known as the Breit-Wheeler QED module for the generation of electron-positron pairs (also known as coherent pair generation in the collider community). To solve for the electromagnetic field we use the nodal version of the electrostatic relativistic solver. -This solver computes the average velocity of each species, and solves the corresponding relativistic Poisson equation (see the WarpX documentation for `warpx.do_electrostatic = relativistic` for more detail). This solver accurately reproduced the subtle cancellation that occur for some component of the ``E + v x B`` terms which are crucial in simulations of relativistic particles. +This solver computes the average velocity of each species, and solves the corresponding relativistic Poisson equation (see the WarpX documentation for `warpx.do_electrostatic = relativistic` for more detail). +This solver accurately reproduces the subtle cancellation that occur for some component of ``E + v x B``, which are crucial in simulations of relativistic particles. This example is based on the following paper :cite:t:`ex-Yakimenko2019`. 
@@ -26,7 +27,7 @@ For `MPI-parallel `__ runs, prefix these lines with ` .. literalinclude:: inputs_test_3d_beam_beam_collision :language: ini - :caption: You can copy this file from ``Examples/Physics_applications/beam-beam_collision/inputs_test_3d_beam_beam_collision``. + :caption: You can copy this file from ``Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision``. Visualize @@ -34,12 +35,15 @@ Visualize The figure below shows the number of photons emitted per beam particle (left) and the number of secondary pairs generated per beam particle (right). -We compare different results: +We compare different results for the reduced diagnostics with the literature: * (red) simplified WarpX simulation as the example stored in the directory ``/Examples/Physics_applications/beam-beam_collision``; * (blue) large-scale WarpX simulation (high resolution and ad hoc generated tables ; * (black) literature results from :cite:t:`ex-Yakimenko2019`. -The small-scale simulation has been performed with a resolution of ``nx = 64, ny = 64, nz = 64`` grid cells, while the large-scale one has a much higher resolution of ``nx = 512, ny = 512, nz = 1024``. Moreover, the large-scale simulation uses dedicated QED lookup tables instead of the builtin tables. To generate the tables within WarpX, the code must be compiled with the flag ``-DWarpX_QED_TABLE_GEN=ON``. For the large-scale simulation we have used the following options: +The small-scale simulation has been performed with a resolution of ``nx = 64, ny = 64, nz = 64`` grid cells, while the large-scale one has a much higher resolution of ``nx = 512, ny = 512, nz = 1024``. +Moreover, the large-scale simulation uses dedicated QED lookup tables instead of the builtin tables. +To generate the tables within WarpX, the code must be compiled with the flag ``-DWarpX_QED_TABLE_GEN=ON``. +For the large-scale simulation we have used the following options: .. code-block:: ini @@ -63,8 +67,45 @@ The small-scale simulation has been performed with a resolution of ``nx = 64, ny qed_bw.tab_pair_frac_how_many=512 qed_bw.save_table_in=my_bw_table.txt + .. figure:: https://gist.github.com/user-attachments/assets/2dd43782-d039-4faa-9d27-e3cf8fb17352 :alt: Beam-beam collision benchmark against :cite:t:`ex-Yakimenko2019`. :width: 100% Beam-beam collision benchmark against :cite:t:`ex-Yakimenko2019`. + + +Below are two visualizations scripts that provide examples to graph the field and reduced diagnostics. +They are available in the ``Examples/Physics_applications/beam-beam_collision/`` folder and can be run as simply as ``python3 plot_fields.py`` and ``python3 plot_reduced.py``. + +.. tab-set:: + + .. tab-item:: Field Diagnostics + + This script visualizes the evolution of the fields (:math:`|E|, |B|, \rho`) during the collision between the two ultra-relativistic lepton beams. + The magnitude of E and B and the charge densities of the primary beams and of the secondary pairs are sliced along either one of the two transverse coordinates (:math:`x` and :math:`y`). + + .. literalinclude:: plot_fields.py + :language: python3 + :caption: You can copy this file from ``Examples/Physics_applications/beam-beam_collision/plot_fields.py``. + + .. figure:: https://gist.github.com/user-attachments/assets/04c9c0ec-b580-446f-a11a-437c1b244a41 + :alt: Slice across :math:`x` of different fields (:math:`|E|, |B|, \rho`) at timestep 45, in the middle of the collision. 
+ :width: 100% + + Slice across :math:`x` of different fields (:math:`|E|, |B|, \rho`) at timestep 45, in the middle of the collision. + + + .. tab-item:: Reduced Diagnostics + + A similar script to the one below was used to produce the image showing the benchmark against :cite:t:`ex-Yakimenko2019`. + + .. literalinclude:: plot_reduced.py + :language: python3 + :caption: You can copy this file from ``Examples/Physics_applications/beam-beam_collision/plot_reduced.py``. + + .. figure:: https://gist.github.com/user-attachments/assets/c280490a-f1f2-4329-ad3c-46817d245dc1 + :alt: Photon and pair production rates in time throughout the collision. + :width: 100% + + Photon and pair production rates in time throughout the collision. diff --git a/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision index e856a078003..d0cf3cd7ebf 100644 --- a/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision +++ b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision @@ -211,7 +211,7 @@ warpx.do_qed_schwinger = 0. # FULL diagnostics.diags_names = diag1 -diag1.intervals = 0 +diag1.intervals = 15 diag1.diag_type = Full diag1.write_species = 1 diag1.fields_to_plot = Ex Ey Ez Bx By Bz rho_beam1 rho_beam2 rho_ele1 rho_pos1 rho_ele2 rho_pos2 diff --git a/Examples/Physics_applications/beam_beam_collision/plot_fields.py b/Examples/Physics_applications/beam_beam_collision/plot_fields.py new file mode 100644 index 00000000000..a7ddb2d13e9 --- /dev/null +++ b/Examples/Physics_applications/beam_beam_collision/plot_fields.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 + +import matplotlib.pyplot as plt +import numpy as np +from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable +from openpmd_viewer import OpenPMDTimeSeries + +plt.rcParams.update({"font.size": 16}) + +series = OpenPMDTimeSeries("./diags/diag1") +steps = series.iterations + + +for slice_axis in ["x", "y"]: # slice the fields along x and y + for n in steps: # loop through the available timesteps + fig, ax = plt.subplots( + ncols=2, nrows=2, figsize=(10, 6), dpi=300, sharex=True, sharey=True + ) + + # get E field + Ex, info = series.get_field( + field="E", coord="x", iteration=n, plot=False, slice_across=slice_axis + ) + Ey, info = series.get_field( + field="E", coord="y", iteration=n, plot=False, slice_across=slice_axis + ) + Ez, info = series.get_field( + field="E", coord="z", iteration=n, plot=False, slice_across=slice_axis + ) + # get B field + Bx, info = series.get_field( + field="B", coord="x", iteration=n, plot=False, slice_across=slice_axis + ) + By, info = series.get_field( + field="B", coord="y", iteration=n, plot=False, slice_across=slice_axis + ) + Bz, info = series.get_field( + field="B", coord="z", iteration=n, plot=False, slice_across=slice_axis + ) + # get charge densities + rho_beam1, info = series.get_field( + field="rho_beam1", iteration=n, plot=False, slice_across=slice_axis + ) + rho_beam2, info = series.get_field( + field="rho_beam2", iteration=n, plot=False, slice_across=slice_axis + ) + rho_ele1, info = series.get_field( + field="rho_ele1", iteration=n, plot=False, slice_across=slice_axis + ) + rho_pos1, info = series.get_field( + field="rho_pos1", iteration=n, plot=False, slice_across=slice_axis + ) + rho_ele2, info = series.get_field( + field="rho_ele2", iteration=n, plot=False, slice_across=slice_axis + ) + rho_pos2, info = series.get_field( + 
field="rho_pos2", iteration=n, plot=False, slice_across=slice_axis + ) + + xmin = info.z.min() + xmax = info.z.max() + xlabel = "z [m]" + + if slice_axis == "x": + ymin = info.y.min() + ymax = info.y.max() + ylabel = "y [m]" + elif slice_axis == "y": + ymin = info.x.min() + ymax = info.x.max() + ylabel = "x [m]" + + # plot E magnitude + Emag = np.sqrt(Ex**2 + Ey**2 + Ez**2) + im = ax[0, 0].imshow( + np.transpose(Emag), + cmap="seismic", + extent=[xmin, xmax, ymin, ymax], + vmin=0, + vmax=np.max(np.abs(Emag)), + ) + ax[0, 0].set_title("E [V/m]") + divider = make_axes_locatable(ax[0, 0]) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(im, cax=cax, orientation="vertical") + + # plot B magnitude + Bmag = np.sqrt(Bx**2 + By**2 + Bz**2) + im = ax[1, 0].imshow( + np.transpose(Bmag), + cmap="seismic", + extent=[xmin, xmax, ymin, ymax], + vmin=0, + vmax=np.max(np.abs(Bmag)), + ) + ax[1, 0].set_title("B [T]") + divider = make_axes_locatable(ax[1, 0]) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(im, cax=cax, orientation="vertical") + + # plot beam densities + rho_beams = rho_beam1 + rho_beam2 + im = ax[0, 1].imshow( + np.transpose(rho_beams), + cmap="seismic", + extent=[xmin, xmax, ymin, ymax], + vmin=-np.max(np.abs(rho_beams)), + vmax=np.max(np.abs(rho_beams)), + ) + ax[0, 1].set_title(r"$\rho$ beams [C/m$^3$]") + divider = make_axes_locatable(ax[0, 1]) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(im, cax=cax, orientation="vertical") + + # plot secondary densities + rho2 = rho_ele1 + rho_pos1 + rho_ele2 + rho_pos2 + im = ax[1, 1].imshow( + np.transpose(rho2), + cmap="seismic", + extent=[xmin, xmax, ymin, ymax], + vmin=-np.max(np.abs(rho2)), + vmax=np.max(np.abs(rho2)), + ) + ax[1, 1].set_title(r"$\rho$ secondaries [C/m$^3$]") + divider = make_axes_locatable(ax[1, 1]) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(im, cax=cax, orientation="vertical") + + for a in ax[-1, :].reshape(-1): + a.set_xlabel(xlabel) + for a in ax[:, 0].reshape(-1): + a.set_ylabel(ylabel) + + fig.suptitle(f"Iteration = {n}, time [s] = {series.current_t}", fontsize=20) + plt.tight_layout() + + image_file_name = "FIELDS_" + slice_axis + f"_{n:03d}.png" + plt.savefig(image_file_name, dpi=100, bbox_inches="tight") + plt.close() diff --git a/Examples/Physics_applications/beam_beam_collision/plot_reduced.py b/Examples/Physics_applications/beam_beam_collision/plot_reduced.py new file mode 100644 index 00000000000..3f59f975519 --- /dev/null +++ b/Examples/Physics_applications/beam_beam_collision/plot_reduced.py @@ -0,0 +1,48 @@ +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +from scipy.constants import c, nano, physical_constants + +r_e = physical_constants["classical electron radius"][0] +my_dpi = 300 +sigmaz = 10 * nano + +fig, ax = plt.subplots( + ncols=2, nrows=1, figsize=(2000.0 / my_dpi, 1000.0 / my_dpi), dpi=my_dpi +) + +rdir = "./diags/reducedfiles/" + +df_cr = pd.read_csv(f"{rdir}" + "ColliderRelevant_beam1_beam2.txt", sep=" ", header=0) +df_pn = pd.read_csv(f"{rdir}" + "ParticleNumber.txt", sep=" ", header=0) + + +times = df_cr[[col for col in df_cr.columns if "]time" in col]].to_numpy() +steps = df_cr[[col for col in df_cr.columns if "]step" in col]].to_numpy() + +x = df_cr[[col for col in df_cr.columns if "]dL_dt" in col]].to_numpy() +coll_index = np.argmax(x) +coll_time = times[coll_index] + +# number of photons per beam particle +np1 = df_pn[[col for col in df_pn.columns if "]pho1_weight" 
in col]].to_numpy() +np2 = df_pn[[col for col in df_pn.columns if "]pho2_weight" in col]].to_numpy() +Ne = df_pn[[col for col in df_pn.columns if "]beam1_weight" in col]].to_numpy()[0] +Np = df_pn[[col for col in df_pn.columns if "]beam2_weight" in col]].to_numpy()[0] + +ax[0].plot((times - coll_time) / (sigmaz / c), (np1 + np2) / (Ne + Np), lw=2) +ax[0].set_title(r"photon number/beam particle") + +# number of NLBW particles per beam particle +e1 = df_pn[[col for col in df_pn.columns if "]ele1_weight" in col]].to_numpy() +e2 = df_pn[[col for col in df_pn.columns if "]ele2_weight" in col]].to_numpy() + +ax[1].plot((times - coll_time) / (sigmaz / c), (e1 + e2) / (Ne + Np), lw=2) +ax[1].set_title(r"NLBW particles/beam particle") + +for a in ax.reshape(-1): + a.set_xlabel(r"time [$\sigma_z/c$]") +image_file_name = "reduced.png" +plt.tight_layout() +plt.savefig(image_file_name, dpi=300, bbox_inches="tight") +plt.close("all") From 402d7549121b15a15aa469a2135c0753177a18ed Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 18 Sep 2024 05:31:46 -0700 Subject: [PATCH 58/91] Docs: how to use `add_subdirectory` for new tests (#5279) --- Docs/source/developers/testing.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/testing.rst index 5bbc7d0fef4..ee5c82aeea9 100644 --- a/Docs/source/developers/testing.rst +++ b/Docs/source/developers/testing.rst @@ -175,6 +175,8 @@ If you need a new Python package dependency for testing, please add it in `Regre Sometimes two or more tests share a large number of input parameters. The shared input parameters can be collected in a "base" input file that can be passed as a runtime parameter in the actual test input files through the parameter ``FILE``. +If the new test is added in a new directory that did not exist before, please add the name of that directory with the command ``add_subdirectory`` in `Physics_applications/CMakeLists.txt `__ or `Tests/CMakeLists.txt `__, depending on where the new test directory is located. 
+ Naming conventions for automated tests -------------------------------------- From 091c8d6571d359fc88ce3086ebf4196873797e14 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Wed, 18 Sep 2024 06:03:33 -0700 Subject: [PATCH 59/91] Copy rho from guard cells in IGF solver (#5284) * Copy rho from guard cells in IGF solver * Reset checksum --- .../test_3d_open_bc_poisson_solver.json | 24 +++++++++---------- .../fields/IntegratedGreenFunctionSolver.cpp | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json index e4ff1fc68a8..0453481ec60 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json +++ b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json @@ -1,20 +1,20 @@ { "lev=0": { - "Bx": 100915933.44993827, - "By": 157610622.1855512, - "Bz": 9.717358898362187e-14, - "Ex": 4.7250652706211096e+16, - "Ey": 3.0253948990559976e+16, - "Ez": 3276573.9514776524, - "rho": 10994013582437.193 + "Bx": 100915975.15792876, + "By": 157610677.31483692, + "Bz": 2.404060922276648e-13, + "Ex": 4.725066923361703e+16, + "Ey": 3.0253961494347724e+16, + "Ez": 3276584.4383433666, + "rho": 10994013582437.197 }, "electron": { - "particle_momentum_x": 5.701277606050295e-19, - "particle_momentum_y": 3.6504516641520437e-19, + "particle_momentum_x": 5.701279599504008e-19, + "particle_momentum_y": 3.650453172860547e-19, "particle_momentum_z": 1.145432768297242e-10, - "particle_position_x": 17.314086912497864, - "particle_position_y": 0.2583691267187796, + "particle_position_x": 17.31408691249785, + "particle_position_y": 0.2583691267187801, "particle_position_z": 10066.329600000008, "particle_weight": 19969036501.910976 } -} \ No newline at end of file +} diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp index 0767ecfb2f3..1aeee9d81d2 100644 --- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp @@ -75,7 +75,7 @@ computePhiIGF ( amrex::MultiFab const & rho, SpectralField tmp_G_fft = SpectralField( spectralspace_ba, dm_global_fft, 1, 0 ); // Copy from rho to tmp_rho - tmp_rho.ParallelCopy( rho, 0, 0, 1, amrex::IntVect::TheZeroVector(), amrex::IntVect::TheZeroVector() ); + tmp_rho.ParallelCopy( rho, 0, 0, 1, rho.nGrowVect(), amrex::IntVect::TheZeroVector() ); // Compute the integrated Green function { From f8f4e5783e94299160fa337e8b37a99d8958b091 Mon Sep 17 00:00:00 2001 From: Thomas Marks Date: Wed, 18 Sep 2024 13:36:08 -0400 Subject: [PATCH 60/91] CFL-limited adaptive timestepping for electrostatic solver (#5176) * Improve maxParticleVelocity * gpu fix 1 * gpu fix 2 * gpu fix 3 - change to general reduction * Remove seperate OMP bit * Add const qualifier for clang tidy * Refactor maxVelocity function * Add dt_next parameter to all Evolve + PushPX methods * Write first pass update dt function * First pass implementation works, need to add logic to only use it when ES simulation enabled * Only call update dt if no const dt supplied and solver is electrostatic * No longer errors if const_dt not specified for ES solver. 
still need to add CFL input to picmi * warpx_cfl option added to picmi * Address most comments from @dpgrote * Move timestep update to start of Evolve * Untrack accidentally-tracked files * Add max_dt param and handle zero-velocity cases * Fix time-centering of leapfrog push * Fix max dt picmi input * Change condition for updating dt * Fix some more picmi stuff * Add timestep diagnostic to docs * Add timestep diagnostic to picmi * Initialize dt_next on restart * Add electrostatic sphere test using adaptive timestep * Fix clang-tidy errors * ruff reformatting * begin refactoring based on feedback * finish refactor * remove unused variable in WarpX.H * Update checksum for new test * Address comments --------- Co-authored-by: Remi Lehe --- Docs/source/usage/parameters.rst | 65 ++++++++------ .../Tests/electrostatic_sphere/CMakeLists.txt | 10 +++ ...puts_test_3d_electrostatic_sphere_adaptive | 47 +++++++++++ Python/pywarpx/picmi.py | 22 ++++- ...test_3d_electrostatic_sphere_adaptive.json | 17 ++++ .../Diagnostics/ReducedDiags/CMakeLists.txt | 17 ++-- Source/Diagnostics/ReducedDiags/Make.package | 24 +++--- .../ReducedDiags/MultiReducedDiags.cpp | 24 +++--- Source/Diagnostics/ReducedDiags/Timestep.H | 35 ++++++++ Source/Diagnostics/ReducedDiags/Timestep.cpp | 72 ++++++++++++++++ Source/Evolve/WarpXComputeDt.cpp | 84 +++++++++++++------ Source/Evolve/WarpXEvolve.cpp | 55 +++++++----- Source/WarpX.H | 14 ++++ Source/WarpX.cpp | 7 ++ 14 files changed, 391 insertions(+), 102 deletions(-) create mode 100644 Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_adaptive create mode 100644 Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_adaptive.json create mode 100644 Source/Diagnostics/ReducedDiags/Timestep.H create mode 100644 Source/Diagnostics/ReducedDiags/Timestep.cpp diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 86ab7594c5f..b9d82d5014a 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2129,14 +2129,24 @@ Time step The ratio between the actual timestep that is used in the simulation and the Courant-Friedrichs-Lewy (CFL) limit. (e.g. for `warpx.cfl=1`, the timestep will be exactly equal to the CFL limit.) - This parameter will only be used with the electromagnetic solver. + For some speed v and grid spacing dx, this limits the timestep to `warpx.cfl * dx / v`. + When used with the electromagnetic solver, `v` is the speed of light. + For the electrostatic solver, `v` is the maximum speed among all particles in the domain. * ``warpx.const_dt`` (`float`) Allows direct specification of the time step size, in units of seconds. - When the electrostatic solver is being used, this must be supplied. + When the electrostatic solver is being used, this must be supplied if not using adaptive timestepping. This can be used with the electromagnetic solver, overriding ``warpx.cfl``, but it is up to the user to ensure that the CFL condition is met. +* ``warpx.dt_update_interval`` (`string`) optional (default `-1`) + How many iterations pass between timestep adaptations when using the electrostatic solver. + Must be greater than `0` to use adaptive timestepping, or else ``warpx.const_dt`` must be specified. + +* ``warpx.max_dt`` (`float`) optional + The maximum timestep permitted for the electrostatic solver, when using adaptive timestepping. + If supplied, also sets the initial timestep for these simulations, before the first timestep update. 
+ Filtering ^^^^^^^^^ @@ -3448,39 +3458,42 @@ Reduced Diagnostics For 1D-Z, :math:`x`-related and :math:`y`-related quantities are not outputted. RZ geometry is not supported yet. -* ``DifferentialLuminosity`` - This type computes the differential luminosity between two species, defined as: + * ``DifferentialLuminosity`` + This type computes the differential luminosity between two species, defined as: - .. math:: + .. math:: + + \frac{d\mathcal{L}}{d\mathcal{E}^*}(\mathcal{E}^*, t) = \int_0^t dt'\int d\boldsymbol{x}\,d\boldsymbol{p}_1 d\boldsymbol{p}_2\; + \sqrt{ |\boldsymbol{v}_1 - \boldsymbol{v}_2|^2 - |\boldsymbol{v}_1\times\boldsymbol{v}_2|^2/c^2} \\ f_1(\boldsymbol{x}, \boldsymbol{p}_1, t')f_2(\boldsymbol{x}, \boldsymbol{p}_2, t') \delta(\mathcal{E}^* - \mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2)) - \frac{d\mathcal{L}}{d\mathcal{E}^*}(\mathcal{E}^*, t) = \int_0^t dt'\int d\boldsymbol{x}\,d\boldsymbol{p}_1 d\boldsymbol{p}_2\; - \sqrt{ |\boldsymbol{v}_1 - \boldsymbol{v}_2|^2 - |\boldsymbol{v}_1\times\boldsymbol{v}_2|^2/c^2} \\ f_1(\boldsymbol{x}, \boldsymbol{p}_1, t')f_2(\boldsymbol{x}, \boldsymbol{p}_2, t') \delta(\mathcal{E}^* - \mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2)) + where :math:`\mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2) = \sqrt{m_1^2c^4 + m_2^2c^4 + 2(m_1 m_2 c^4 + \gamma_1 \gamma_2 - \boldsymbol{p}_1\cdot\boldsymbol{p}_2 c^2)}` is the energy in the center-of-mass frame, + and :math:`f_i` is the distribution function of species :math:`i`. Note that, if :math:`\sigma^*(\mathcal{E}^*)` + is the center-of-mass cross-section of a given collision process, then + :math:`\int d\mathcal{E}^* \frac{d\mathcal{L}}{d\mathcal{E}^*} (\mathcal{E}^*, t)\sigma^*(\mathcal{E}^*)` + gives the total number of collisions of that process (from the beginning of the simulation up until time :math:`t`). - where :math:`\mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2) = \sqrt{m_1^2c^4 + m_2^2c^4 + 2(m_1 m_2 c^4 - \gamma_1 \gamma_2 - \boldsymbol{p}_1\cdot\boldsymbol{p}_2 c^2)}` is the energy in the center-of-mass frame, - and :math:`f_i` is the distribution function of species :math:`i`. Note that, if :math:`\sigma^*(\mathcal{E}^*)` - is the center-of-mass cross-section of a given collision process, then - :math:`\int d\mathcal{E}^* \frac{d\mathcal{L}}{d\mathcal{E}^*} (\mathcal{E}^*, t)\sigma^*(\mathcal{E}^*)` - gives the total number of collisions of that process (from the beginning of the simulation up until time :math:`t`). + The differential luminosity is given in units of :math:`\text{m}^{-2}.\text{eV}^{-1}`. For collider-relevant WarpX simulations + involving two crossing, high-energy beams of particles, the differential luminosity in :math:`\text{s}^{-1}.\text{m}^{-2}.\text{eV}^{-1}` + can be obtained by multiplying the above differential luminosity by the expected repetition rate of the beams. - The differential luminosity is given in units of :math:`\text{m}^{-2}.\text{eV}^{-1}`. For collider-relevant WarpX simulations - involving two crossing, high-energy beams of particles, the differential luminosity in :math:`\text{s}^{-1}.\text{m}^{-2}.\text{eV}^{-1}` - can be obtained by multiplying the above differential luminosity by the expected repetition rate of the beams. + In practice, the above expression of the differential luminosity is evaluated over discrete bins in energy :math:`\mathcal{E}^*`, + and by summing over macroparticles. 
- In practice, the above expression of the differential luminosity is evaluated over discrete bins in energy :math:`\mathcal{E}^*`, - and by summing over macroparticles. + * ``.species`` (`list of two strings`) + The names of the two species for which the differential luminosity is computed. - * ``.species`` (`list of two strings`) - The names of the two species for which the differential luminosity is computed. + * ``.bin_number`` (`int` > 0) + The number of bins in energy :math:`\mathcal{E}^*` - * ``.bin_number`` (`int` > 0) - The number of bins in energy :math:`\mathcal{E}^*` + * ``.bin_max`` (`float`, in eV) + The minimum value of :math:`\mathcal{E}^*` for which the differential luminosity is computed. - * ``.bin_max`` (`float`, in eV) - The minimum value of :math:`\mathcal{E}^*` for which the differential luminosity is computed. + * ``.bin_min`` (`float`, in eV) + The maximum value of :math:`\mathcal{E}^*` for which the differential luminosity is computed. - * ``.bin_min`` (`float`, in eV) - The maximum value of :math:`\mathcal{E}^*` for which the differential luminosity is computed. + * ``Timestep`` + This type outputs the simulation's physical timestep (in seconds) at each mesh refinement level. * ``.intervals`` (`string`) Using the `Intervals Parser`_ syntax, this string defines the timesteps at which reduced diff --git a/Examples/Tests/electrostatic_sphere/CMakeLists.txt b/Examples/Tests/electrostatic_sphere/CMakeLists.txt index 41a151b7884..3d17c4462f8 100644 --- a/Examples/Tests/electrostatic_sphere/CMakeLists.txt +++ b/Examples/Tests/electrostatic_sphere/CMakeLists.txt @@ -41,6 +41,16 @@ add_warpx_test( OFF # dependency ) +add_warpx_test( + test_3d_electrostatic_sphere_adaptive # name + 3 # dims + 2 # nprocs + inputs_test_3d_electrostatic_sphere_adaptive # inputs + analysis_electrostatic_sphere.py # analysis + diags/diag1000054 # output + OFF # dependency +) + add_warpx_test( test_rz_electrostatic_sphere # name RZ # dims diff --git a/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_adaptive b/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_adaptive new file mode 100644 index 00000000000..f64f6de08ee --- /dev/null +++ b/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_adaptive @@ -0,0 +1,47 @@ +stop_time = 60e-6 +warpx.cfl = 0.2 +warpx.dt_update_interval = 10 +warpx.max_dt = 1.5e-6 +amr.n_cell = 64 64 64 +amr.max_level = 0 +amr.blocking_factor = 8 +amr.max_grid_size = 128 +geometry.dims = 3 +geometry.prob_lo = -0.5 -0.5 -0.5 +geometry.prob_hi = 0.5 0.5 0.5 +boundary.field_lo = pec pec pec +boundary.field_hi = pec pec pec +warpx.do_electrostatic = relativistic + +particles.species_names = electron + +algo.field_gathering = momentum-conserving + +# Order of particle shape factors +algo.particle_shape = 1 + +my_constants.n0 = 1.49e6 +my_constants.R0 = 0.1 + +electron.charge = -q_e +electron.mass = m_e +electron.injection_style = "NUniformPerCell" +electron.num_particles_per_cell_each_dim = 2 2 2 +electron.profile = parse_density_function +electron.density_function(x,y,z) = "(x*x + y*y + z*z < R0*R0)*n0" +electron.momentum_distribution_type = at_rest + +diagnostics.diags_names = diag1 diag2 + +diag1.intervals = 30 +diag1.diag_type = Full +diag1.fields_to_plot = Ex Ey Ez rho + +diag2.intervals = 30 +diag2.diag_type = Full +diag2.fields_to_plot = none +diag2.format = openpmd + +warpx.reduced_diags_names = timestep +timestep.intervals = 1 +timestep.type = Timestep diff --git a/Python/pywarpx/picmi.py 
index 0d51a8723b4..478b4d5802e 100644
--- a/Python/pywarpx/picmi.py
+++ b/Python/pywarpx/picmi.py
@@ -1522,8 +1522,7 @@ def solver_initialize_inputs(self):
         # --- Same method names are used, though mapped to lower case.
         pywarpx.algo.maxwell_solver = self.method

-        if self.cfl is not None:
-            pywarpx.warpx.cfl = self.cfl
+        pywarpx.warpx.cfl = self.cfl

         if self.source_smoother is not None:
             self.source_smoother.smoother_initialize_inputs(self)
@@ -1880,6 +1879,16 @@ class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver):

     warpx_self_fields_verbosity: integer, default=2
         Level of verbosity for the lab frame solver
+
+    warpx_dt_update_interval: string, optional (default = -1)
+        How frequently the timestep is updated. Adaptive timestepping is disabled when this is <= 0.
+
+    warpx_cfl: float, optional
+        Fraction of the CFL condition for particle velocity vs grid size, used to set the timestep when `dt_update_interval > 0`.
+
+    warpx_max_dt: float, optional
+        The maximum allowable timestep when `dt_update_interval > 0`.
+
     """

     def init(self, kw):
@@ -1887,6 +1896,9 @@ def init(self, kw):
         self.absolute_tolerance = kw.pop("warpx_absolute_tolerance", None)
         self.self_fields_verbosity = kw.pop("warpx_self_fields_verbosity", None)
         self.magnetostatic = kw.pop("warpx_magnetostatic", False)
+        self.cfl = kw.pop("warpx_cfl", None)
+        self.dt_update_interval = kw.pop("warpx_dt_update_interval", None)
+        self.max_dt = kw.pop("warpx_max_dt", None)

     def solver_initialize_inputs(self):
         # Open BC means FieldBoundaryType::Open for electrostatic sims, rather than perfectly-matched layer
@@ -1894,6 +1906,11 @@ def solver_initialize_inputs(self):

         self.grid.grid_initialize_inputs()

+        # set adaptive timestepping parameters
+        pywarpx.warpx.cfl = self.cfl
+        pywarpx.warpx.dt_update_interval = self.dt_update_interval
+        pywarpx.warpx.max_dt = self.max_dt
+
         if self.relativistic:
             pywarpx.warpx.do_electrostatic = "relativistic"
         else:
@@ -3890,6 +3907,7 @@ def __init__(
             "ParticleNumber",
             "LoadBalanceCosts",
             "LoadBalanceEfficiency",
+            "Timestep",
         ]
         # The species diagnostics require a species to be provided
         self._species_reduced_diagnostics = [
diff --git a/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_adaptive.json b/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_adaptive.json
new file mode 100644
index 00000000000..561fbf86669
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_adaptive.json
@@ -0,0 +1,17 @@
+{
+  "lev=0": {
+    "Ex": 5.177444767224255,
+    "Ey": 5.177444767224254,
+    "Ez": 5.177444767224256,
+    "rho": 2.6092568008333797e-10
+  },
+  "electron": {
+    "particle_momentum_x": 1.3215019655285216e-23,
+    "particle_momentum_y": 1.3215019655285214e-23,
+    "particle_momentum_z": 1.3215019655285217e-23,
+    "particle_position_x": 912.2310003741203,
+    "particle_position_y": 912.2310003741203,
+    "particle_position_z": 912.2310003741202,
+    "particle_weight": 6212.501525878906
+  }
+}
diff --git a/Source/Diagnostics/ReducedDiags/CMakeLists.txt b/Source/Diagnostics/ReducedDiags/CMakeLists.txt
index 4f0b05f6180..bbf1b6b65b0 100644
--- a/Source/Diagnostics/ReducedDiags/CMakeLists.txt
+++ b/Source/Diagnostics/ReducedDiags/CMakeLists.txt
@@ -3,26 +3,27 @@ foreach(D IN LISTS WarpX_DIMS)
     target_sources(lib_${SD}
       PRIVATE
        BeamRelevant.cpp
+        ChargeOnEB.cpp
        ColliderRelevant.cpp
        DifferentialLuminosity.cpp
        FieldEnergy.cpp
+        FieldMaximum.cpp
+        FieldMomentum.cpp
        FieldProbe.cpp
        FieldProbeParticleContainer.cpp
-        FieldMomentum.cpp
+        FieldReduction.cpp
        LoadBalanceCosts.cpp
        LoadBalanceEfficiency.cpp
        MultiReducedDiags.cpp
        ParticleEnergy.cpp
-        ParticleMomentum.cpp
+        ParticleExtrema.cpp
        ParticleHistogram.cpp
        ParticleHistogram2D.cpp
+        ParticleMomentum.cpp
+        ParticleNumber.cpp
        ReducedDiags.cpp
-        FieldMaximum.cpp
-        ParticleExtrema.cpp
        RhoMaximum.cpp
-        ParticleNumber.cpp
-        FieldReduction.cpp
-        FieldProbe.cpp
-        ChargeOnEB.cpp
+        Timestep.cpp
    )
endforeach()
diff --git a/Source/Diagnostics/ReducedDiags/Make.package b/Source/Diagnostics/ReducedDiags/Make.package
index e840931f8d3..2611831a3dd 100644
--- a/Source/Diagnostics/ReducedDiags/Make.package
+++ b/Source/Diagnostics/ReducedDiags/Make.package
@@ -1,24 +1,24 @@
 CEXE_sources += MultiReducedDiags.cpp
 CEXE_sources += ReducedDiags.cpp
-CEXE_sources += ParticleEnergy.cpp
-CEXE_sources += ParticleMomentum.cpp
-CEXE_sources += FieldEnergy.cpp
-CEXE_sources += FieldProbe.cpp
-CEXE_sources += FieldProbeParticleContainer.cpp
-CEXE_sources += FieldMomentum.cpp
 CEXE_sources += BeamRelevant.cpp
+CEXE_sources += ChargeOnEB.cpp
 CEXE_sources += ColliderRelevant.cpp
 CEXE_sources += DifferentialLuminosity.cpp
+CEXE_sources += FieldEnergy.cpp
+CEXE_sources += FieldMaximum.cpp
+CEXE_sources += FieldMomentum.cpp
+CEXE_sources += FieldProbe.cpp
+CEXE_sources += FieldProbeParticleContainer.cpp
+CEXE_sources += FieldReduction.cpp
 CEXE_sources += LoadBalanceCosts.cpp
 CEXE_sources += LoadBalanceEfficiency.cpp
+CEXE_sources += ParticleEnergy.cpp
+CEXE_sources += ParticleExtrema.cpp
 CEXE_sources += ParticleHistogram.cpp
 CEXE_sources += ParticleHistogram2D.cpp
-CEXE_sources += FieldMaximum.cpp
-CEXE_sources += FieldProbe.cpp
-CEXE_sources += ParticleExtrema.cpp
-CEXE_sources += RhoMaximum.cpp
+CEXE_sources += ParticleMomentum.cpp
 CEXE_sources += ParticleNumber.cpp
-CEXE_sources += FieldReduction.cpp
-CEXE_sources += ChargeOnEB.cpp
+CEXE_sources += RhoMaximum.cpp
+CEXE_sources += Timestep.cpp
 VPATH_LOCATIONS += $(WARPX_HOME)/Source/Diagnostics/ReducedDiags
diff --git a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp
index 25ea87d9f54..5035eac58a8 100644
--- a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp
+++ b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp
@@ -12,8 +12,8 @@
 #include "DifferentialLuminosity.H"
 #include "FieldEnergy.H"
 #include "FieldMaximum.H"
-#include "FieldProbe.H"
 #include "FieldMomentum.H"
+#include "FieldProbe.H"
 #include "FieldReduction.H"
 #include "LoadBalanceCosts.H"
 #include "LoadBalanceEfficiency.H"
@@ -24,6 +24,7 @@
 #include "ParticleMomentum.H"
 #include "ParticleNumber.H"
 #include "RhoMaximum.H"
+#include "Timestep.H"
 #include "Utils/TextMsg.H"
 #include "Utils/WarpXProfilerWrapper.H"
@@ -52,24 +53,25 @@ MultiReducedDiags::MultiReducedDiags ()
     using CS = const std::string& ;
     const auto reduced_diags_dictionary =
         std::map<std::string, std::function<std::unique_ptr<ReducedDiags>(CS)>>{
+            {"BeamRelevant",          [](CS s){return std::make_unique<BeamRelevant>(s);}},
+            {"ChargeOnEB",            [](CS s){return std::make_unique<ChargeOnEB>(s);}},
+            {"ColliderRelevant",      [](CS s){return std::make_unique<ColliderRelevant>(s);}},
+            {"DifferentialLuminosity",[](CS s){return std::make_unique<DifferentialLuminosity>(s);}},
             {"ParticleEnergy",        [](CS s){return std::make_unique<ParticleEnergy>(s);}},
+            {"ParticleExtrema",       [](CS s){return std::make_unique<ParticleExtrema>(s);}},
+            {"ParticleHistogram",     [](CS s){return std::make_unique<ParticleHistogram>(s);}},
+            {"ParticleHistogram2D",   [](CS s){return std::make_unique<ParticleHistogram2D>(s);}},
             {"ParticleMomentum",      [](CS s){return std::make_unique<ParticleMomentum>(s);}},
+            {"ParticleNumber",        [](CS s){return std::make_unique<ParticleNumber>(s);}},
             {"FieldEnergy",           [](CS s){return std::make_unique<FieldEnergy>(s);}},
-            {"FieldMomentum",         [](CS s){return std::make_unique<FieldMomentum>(s);}},
             {"FieldMaximum",          [](CS s){return std::make_unique<FieldMaximum>(s);}},
+            {"FieldMomentum",         [](CS s){return std::make_unique<FieldMomentum>(s);}},
             {"FieldProbe",            [](CS s){return std::make_unique<FieldProbe>(s);}},
             {"FieldReduction",        [](CS s){return std::make_unique<FieldReduction>(s);}},
-            {"RhoMaximum",            [](CS s){return std::make_unique<RhoMaximum>(s);}},
-            {"BeamRelevant",          [](CS s){return std::make_unique<BeamRelevant>(s);}},
-            {"ColliderRelevant",      [](CS s){return std::make_unique<ColliderRelevant>(s);}},
-            {"DifferentialLuminosity",[](CS s){return std::make_unique<DifferentialLuminosity>(s);}},
             {"LoadBalanceCosts",      [](CS s){return std::make_unique<LoadBalanceCosts>(s);}},
             {"LoadBalanceEfficiency", [](CS s){return std::make_unique<LoadBalanceEfficiency>(s);}},
-            {"ParticleHistogram",     [](CS s){return std::make_unique<ParticleHistogram>(s);}},
-            {"ParticleHistogram2D",   [](CS s){return std::make_unique<ParticleHistogram2D>(s);}},
-            {"ParticleNumber",        [](CS s){return std::make_unique<ParticleNumber>(s);}},
-            {"ParticleExtrema",       [](CS s){return std::make_unique<ParticleExtrema>(s);}},
-            {"ChargeOnEB",            [](CS s){return std::make_unique<ChargeOnEB>(s);}}
+            {"RhoMaximum",            [](CS s){return std::make_unique<RhoMaximum>(s);}},
+            {"Timestep",              [](CS s){return std::make_unique<Timestep>(s);}}
         };
     // loop over all reduced diags and fill m_multi_rd with requested reduced diags
     std::transform(m_rd_names.begin(), m_rd_names.end(), std::back_inserter(m_multi_rd),
diff --git a/Source/Diagnostics/ReducedDiags/Timestep.H b/Source/Diagnostics/ReducedDiags/Timestep.H
new file mode 100644
index 00000000000..bcf4fe6452f
--- /dev/null
+++ b/Source/Diagnostics/ReducedDiags/Timestep.H
@@ -0,0 +1,35 @@
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: Thomas Marks
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#ifndef WARPX_DIAGNOSTICS_REDUCEDDIAGS_TIMESTEP_H_
+#define WARPX_DIAGNOSTICS_REDUCEDDIAGS_TIMESTEP_H_
+
+#include "ReducedDiags.H"
+#include <string>
+
+/**
+ * This class contains a function for retrieving the current simulation timestep as a diagnostic.
+ * Useful mainly for simulations using adaptive timestepping.
+ */
+class Timestep: public ReducedDiags {
+public:
+    /**
+     * constructor
+     * @param[in] rd_name reduced diags name
+     */
+    Timestep (const std::string& rd_name);
+
+    /**
+     * This function gets the current physical timestep of the simulation at all refinement levels.
+     * @param[in] step current time step
+     */
+    void ComputeDiags (int step) final;
+};
+
+#endif //WARPX_DIAGNOSTICS_REDUCEDDIAGS_TIMESTEP_H_
diff --git a/Source/Diagnostics/ReducedDiags/Timestep.cpp b/Source/Diagnostics/ReducedDiags/Timestep.cpp
new file mode 100644
index 00000000000..3474121db91
--- /dev/null
+++ b/Source/Diagnostics/ReducedDiags/Timestep.cpp
@@ -0,0 +1,72 @@
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: Thomas Marks
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#include "Timestep.H"
+
+#include "WarpX.H"
+
+#include <AMReX_ParallelDescriptor.H>
+#include <AMReX_REAL.H>
+#include <AMReX_Vector.H> // TODO: remove this
+#include <fstream>
+
+#include <string>
+
+using namespace amrex::literals;
+
+// constructor
+Timestep::Timestep (const std::string& rd_name)
+:ReducedDiags{rd_name}
+{
+    const auto& warpx = WarpX::GetInstance();
+    const auto max_level = warpx.maxLevel();
+
+    // data size should be equal to the number of refinement levels
+    m_data.resize(max_level + 1, 0.0_rt);
+
+    if (amrex::ParallelDescriptor::IOProcessor() && m_write_header) {
+        // open file
+        std::ofstream ofs{m_path + m_rd_name + "."
+ m_extension, std::ofstream::out}; + + // write header row + int c = 0; + ofs << "#"; + ofs << "[" << c++ << "]step()"; + ofs << m_sep; + ofs << "[" << c++ << "]time(s)"; + ofs << m_sep; + + for (int lev = 0; lev <= max_level; lev++) { + ofs << "[" << c++ << "]timestep[" << lev << "](s)"; + if (lev < max_level) { + ofs << m_sep; + } + } + + // close file + ofs << std::endl; + ofs.close(); + } +} +// end constructor + +// function to get current simulation timestep at all refinement levels +void Timestep::ComputeDiags (int step) { + // Check if diagnostic should be done + if (!m_intervals.contains(step+1)) { return; } + + const auto& warpx = WarpX::GetInstance(); + const auto max_level = warpx.maxLevel(); + const auto dt = warpx.getdt(); + + for (int lev = 0; lev <= max_level; lev++) { + m_data[lev] = dt[lev]; + } +} +// end Timestep::ComputeDiags diff --git a/Source/Evolve/WarpXComputeDt.cpp b/Source/Evolve/WarpXComputeDt.cpp index c1a87166920..b82cb6aff26 100644 --- a/Source/Evolve/WarpXComputeDt.cpp +++ b/Source/Evolve/WarpXComputeDt.cpp @@ -13,6 +13,7 @@ #else # include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H" #endif +#include "Particles/MultiParticleContainer.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" @@ -27,29 +28,29 @@ #include #include +/** + * Compute the minimum of array x, where x has dimension AMREX_SPACEDIM + */ +AMREX_FORCE_INLINE amrex::Real +minDim (const amrex::Real* x) +{ + return std::min({AMREX_D_DECL(x[0], x[1], x[2])}); +} + /** * Determine the timestep of the simulation. */ void WarpX::ComputeDt () { // Handle cases where the timestep is not limited by the speed of light - if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None || - electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { - - std::stringstream errorMsg; - if (electrostatic_solver_id != ElectrostaticSolverAlgo::None) { - errorMsg << "warpx.const_dt must be specified with the electrostatic solver."; - } else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { - errorMsg << "warpx.const_dt must be specified with the hybrid-PIC solver."; - } else { - errorMsg << "warpx.const_dt must be specified when not using a field solver."; - } - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_const_dt.has_value(), errorMsg.str()); - - for (int lev=0; lev<=max_level; lev++) { - dt[lev] = m_const_dt.value(); - } - return; + // and no constant timestep is provided + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_const_dt.has_value(), "warpx.const_dt must be specified with the hybrid-PIC solver."); + } else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + m_const_dt.has_value() || dt_update_interval.isActivated(), + "warpx.const_dt must be specified with the electrostatic solver, or warpx.dt_update_interval must be > 0." 
+ ); } // Determine the appropriate timestep as limited by the speed of light @@ -58,16 +59,17 @@ WarpX::ComputeDt () if (m_const_dt.has_value()) { deltat = m_const_dt.value(); + } else if (electrostatic_solver_id != ElectrostaticSolverAlgo::None) { + // Set dt for electrostatic algorithm + if (m_max_dt.has_value()) { + deltat = m_max_dt.value(); + } else { + deltat = cfl * minDim(dx) / PhysConst::c; + } } else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { // Computation of dt for spectral algorithm // (determined by the minimum cell size in all directions) -#if defined(WARPX_DIM_1D_Z) - deltat = cfl * dx[0] / PhysConst::c; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - deltat = cfl * std::min(dx[0], dx[1]) / PhysConst::c; -#else - deltat = cfl * std::min(dx[0], std::min(dx[1], dx[2])) / PhysConst::c; -#endif + deltat = cfl * minDim(dx) / PhysConst::c; } else { // Computation of dt for FDTD algorithm #ifdef WARPX_DIM_RZ @@ -99,6 +101,40 @@ WarpX::ComputeDt () } } +/** + * Determine the simulation timestep from the maximum speed of all particles + * Sets timestep so that a particle can only cross cfl*dx cells per timestep. + */ +void +WarpX::UpdateDtFromParticleSpeeds () +{ + const amrex::Real* dx = geom[max_level].CellSize(); + const amrex::Real dx_min = minDim(dx); + + const amrex::ParticleReal max_v = mypc->maxParticleVelocity(); + amrex::Real deltat_new = 0.; + + // Protections from overly-large timesteps + if (max_v == 0) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_max_dt.has_value(), "Particles at rest and no constant or maximum timestep specified. Aborting."); + deltat_new = m_max_dt.value(); + } else { + deltat_new = cfl * dx_min / max_v; + } + + // Restrict to be less than user-specified maximum timestep, if present + if (m_max_dt.has_value()) { + deltat_new = std::min(deltat_new, m_max_dt.value()); + } + + // Update dt + dt[max_level] = deltat_new; + + for (int lev = max_level-1; lev >= 0; --lev) { + dt[lev] = dt[lev+1] * refRatio(lev)[0]; + } +} + void WarpX::PrintDtDxDyDz () { diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index c668eac2e26..9acbe734405 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -60,6 +60,27 @@ using namespace amrex; using ablastr::utils::SignalHandling; +void +WarpX::Synchronize () { + FillBoundaryE(guard_cells.ng_FieldGather); + FillBoundaryB(guard_cells.ng_FieldGather); + if (fft_do_time_averaging) + { + FillBoundaryE_avg(guard_cells.ng_FieldGather); + FillBoundaryB_avg(guard_cells.ng_FieldGather); + } + UpdateAuxilaryData(); + FillBoundaryAux(guard_cells.ng_UpdateAux); + for (int lev = 0; lev <= finest_level; ++lev) { + mypc->PushP(lev, 0.5_rt*dt[lev], + *Efield_aux[lev][0],*Efield_aux[lev][1], + *Efield_aux[lev][2], + *Bfield_aux[lev][0],*Bfield_aux[lev][1], + *Bfield_aux[lev][2]); + } + is_synchronized = true; +} + void WarpX::Evolve (int numsteps) { @@ -95,6 +116,18 @@ WarpX::Evolve (int numsteps) CheckLoadBalance(step); + // Update timestep for electrostatic solver if a constant dt is not provided + // This first synchronizes the position and velocity before setting the new timestep + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None && + !m_const_dt.has_value() && dt_update_interval.contains(step+1)) { + if (verbose) { + amrex::Print() << Utils::TextMsg::Info("updating timestep"); + } + Synchronize(); + UpdateDtFromParticleSpeeds(); + } + + // If position and velocity are synchronized, push velocity backward one half step if (evolve_scheme == 
EvolveScheme::Explicit) { ExplicitFillBoundaryEBUpdateAux(); @@ -175,25 +208,9 @@ WarpX::Evolve (int numsteps) // TODO: move out if (evolve_scheme == EvolveScheme::Explicit) { + // At the end of last step, push p by 0.5*dt to synchronize if (cur_time + dt[0] >= stop_time - 1.e-3*dt[0] || step == numsteps_max-1) { - // At the end of last step, push p by 0.5*dt to synchronize - FillBoundaryE(guard_cells.ng_FieldGather); - FillBoundaryB(guard_cells.ng_FieldGather); - if (fft_do_time_averaging) - { - FillBoundaryE_avg(guard_cells.ng_FieldGather); - FillBoundaryB_avg(guard_cells.ng_FieldGather); - } - UpdateAuxilaryData(); - FillBoundaryAux(guard_cells.ng_UpdateAux); - for (int lev = 0; lev <= finest_level; ++lev) { - mypc->PushP(lev, 0.5_rt*dt[lev], - *Efield_aux[lev][0],*Efield_aux[lev][1], - *Efield_aux[lev][2], - *Bfield_aux[lev][0],*Bfield_aux[lev][1], - *Bfield_aux[lev][2]); - } - is_synchronized = true; + Synchronize(); } } @@ -445,7 +462,7 @@ void WarpX::checkEarlyUnusedParams () void WarpX::ExplicitFillBoundaryEBUpdateAux () { WARPX_ALWAYS_ASSERT_WITH_MESSAGE(evolve_scheme == EvolveScheme::Explicit, - "Cannot call WarpX::ExplicitFillBoundaryEBUpdateAux wihtout Explicit evolve scheme set!"); + "Cannot call WarpX::ExplicitFillBoundaryEBUpdateAux without Explicit evolve scheme set!"); // At the beginning, we have B^{n} and E^{n}. // Particles have p^{n} and x^{n}. diff --git a/Source/WarpX.H b/Source/WarpX.H index 28bb6215a45..5065fa73ff9 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -117,6 +117,11 @@ public: void Evolve (int numsteps = -1); + /** Push momentum one half step forward to synchronize with position. + * Also sets is_synchronized to `true`. + */ + void Synchronize (); + // // Functions used by implicit solvers // @@ -606,6 +611,12 @@ public: /** Determine the timestep of the simulation. */ void ComputeDt (); + /** + * Determine the simulation timestep from the maximum speed of all particles + * Sets timestep so that a particle can only cross cfl*dx cells per timestep. 
+     */
+    void UpdateDtFromParticleSpeeds ();
+
     /** Print main PIC parameters to stdout */
     void PrintMainPICparameters ();
@@ -1482,6 +1493,7 @@ private:
     amrex::Vector<amrex::Real> t_new;
     amrex::Vector<amrex::Real> t_old;
     amrex::Vector<amrex::Real> dt;
+    static utils::parser::IntervalsParser dt_update_interval; // How often to update the timestep when using adaptive timestepping

     // Particle container
     std::unique_ptr<MultiParticleContainer> mypc;
@@ -1643,7 +1655,9 @@ private:
     int num_injected_species = -1;
     amrex::Vector<int> injected_plasma_species;

+    // Timestepping parameters
     std::optional<amrex::Real> m_const_dt;
+    std::optional<amrex::Real> m_max_dt;

     // Macroscopic properties
     std::unique_ptr<MacroscopicProperties> m_macroscopic_properties;
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index ef1668de4c0..e9c518e8f61 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -182,6 +182,8 @@ bool WarpX::do_multi_J = false;
 int WarpX::do_multi_J_n_depositions;
 bool WarpX::safe_guard_cells = false;

+utils::parser::IntervalsParser WarpX::dt_update_interval;
+
 std::map<std::string, amrex::MultiFab *> WarpX::multifab_map;
 std::map<std::string, amrex::iMultiFab *> WarpX::imultifab_map;
@@ -775,7 +777,12 @@ WarpX::ReadParameters ()
     pp_boundary.query("verboncoeur_axis_correction", verboncoeur_axis_correction);
 #endif

+    // Read timestepping options
     utils::parser::queryWithParser(pp_warpx, "const_dt", m_const_dt);
+    utils::parser::queryWithParser(pp_warpx, "max_dt", m_max_dt);
+    std::vector<std::string> dt_interval_vec = {"-1"};
+    pp_warpx.queryarr("dt_update_interval", dt_interval_vec);
+    dt_update_interval = utils::parser::IntervalsParser(dt_interval_vec);

     // Filter currently not working with FDTD solver in RZ geometry: turn OFF by default
     // (see https://github.com/ECP-WarpX/WarpX/issues/1943)
From cb5d0c80a0e29d2318c61303670b581c9a484bb2 Mon Sep 17 00:00:00 2001
From: Alfred Mishi <140518333+Haavaan@users.noreply.github.com>
Date: Wed, 18 Sep 2024 19:38:55 +0200
Subject: [PATCH 61/91] Integrated Green's Function Poisson Solver (#4937)

* [WIP]Integrated Green Function Poisson Solver
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* updated inputs
* Modified IGF.cpp file
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* Latest IGF.cpp
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* Fixed the IGF file
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* IGF final
* IGF file final
* Added BL_PROFILE in IGF file
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* Updated IntegratedGreenFunctionSolver
* Check Compilation Flags
* Added Multiplication
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* removed inputs
* Update Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp

Co-authored-by: Remi Lehe
* change import orders
* fix macros and add timers
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* update checksums and changed ifdefs
* mod GNUmake
* fix GNUmakefile for real
* heffte ctest
* fix test input name
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* Apply suggestions from code review
* Update Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp
* Start refactoring
* Continue refactoring
* Continue refactoring
* Finish refactoring
* Fix unused variable
* Fix compilation without heFFTe
* Fix bugs without heFFTe
* Remove unneeded ifdef
* Update
checksum --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Alfred Haavaan Mishi Co-authored-by: Alfred Haavaan Mishi Co-authored-by: Alfred Haavaan Mishi Co-authored-by: Arianna Formenti Co-authored-by: Remi Lehe --- .../open_bc_poisson_solver/CMakeLists.txt | 12 + ...puts_test_3d_open_bc_poisson_solver_heffte | 1 + GNUmakefile | 1 + .../test_3d_open_bc_poisson_solver.json | 20 +- .../fields/IntegratedGreenFunctionSolver.H | 31 +++ .../fields/IntegratedGreenFunctionSolver.cpp | 258 ++++++++++++------ 6 files changed, 232 insertions(+), 91 deletions(-) create mode 100644 Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_heffte diff --git a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt index c5ec4583da1..d6141f0b4ab 100644 --- a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt +++ b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt @@ -12,3 +12,15 @@ if(WarpX_FFT) OFF # dependency ) endif() + +if(WarpX_HEFFTE) + add_warpx_test( + test_3d_open_bc_poisson_solver_heffte # name + 3 # dims + 2 # nprocs + inputs_test_3d_open_bc_poisson_solver_heffte # inputs + analysis.py # analysis + diags/diag1000001 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_heffte b/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_heffte new file mode 100644 index 00000000000..4f0a50df037 --- /dev/null +++ b/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_heffte @@ -0,0 +1 @@ +FILE = inputs_test_3d_open_bc_poisson_solver diff --git a/GNUmakefile b/GNUmakefile index 86bdab2709f..fe10983b780 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -38,6 +38,7 @@ USE_OPENPMD = FALSE WarpxBinDir = Bin USE_FFT = FALSE +USE_HEFFTE = FALSE USE_RZ = FALSE USE_EB = FALSE diff --git a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json index 0453481ec60..0ca6bde570a 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json +++ b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json @@ -1,20 +1,20 @@ { "lev=0": { - "Bx": 100915975.15792876, - "By": 157610677.31483692, - "Bz": 2.404060922276648e-13, - "Ex": 4.725066923361703e+16, - "Ey": 3.0253961494347724e+16, - "Ez": 3276584.4383433666, + "Bx": 100915975.15403552, + "By": 157610677.3147734, + "Bz": 1.2276713711194638e-13, + "Ex": 4.725066923359797e+16, + "Ey": 3.025396149317578e+16, + "Ez": 3276584.4383433824, "rho": 10994013582437.197 }, "electron": { - "particle_momentum_x": 5.701279599504008e-19, - "particle_momentum_y": 3.650453172860547e-19, + "particle_momentum_x": 5.701279599509506e-19, + "particle_momentum_y": 3.650453172383178e-19, "particle_momentum_z": 1.145432768297242e-10, "particle_position_x": 17.31408691249785, - "particle_position_y": 0.2583691267187801, + "particle_position_y": 0.25836912671878015, "particle_position_z": 10066.329600000008, "particle_weight": 19969036501.910976 } -} +} \ No newline at end of file diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.H b/Source/ablastr/fields/IntegratedGreenFunctionSolver.H index 97ffdb5ac36..28885e167a3 100644 --- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.H +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.H @@ -7,6 +7,8 @@ #ifndef ABLASTR_IGF_SOLVER_H #define 
ABLASTR_IGF_SOLVER_H

+#include <ablastr/constant.H>
+
 #include
 #include
 #include
@@ -47,6 +49,35 @@ namespace ablastr::fields
         return G;
     }

+    /** @brief Sum the integrated Green function over the eight corners of the cell around a given location
+     *
+     * @param[in] x x-coordinate of given location
+     * @param[in] y y-coordinate of given location
+     * @param[in] z z-coordinate of given location
+     * @param[in] dx cell size along x
+     * @param[in] dy cell size along y
+     * @param[in] dz cell size along z
+     *
+     * @return the sum of the integrated Green function G
+     */
+    AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+    amrex::Real
+    SumOfIntegratedPotential (amrex::Real x, amrex::Real y, amrex::Real z, amrex::Real dx, amrex::Real dy, amrex::Real dz)
+    {
+        using namespace amrex::literals;
+
+
+        amrex::Real const G_value = 1._rt/(4._rt*ablastr::constant::math::pi*ablastr::constant::SI::ep0) * (
+            IntegratedPotential( x+0.5_rt*dx, y+0.5_rt*dy, z+0.5_rt*dz )
+            - IntegratedPotential( x-0.5_rt*dx, y+0.5_rt*dy, z+0.5_rt*dz )
+            - IntegratedPotential( x+0.5_rt*dx, y-0.5_rt*dy, z+0.5_rt*dz )
+            + IntegratedPotential( x-0.5_rt*dx, y-0.5_rt*dy, z+0.5_rt*dz )
+            - IntegratedPotential( x+0.5_rt*dx, y+0.5_rt*dy, z-0.5_rt*dz )
+            + IntegratedPotential( x-0.5_rt*dx, y+0.5_rt*dy, z-0.5_rt*dz )
+            + IntegratedPotential( x+0.5_rt*dx, y-0.5_rt*dy, z-0.5_rt*dz )
+            - IntegratedPotential( x-0.5_rt*dx, y-0.5_rt*dy, z-0.5_rt*dz )
+        );
+
+        return G_value;
+    }
+
     /** @brief Compute the electrostatic potential using the Integrated Green Function method
      * as in http://dx.doi.org/10.1103/PhysRevSTAB.9.044204
      *
diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp
index 1aeee9d81d2..5b9aa940d6a 100644
--- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp
+++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp
@@ -27,7 +27,9 @@
 #include
 #include
-#include
+#if defined(ABLASTR_USE_FFT) && defined(ABLASTR_USE_HEFFTE)
+#include <heffte.h>
+#endif

 namespace ablastr::fields
 {
@@ -36,10 +38,16 @@ void
 computePhiIGF ( amrex::MultiFab const & rho,
                 amrex::MultiFab & phi,
                 std::array<amrex::Real, 3> const & cell_size,
-                amrex::BoxArray const & ba )
+                amrex::BoxArray const & ba)
 {
     using namespace amrex::literals;

+    BL_PROFILE_VAR_NS("ablastr::fields::computePhiIGF: FFTs", timer_ffts);
+    BL_PROFILE_VAR_NS("ablastr::fields::computePhiIGF: FFT plans", timer_plans);
+    BL_PROFILE_VAR_NS("ablastr::fields::computePhiIGF: parallel copies", timer_pcopies);
+
+    BL_PROFILE("ablastr::fields::computePhiIGF");
+
     // Define box that encompasses the full domain
     amrex::Box domain = ba.minimalBox();
     domain.surroundingNodes(); // get nodal points, since `phi` and `rho` are nodal
@@ -50,41 +58,87 @@ computePhiIGF ( amrex::MultiFab const & rho,
     int const nz = domain.length(2);

     // Allocate 2x wider arrays for the convolution of rho with the Green function
-    // This also defines the box arrays for the global FFT: contains only one box;
     amrex::Box const realspace_box = amrex::Box(
         {domain.smallEnd(0), domain.smallEnd(1), domain.smallEnd(2)},
         {2*nx-1+domain.smallEnd(0), 2*ny-1+domain.smallEnd(1), 2*nz-1+domain.smallEnd(2)},
         amrex::IntVect::TheNodeVector() );
+
+#if !defined(ABLASTR_USE_HEFFTE)
+    // Without distributed FFTs (i.e. without heFFTe):
+    // allocate the 2x wider array on a single box
     amrex::BoxArray const realspace_ba = amrex::BoxArray( realspace_box );
-    amrex::Box const spectralspace_box = amrex::Box(
-        {0,0,0},
-        {nx, 2*ny-1, 2*nz-1},
-        amrex::IntVect::TheNodeVector() );
-    amrex::BoxArray const spectralspace_ba = amrex::BoxArray( spectralspace_box );
     // Define a distribution mapping for the global FFT, with only one box
     amrex::DistributionMapping dm_global_fft;
     dm_global_fft.define( realspace_ba );
+#elif defined(ABLASTR_USE_HEFFTE)
+    // With distributed FFTs (i.e. with heFFTe):
+    // Define a new distribution mapping which is decomposed purely along z
+    // and has one box per MPI rank
+    int const nprocs = amrex::ParallelDescriptor::NProcs();
+    amrex::BoxArray realspace_ba;
+    amrex::DistributionMapping dm_global_fft;
+    {
+        int realspace_nx = realspace_box.length(0);
+        int realspace_ny = realspace_box.length(1);
+        int realspace_nz = realspace_box.length(2);
+        int minsize_z = realspace_nz / nprocs;
+        int nleft_z = realspace_nz - minsize_z*nprocs;
+
+        AMREX_ALWAYS_ASSERT(realspace_nz >= nprocs);
+        // We are going to split realspace_box in such a way that the first
+        // nleft_z boxes have minsize_z+1 nodes and the others minsize_z
+        // nodes. We do it this way instead of BoxArray::maxSize to make
+        // sure there are exactly nprocs boxes and there are no overlaps.
+        amrex::BoxList bl(amrex::IndexType::TheNodeType());
+        for (int iproc = 0; iproc < nprocs; ++iproc) {
+            int zlo, zhi;
+            if (iproc < nleft_z) {
+                zlo = iproc*(minsize_z+1);
+                zhi = zlo + minsize_z;
+
+            } else {
+                zlo = iproc*minsize_z + nleft_z;
+                zhi = zlo + minsize_z - 1;
+
+            }
+            amrex::Box tbx(amrex::IntVect(0,0,zlo),amrex::IntVect(realspace_nx-1,realspace_ny-1,zhi),amrex::IntVect(1));
+
+            tbx.shift(realspace_box.smallEnd());
+            bl.push_back(tbx);
+        }
+        realspace_ba.define(std::move(bl));
+        amrex::Vector<int> pmap(nprocs);
+        std::iota(pmap.begin(), pmap.end(), 0);
+        dm_global_fft.define(std::move(pmap));
+    }
+#endif
+
     // Allocate required arrays
     amrex::MultiFab tmp_rho = amrex::MultiFab(realspace_ba, dm_global_fft, 1, 0);
     tmp_rho.setVal(0);
     amrex::MultiFab tmp_G = amrex::MultiFab(realspace_ba, dm_global_fft, 1, 0);
     tmp_G.setVal(0);
-    // Allocate corresponding arrays in Fourier space
-    using SpectralField = amrex::FabArray< amrex::BaseFab< amrex::GpuComplex< amrex::Real > > >;
-    SpectralField tmp_rho_fft = SpectralField( spectralspace_ba, dm_global_fft, 1, 0 );
-    SpectralField tmp_G_fft = SpectralField( spectralspace_ba, dm_global_fft, 1, 0 );
-    // Copy from rho to tmp_rho

+    BL_PROFILE_VAR_START(timer_pcopies);
+    // Copy from rho including its ghost cells to tmp_rho
     tmp_rho.ParallelCopy( rho, 0, 0, 1, rho.nGrowVect(), amrex::IntVect::TheZeroVector() );
+    BL_PROFILE_VAR_STOP(timer_pcopies);
+
+#if !defined(ABLASTR_USE_HEFFTE)
+    // Without distributed FFTs (i.e. without heFFTe):
+    // We loop over the original box (not the 2x wider one), and the other quadrants by periodicity
+    amrex::BoxArray const& igf_compute_box = amrex::BoxArray( domain );
+#else
+    // With distributed FFTs (i.e. with heFFTe):
+    // We loop over the full 2x wider box, since 1 MPI rank does not necessarily own the data for the other quadrants
+    amrex::BoxArray const& igf_compute_box = tmp_G.boxArray();
+#endif

     // Compute the integrated Green function
-    {
-        BL_PROFILE("Initialize Green function");
-        amrex::BoxArray const domain_ba = amrex::BoxArray( domain );
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-        for (amrex::MFIter mfi(domain_ba, dm_global_fft,amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) {
+    for (amrex::MFIter mfi(igf_compute_box, dm_global_fft, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) {

         amrex::Box const bx = mfi.tilebox();
@@ -95,6 +149,7 @@ computePhiIGF ( amrex::MultiFab const & rho,
         amrex::Real const dx = cell_size[0];
         amrex::Real const dy = cell_size[1];
         amrex::Real const dz = cell_size[2];
+
         amrex::Array4<amrex::Real> const tmp_G_arr = tmp_G.array(mfi);
         amrex::ParallelFor( bx,
             [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept
@@ -106,17 +161,9 @@ computePhiIGF ( amrex::MultiFab const & rho,
                 amrex::Real const y = j0*dy;
                 amrex::Real const z = k0*dz;

-                amrex::Real const G_value = 1._rt/(4._rt*ablastr::constant::math::pi*ablastr::constant::SI::ep0) * (
-                    IntegratedPotential( x+0.5_rt*dx, y+0.5_rt*dy, z+0.5_rt*dz )
-                    - IntegratedPotential( x-0.5_rt*dx, y+0.5_rt*dy, z+0.5_rt*dz )
-                    - IntegratedPotential( x+0.5_rt*dx, y-0.5_rt*dy, z+0.5_rt*dz )
-                    - IntegratedPotential( x+0.5_rt*dx, y+0.5_rt*dy, z-0.5_rt*dz )
-                    + IntegratedPotential( x+0.5_rt*dx, y-0.5_rt*dy, z-0.5_rt*dz )
-                    + IntegratedPotential( x-0.5_rt*dx, y+0.5_rt*dy, z-0.5_rt*dz )
-                    + IntegratedPotential( x-0.5_rt*dx, y-0.5_rt*dy, z+0.5_rt*dz )
-                    - IntegratedPotential( x-0.5_rt*dx, y-0.5_rt*dy, z-0.5_rt*dz )
-                );
-
+#if !defined(ABLASTR_USE_HEFFTE)
+                // Without distributed FFTs (i.e. without heFFTe):
+                amrex::Real const G_value = SumOfIntegratedPotential(x , y , z , dx, dy, dz);
                 tmp_G_arr(i,j,k) = G_value;
                 // Fill the rest of the array by periodicity
                 if (i0>0) {tmp_G_arr(hi[0]+1-i0, j , k ) = G_value;}
@@ -126,71 +173,120 @@ computePhiIGF ( amrex::MultiFab const & rho,
                 if ((j0>0)&&(k0>0)) {tmp_G_arr(i , hi[1]+1-j0, hi[2]+1-k0) = G_value;}
                 if ((i0>0)&&(k0>0)) {tmp_G_arr(hi[0]+1-i0, j , hi[2]+1-k0) = G_value;}
                 if ((i0>0)&&(j0>0)&&(k0>0)) {tmp_G_arr(hi[0]+1-i0, hi[1]+1-j0, hi[2]+1-k0) = G_value;}
-            }
-        );
-    }
+#else
+                // With distributed FFTs (i.e. with heFFTe):
+                amrex::Real x_hi = dx*(hi[0]+2);
+                amrex::Real y_hi = dy*(hi[1]+2);
+                amrex::Real z_hi = dz*(hi[2]+2);
+                if ((i0< nx)&&(j0< ny)&&(k0< nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x     , y     , z     , dx, dy, dz); }
+                if ((i0< nx)&&(j0> ny)&&(k0< nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x     , y_hi-y, z     , dx, dy, dz); }
+                if ((i0< nx)&&(j0< ny)&&(k0> nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x     , y     , z_hi-z, dx, dy, dz); }
+                if ((i0> nx)&&(j0> ny)&&(k0< nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x_hi-x, y_hi-y, z     , dx, dy, dz); }
+                if ((i0< nx)&&(j0> ny)&&(k0> nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x     , y_hi-y, z_hi-z, dx, dy, dz); }
+                if ((i0> nx)&&(j0< ny)&&(k0> nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x_hi-x, y     , z_hi-z, dx, dy, dz); }
+                if ((i0> nx)&&(j0> ny)&&(k0> nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x_hi-x, y_hi-y, z_hi-z, dx, dy, dz); }
+                if ((i0> nx)&&(j0< ny)&&(k0< nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x_hi-x, y     , z     , dx, dy, dz); }
+#endif
+            }
+        );
     }

-    // Perform forward FFTs
-    auto forward_plan_rho = ablastr::math::anyfft::FFTplans(spectralspace_ba, dm_global_fft);
-    auto forward_plan_G = ablastr::math::anyfft::FFTplans(spectralspace_ba, dm_global_fft);
-    // Loop over boxes perform FFTs
-    for ( amrex::MFIter mfi(realspace_ba, dm_global_fft); mfi.isValid(); ++mfi ){
-
-        // Note: the size of the real-space box and spectral-space box
-        // differ when using real-to-complex FFT. When initializing
-        // the FFT plan, the valid dimensions are those of the real-space box.
-        const amrex::IntVect fft_size = realspace_ba[mfi].length();
-
-        // FFT of rho
-        forward_plan_rho[mfi] = ablastr::math::anyfft::CreatePlan(
-            fft_size, tmp_rho[mfi].dataPtr(),
-            reinterpret_cast<ablastr::math::anyfft::Complex*>(tmp_rho_fft[mfi].dataPtr()),
-            ablastr::math::anyfft::direction::R2C, AMREX_SPACEDIM);
-        ablastr::math::anyfft::Execute(forward_plan_rho[mfi]);
-
-        // FFT of G
-        forward_plan_G[mfi] = ablastr::math::anyfft::CreatePlan(
-            fft_size, tmp_G[mfi].dataPtr(),
-            reinterpret_cast<ablastr::math::anyfft::Complex*>(tmp_G_fft[mfi].dataPtr()),
-            ablastr::math::anyfft::direction::R2C, AMREX_SPACEDIM);
-        ablastr::math::anyfft::Execute(forward_plan_G[mfi]);
-
-    }
+    // Prepare to perform global FFT
+    // Since there is 1 MPI rank per box, here each MPI rank obtains its local box and the associated boxid
+    int local_boxid = amrex::ParallelDescriptor::MyProc(); // because of how we made the DistributionMapping
+    if (local_boxid < realspace_ba.size()) {
+        // When not using heFFTe, there is only one box (the global box)
+        // It is taken care of by MPI rank 0 ; other ranks have no work (hence the if condition)

+        amrex::Box local_nodal_box = realspace_ba[local_boxid];
+        amrex::Box local_box(local_nodal_box.smallEnd(), local_nodal_box.bigEnd());
+        local_box.shift(-realspace_box.smallEnd()); // This simplifies the setup because the global lo is zero now
+        // Since the domain decomposition is in the z-direction, setting up c_local_box is simple.
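+        // Illustrative aside (editorial sketch, not part of the original patch):
+        // a real-to-complex FFT of n real values stores only n/2+1 independent
+        // complex coefficients along the transformed (fastest) dimension, since
+        // the remaining ones follow from Hermitian symmetry, F(-k) = conj(F(k)).
+        // The padded array here is 2x wider than the domain, so e.g. for nx = 128:
+        //   int const n_real    = 2*128;        // 256 real values along x
+        //   int const n_complex = n_real/2 + 1; // 129 independent complex values
+        // This is why only the x-extent of c_local_box is reduced below, while
+        // the y- and z-extents keep their real-space sizes.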
+        amrex::Box c_local_box = local_box;
+        c_local_box.setBig(0, local_box.length(0)/2+1);
+
+        // Allocate array in spectral space
+        using SpectralField = amrex::BaseFab< amrex::GpuComplex< amrex::Real > > ;
+        SpectralField tmp_rho_fft(c_local_box, 1, amrex::The_Device_Arena());
+        SpectralField tmp_G_fft(c_local_box, 1, amrex::The_Device_Arena());
+        tmp_rho_fft.shift(realspace_box.smallEnd());
+        tmp_G_fft.shift(realspace_box.smallEnd());

-    // Multiply tmp_G_fft and tmp_rho_fft in spectral space
-    // Store the result in-place in Gtmp_G_fft, to save memory
-    amrex::Multiply( tmp_G_fft, tmp_rho_fft, 0, 0, 1, 0);
+        // Create FFT plans
+        BL_PROFILE_VAR_START(timer_plans);
+#if !defined(ABLASTR_USE_HEFFTE)
+        const amrex::IntVect fft_size = realspace_ba[local_boxid].length();
+        ablastr::math::anyfft::FFTplan forward_plan_rho = ablastr::math::anyfft::CreatePlan(
+            fft_size, tmp_rho[local_boxid].dataPtr(),
+            reinterpret_cast<ablastr::math::anyfft::Complex*>(tmp_rho_fft.dataPtr()),
+            ablastr::math::anyfft::direction::R2C, AMREX_SPACEDIM);
+        ablastr::math::anyfft::FFTplan forward_plan_G = ablastr::math::anyfft::CreatePlan(
+            fft_size, tmp_G[local_boxid].dataPtr(),
+            reinterpret_cast<ablastr::math::anyfft::Complex*>(tmp_G_fft.dataPtr()),
+            ablastr::math::anyfft::direction::R2C, AMREX_SPACEDIM);
+        ablastr::math::anyfft::FFTplan backward_plan = ablastr::math::anyfft::CreatePlan(
+            fft_size, tmp_G[local_boxid].dataPtr(),
+            reinterpret_cast<ablastr::math::anyfft::Complex*>( tmp_G_fft.dataPtr()),
+            ablastr::math::anyfft::direction::C2R, AMREX_SPACEDIM);
+#elif defined(ABLASTR_USE_HEFFTE)
+#if defined(AMREX_USE_CUDA)
+        heffte::fft3d_r2c<heffte::backend::cufft> fft
+#elif defined(AMREX_USE_HIP)
+        heffte::fft3d_r2c<heffte::backend::rocfft> fft
+#else
+        heffte::fft3d_r2c<heffte::backend::fftw> fft
+#endif
+            ({{local_box.smallEnd(0), local_box.smallEnd(1), local_box.smallEnd(2)},
+              {local_box.bigEnd(0), local_box.bigEnd(1), local_box.bigEnd(2)}},
+             {{c_local_box.smallEnd(0), c_local_box.smallEnd(1), c_local_box.smallEnd(2)},
+              {c_local_box.bigEnd(0), c_local_box.bigEnd(1), c_local_box.bigEnd(2)}},
+             0, amrex::ParallelDescriptor::Communicator());
+        using heffte_complex = typename heffte::fft_output<amrex::Real>::type;
+        heffte_complex* rho_fft_data = (heffte_complex*) tmp_rho_fft.dataPtr();
+        heffte_complex* G_fft_data = (heffte_complex*) tmp_G_fft.dataPtr();
+#endif
+        BL_PROFILE_VAR_STOP(timer_plans);

-    // Perform inverse FFT
-    auto backward_plan = ablastr::math::anyfft::FFTplans(spectralspace_ba, dm_global_fft);
-    // Loop over boxes perform FFTs
-    for ( amrex::MFIter mfi(spectralspace_ba, dm_global_fft); mfi.isValid(); ++mfi ){
+        // Perform forward FFTs
+        BL_PROFILE_VAR_START(timer_ffts);
+#if !defined(ABLASTR_USE_HEFFTE)
+        ablastr::math::anyfft::Execute(forward_plan_rho);
+        ablastr::math::anyfft::Execute(forward_plan_G);
+#elif defined(ABLASTR_USE_HEFFTE)
+        fft.forward(tmp_rho[local_boxid].dataPtr(), rho_fft_data);
+        fft.forward(tmp_G[local_boxid].dataPtr(), G_fft_data);
+#endif
+        BL_PROFILE_VAR_STOP(timer_ffts);

-        // Note: the size of the real-space box and spectral-space box
-        // differ when using real-to-complex FFT. When initializing
-        // the FFT plan, the valid dimensions are those of the real-space box.
-        const amrex::IntVect fft_size = realspace_ba[mfi].length();
+        // Multiply tmp_G_fft and tmp_rho_fft in spectral space
+        // Store the result in-place in Gtmp_G_fft, to save memory
+        tmp_G_fft.template mult<amrex::RunOn::Device>(tmp_rho_fft, 0, 0, 1);
+        amrex::Gpu::streamSynchronize();

-        // Inverse FFT: is done in-place, in the array of G
-        backward_plan[mfi] = ablastr::math::anyfft::CreatePlan(
-            fft_size, tmp_G[mfi].dataPtr(),
-            reinterpret_cast<ablastr::math::anyfft::Complex*>( tmp_G_fft[mfi].dataPtr()),
-            ablastr::math::anyfft::direction::C2R, AMREX_SPACEDIM);
-        ablastr::math::anyfft::Execute(backward_plan[mfi]);
+        // Perform backward FFT
+        BL_PROFILE_VAR_START(timer_ffts);
+#if !defined(ABLASTR_USE_HEFFTE)
+        ablastr::math::anyfft::Execute(backward_plan);
+#elif defined(ABLASTR_USE_HEFFTE)
+        fft.backward(G_fft_data, tmp_G[local_boxid].dataPtr());
+#endif
+        BL_PROFILE_VAR_STOP(timer_ffts);
+
+#if !defined(ABLASTR_USE_HEFFTE)
+        // Loop to destroy FFT plans
+        ablastr::math::anyfft::DestroyPlan(forward_plan_G);
+        ablastr::math::anyfft::DestroyPlan(forward_plan_rho);
+        ablastr::math::anyfft::DestroyPlan(backward_plan);
+#endif
     }
-    // Normalize, since (FFT + inverse FFT) results in a factor N
+
+    // Normalize, since (FFT + inverse FFT) results in a factor N
     const amrex::Real normalization = 1._rt / realspace_box.numPts();
     tmp_G.mult( normalization );

+    BL_PROFILE_VAR_START(timer_pcopies);
     // Copy from tmp_G to phi
-    phi.ParallelCopy( tmp_G, 0, 0, 1, amrex::IntVect::TheZeroVector(), phi.nGrowVect() );
-
-    // Loop to destroy FFT plans
-    for ( amrex::MFIter mfi(spectralspace_ba, dm_global_fft); mfi.isValid(); ++mfi ){
-        ablastr::math::anyfft::DestroyPlan(forward_plan_G[mfi]);
-        ablastr::math::anyfft::DestroyPlan(forward_plan_rho[mfi]);
-        ablastr::math::anyfft::DestroyPlan(backward_plan[mfi]);
-    }
+    phi.ParallelCopy( tmp_G, 0, 0, 1, amrex::IntVect::TheZeroVector(), phi.nGrowVect());
+    BL_PROFILE_VAR_STOP(timer_pcopies);
 }
 } // namespace ablastr::fields
From d85cc04e280be177e7f73d3a6e8934d2bd886072 Mon Sep 17 00:00:00 2001
From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
Date: Thu, 19 Sep 2024 00:50:09 -0700
Subject: [PATCH 62/91] Fix clang-tidy errors in `development` branch (#5296)

---
 Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp
index 5b9aa940d6a..ae11ad5087d 100644
--- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp
+++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp
@@ -193,12 +193,12 @@ computePhiIGF ( amrex::MultiFab const & rho,

     // Prepare to perform global FFT
     // Since there is 1 MPI rank per box, here each MPI rank obtains its local box and the associated boxid
-    int local_boxid = amrex::ParallelDescriptor::MyProc(); // because of how we made the DistributionMapping
+    const int local_boxid = amrex::ParallelDescriptor::MyProc(); // because of how we made the DistributionMapping
     if (local_boxid < realspace_ba.size()) {
         // When not using heFFTe, there is only one box (the global box)
         // It is taken care of by MPI rank 0 ; other ranks have no work (hence the if condition)

-        amrex::Box local_nodal_box = realspace_ba[local_boxid];
+        const amrex::Box local_nodal_box = realspace_ba[local_boxid];
         amrex::Box local_box(local_nodal_box.smallEnd(), local_nodal_box.bigEnd());
         local_box.shift(-realspace_box.smallEnd()); // This simplifies the setup because the global lo is zero now
         // Since the domain decomposition is in the
z-direction, setting up c_local_box is simple. From 180245e170d43bc7b81b89f3ebabd33f804c5b13 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Thu, 19 Sep 2024 15:54:02 +0200 Subject: [PATCH 63/91] Docs: add missing references in Science Highlights (#5288) * add missing references * add missing space --- Docs/source/highlights.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst index 66570644bdc..09156072cad 100644 --- a/Docs/source/highlights.rst +++ b/Docs/source/highlights.rst @@ -90,6 +90,11 @@ Scientific works in laser-ion acceleration and laser-matter interaction. Physical Review Research **6**, 033148, 2024. `DOI:10.1103/PhysRevResearch.6.033148 `__ +#. Zaïm N, Sainte-Marie A, Fedeli L, Bartoli P, Huebl A, Leblanc A, Vay J-L, Vincenti H. + **Light-matter interaction near the Schwinger limit using tightly focused doppler-boosted lasers**. + Physical Review Letters **132**, 175002, 2024. + `DOI:10.1103/PhysRevLett.132.175002 `__ + #. Knight B, Gautam C, Stoner C, Egner B, Smith J, Orban C, Manfredi J, Frische K, Dexter M, Chowdhury E, Patnaik A (2023). **Detailed Characterization of a kHz-rate Laser-Driven Fusion at a Thin Liquid Sheet with a Neutron Detection Suite**. High Power Laser Science and Engineering, 1-13, 2023. @@ -110,6 +115,11 @@ Scientific works in laser-ion acceleration and laser-matter interaction. Phys. Rev. Accel. Beams **25**, 093402, 2022. `DOI:10.1103/PhysRevAccelBeams.25.093402 `__ +#. Fedeli L, Sainte-Marie A, Zaïm N, Thévenet M, Vay J-L, Myers A, Quéré F, Vincenti H. + **Probing strong-field QED with Doppler-boosted PetaWatt-class lasers**. + Physical Review Letters **127**, 114801, 2021. + `DOI:10.1103/PhysRevLett.127.114801 `__ + Particle Accelerator & Beam Physics *********************************** From 7e22ae76f753998bab07d5af33f0f93b0ed854ad Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 20 Sep 2024 10:39:13 -0700 Subject: [PATCH 64/91] AMReX: Weekly Update (#5298) --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index a1e7f5affda..28bfaaf57a7 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -131,7 +131,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 028638564f7be0694b9898f8d4088cdbf9a6f9f5 && cd - + cd ../amrex && git checkout --detach 3734079379bb6b2a3850d197241f6b2c3b3bfa7d && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index e3682b69ff5..72642b575e8 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -279,7 +279,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "028638564f7be0694b9898f8d4088cdbf9a6f9f5" +set(WarpX_amrex_branch "3734079379bb6b2a3850d197241f6b2c3b3bfa7d" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") From 58b024ae8a2dec0afa786b0fdb50dcbf2ee9a386 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 23 Sep 2024 09:36:02 -0700 Subject: [PATCH 65/91] Doc: HPC3 has CMake 3.30.2 (#5300) HPC3 (UCI) now has a modern CMake module. 
Use this instead of installing our own. --- Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example | 2 +- Tools/machines/hpc3-uci/install_gpu_dependencies.sh | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example b/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example index 27b6a59592e..970dc980347 100644 --- a/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example +++ b/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example @@ -6,7 +6,7 @@ export MY_PROFILE=$(cd $(dirname $BASH_SOURCE) && pwd)"/"$(basename $BASH_SOURCE if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your $MY_PROFILE file! Please edit its line 2 to continue!"; return; fi # required dependencies -module load cmake/3.22.1 # we need 3.24+ - installing via pipx until module is available +module load cmake/3.30.2 module load gcc/11.2.0 module load cuda/11.7.1 module load openmpi/4.1.2/gcc.11.2.0 diff --git a/Tools/machines/hpc3-uci/install_gpu_dependencies.sh b/Tools/machines/hpc3-uci/install_gpu_dependencies.sh index 56f2bff4025..c4c31dd4066 100755 --- a/Tools/machines/hpc3-uci/install_gpu_dependencies.sh +++ b/Tools/machines/hpc3-uci/install_gpu_dependencies.sh @@ -119,7 +119,6 @@ python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel python3 -m pip install --upgrade setuptools python3 -m pip install --upgrade pipx -python3 -m pipx install --upgrade cmake python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas From 1da2ea0792c65777016fec25081744fc65664b60 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 23 Sep 2024 09:37:25 -0700 Subject: [PATCH 66/91] LUMI (CSC): September Upgrade (#5301) Update the LUMI documentation to the latest major system upgrade from last week. --- Tools/machines/lumi-csc/lumi_warpx.profile.example | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Tools/machines/lumi-csc/lumi_warpx.profile.example b/Tools/machines/lumi-csc/lumi_warpx.profile.example index 13fb6b1d81e..915f976f4ab 100644 --- a/Tools/machines/lumi-csc/lumi_warpx.profile.example +++ b/Tools/machines/lumi-csc/lumi_warpx.profile.example @@ -2,9 +2,9 @@ #export proj="project_..." # required dependencies -module load LUMI/23.09 partition/G -module load rocm/5.2.3 # waiting for 5.5 for next bump -module load buildtools/23.09 +module load LUMI/24.03 partition/G +module load rocm/6.0.3 +module load buildtools/24.03 # optional: just an additional text editor module load nano @@ -27,7 +27,7 @@ export PATH=${SW_DIR}/hdf5-1.14.1.2/bin:${PATH} export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} # optional: for Python bindings or libEnsemble -module load cray-python/3.10.10 +module load cray-python/3.11.7 if [ -d "${SW_DIR}/venvs/warpx-lumi" ] then From 27a72489da7a08e3d88669c0b4afc4c5fef3c918 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 23 Sep 2024 16:06:49 -0700 Subject: [PATCH 67/91] `magnetostatic_eb` test: fix warnings (#5302) Fix observed warnings for latex strings, use raw strings. 
--- .../magnetostatic_eb/inputs_test_3d_magnetostatic_eb_picmi.py | 4 ++-- .../magnetostatic_eb/inputs_test_rz_magnetostatic_eb_picmi.py | 4 ++-- Python/pywarpx/WarpX.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb_picmi.py b/Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb_picmi.py index d3c35daf261..ff450b92bc7 100755 --- a/Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb_picmi.py +++ b/Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb_picmi.py @@ -225,7 +225,7 @@ def Er_an(r): er_err = np.abs(Er_mean[r_idx] - Er_an(r_sub)).max() / np.abs(Er_an(r_sub)).max() -plt.ylabel("$E_r$ (V/m)") +plt.ylabel(r"$E_r$ (V/m)") plt.xlabel("r (m)") plt.title("Max % Error: {} %".format(er_err * 100.0)) plt.tight_layout() @@ -298,7 +298,7 @@ def Bt_an(r): bt_err = np.abs(Bt_mean[r_idx] - Bt_an(r_sub)).max() / np.abs(Bt_an(r_sub)).max() -plt.ylabel("$B_{\Theta}$ (T)") +plt.ylabel(r"$B_{\Theta}$ (T)") plt.xlabel("r (m)") plt.title("Max % Error: {} %".format(bt_err * 100.0)) plt.tight_layout() diff --git a/Examples/Tests/magnetostatic_eb/inputs_test_rz_magnetostatic_eb_picmi.py b/Examples/Tests/magnetostatic_eb/inputs_test_rz_magnetostatic_eb_picmi.py index d0f1787a5a2..ff7767181f4 100755 --- a/Examples/Tests/magnetostatic_eb/inputs_test_rz_magnetostatic_eb_picmi.py +++ b/Examples/Tests/magnetostatic_eb/inputs_test_rz_magnetostatic_eb_picmi.py @@ -195,7 +195,7 @@ def Er_an(r): er_err = np.abs(Er_mean[r_idx] - Er_an(r_sub)).max() / np.abs(Er_an(r_sub)).max() -plt.ylabel("$E_r$ (V/m)") +plt.ylabel(r"$E_r$ (V/m)") plt.xlabel("r (m)") plt.title("Max % Error: {} %".format(er_err * 100.0)) plt.tight_layout() @@ -246,7 +246,7 @@ def Bth_an(r): bth_err = np.abs(Bth_mean[r_idx] - Bth_an(r_sub)).max() / np.abs(Bth_an(r_sub)).max() -plt.ylabel("$B_{\Theta}$ (T)") +plt.ylabel(r"$B_{\Theta}$ (T)") plt.xlabel("r (m)") plt.title("Max % Error: {} %".format(bth_err * 100.0)) plt.tight_layout() diff --git a/Python/pywarpx/WarpX.py b/Python/pywarpx/WarpX.py index 9b3aaa27636..9ef7019cda9 100644 --- a/Python/pywarpx/WarpX.py +++ b/Python/pywarpx/WarpX.py @@ -137,7 +137,7 @@ def write_inputs(self, filename="inputs", **kw): for arg in argv: # This prints the name of the input group (prefix) as a header # before each group to make the input file more human readable - prefix_new = re.split(" |\.", arg)[0] + prefix_new = re.split(r" |\.", arg)[0] if prefix_new != prefix_old: if prefix_old != "": ff.write("\n") From d0c304016b6a0e3199a15de35cfa93d5062d9e8d Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 23 Sep 2024 16:13:18 -0700 Subject: [PATCH 68/91] Docs: Lonestar6 GPUs (TACC) (#3673) Add a documentation on how to run on Lonestar6 GPUs (TACC). 
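For reference, the batch-script template added by this commit (lonestar6_a100.sbatch)
is not reproduced in full in this excerpt. A minimal sketch of such a script — all
values here are editorial assumptions (partition name, node/task counts, and TACC's
ibrun launcher), not necessarily the shipped template:

    #!/bin/bash
    #SBATCH -J warpx               # job name (assumed)
    #SBATCH -p gpu-a100            # Lonestar6 A100 partition (assumed)
    #SBATCH -N 2                   # number of nodes
    #SBATCH --ntasks-per-node 2    # one MPI rank per GPU; 2 A100s per node
    #SBATCH -t 00:30:00
    ibrun ./warpx.3d inputs        # ibrun is TACC's MPI launch wrapper

See the full template under Tools/machines/lonestar6-tacc/ in the diff below.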
---
 Docs/source/install/hpc.rst                  |   1 +
 Docs/source/install/hpc/lonestar6.rst        | 139 +++++++++++++++
 .../install_a100_dependencies.sh             | 168 ++++++++++++++++++
 .../lonestar6-tacc/lonestar6_a100.sbatch     |  41 +++++
 .../lonestar6_warpx_a100.profile.example     |  59 ++++++
 5 files changed, 408 insertions(+)
 create mode 100644 Docs/source/install/hpc/lonestar6.rst
 create mode 100755 Tools/machines/lonestar6-tacc/install_a100_dependencies.sh
 create mode 100644 Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch
 create mode 100644 Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example

diff --git a/Docs/source/install/hpc.rst b/Docs/source/install/hpc.rst
index af4c0fe3e61..35884050a59 100644
--- a/Docs/source/install/hpc.rst
+++ b/Docs/source/install/hpc.rst
@@ -43,6 +43,7 @@ This section documents quick-start guides for a selection of supercomputers
    hpc/lassen
    hpc/lawrencium
    hpc/leonardo
+   hpc/lonestar6
    hpc/lumi
    hpc/lxplus
    hpc/ookami
diff --git a/Docs/source/install/hpc/lonestar6.rst b/Docs/source/install/hpc/lonestar6.rst
new file mode 100644
index 00000000000..81795545da3
--- /dev/null
+++ b/Docs/source/install/hpc/lonestar6.rst
@@ -0,0 +1,139 @@
+.. _building-lonestar6:
+
+Lonestar6 (TACC)
+================
+
+The `Lonestar6 cluster `_ is located at `TACC `__.
+
+
+Introduction
+------------
+
+If you are new to this system, **please see the following resources**:
+
+* `TACC user guide `__
+* Batch system: `Slurm `__
+* `Jupyter service `__
+* `Filesystem directories `__:
+
+  * ``$HOME``: per-user home directory, backed up (10 GB)
+  * ``$WORK``: per-user production directory, not backed up, not purged, Lustre (1 TB)
+  * ``$SCRATCH``: per-user production directory, not backed up, purged every 10 days, Lustre (no limits, 8PByte total)
+
+
+Installation
+------------
+
+Use the following commands to download the WarpX source code and switch to the correct branch:
+
+.. code-block:: bash
+
+   git clone https://github.com/ECP-WarpX/WarpX.git $HOME/src/warpx
+
+We use system software modules, add environment hints and further dependencies via the file ``$HOME/lonestar6_warpx_a100.profile``.
+Create it now:
+
+.. code-block:: bash
+
+   cp $HOME/src/warpx/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example $HOME/lonestar6_warpx_a100.profile
+
+.. dropdown:: Script Details
+   :color: light
+   :icon: info
+   :animate: fade-in-slide-down
+
+   .. literalinclude:: ../../../../Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example
+      :language: bash
+
+Edit the 2nd line of this script, which sets the ``export proj=""`` variable.
+For example, if you are member of the project ``abcde``, then run ``nano $HOME/lonestar6_warpx_a100.profile`` and edit line 2 to read:
+
+.. code-block:: bash
+
+   export proj="abcde"
+
+Exit the ``nano`` editor with ``Ctrl`` + ``O`` (save) and then ``Ctrl`` + ``X`` (exit).
+
+.. important::
+
+   Now, and as the first step on future logins to Lonestar6, activate these environment settings:
+
+   .. code-block:: bash
+
+      source $HOME/lonestar6_warpx_a100.profile
+
+Finally, since Lonestar6 does not yet provide software modules for some of our dependencies, install them once:
+
+.. code-block:: bash
+
+   bash $HOME/src/warpx/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh
+   source ${SW_DIR}/venvs/warpx-a100/bin/activate
+
+.. dropdown:: Script Details
+   :color: light
+   :icon: info
+   :animate: fade-in-slide-down
+
+   .. literalinclude:: ../../../../Tools/machines/lonestar6-tacc/install_a100_dependencies.sh
+      :language: bash
+
+
+.. _building-lonestar6-compilation:
+
+Compilation
+-----------
+
+Use the following :ref:`cmake commands ` to compile the application executable:
+
+.. code-block:: bash
+
+   cd $HOME/src/warpx
+   rm -rf build_gpu
+
+   cmake -S . -B build_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3"
+   cmake --build build_gpu -j 16
+
+The WarpX application executables are now in ``$HOME/src/warpx/build_gpu/bin/``.
+Additionally, the following commands will install WarpX as a Python module:
+
+.. code-block:: bash
+
+   cd $HOME/src/warpx
+   rm -rf build_gpu_py
+
+   cmake -S . -B build_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3"
+   cmake --build build_gpu_py -j 16 --target pip_install
+
+Now, you can :ref:`submit Lonestar6 compute jobs ` for WarpX :ref:`Python (PICMI) scripts ` (:ref:`example scripts `).
+Or, you can use the WarpX executables to submit Lonestar6 jobs (:ref:`example inputs `).
+For executables, you can reference their location in your :ref:`job script ` or copy them to a location in ``$WORK`` or ``$SCRATCH``.
+
+
+.. _running-cpp-lonestar6:
+
+Running
+-------
+
+.. _running-cpp-lonestar6-A100-GPUs:
+
+A100 GPUs (40 GB)
+^^^^^^^^^^^^^^^^^
+
+`84 GPU nodes, each with 2 A100 GPUs (40 GB) `__.
+
+The batch script below can be used to run a WarpX simulation on multiple nodes (change ``-N`` accordingly) on the Lonestar6 supercomputer at TACC.
+Replace descriptions between chevrons ``<>`` with relevant values, for instance ``<input file>`` could be ``plasma_mirror_inputs``.
+Note that we run one MPI rank per GPU.
+
+
+.. literalinclude:: ../../../../Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch
+   :language: bash
+   :caption: You can copy this file from ``Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch``.
+
+To run a simulation, copy the lines above to a file ``lonestar6_a100.sbatch`` and run
+
+.. code-block:: bash
+
+   sbatch lonestar6_a100.sbatch
+
+to submit the job.
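+
+For short interactive tests, you do not need a batch script: the profile
+sourced during installation defines the convenience aliases ``getNode`` and
+``runNode`` (see ``lonestar6_warpx_a100.profile.example`` below). A minimal
+sketch, assuming a compiled executable and an input file in the current
+directory:
+
+.. code-block:: bash
+
+   getNode                                              # interactive GPU node for 1 hour
+   srun $HOME/src/warpx/build_gpu/bin/<executable> <input file>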
diff --git a/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh b/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh
new file mode 100755
index 00000000000..cd29664a978
--- /dev/null
+++ b/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh
@@ -0,0 +1,168 @@
+#!/bin/bash
+#
+# Copyright 2023 The WarpX Community
+#
+# This file is part of WarpX.
+#
+# Author: Axel Huebl
+# License: BSD-3-Clause-LBNL
+
+# Exit on first error encountered #############################################
+#
+set -eu -o pipefail
+
+
+# Check: ######################################################################
+#
+# Was lonestar6_warpx_a100.profile sourced and configured correctly?
+if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your lonestar6_warpx_a100.profile file! Please edit its line 2 to continue!"; exit 1; fi
+
+
+# Remove old dependencies #####################################################
+#
+SW_DIR="${WORK}/sw/lonestar6/a100"
+rm -rf ${SW_DIR}
+mkdir -p ${SW_DIR}
+
+# remove common user mistakes in python, located in .local instead of a venv
+python3 -m pip uninstall -qq -y pywarpx
+python3 -m pip uninstall -qq -y warpx
+python3 -m pip uninstall -qqq -y mpi4py 2>/dev/null || true
+
+
+# General extra dependencies ##################################################
+#
+
+# tmpfs build directory: avoids issues often seen with $HOME and is faster
+build_dir=$(mktemp -d)
+
+# c-blosc (I/O compression)
+if [ -d $HOME/src/c-blosc ]
+then
+  cd $HOME/src/c-blosc
+  git fetch --prune
+  git checkout v1.21.1
+  cd -
+else
+  git clone -b v1.21.1 https://github.com/Blosc/c-blosc.git $HOME/src/c-blosc
+fi
+rm -rf $HOME/src/c-blosc-a100-build
+cmake -S $HOME/src/c-blosc -B ${build_dir}/c-blosc-a100-build -DBUILD_TESTS=OFF -DBUILD_BENCHMARKS=OFF -DDEACTIVATE_AVX2=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/c-blosc-1.21.1
+cmake --build ${build_dir}/c-blosc-a100-build --target install --parallel 16
+rm -rf ${build_dir}/c-blosc-a100-build
+
+# ADIOS2
+if [ -d $HOME/src/adios2 ]
+then
+  cd $HOME/src/adios2
+  git fetch --prune
+  git checkout v2.8.3
+  cd -
+else
+  git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2
+fi
+rm -rf $HOME/src/adios2-a100-build
+cmake -S $HOME/src/adios2 -B ${build_dir}/adios2-a100-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3
+cmake --build ${build_dir}/adios2-a100-build --target install -j 16
+rm -rf ${build_dir}/adios2-a100-build
+
+# BLAS++ (for PSATD+RZ)
+if [ -d $HOME/src/blaspp ]
+then
+  cd $HOME/src/blaspp
+  git fetch --prune
+  git checkout v2024.05.31
+  cd -
+else
+  git clone -b v2024.05.31 https://github.com/icl-utk-edu/blaspp.git $HOME/src/blaspp
+fi
+rm -rf $HOME/src/blaspp-a100-build
+cmake -S $HOME/src/blaspp -B ${build_dir}/blaspp-a100-build -Duse_openmp=OFF -Dgpu_backend=cuda -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=${SW_DIR}/blaspp-2024.05.31
+cmake --build ${build_dir}/blaspp-a100-build --target install --parallel 16
+rm -rf ${build_dir}/blaspp-a100-build
+
+# LAPACK++ (for PSATD+RZ)
+if [ -d $HOME/src/lapackpp ]
+then
+  cd $HOME/src/lapackpp
+  git fetch --prune
+  git checkout v2024.05.31
+  cd -
+else
+  git clone -b v2024.05.31 https://github.com/icl-utk-edu/lapackpp.git $HOME/src/lapackpp
+fi
+rm -rf $HOME/src/lapackpp-a100-build
+CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S $HOME/src/lapackpp -B ${build_dir}/lapackpp-a100-build -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=${SW_DIR}/lapackpp-2024.05.31
+cmake --build ${build_dir}/lapackpp-a100-build --target install --parallel 16
+rm -rf ${build_dir}/lapackpp-a100-build
+
+# heFFTe
+if [ -d $HOME/src/heffte ]
+then
+  cd $HOME/src/heffte
+  git fetch --prune
+  git checkout v2.4.0
+  cd -
+else
+  git clone -b v2.4.0 https://github.com/icl-utk-edu/heffte.git ${HOME}/src/heffte
+fi
+rm -rf ${HOME}/src/heffte-a100-build
+cmake \
+    -S ${HOME}/src/heffte \
+    -B ${build_dir}/heffte-a100-build \
+    -DBUILD_SHARED_LIBS=ON \
+    -DCMAKE_BUILD_TYPE=Release \
+    -DCMAKE_CXX_STANDARD=17 \
+    -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON \
+    -DCMAKE_INSTALL_PREFIX=${SW_DIR}/heffte-2.4.0 \
+    -DHeffte_DISABLE_GPU_AWARE_MPI=OFF \
+    -DHeffte_ENABLE_AVX=OFF \
+    -DHeffte_ENABLE_AVX512=OFF \
-DHeffte_ENABLE_FFTW=OFF \ + -DHeffte_ENABLE_CUDA=ON \ + -DHeffte_ENABLE_ROCM=OFF \ + -DHeffte_ENABLE_ONEAPI=OFF \ + -DHeffte_ENABLE_MKL=OFF \ + -DHeffte_ENABLE_DOXYGEN=OFF \ + -DHeffte_SEQUENTIAL_TESTING=OFF \ + -DHeffte_ENABLE_TESTING=OFF \ + -DHeffte_ENABLE_TRACING=OFF \ + -DHeffte_ENABLE_PYTHON=OFF \ + -DHeffte_ENABLE_FORTRAN=OFF \ + -DHeffte_ENABLE_SWIG=OFF \ + -DHeffte_ENABLE_MAGMA=OFF +cmake --build ${build_dir}/heffte-a100-build --target install --parallel 16 +rm -rf ${build_dir}/heffte-a100-build + + +# Python ###################################################################### +# +python3 -m pip install --upgrade pip +python3 -m pip install --upgrade virtualenv +python3 -m pip cache purge +rm -rf ${SW_DIR}/venvs/warpx-a100 +python3 -m venv ${SW_DIR}/venvs/warpx-a100 +source ${SW_DIR}/venvs/warpx-a100/bin/activate +python3 -m pip install --upgrade pip +python3 -m pip install --upgrade build +python3 -m pip install --upgrade packaging +python3 -m pip install --upgrade wheel +python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade cython +python3 -m pip install --upgrade numpy +python3 -m pip install --upgrade pandas +python3 -m pip install --upgrade scipy +python3 -m pip install --upgrade mpi4py --no-cache-dir --no-build-isolation --no-binary mpi4py +python3 -m pip install --upgrade openpmd-api +python3 -m pip install --upgrade matplotlib +python3 -m pip install --upgrade yt +# install or update WarpX dependencies +python3 -m pip install --upgrade -r $HOME/src/warpx/requirements.txt +#python3 -m pip install --upgrade cupy-cuda12x # CUDA 12 compatible wheel +# optimas (based on libEnsemble & ax->botorch->gpytorch->pytorch) +#python3 -m pip install --upgrade torch # CUDA 12 compatible wheel +#python3 -m pip install --upgrade optimas[all] + + +# remove build temporary directory +rm -rf ${build_dir} diff --git a/Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch b/Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch new file mode 100644 index 00000000000..bef40942ed6 --- /dev/null +++ b/Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch @@ -0,0 +1,41 @@ +#!/bin/bash -l + +# Copyright 2021-2022 Axel Huebl, Kevin Gott +# +# This file is part of WarpX. 
+#
+# License: BSD-3-Clause-LBNL
+
+#SBATCH -t 00:10:00
+#SBATCH -N 2
+#SBATCH -J WarpX
+#SBATCH -A <proj>
+#SBATCH -p gpu-a100
+#SBATCH --exclusive
+#SBATCH --gpu-bind=none
+#SBATCH --gpus-per-node=4
+#SBATCH -o WarpX.o%j
+#SBATCH -e WarpX.e%j
+
+# executable & inputs file or python interpreter & PICMI script here
+EXE=./warpx
+INPUTS=inputs_small
+
+# pin to closest NIC to GPU
+export MPICH_OFI_NIC_POLICY=GPU
+
+# threads for OpenMP and threaded compressors per MPI rank
+export SRUN_CPUS_PER_TASK=32
+
+# depends on https://github.com/ECP-WarpX/WarpX/issues/2009
+#GPU_AWARE_MPI="amrex.the_arena_is_managed=0 amrex.use_gpu_aware_mpi=1"
+GPU_AWARE_MPI=""
+
+# CUDA visible devices are ordered inverse to local task IDs
+#   Reference: nvidia-smi topo -m
+srun --cpu-bind=cores bash -c "
+    export CUDA_VISIBLE_DEVICES=\$((3-SLURM_LOCALID));
+    ${EXE} ${INPUTS} ${GPU_AWARE_MPI}" \
+  > output.txt
diff --git a/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example b/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example
new file mode 100644
index 00000000000..148299f281c
--- /dev/null
+++ b/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example
@@ -0,0 +1,59 @@
+# please set your project account
+#export proj=""  # change me
+
+# required dependencies
+module purge
+module load TACC
+module load gcc/11.2.0
+module load cuda/12.2
+module load cmake
+module load mvapich2
+
+# optional: for QED support with detailed tables
+module load boost/1.84
+
+# optional: for openPMD and PSATD+RZ support
+module load phdf5/1.10.4
+
+SW_DIR="${WORK}/sw/lonestar6/a100"
+export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.1:${CMAKE_PREFIX_PATH}
+export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:${CMAKE_PREFIX_PATH}
+export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:${CMAKE_PREFIX_PATH}
+export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:${CMAKE_PREFIX_PATH}
+export CMAKE_PREFIX_PATH=${SW_DIR}/heffte-2.4.0:${CMAKE_PREFIX_PATH}
+
+export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.8.3/lib64:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH=${SW_DIR}/heffte-2.4.0/lib64:$LD_LIBRARY_PATH
+
+export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH}
+
+# optional: CCache
+#module load ccache  # TODO: request from support
+
+# optional: for Python bindings or libEnsemble
+module load python3/3.9.7
+
+if [ -d "$WORK/sw/lonestar6/a100/venvs/warpx-a100" ]
+then
+  source $WORK/sw/lonestar6/a100/venvs/warpx-a100/bin/activate
+fi
+
+# an alias to request an interactive batch node for one hour
+#   for parallel execution, start on the batch node: srun
+alias getNode="salloc -N 1 --ntasks-per-node=2 -t 1:00:00 -p gpu-a100 --gpu-bind=single:1 -c 32 -G 2 -A $proj"
+# an alias to run a command on a batch node for up to 30min
+#   usage: runNode
+alias runNode="srun -N 1 --ntasks-per-node=2 -t 0:30:00 -p gpu-a100 --gpu-bind=single:1 -c 32 -G 2 -A $proj"
+
+# optimize CUDA compilation for A100
+export AMREX_CUDA_ARCH=8.0
+
+# compiler environment hints
+export CC=$(which gcc)
+export CXX=$(which g++)
+export FC=$(which gfortran)
+export CUDACXX=$(which nvcc)
+export CUDAHOSTCXX=${CXX}

From 2e033c888b3d716de2f3701663ff37be2509f204 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 23 Sep 2024 17:29:22 -0700
Subject: [PATCH 69/91] [pre-commit.ci] pre-commit autoupdate (#5306) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.5 → v0.6.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.5...v0.6.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1b668d5931e..ae8881150c9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.5 + rev: v0.6.7 hooks: # Run the linter - id: ruff From 086b0a39c40e94829bc7a357e40d803ee02c3aac Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 23 Sep 2024 18:00:46 -0700 Subject: [PATCH 70/91] Add: `MultiFabRegister` (#5230) Add a central location to store all `MultiFab`s in. The `ablastr::fields::MultiFabRegister` provides us with a central location to allocate, initialize and exchange fields now. Co-authored-by: Remi Lehe Co-authored-by: Marco Acciarri Co-authored-by: Justin Angus Co-authored-by: Edoardo Zoni Co-authored-by: Luca Fedeli Co-authored-by: David Grote Co-authored-by: Marco Acciarri --- .github/workflows/windows.yml | 4 +- .../inputs_test_3d_embedded_boundary_picmi.py | 4 +- Python/pywarpx/fields.py | 151 ++-- Python/pywarpx/particle_containers.py | 2 +- Source/BoundaryConditions/PML.H | 87 +- Source/BoundaryConditions/PML.cpp | 371 +++----- Source/BoundaryConditions/PML_RZ.H | 33 +- Source/BoundaryConditions/PML_RZ.cpp | 169 ++-- Source/BoundaryConditions/WarpXEvolvePML.cpp | 55 +- .../WarpXFieldBoundaries.cpp | 63 +- Source/Diagnostics/BTDiagnostics.cpp | 50 +- .../ComputeDiagFunctors/DivBFunctor.H | 13 +- .../ComputeDiagFunctors/DivBFunctor.cpp | 9 +- .../ComputeDiagFunctors/DivEFunctor.H | 13 +- .../ComputeDiagFunctors/DivEFunctor.cpp | 10 +- .../ComputeDiagFunctors/JFunctor.cpp | 29 +- .../ComputeDiagFunctors/JdispFunctor.cpp | 12 +- .../ComputeDiagFunctors/RhoFunctor.cpp | 2 +- .../FlushFormats/FlushFormatCheckpoint.cpp | 70 +- .../FlushFormats/FlushFormatPlotfile.cpp | 103 ++- Source/Diagnostics/FullDiagnostics.cpp | 100 ++- Source/Diagnostics/ParticleIO.cpp | 6 +- .../Diagnostics/ReducedDiags/ChargeOnEB.cpp | 12 +- .../ReducedDiags/ColliderRelevant.cpp | 19 +- .../Diagnostics/ReducedDiags/FieldEnergy.cpp | 20 +- .../Diagnostics/ReducedDiags/FieldMaximum.cpp | 19 +- .../ReducedDiags/FieldMomentum.cpp | 19 +- .../Diagnostics/ReducedDiags/FieldProbe.cpp | 19 +- .../Diagnostics/ReducedDiags/FieldReduction.H | 22 +- .../ReducedDiags/LoadBalanceCosts.cpp | 10 +- .../ReducedDiags/ParticleExtrema.cpp | 20 +- Source/Diagnostics/SliceDiagnostic.cpp | 21 +- Source/Diagnostics/WarpXIO.cpp | 84 +- Source/EmbeddedBoundary/ParticleScraper.H | 6 +- .../EmbeddedBoundary/WarpXFaceExtensions.cpp | 49 +- Source/EmbeddedBoundary/WarpXInitEB.cpp | 42 +- Source/Evolve/WarpXEvolve.cpp | 371 +++++--- .../ElectrostaticSolver.H | 27 +- .../ElectrostaticSolver.cpp | 56 +- .../ElectrostaticSolvers/LabFrameExplicitES.H | 13 +- .../LabFrameExplicitES.cpp | 39 +- .../RelativisticExplicitES.H | 20 +- .../RelativisticExplicitES.cpp | 73 +- Source/FieldSolver/Fields.H | 60 -- .../ApplySilverMuellerBoundary.cpp | 34 +- .../FiniteDifferenceSolver/ComputeDivE.cpp | 18 +- .../FiniteDifferenceSolver/EvolveB.cpp | 67 +- 
.../FiniteDifferenceSolver/EvolveBPML.cpp | 20 +- .../FiniteDifferenceSolver/EvolveE.cpp | 73 +- .../FiniteDifferenceSolver/EvolveECTRho.cpp | 17 +- .../FiniteDifferenceSolver/EvolveEPML.cpp | 30 +- .../FiniteDifferenceSolver/EvolveF.cpp | 18 +- .../FiniteDifferenceSolver/EvolveFPML.cpp | 4 +- .../FiniteDifferenceSolver/EvolveG.cpp | 8 +- .../FiniteDifferenceSolver.H | 252 +++--- .../HybridPICModel/HybridPICModel.H | 123 +-- .../HybridPICModel/HybridPICModel.cpp | 251 +++--- .../HybridPICSolveE.cpp | 82 +- .../MacroscopicEvolveE.cpp | 17 +- .../MacroscopicProperties.cpp | 3 +- .../ImplicitSolvers/SemiImplicitEM.cpp | 4 +- .../ImplicitSolvers/ThetaImplicitEM.H | 8 - .../ImplicitSolvers/ThetaImplicitEM.cpp | 42 +- .../ImplicitSolvers/WarpXImplicitOps.cpp | 55 +- .../ImplicitSolvers/WarpXSolverVec.H | 37 +- .../ImplicitSolvers/WarpXSolverVec.cpp | 76 +- .../MagnetostaticSolver/MagnetostaticSolver.H | 18 +- .../MagnetostaticSolver.cpp | 130 +-- .../SpectralBaseAlgorithm.H | 3 +- .../SpectralBaseAlgorithm.cpp | 7 +- .../SpectralBaseAlgorithmRZ.H | 3 +- .../SpectralBaseAlgorithmRZ.cpp | 4 +- .../SpectralSolver/SpectralSolver.H | 11 +- .../SpectralSolver/SpectralSolverRZ.H | 4 +- .../SpectralSolver/SpectralSolverRZ.cpp | 6 +- Source/FieldSolver/WarpXPushFieldsEM.cpp | 394 +++++---- .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 81 +- Source/FieldSolver/WarpXSolveFieldsES.cpp | 13 +- .../FieldSolver/WarpX_QED_Field_Pushers.cpp | 42 +- Source/Fields.H | 131 +++ Source/Fluids/MultiFluidContainer.H | 25 +- Source/Fluids/MultiFluidContainer.cpp | 34 +- Source/Fluids/WarpXFluidContainer.H | 38 +- Source/Fluids/WarpXFluidContainer.cpp | 246 +++--- .../DivCleaner/ProjectionDivCleaner.H | 6 +- .../DivCleaner/ProjectionDivCleaner.cpp | 27 +- Source/Initialization/WarpXInitData.cpp | 294 ++++--- Source/Parallelization/WarpXComm.cpp | 449 ++++++---- Source/Parallelization/WarpXRegrid.cpp | 111 +-- Source/Particles/LaserParticleContainer.H | 15 +- Source/Particles/LaserParticleContainer.cpp | 37 +- Source/Particles/MultiParticleContainer.H | 28 +- Source/Particles/MultiParticleContainer.cpp | 65 +- Source/Particles/ParticleBoundaryBuffer.H | 4 +- Source/Particles/ParticleBoundaryBuffer.cpp | 2 +- Source/Particles/PhotonParticleContainer.H | 36 +- Source/Particles/PhotonParticleContainer.cpp | 22 +- Source/Particles/PhysicalParticleContainer.H | 56 +- .../Particles/PhysicalParticleContainer.cpp | 93 +- .../RigidInjectedParticleContainer.H | 34 +- .../RigidInjectedParticleContainer.cpp | 22 +- Source/Particles/WarpXParticleContainer.H | 19 +- Source/Particles/WarpXParticleContainer.cpp | 25 +- Source/Python/CMakeLists.txt | 1 + Source/Python/MultiFabRegister.cpp | 164 ++++ Source/Python/WarpX.cpp | 40 +- Source/Python/pyWarpX.cpp | 2 + Source/Utils/WarpXMovingWindow.cpp | 92 +- Source/Utils/WarpXUtil.H | 52 +- Source/Utils/WarpXUtil.cpp | 52 +- Source/WarpX.H | 347 ++------ Source/WarpX.cpp | 757 ++++++---------- Source/ablastr/fields/CMakeLists.txt | 6 + Source/ablastr/fields/Make.package | 1 + Source/ablastr/fields/MultiFabRegister.H | 819 ++++++++++++++++++ Source/ablastr/fields/MultiFabRegister.cpp | 626 +++++++++++++ Source/ablastr/fields/PoissonSolver.H | 44 +- 117 files changed, 5481 insertions(+), 3673 deletions(-) delete mode 100644 Source/FieldSolver/Fields.H create mode 100644 Source/Fields.H create mode 100644 Source/Python/MultiFabRegister.cpp create mode 100644 Source/ablastr/fields/MultiFabRegister.H create mode 100644 Source/ablastr/fields/MultiFabRegister.cpp diff --git 
a/.github/workflows/windows.yml b/.github/workflows/windows.yml index d6030743524..fc75ccb0141 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -10,7 +10,9 @@ jobs: build_win_msvc: name: MSVC C++17 w/o MPI runs-on: windows-latest - if: github.event.pull_request.draft == false + # disabled due to issues in #5230 + if: 0 + #if: github.event.pull_request.draft == false steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 diff --git a/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py b/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py index 80ce483f2c7..7148cde2d3e 100755 --- a/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py +++ b/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py @@ -94,7 +94,7 @@ face_areas_y = fields.FaceAreasyWrapper() face_areas_z = fields.FaceAreaszWrapper() -print("======== Testing the wrappers of m_edge_lengths =========") +print("======== Testing the wrappers of edge_lengths =========") ly_slice_x = edge_lengths_y[nx // 2, :, :] lz_slice_x = edge_lengths_z[nx // 2, :, :] @@ -159,7 +159,7 @@ print("Perimeter of the middle z-slice:", perimeter_slice_z) assert np.isclose(perimeter_slice_z, perimeter_slice_z_true, rtol=1e-05, atol=1e-08) -print("======== Testing the wrappers of m_face_areas =========") +print("======== Testing the wrappers of face_areas =========") Sx_slice = np.sum(face_areas_x[nx // 2, :, :]) dx = (xmax - xmin) / nx diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index cbdd8d4517a..0100f64f261 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -77,6 +77,9 @@ class _MultiFABWrapper(object): everytime it is called if this argument is given instead of directly providing the Multifab. + idir: int, optional + For MultiFab that is an element of a vector, the direction number, 0, 1, or 2. + level: int The refinement level @@ -86,9 +89,10 @@ class _MultiFABWrapper(object): ghost cells. 
""" - def __init__(self, mf=None, mf_name=None, level=0, include_ghosts=False): + def __init__(self, mf=None, mf_name=None, idir=None, level=0, include_ghosts=False): self._mf = mf self.mf_name = mf_name + self.idir = idir self.level = level self.include_ghosts = include_ghosts @@ -116,8 +120,11 @@ def mf(self): else: # Always fetch this anew in case the C++ MultiFab is recreated warpx = libwarpx.libwarpx_so.get_instance() - # All MultiFab names have the level suffix - return warpx.multifab(f"{self.mf_name}[level={self.level}]") + if self.idir is not None: + direction = libwarpx.libwarpx_so.Direction(self.idir) + return warpx.multifab(self.mf_name, direction, self.level) + else: + return warpx.multifab(self.mf_name, self.level) @property def shape(self): @@ -573,145 +580,145 @@ def norm0(self, *args): def ExWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_aux[x]", level=level, include_ghosts=include_ghosts + mf_name="Efield_aux", idir=0, level=level, include_ghosts=include_ghosts ) def EyWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_aux[y]", level=level, include_ghosts=include_ghosts + mf_name="Efield_aux", idir=1, level=level, include_ghosts=include_ghosts ) def EzWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_aux[z]", level=level, include_ghosts=include_ghosts + mf_name="Efield_aux", idir=2, level=level, include_ghosts=include_ghosts ) def BxWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_aux[x]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_aux", idir=0, level=level, include_ghosts=include_ghosts ) def ByWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_aux[y]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_aux", idir=1, level=level, include_ghosts=include_ghosts ) def BzWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_aux[z]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_aux", idir=2, level=level, include_ghosts=include_ghosts ) def JxWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="current_fp", idir=0, level=level, include_ghosts=include_ghosts ) def JyWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="current_fp", idir=1, level=level, include_ghosts=include_ghosts ) def JzWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="current_fp", idir=2, level=level, include_ghosts=include_ghosts ) def ExFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="Efield_fp", idir=0, level=level, include_ghosts=include_ghosts ) def EyFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="Efield_fp", idir=1, level=level, include_ghosts=include_ghosts ) def EzFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="Efield_fp", idir=2, level=level, include_ghosts=include_ghosts ) def BxFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_fp[x]", level=level, 
include_ghosts=include_ghosts + mf_name="Bfield_fp", idir=0, level=level, include_ghosts=include_ghosts ) def ByFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_fp", idir=1, level=level, include_ghosts=include_ghosts ) def BzFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_fp", idir=2, level=level, include_ghosts=include_ghosts ) def ExFPExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_fp_external[x]", level=level, include_ghosts=include_ghosts + mf_name="Efield_fp_external", idir=0, level=level, include_ghosts=include_ghosts ) def EyFPExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_fp_external[y]", level=level, include_ghosts=include_ghosts + mf_name="Efield_fp_external", idir=1, level=level, include_ghosts=include_ghosts ) def EzFPExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_fp_external[z]", level=level, include_ghosts=include_ghosts + mf_name="Efield_fp_external", idir=2, level=level, include_ghosts=include_ghosts ) def BxFPExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_fp_external[x]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_fp_external", idir=0, level=level, include_ghosts=include_ghosts ) def ByFPExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_fp_external[y]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_fp_external", idir=1, level=level, include_ghosts=include_ghosts ) def BzFPExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_fp_external[z]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_fp_external", idir=2, level=level, include_ghosts=include_ghosts ) def JxFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="current_fp", idir=0, level=level, include_ghosts=include_ghosts ) def JyFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="current_fp", idir=1, level=level, include_ghosts=include_ghosts ) def JzFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="current_fp", idir=2, level=level, include_ghosts=include_ghosts ) @@ -737,7 +744,8 @@ def GFPWrapper(level=0, include_ghosts=False): def AxFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="vector_potential_fp_nodal[x]", + mf_name="vector_potential_fp_nodal", + idir=0, level=level, include_ghosts=include_ghosts, ) @@ -745,7 +753,8 @@ def AxFPWrapper(level=0, include_ghosts=False): def AyFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="vector_potential_fp_nodal[y]", + mf_name="vector_potential_fp_nodal", + idir=1, level=level, include_ghosts=include_ghosts, ) @@ -753,7 +762,8 @@ def AyFPWrapper(level=0, include_ghosts=False): def AzFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="vector_potential_fp_nodal[z]", + mf_name="vector_potential_fp_nodal", + idir=2, level=level, include_ghosts=include_ghosts, ) @@ -761,55 +771,55 @@ def AzFPWrapper(level=0, 
include_ghosts=False): def ExCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_cp[x]", level=level, include_ghosts=include_ghosts + mf_name="Efield_cp", idir=0, level=level, include_ghosts=include_ghosts ) def EyCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_cp[y]", level=level, include_ghosts=include_ghosts + mf_name="Efield_cp", idir=1, level=level, include_ghosts=include_ghosts ) def EzCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_cp[z]", level=level, include_ghosts=include_ghosts + mf_name="Efield_cp", idir=2, level=level, include_ghosts=include_ghosts ) def BxCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_cp[x]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_cp", idir=0, level=level, include_ghosts=include_ghosts ) def ByCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_cp[y]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_cp", idir=1, level=level, include_ghosts=include_ghosts ) def BzCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_cp[z]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_cp", idir=2, level=level, include_ghosts=include_ghosts ) def JxCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_cp[x]", level=level, include_ghosts=include_ghosts + mf_name="current_cp", idir=0, level=level, include_ghosts=include_ghosts ) def JyCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_cp[y]", level=level, include_ghosts=include_ghosts + mf_name="current_cp", idir=1, level=level, include_ghosts=include_ghosts ) def JzCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_cp[z]", level=level, include_ghosts=include_ghosts + mf_name="current_cp", idir=2, level=level, include_ghosts=include_ghosts ) @@ -829,109 +839,118 @@ def GCPWrapper(level=0, include_ghosts=False): def EdgeLengthsxWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="m_edge_lengths[x]", level=level, include_ghosts=include_ghosts + mf_name="edge_lengths", idir=0, level=level, include_ghosts=include_ghosts ) def EdgeLengthsyWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="m_edge_lengths[y]", level=level, include_ghosts=include_ghosts + mf_name="edge_lengths", idir=1, level=level, include_ghosts=include_ghosts ) def EdgeLengthszWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="m_edge_lengths[z]", level=level, include_ghosts=include_ghosts + mf_name="edge_lengths", idir=2, level=level, include_ghosts=include_ghosts ) def FaceAreasxWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="m_face_areas[x]", level=level, include_ghosts=include_ghosts + mf_name="face_areas", idir=0, level=level, include_ghosts=include_ghosts ) def FaceAreasyWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="m_face_areas[y]", level=level, include_ghosts=include_ghosts + mf_name="face_areas", idir=1, level=level, include_ghosts=include_ghosts ) def FaceAreaszWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="m_face_areas[z]", level=level, include_ghosts=include_ghosts + mf_name="face_areas", idir=2, level=level, include_ghosts=include_ghosts ) def JxFPAmpereWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - 
mf_name="current_fp_ampere[x]", level=level, include_ghosts=include_ghosts + mf_name="hybrid_current_fp_ampere", + idir=0, + level=level, + include_ghosts=include_ghosts, ) def JyFPAmpereWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp_ampere[y]", level=level, include_ghosts=include_ghosts + mf_name="hybrid_current_fp_ampere", + idir=1, + level=level, + include_ghosts=include_ghosts, ) def JzFPAmpereWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp_ampere[z]", level=level, include_ghosts=include_ghosts + mf_name="hybrid_current_fp_ampere", + idir=2, + level=level, + include_ghosts=include_ghosts, ) def ExFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_E_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="pml_E_fp", idir=0, level=level, include_ghosts=include_ghosts ) def EyFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_E_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="pml_E_fp", idir=1, level=level, include_ghosts=include_ghosts ) def EzFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_E_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="pml_E_fp", idir=2, level=level, include_ghosts=include_ghosts ) def BxFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_B_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="pml_B_fp", idir=0, level=level, include_ghosts=include_ghosts ) def ByFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_B_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="pml_B_fp", idir=1, level=level, include_ghosts=include_ghosts ) def BzFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_B_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="pml_B_fp", idir=2, level=level, include_ghosts=include_ghosts ) def JxFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_j_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="pml_j_fp", idir=0, level=level, include_ghosts=include_ghosts ) def JyFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_j_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="pml_j_fp", idir=1, level=level, include_ghosts=include_ghosts ) def JzFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_j_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="pml_j_fp", idir=2, level=level, include_ghosts=include_ghosts ) @@ -949,55 +968,55 @@ def GFPPMLWrapper(level=0, include_ghosts=False): def ExCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_E_cp[x]", level=level, include_ghosts=include_ghosts + mf_name="pml_E_cp", idir=0, level=level, include_ghosts=include_ghosts ) def EyCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_E_cp[y]", level=level, include_ghosts=include_ghosts + mf_name="pml_E_cp", idir=1, level=level, include_ghosts=include_ghosts ) def EzCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_E_cp[z]", level=level, include_ghosts=include_ghosts + mf_name="pml_E_cp", idir=2, level=level, include_ghosts=include_ghosts ) def BxCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_B_cp[x]", level=level, include_ghosts=include_ghosts + mf_name="pml_B_cp", idir=0, 
level=level, include_ghosts=include_ghosts ) def ByCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_B_cp[y]", level=level, include_ghosts=include_ghosts + mf_name="pml_B_cp", idir=1, level=level, include_ghosts=include_ghosts ) def BzCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_B_cp[z]", level=level, include_ghosts=include_ghosts + mf_name="pml_B_cp", idir=2, level=level, include_ghosts=include_ghosts ) def JxCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_j_cp[x]", level=level, include_ghosts=include_ghosts + mf_name="pml_j_cp", idir=0, level=level, include_ghosts=include_ghosts ) def JyCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_j_cp[y]", level=level, include_ghosts=include_ghosts + mf_name="pml_j_cp", idir=1, level=level, include_ghosts=include_ghosts ) def JzCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_j_cp[z]", level=level, include_ghosts=include_ghosts + mf_name="pml_j_cp", idir=2, level=level, include_ghosts=include_ghosts ) diff --git a/Python/pywarpx/particle_containers.py b/Python/pywarpx/particle_containers.py index 8af012f5e7b..bc6b2d74106 100644 --- a/Python/pywarpx/particle_containers.py +++ b/Python/pywarpx/particle_containers.py @@ -733,7 +733,7 @@ def deposit_charge_density(self, level, clear_rho=True, sync_rho=True): sync_rho : bool If True, perform MPI exchange and properly set boundary cells for rho_fp. """ - rho_fp = libwarpx.warpx.multifab(f"rho_fp[level={level}]") + rho_fp = libwarpx.warpx.multifab("rho_fp", level) if rho_fp is None: raise RuntimeError("Multifab `rho_fp` is not allocated.") diff --git a/Source/BoundaryConditions/PML.H b/Source/BoundaryConditions/PML.H index 203c109f026..9e7dbc0034c 100644 --- a/Source/BoundaryConditions/PML.H +++ b/Source/BoundaryConditions/PML.H @@ -17,6 +17,8 @@ # include "FieldSolver/SpectralSolver/SpectralSolver.H" #endif +#include + #include #include #include @@ -155,23 +157,6 @@ public: void ComputePMLFactors (amrex::Real dt); - std::array GetE_fp (); - std::array GetB_fp (); - std::array Getj_fp (); - std::array GetE_cp (); - std::array GetB_cp (); - std::array Getj_cp (); - std::array Get_edge_lengths (); - std::array Get_face_areas (); - - // Used when WarpX::do_pml_dive_cleaning = true - amrex::MultiFab* GetF_fp (); - amrex::MultiFab* GetF_cp (); - - // Used when WarpX::do_pml_divb_cleaning = true - amrex::MultiFab* GetG_fp (); - amrex::MultiFab* GetG_cp (); - [[nodiscard]] const MultiSigmaBox& GetMultiSigmaBox_fp () const { return *sigba_fp; @@ -183,35 +168,33 @@ public: } #ifdef WARPX_USE_FFT - void PushPSATD (int lev); + void PushPSATD (ablastr::fields::MultiFabRegister& fields, int lev); #endif - void CopyJtoPMLs (const std::array& j_fp, - const std::array& j_cp); + void CopyJtoPMLs (ablastr::fields::MultiFabRegister& fields, int lev); - void Exchange (const std::array& mf_pml, - const std::array& mf, + void Exchange (ablastr::fields::VectorField mf_pml, + ablastr::fields::VectorField mf, + const PatchType& patch_type, + int do_pml_in_domain); + void Exchange (amrex::MultiFab* mf_pml, + amrex::MultiFab* mf, const PatchType& patch_type, int do_pml_in_domain); - void CopyJtoPMLs (PatchType patch_type, - const std::array& jp); - - void ExchangeF (amrex::MultiFab* F_fp, amrex::MultiFab* F_cp, int do_pml_in_domain); - void ExchangeF (PatchType patch_type, amrex::MultiFab* Fp, int do_pml_in_domain); - - void ExchangeG (amrex::MultiFab* 
G_fp, amrex::MultiFab* G_cp, int do_pml_in_domain); - void ExchangeG (PatchType patch_type, amrex::MultiFab* Gp, int do_pml_in_domain); + void CopyJtoPMLs ( + ablastr::fields::MultiFabRegister& fields, + PatchType patch_type, + int lev + ); - void FillBoundaryE (PatchType patch_type, std::optional nodal_sync=std::nullopt); - void FillBoundaryB (PatchType patch_type, std::optional nodal_sync=std::nullopt); - void FillBoundaryF (PatchType patch_type, std::optional nodal_sync=std::nullopt); - void FillBoundaryG (PatchType patch_type, std::optional nodal_sync=std::nullopt); + void FillBoundary (ablastr::fields::VectorField mf_pml, PatchType patch_type, std::optional nodal_sync=std::nullopt); + void FillBoundary (amrex::MultiFab & mf_pml, PatchType patch_type, std::optional nodal_sync=std::nullopt); [[nodiscard]] bool ok () const { return m_ok; } - void CheckPoint (const std::string& dir) const; - void Restart (const std::string& dir); + void CheckPoint (ablastr::fields::MultiFabRegister& fields, const std::string& dir) const; + void Restart (ablastr::fields::MultiFabRegister& fields, const std::string& dir); static void Exchange (amrex::MultiFab& pml, amrex::MultiFab& reg, const amrex::Geometry& geom, int do_pml_in_domain); @@ -227,24 +210,6 @@ private: const amrex::Geometry* m_geom; const amrex::Geometry* m_cgeom; - std::array,3> pml_E_fp; - std::array,3> pml_B_fp; - std::array,3> pml_j_fp; - - std::array,3> pml_edge_lengths; - - std::array,3> pml_E_cp; - std::array,3> pml_B_cp; - std::array,3> pml_j_cp; - - // Used when WarpX::do_pml_dive_cleaning = true - std::unique_ptr pml_F_fp; - std::unique_ptr pml_F_cp; - - // Used when WarpX::do_pml_divb_cleaning = true - std::unique_ptr pml_G_fp; - std::unique_ptr pml_G_cp; - std::unique_ptr sigba_fp; std::unique_ptr sigba_cp; @@ -293,13 +258,15 @@ private: }; #ifdef WARPX_USE_FFT -void PushPMLPSATDSinglePatch( int lev, +void PushPMLPSATDSinglePatch ( + int lev, SpectralSolver& solver, - std::array,3>& pml_E, - std::array,3>& pml_B, - std::unique_ptr& pml_F, - std::unique_ptr& pml_G, - const amrex::IntVect& fill_guards); + ablastr::fields::VectorField& pml_E, + ablastr::fields::VectorField& pml_B, + ablastr::fields::ScalarField pml_F, + ablastr::fields::ScalarField pml_G, + const amrex::IntVect& fill_guards +); #endif #endif diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index a66dcb5c0bb..91d821d6646 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -10,7 +10,7 @@ #include "BoundaryConditions/PML.H" #include "BoundaryConditions/PMLComponent.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #ifdef WARPX_USE_FFT # include "FieldSolver/SpectralSolver/SpectralFieldData.H" #endif @@ -57,7 +57,7 @@ #endif using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -571,6 +571,8 @@ PML::PML (const int lev, const BoxArray& grid_ba, WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!eb_enabled, "PML: eb_enabled is true but was not compiled in."); #endif + using ablastr::fields::Direction; + // When `do_pml_in_domain` is true, the PML overlap with the last `ncell` of the physical domain or fine patch(es) // (instead of extending `ncell` outside of the physical domain or fine patch(es)) // In order to implement this, we define a new reduced Box Array ensuring that it does not @@ -698,33 +700,36 @@ PML::PML (const int lev, const BoxArray& grid_ba, const int ncompe = (m_dive_cleaning) ? 3 : 2; const int ncompb = (m_divb_cleaning) ? 
3 : 2; - const amrex::BoxArray ba_Ex = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::Efield_fp, 0,0).ixType().toIntVect()); - const amrex::BoxArray ba_Ey = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::Efield_fp, 0,1).ixType().toIntVect()); - const amrex::BoxArray ba_Ez = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::Efield_fp, 0,2).ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_E_fp[0], ba_Ex, dm, ncompe, nge, lev, "pml_E_fp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_E_fp[1], ba_Ey, dm, ncompe, nge, lev, "pml_E_fp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_E_fp[2], ba_Ez, dm, ncompe, nge, lev, "pml_E_fp[z]", 0.0_rt); - - const amrex::BoxArray ba_Bx = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::Bfield_fp, 0,0).ixType().toIntVect()); - const amrex::BoxArray ba_By = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::Bfield_fp, 0,1).ixType().toIntVect()); - const amrex::BoxArray ba_Bz = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::Bfield_fp, 0,2).ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_B_fp[0], ba_Bx, dm, ncompb, ngb, lev, "pml_B_fp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_B_fp[1], ba_By, dm, ncompb, ngb, lev, "pml_B_fp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_B_fp[2], ba_Bz, dm, ncompb, ngb, lev, "pml_B_fp[z]", 0.0_rt); - - const amrex::BoxArray ba_jx = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::current_fp, 0,0).ixType().toIntVect()); - const amrex::BoxArray ba_jy = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::current_fp, 0,1).ixType().toIntVect()); - const amrex::BoxArray ba_jz = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::current_fp, 0,2).ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_j_fp[0], ba_jx, dm, 1, ngb, lev, "pml_j_fp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_j_fp[1], ba_jy, dm, 1, ngb, lev, "pml_j_fp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_j_fp[2], ba_jz, dm, 1, ngb, lev, "pml_j_fp[z]", 0.0_rt); + auto& warpx = WarpX::GetInstance(); + using ablastr::fields::Direction; + + const amrex::BoxArray ba_Ex = amrex::convert(ba, warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, 0)->ixType().toIntVect()); + const amrex::BoxArray ba_Ey = amrex::convert(ba, warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, 0)->ixType().toIntVect()); + const amrex::BoxArray ba_Ez = amrex::convert(ba, warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, 0)->ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_E_fp, Direction{0}, lev, ba_Ex, dm, ncompe, nge, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_E_fp, Direction{1}, lev, ba_Ey, dm, ncompe, nge, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_E_fp, Direction{2}, lev, ba_Ez, dm, ncompe, nge, 0.0_rt, false, false); + + const amrex::BoxArray ba_Bx = amrex::convert(ba, warpx.m_fields.get(FieldType::Bfield_fp, Direction{0}, 0)->ixType().toIntVect()); + const amrex::BoxArray ba_By = amrex::convert(ba, warpx.m_fields.get(FieldType::Bfield_fp, Direction{1}, 0)->ixType().toIntVect()); + const amrex::BoxArray ba_Bz = amrex::convert(ba, warpx.m_fields.get(FieldType::Bfield_fp, Direction{2}, 0)->ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_B_fp, Direction{0}, lev, ba_Bx, dm, ncompb, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_B_fp, Direction{1}, lev, ba_By, dm, ncompb, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_B_fp, Direction{2}, lev, ba_Bz, dm, ncompb, 
ngb, 0.0_rt, false, false); + + const amrex::BoxArray ba_jx = amrex::convert(ba, WarpX::GetInstance().m_fields.get(FieldType::current_fp, Direction{0}, 0)->ixType().toIntVect()); + const amrex::BoxArray ba_jy = amrex::convert(ba, WarpX::GetInstance().m_fields.get(FieldType::current_fp, Direction{1}, 0)->ixType().toIntVect()); + const amrex::BoxArray ba_jz = amrex::convert(ba, WarpX::GetInstance().m_fields.get(FieldType::current_fp, Direction{2}, 0)->ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_j_fp, Direction{0}, lev, ba_jx, dm, 1, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_j_fp, Direction{1}, lev, ba_jy, dm, 1, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_j_fp, Direction{2}, lev, ba_jz, dm, 1, ngb, 0.0_rt, false, false); #ifdef AMREX_USE_EB if (eb_enabled) { const amrex::IntVect max_guard_EB_vect = amrex::IntVect(max_guard_EB); - WarpX::AllocInitMultiFab(pml_edge_lengths[0], ba_Ex, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_edge_lengths[1], ba_Ey, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_edge_lengths[2], ba_Ez, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[z]", 0.0_rt); + warpx.m_fields.alloc_init(FieldType::pml_edge_lengths, Direction{0}, lev, ba_Ex, dm, WarpX::ncomps, max_guard_EB_vect, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_edge_lengths, Direction{1}, lev, ba_Ey, dm, WarpX::ncomps, max_guard_EB_vect, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_edge_lengths, Direction{2}, lev, ba_Ez, dm, WarpX::ncomps, max_guard_EB_vect, 0.0_rt, false, false); if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee || WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC || @@ -732,8 +737,9 @@ PML::PML (const int lev, const BoxArray& grid_ba, auto const eb_fact = fieldEBFactory(); - WarpX::ComputeEdgeLengths(pml_edge_lengths, eb_fact); - WarpX::ScaleEdges(pml_edge_lengths, WarpX::CellSize(lev)); + ablastr::fields::VectorField t_pml_edge_lengths = warpx.m_fields.get_alldirs(FieldType::pml_edge_lengths, lev); + WarpX::ComputeEdgeLengths(t_pml_edge_lengths, eb_fact); + WarpX::ScaleEdges(t_pml_edge_lengths, WarpX::CellSize(lev)); } } @@ -743,7 +749,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, if (m_dive_cleaning) { const amrex::BoxArray ba_F_nodal = amrex::convert(ba, amrex::IntVect::TheNodeVector()); - WarpX::AllocInitMultiFab(pml_F_fp, ba_F_nodal, dm, 3, ngf, lev, "pml_F_fp", 0.0_rt); + warpx.m_fields.alloc_init(FieldType::pml_F_fp, lev, ba_F_nodal, dm, 3, ngf, 0.0_rt, false, false); } if (m_divb_cleaning) @@ -753,7 +759,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, (grid_type == GridType::Collocated) ? amrex::IntVect::TheNodeVector() : amrex::IntVect::TheCellVector(); const amrex::BoxArray ba_G_nodal = amrex::convert(ba, G_nodal_flag); - WarpX::AllocInitMultiFab(pml_G_fp, ba_G_nodal, dm, 3, ngf, lev, "pml_G_fp", 0.0_rt); + warpx.m_fields.alloc_init(FieldType::pml_G_fp, lev, ba_G_nodal, dm, 3, ngf, 0.0_rt, false, false); } Box single_domain_box = is_single_box_domain ? 
domain0 : Box(); @@ -835,24 +841,24 @@ PML::PML (const int lev, const BoxArray& grid_ba, cdm.define(cba); } - const amrex::BoxArray cba_Ex = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::Efield_cp, 1,0).ixType().toIntVect()); - const amrex::BoxArray cba_Ey = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::Efield_cp, 1,1).ixType().toIntVect()); - const amrex::BoxArray cba_Ez = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::Efield_cp, 1,2).ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_E_cp[0], cba_Ex, cdm, ncompe, nge, lev, "pml_E_cp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_E_cp[1], cba_Ey, cdm, ncompe, nge, lev, "pml_E_cp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_E_cp[2], cba_Ez, cdm, ncompe, nge, lev, "pml_E_cp[z]", 0.0_rt); + const amrex::BoxArray cba_Ex = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::Efield_cp, Direction{0}, 1)->ixType().toIntVect()); + const amrex::BoxArray cba_Ey = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::Efield_cp, Direction{1}, 1)->ixType().toIntVect()); + const amrex::BoxArray cba_Ez = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::Efield_cp, Direction{2}, 1)->ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_E_cp, Direction{0}, lev, cba_Ex, cdm, ncompe, nge, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_E_cp, Direction{1}, lev, cba_Ey, cdm, ncompe, nge, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_E_cp, Direction{2}, lev, cba_Ez, cdm, ncompe, nge, 0.0_rt, false, false); - const amrex::BoxArray cba_Bx = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::Bfield_cp, 1,0).ixType().toIntVect()); - const amrex::BoxArray cba_By = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::Bfield_cp, 1,1).ixType().toIntVect()); - const amrex::BoxArray cba_Bz = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::Bfield_cp, 1,2).ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_B_cp[0], cba_Bx, cdm, ncompb, ngb, lev, "pml_B_cp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_B_cp[1], cba_By, cdm, ncompb, ngb, lev, "pml_B_cp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_B_cp[2], cba_Bz, cdm, ncompb, ngb, lev, "pml_B_cp[z]", 0.0_rt); + const amrex::BoxArray cba_Bx = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::Bfield_cp, Direction{0}, 1)->ixType().toIntVect()); + const amrex::BoxArray cba_By = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::Bfield_cp, Direction{1}, 1)->ixType().toIntVect()); + const amrex::BoxArray cba_Bz = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::Bfield_cp, Direction{2}, 1)->ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_B_cp, Direction{0}, lev, cba_Bx, cdm, ncompb, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_B_cp, Direction{1}, lev, cba_By, cdm, ncompb, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_B_cp, Direction{2}, lev, cba_Bz, cdm, ncompb, ngb, 0.0_rt, false, false); if (m_dive_cleaning) { const amrex::BoxArray cba_F_nodal = amrex::convert(cba, amrex::IntVect::TheNodeVector()); - WarpX::AllocInitMultiFab(pml_F_cp, cba_F_nodal, cdm, 3, ngf, lev, "pml_F_cp", 0.0_rt); + warpx.m_fields.alloc_init(FieldType::pml_F_cp, lev, cba_F_nodal, cdm, 3, ngf, 0.0_rt, false, false); } if (m_divb_cleaning) @@ -862,15 +868,15 @@ PML::PML (const int lev, const BoxArray& grid_ba, (grid_type == GridType::Collocated) ? 
amrex::IntVect::TheNodeVector() : amrex::IntVect::TheCellVector(); const amrex::BoxArray cba_G_nodal = amrex::convert(cba, G_nodal_flag); - WarpX::AllocInitMultiFab( pml_G_cp, cba_G_nodal, cdm, 3, ngf, lev, "pml_G_cp", 0.0_rt); + warpx.m_fields.alloc_init(FieldType::pml_G_cp, lev, cba_G_nodal, cdm, 3, ngf, 0.0_rt, false, false); } - const amrex::BoxArray cba_jx = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::current_cp, 1,0).ixType().toIntVect()); - const amrex::BoxArray cba_jy = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::current_cp, 1,1).ixType().toIntVect()); - const amrex::BoxArray cba_jz = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::current_cp, 1,2).ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_j_cp[0], cba_jx, cdm, 1, ngb, lev, "pml_j_cp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_j_cp[1], cba_jy, cdm, 1, ngb, lev, "pml_j_cp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_j_cp[2], cba_jz, cdm, 1, ngb, lev, "pml_j_cp[z]", 0.0_rt); + const amrex::BoxArray cba_jx = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::current_cp, Direction{0}, 1)->ixType().toIntVect()); + const amrex::BoxArray cba_jy = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::current_cp, Direction{1}, 1)->ixType().toIntVect()); + const amrex::BoxArray cba_jz = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::current_cp, Direction{2}, 1)->ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_j_cp, Direction{0}, lev, cba_jx, cdm, 1, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_j_cp, Direction{1}, lev, cba_jy, cdm, 1, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_j_cp, Direction{2}, lev, cba_jz, cdm, 1, ngb, 0.0_rt, false, false); single_domain_box = is_single_box_domain ? 
cdomain : Box(); sigba_cp = std::make_unique(cba, cdm, grid_cba_reduced, cgeom->CellSize(), @@ -1045,96 +1051,32 @@ PML::ComputePMLFactors (amrex::Real dt) } } -std::array -PML::GetE_fp () -{ - return {pml_E_fp[0].get(), pml_E_fp[1].get(), pml_E_fp[2].get()}; -} - -std::array -PML::GetB_fp () -{ - return {pml_B_fp[0].get(), pml_B_fp[1].get(), pml_B_fp[2].get()}; -} - -std::array -PML::Getj_fp () -{ - return {pml_j_fp[0].get(), pml_j_fp[1].get(), pml_j_fp[2].get()}; -} - -std::array -PML::GetE_cp () -{ - return {pml_E_cp[0].get(), pml_E_cp[1].get(), pml_E_cp[2].get()}; -} - -std::array -PML::GetB_cp () -{ - return {pml_B_cp[0].get(), pml_B_cp[1].get(), pml_B_cp[2].get()}; -} - -std::array -PML::Getj_cp () -{ - return {pml_j_cp[0].get(), pml_j_cp[1].get(), pml_j_cp[2].get()}; -} - -std::array -PML::Get_edge_lengths() -{ - return {pml_edge_lengths[0].get(), pml_edge_lengths[1].get(), pml_edge_lengths[2].get()}; -} - - -MultiFab* -PML::GetF_fp () -{ - return pml_F_fp.get(); -} - -MultiFab* -PML::GetF_cp () -{ - return pml_F_cp.get(); -} - -MultiFab* -PML::GetG_fp () +void +PML::CopyJtoPMLs ( + ablastr::fields::MultiFabRegister& fields, + PatchType patch_type, + int lev +) { - return pml_G_fp.get(); -} + using ablastr::fields::Direction; -MultiFab* -PML::GetG_cp () -{ - return pml_G_cp.get(); -} + bool const has_j_fp = fields.has_vector(FieldType::current_fp, lev); + bool const has_pml_j_fp = fields.has_vector(FieldType::pml_j_fp, lev); + bool const has_j_cp = fields.has_vector(FieldType::current_cp, lev); + bool const has_pml_j_cp = fields.has_vector(FieldType::pml_j_cp, lev); -void PML::Exchange (const std::array& mf_pml, - const std::array& mf, - const PatchType& patch_type, - const int do_pml_in_domain) -{ - const amrex::Geometry& geom = (patch_type == PatchType::fine) ? 
*m_geom : *m_cgeom; - if (mf_pml[0] && mf[0]) { Exchange(*mf_pml[0], *mf[0], geom, do_pml_in_domain); } - if (mf_pml[1] && mf[1]) { Exchange(*mf_pml[1], *mf[1], geom, do_pml_in_domain); } - if (mf_pml[2] && mf[2]) { Exchange(*mf_pml[2], *mf[2], geom, do_pml_in_domain); } -} - -void -PML::CopyJtoPMLs (PatchType patch_type, - const std::array& jp) -{ - if (patch_type == PatchType::fine && pml_j_fp[0] && jp[0]) + if (patch_type == PatchType::fine && has_pml_j_fp && has_j_fp) { + ablastr::fields::VectorField pml_j_fp = fields.get_alldirs(FieldType::pml_j_fp, lev); + ablastr::fields::VectorField jp = fields.get_alldirs(FieldType::current_fp, lev); CopyToPML(*pml_j_fp[0], *jp[0], *m_geom); CopyToPML(*pml_j_fp[1], *jp[1], *m_geom); CopyToPML(*pml_j_fp[2], *jp[2], *m_geom); } - else if (patch_type == PatchType::coarse && pml_j_cp[0] && jp[0]) + else if (patch_type == PatchType::coarse && has_j_cp && has_pml_j_cp) { + ablastr::fields::VectorField pml_j_cp = fields.get_alldirs(FieldType::pml_j_cp, lev); + ablastr::fields::VectorField jp = fields.get_alldirs(FieldType::current_cp, lev); CopyToPML(*pml_j_cp[0], *jp[0], *m_cgeom); CopyToPML(*pml_j_cp[1], *jp[1], *m_cgeom); CopyToPML(*pml_j_cp[2], *jp[2], *m_cgeom); @@ -1142,46 +1084,33 @@ PML::CopyJtoPMLs (PatchType patch_type, } void -PML::CopyJtoPMLs (const std::array& j_fp, - const std::array& j_cp) +PML::CopyJtoPMLs ( + ablastr::fields::MultiFabRegister& fields, + int lev +) { - CopyJtoPMLs(PatchType::fine, j_fp); - CopyJtoPMLs(PatchType::coarse, j_cp); + CopyJtoPMLs(fields, PatchType::fine, lev); + CopyJtoPMLs(fields, PatchType::coarse, lev); } -void -PML::ExchangeF (amrex::MultiFab* F_fp, amrex::MultiFab* F_cp, int do_pml_in_domain) -{ - ExchangeF(PatchType::fine, F_fp, do_pml_in_domain); - ExchangeF(PatchType::coarse, F_cp, do_pml_in_domain); -} - -void -PML::ExchangeF (PatchType patch_type, amrex::MultiFab* Fp, int do_pml_in_domain) -{ - if (patch_type == PatchType::fine && pml_F_fp && Fp) { - Exchange(*pml_F_fp, *Fp, *m_geom, do_pml_in_domain); - } else if (patch_type == PatchType::coarse && pml_F_cp && Fp) { - Exchange(*pml_F_cp, *Fp, *m_cgeom, do_pml_in_domain); - } -} - -void PML::ExchangeG (amrex::MultiFab* G_fp, amrex::MultiFab* G_cp, int do_pml_in_domain) +void PML::Exchange (ablastr::fields::VectorField mf_pml, + ablastr::fields::VectorField mf, + const PatchType& patch_type, + const int do_pml_in_domain) { - ExchangeG(PatchType::fine, G_fp, do_pml_in_domain); - ExchangeG(PatchType::coarse, G_cp, do_pml_in_domain); + const amrex::Geometry& geom = (patch_type == PatchType::fine) ? *m_geom : *m_cgeom; + if (mf_pml[0] && mf[0]) { Exchange(*mf_pml[0], *mf[0], geom, do_pml_in_domain); } + if (mf_pml[1] && mf[1]) { Exchange(*mf_pml[1], *mf[1], geom, do_pml_in_domain); } + if (mf_pml[2] && mf[2]) { Exchange(*mf_pml[2], *mf[2], geom, do_pml_in_domain); } } -void PML::ExchangeG (PatchType patch_type, amrex::MultiFab* Gp, int do_pml_in_domain) +void PML::Exchange (amrex::MultiFab* mf_pml, + amrex::MultiFab* mf, + const PatchType& patch_type, + const int do_pml_in_domain) { - if (patch_type == PatchType::fine && pml_G_fp && Gp) - { - Exchange(*pml_G_fp, *Gp, *m_geom, do_pml_in_domain); - } - else if (patch_type == PatchType::coarse && pml_G_cp && Gp) - { - Exchange(*pml_G_cp, *Gp, *m_cgeom, do_pml_in_domain); - } + const amrex::Geometry& geom = (patch_type == PatchType::fine) ? 
*m_geom : *m_cgeom; + if (mf_pml && mf) { Exchange(*mf_pml, *mf, geom, do_pml_in_domain); } } void @@ -1275,74 +1204,40 @@ PML::CopyToPML (MultiFab& pml, MultiFab& reg, const Geometry& geom) } void -PML::FillBoundaryE (PatchType patch_type, std::optional nodal_sync) +PML::FillBoundary (ablastr::fields::VectorField mf_pml, PatchType patch_type, std::optional nodal_sync) { - if (patch_type == PatchType::fine && pml_E_fp[0] && pml_E_fp[0]->nGrowVect().max() > 0) - { - const auto& period = m_geom->periodicity(); - const Vector mf{pml_E_fp[0].get(),pml_E_fp[1].get(),pml_E_fp[2].get()}; - ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); - } - else if (patch_type == PatchType::coarse && pml_E_cp[0] && pml_E_cp[0]->nGrowVect().max() > 0) - { - const auto& period = m_cgeom->periodicity(); - const Vector mf{pml_E_cp[0].get(),pml_E_cp[1].get(),pml_E_cp[2].get()}; - ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); - } -} + const auto& period = + (patch_type == PatchType::fine) ? + m_geom->periodicity() : + m_cgeom->periodicity(); -void -PML::FillBoundaryB (PatchType patch_type, std::optional nodal_sync) -{ - if (patch_type == PatchType::fine && pml_B_fp[0]) - { - const auto& period = m_geom->periodicity(); - const Vector mf{pml_B_fp[0].get(),pml_B_fp[1].get(),pml_B_fp[2].get()}; - ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); - } - else if (patch_type == PatchType::coarse && pml_B_cp[0]) - { - const auto& period = m_cgeom->periodicity(); - const Vector mf{pml_B_cp[0].get(),pml_B_cp[1].get(),pml_B_cp[2].get()}; - ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); - } + const Vector mf{mf_pml[0], mf_pml[1], mf_pml[2]}; + ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); } void -PML::FillBoundaryF (PatchType patch_type, std::optional nodal_sync) +PML::FillBoundary (amrex::MultiFab & mf_pml, PatchType patch_type, std::optional nodal_sync) { - if (patch_type == PatchType::fine && pml_F_fp && pml_F_fp->nGrowVect().max() > 0) - { - const auto& period = m_geom->periodicity(); - ablastr::utils::communication::FillBoundary(*pml_F_fp, WarpX::do_single_precision_comms, period, nodal_sync); - } - else if (patch_type == PatchType::coarse && pml_F_cp && pml_F_cp->nGrowVect().max() > 0) - { - const auto& period = m_cgeom->periodicity(); - ablastr::utils::communication::FillBoundary(*pml_F_cp, WarpX::do_single_precision_comms, period, nodal_sync); - } -} + const auto& period = + (patch_type == PatchType::fine) ? 
+ m_geom->periodicity() : + m_cgeom->periodicity(); -void -PML::FillBoundaryG (PatchType patch_type, std::optional nodal_sync) -{ - if (patch_type == PatchType::fine && pml_G_fp && pml_G_fp->nGrowVect().max() > 0) - { - const auto& period = m_geom->periodicity(); - ablastr::utils::communication::FillBoundary(*pml_G_fp, WarpX::do_single_precision_comms, period, nodal_sync); - } - else if (patch_type == PatchType::coarse && pml_G_cp && pml_G_cp->nGrowVect().max() > 0) - { - const auto& period = m_cgeom->periodicity(); - ablastr::utils::communication::FillBoundary(*pml_G_cp, WarpX::do_single_precision_comms, period, nodal_sync); - } + ablastr::utils::communication::FillBoundary(mf_pml, WarpX::do_single_precision_comms, period, nodal_sync); } void -PML::CheckPoint (const std::string& dir) const +PML::CheckPoint ( + ablastr::fields::MultiFabRegister& fields, + const std::string& dir +) const { - if (pml_E_fp[0]) + using ablastr::fields::Direction; + + if (fields.has(FieldType::pml_E_fp, Direction{0}, 0)) { + ablastr::fields::VectorField pml_E_fp = fields.get_alldirs(FieldType::pml_E_fp, 0); + ablastr::fields::VectorField pml_B_fp = fields.get_alldirs(FieldType::pml_B_fp, 0); VisMF::AsyncWrite(*pml_E_fp[0], dir+"_Ex_fp"); VisMF::AsyncWrite(*pml_E_fp[1], dir+"_Ey_fp"); VisMF::AsyncWrite(*pml_E_fp[2], dir+"_Ez_fp"); @@ -1351,8 +1246,10 @@ PML::CheckPoint (const std::string& dir) const VisMF::AsyncWrite(*pml_B_fp[2], dir+"_Bz_fp"); } - if (pml_E_cp[0]) + if (fields.has(FieldType::pml_E_cp, Direction{0}, 0)) { + ablastr::fields::VectorField pml_E_cp = fields.get_alldirs(FieldType::pml_E_cp, 0); + ablastr::fields::VectorField pml_B_cp = fields.get_alldirs(FieldType::pml_B_cp, 0); VisMF::AsyncWrite(*pml_E_cp[0], dir+"_Ex_cp"); VisMF::AsyncWrite(*pml_E_cp[1], dir+"_Ey_cp"); VisMF::AsyncWrite(*pml_E_cp[2], dir+"_Ez_cp"); @@ -1363,10 +1260,17 @@ PML::CheckPoint (const std::string& dir) const } void -PML::Restart (const std::string& dir) +PML::Restart ( + ablastr::fields::MultiFabRegister& fields, + const std::string& dir +) { - if (pml_E_fp[0]) + using ablastr::fields::Direction; + + if (fields.has(FieldType::pml_E_fp, Direction{0}, 0)) { + ablastr::fields::VectorField pml_E_fp = fields.get_alldirs(FieldType::pml_E_fp, 0); + ablastr::fields::VectorField pml_B_fp = fields.get_alldirs(FieldType::pml_B_fp, 0); VisMF::Read(*pml_E_fp[0], dir+"_Ex_fp"); VisMF::Read(*pml_E_fp[1], dir+"_Ey_fp"); VisMF::Read(*pml_E_fp[2], dir+"_Ez_fp"); @@ -1375,8 +1279,10 @@ PML::Restart (const std::string& dir) VisMF::Read(*pml_B_fp[2], dir+"_Bz_fp"); } - if (pml_E_cp[0]) + if (fields.has(FieldType::pml_E_cp, Direction{0}, 0)) { + ablastr::fields::VectorField pml_E_cp = fields.get_alldirs(FieldType::pml_E_cp, 0); + ablastr::fields::VectorField pml_B_cp = fields.get_alldirs(FieldType::pml_B_cp, 0); VisMF::Read(*pml_E_cp[0], dir+"_Ex_cp"); VisMF::Read(*pml_E_cp[1], dir+"_Ey_cp"); VisMF::Read(*pml_E_cp[2], dir+"_Ez_cp"); @@ -1388,11 +1294,20 @@ PML::Restart (const std::string& dir) #ifdef WARPX_USE_FFT void -PML::PushPSATD (const int lev) { +PML::PushPSATD (ablastr::fields::MultiFabRegister& fields, const int lev) +{ + ablastr::fields::VectorField pml_E_fp = fields.get_alldirs(FieldType::pml_E_fp, lev); + ablastr::fields::VectorField pml_B_fp = fields.get_alldirs(FieldType::pml_B_fp, lev); + ablastr::fields::ScalarField pml_F_fp = fields.get(FieldType::pml_F_fp, lev); + ablastr::fields::ScalarField pml_G_fp = fields.get(FieldType::pml_G_fp, lev); // Update the fields on the fine and coarse patch PushPMLPSATDSinglePatch(lev, 
*spectral_solver_fp, pml_E_fp, pml_B_fp, pml_F_fp, pml_G_fp, m_fill_guards_fields); if (spectral_solver_cp) { + ablastr::fields::VectorField pml_E_cp = fields.get_alldirs(FieldType::pml_E_cp, lev); + ablastr::fields::VectorField pml_B_cp = fields.get_alldirs(FieldType::pml_B_cp, lev); + ablastr::fields::ScalarField pml_F_cp = fields.get(FieldType::pml_F_cp, lev); + ablastr::fields::ScalarField pml_G_cp = fields.get(FieldType::pml_G_cp, lev); PushPMLPSATDSinglePatch(lev, *spectral_solver_cp, pml_E_cp, pml_B_cp, pml_F_cp, pml_G_cp, m_fill_guards_fields); } } @@ -1401,10 +1316,10 @@ void PushPMLPSATDSinglePatch ( const int lev, SpectralSolver& solver, - std::array,3>& pml_E, - std::array,3>& pml_B, - std::unique_ptr& pml_F, - std::unique_ptr& pml_G, + ablastr::fields::VectorField& pml_E, + ablastr::fields::VectorField& pml_B, + ablastr::fields::ScalarField pml_F, + ablastr::fields::ScalarField pml_G, const amrex::IntVect& fill_guards) { const SpectralFieldIndex& Idx = solver.m_spectral_index; diff --git a/Source/BoundaryConditions/PML_RZ.H b/Source/BoundaryConditions/PML_RZ.H index c908681d8e5..20c7d360fc7 100644 --- a/Source/BoundaryConditions/PML_RZ.H +++ b/Source/BoundaryConditions/PML_RZ.H @@ -16,6 +16,8 @@ # include "FieldSolver/SpectralSolver/SpectralSolverRZ.H" #endif +#include + #include #include #include @@ -30,27 +32,24 @@ class PML_RZ { public: - PML_RZ (int lev, const amrex::BoxArray& grid_ba, const amrex::DistributionMapping& grid_dm, - const amrex::Geometry* geom, int ncell, int do_pml_in_domain); + PML_RZ (int lev, amrex::BoxArray const& grid_ba, amrex::DistributionMapping const& grid_dm, + amrex::Geometry const* geom, int ncell, int do_pml_in_domain); void ApplyDamping(amrex::MultiFab* Et_fp, amrex::MultiFab* Ez_fp, amrex::MultiFab* Bt_fp, amrex::MultiFab* Bz_fp, - amrex::Real dt); - - std::array GetE_fp (); - std::array GetB_fp (); + amrex::Real dt, ablastr::fields::MultiFabRegister& fields); #ifdef WARPX_USE_FFT void PushPSATD (int lev); #endif - void FillBoundaryE (); - void FillBoundaryB (); - void FillBoundaryE (PatchType patch_type, std::optional nodal_sync=std::nullopt); - void FillBoundaryB (PatchType patch_type, std::optional nodal_sync=std::nullopt); + void FillBoundaryE (ablastr::fields::MultiFabRegister& fields, + PatchType patch_type, std::optional nodal_sync=std::nullopt); + void FillBoundaryB (ablastr::fields::MultiFabRegister& fields, + PatchType patch_type, std::optional nodal_sync=std::nullopt); - void CheckPoint (const std::string& dir) const; - void Restart (const std::string& dir); + void CheckPoint (ablastr::fields::MultiFabRegister& fields, std::string const& dir) const; + void Restart (ablastr::fields::MultiFabRegister& fields, std::string const& dir); private: @@ -58,15 +57,13 @@ private: const int m_do_pml_in_domain; const amrex::Geometry* m_geom; - // Only contains Er and Et, and Br and Bt - std::array,2> pml_E_fp; - std::array,2> pml_B_fp; + // The MultiFabs pml_E_fp and pml_B_fp are set up using the registry. + // They hold Er, Et, and Br, Bt.
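// A minimal sketch of the lookup pattern that replaces the removed
// GetE_fp()/GetB_fp() accessors (it matches the registry calls made in
// PML_RZ.cpp below; level 0, Direction{0} = r, Direction{1} = theta):
//
//   using warpx::fields::FieldType;
//   using ablastr::fields::Direction;
//   amrex::MultiFab* pml_Er = fields.get(FieldType::pml_E_fp, Direction{0}, 0);
//   amrex::MultiFab* pml_Et = fields.get(FieldType::pml_E_fp, Direction{1}, 0);
//   amrex::MultiFab* pml_Br = fields.get(FieldType::pml_B_fp, Direction{0}, 0);
//   amrex::MultiFab* pml_Bt = fields.get(FieldType::pml_B_fp, Direction{1}, 0);
//
// where `fields` is the ablastr::fields::MultiFabRegister passed in by each caller.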
#ifdef WARPX_USE_FFT - void PushPMLPSATDSinglePatchRZ ( int lev, + void PushPMLPSATDSinglePatchRZ (int lev, SpectralSolverRZ& solver, - std::array,2>& pml_E, - std::array,2>& pml_B); + ablastr::fields::MultiFabRegister& fields); #endif }; diff --git a/Source/BoundaryConditions/PML_RZ.cpp b/Source/BoundaryConditions/PML_RZ.cpp index 78f3cf24987..8fd6a1869ae 100644 --- a/Source/BoundaryConditions/PML_RZ.cpp +++ b/Source/BoundaryConditions/PML_RZ.cpp @@ -8,7 +8,7 @@ #include "PML_RZ.H" #include "BoundaryConditions/PML_RZ.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #ifdef WARPX_USE_FFT # include "FieldSolver/SpectralSolver/SpectralFieldDataRZ.H" #endif @@ -33,43 +33,55 @@ #include #include -using namespace amrex; -using namespace warpx::fields; +using namespace amrex::literals; +using warpx::fields::FieldType; +using ablastr::fields::Direction; -PML_RZ::PML_RZ (const int lev, const amrex::BoxArray& grid_ba, const amrex::DistributionMapping& grid_dm, - const amrex::Geometry* geom, const int ncell, const int do_pml_in_domain) +PML_RZ::PML_RZ (int lev, amrex::BoxArray const& grid_ba, amrex::DistributionMapping const& grid_dm, + amrex::Geometry const* geom, int ncell, int do_pml_in_domain) : m_ncell(ncell), m_do_pml_in_domain(do_pml_in_domain), m_geom(geom) { - - const amrex::MultiFab & Er_fp = WarpX::GetInstance().getField(FieldType::Efield_fp, lev,0); - const amrex::MultiFab & Et_fp = WarpX::GetInstance().getField(FieldType::Efield_fp, lev,1); - const amrex::BoxArray ba_Er = amrex::convert(grid_ba, Er_fp.ixType().toIntVect()); - const amrex::BoxArray ba_Et = amrex::convert(grid_ba, Et_fp.ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_E_fp[0], ba_Er, grid_dm, Er_fp.nComp(), Er_fp.nGrowVect(), lev, "pml_E_fp[0]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_E_fp[1], ba_Et, grid_dm, Et_fp.nComp(), Et_fp.nGrowVect(), lev, "pml_E_fp[1]", 0.0_rt); - - const amrex::MultiFab & Br_fp = WarpX::GetInstance().getField(FieldType::Bfield_fp, lev,0); - const amrex::MultiFab & Bt_fp = WarpX::GetInstance().getField(FieldType::Bfield_fp, lev,1); - const amrex::BoxArray ba_Br = amrex::convert(grid_ba, Br_fp.ixType().toIntVect()); - const amrex::BoxArray ba_Bt = amrex::convert(grid_ba, Bt_fp.ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_B_fp[0], ba_Br, grid_dm, Br_fp.nComp(), Br_fp.nGrowVect(), lev, "pml_B_fp[0]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_B_fp[1], ba_Bt, grid_dm, Bt_fp.nComp(), Bt_fp.nGrowVect(), lev, "pml_B_fp[1]", 0.0_rt); + auto & warpx = WarpX::GetInstance(); + + bool const remake = false; + bool const redistribute_on_remake = false; + + amrex::MultiFab const& Er_fp = *warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev); + amrex::MultiFab const& Et_fp = *warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev); + amrex::BoxArray const ba_Er = amrex::convert(grid_ba, Er_fp.ixType().toIntVect()); + amrex::BoxArray const ba_Et = amrex::convert(grid_ba, Et_fp.ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_E_fp, Direction{0}, lev, ba_Er, grid_dm, Er_fp.nComp(), Er_fp.nGrowVect(), 0.0_rt, + remake, redistribute_on_remake); + warpx.m_fields.alloc_init(FieldType::pml_E_fp, Direction{1}, lev, ba_Et, grid_dm, Et_fp.nComp(), Et_fp.nGrowVect(), 0.0_rt, + remake, redistribute_on_remake); + + amrex::MultiFab const& Br_fp = *warpx.m_fields.get(FieldType::Bfield_fp,Direction{0},lev); + amrex::MultiFab const& Bt_fp = *warpx.m_fields.get(FieldType::Bfield_fp,Direction{1},lev); + amrex::BoxArray const ba_Br = amrex::convert(grid_ba, Br_fp.ixType().toIntVect()); + 
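// amrex::convert re-stamps the cell-centered grid BoxArray with the index
// type (staggering) of the corresponding regular-patch field, so each PML
// component is allocated on the same nodal/cell-centered layout as the field
// it damps; alloc_init then registers the named MultiFab and zero-fills it.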
amrex::BoxArray const ba_Bt = amrex::convert(grid_ba, Bt_fp.ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_B_fp, Direction{0}, lev, ba_Br, grid_dm, Br_fp.nComp(), Br_fp.nGrowVect(), 0.0_rt, + remake, redistribute_on_remake); + warpx.m_fields.alloc_init(FieldType::pml_B_fp, Direction{1}, lev, ba_Bt, grid_dm, Bt_fp.nComp(), Bt_fp.nGrowVect(), 0.0_rt, + remake, redistribute_on_remake); } void PML_RZ::ApplyDamping (amrex::MultiFab* Et_fp, amrex::MultiFab* Ez_fp, amrex::MultiFab* Bt_fp, amrex::MultiFab* Bz_fp, - amrex::Real dt) + amrex::Real dt, ablastr::fields::MultiFabRegister& fields) { - const amrex::Real dr = m_geom->CellSize(0); - const amrex::Real cdt_over_dr = PhysConst::c*dt/dr; + amrex::Real const dr = m_geom->CellSize(0); + amrex::Real const cdt_over_dr = PhysConst::c*dt/dr; + + amrex::MultiFab* pml_Et = fields.get(FieldType::pml_E_fp, Direction{1}, 0); + amrex::MultiFab* pml_Bt = fields.get(FieldType::pml_B_fp, Direction{1}, 0); #ifdef AMREX_USE_OMP -#pragma omp parallel if (Gpu::notInLaunchRegion()) +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif for ( amrex::MFIter mfi(*Et_fp, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi ) { @@ -78,8 +90,8 @@ PML_RZ::ApplyDamping (amrex::MultiFab* Et_fp, amrex::MultiFab* Ez_fp, amrex::Array4 const& Bt_arr = Bt_fp->array(mfi); amrex::Array4 const& Bz_arr = Bz_fp->array(mfi); - amrex::Array4 const& pml_Et_arr = pml_E_fp[1]->array(mfi); - amrex::Array4 const& pml_Bt_arr = pml_B_fp[1]->array(mfi); + amrex::Array4 const& pml_Et_arr = pml_Et->array(mfi); + amrex::Array4 const& pml_Bt_arr = pml_Bt->array(mfi); // Get the tileboxes from Efield and Bfield so that they include the guard cells // They are all the same, cell centered @@ -87,19 +99,19 @@ PML_RZ::ApplyDamping (amrex::MultiFab* Et_fp, amrex::MultiFab* Ez_fp, // Box for the whole simulation domain amrex::Box const& domain = m_geom->Domain(); - const int nr_domain = domain.bigEnd(0); + int const nr_domain = domain.bigEnd(0); // Set tilebox to only include the upper radial cells - const int nr_damp = m_ncell; - const int nr_damp_min = (m_do_pml_in_domain)?(nr_domain - nr_damp):(nr_domain); + int const nr_damp = m_ncell; + int const nr_damp_min = (m_do_pml_in_domain)?(nr_domain - nr_damp):(nr_domain); tilebox.setSmall(0, nr_damp_min + 1); amrex::ParallelFor( tilebox, Et_fp->nComp(), [=] AMREX_GPU_DEVICE (int i, int j, int k, int icomp) { - const auto rr = static_cast(i - nr_damp_min); - const amrex::Real wr = rr/nr_damp; - const amrex::Real damp_factor = std::exp( -4._rt * cdt_over_dr * wr*wr ); + auto const rr = static_cast(i - nr_damp_min); + amrex::Real const wr = rr/nr_damp; + amrex::Real const damp_factor = std::exp( -4._rt * cdt_over_dr * wr*wr ); // Substract the theta PML fields from the regular theta fields Et_arr(i,j,k,icomp) -= pml_Et_arr(i,j,k,icomp); @@ -117,105 +129,88 @@ PML_RZ::ApplyDamping (amrex::MultiFab* Et_fp, amrex::MultiFab* Ez_fp, } } -std::array -PML_RZ::GetE_fp () -{ - return {pml_E_fp[0].get(), pml_E_fp[1].get()}; -} - -std::array -PML_RZ::GetB_fp () -{ - return {pml_B_fp[0].get(), pml_B_fp[1].get()}; -} - void -PML_RZ::FillBoundaryE () +PML_RZ::FillBoundaryE (ablastr::fields::MultiFabRegister& fields, PatchType patch_type, std::optional nodal_sync) { - FillBoundaryE(PatchType::fine); -} + amrex::MultiFab * pml_Er = fields.get(FieldType::pml_E_fp, Direction{0}, 0); + amrex::MultiFab * pml_Et = fields.get(FieldType::pml_E_fp, Direction{1}, 0); -void -PML_RZ::FillBoundaryE (PatchType patch_type, std::optional nodal_sync) -{ - if 
(patch_type == PatchType::fine && pml_E_fp[0] && pml_E_fp[0]->nGrowVect().max() > 0) + if (patch_type == PatchType::fine && pml_Er->nGrowVect().max() > 0) { - const amrex::Periodicity& period = m_geom->periodicity(); - const Vector mf{pml_E_fp[0].get(),pml_E_fp[1].get()}; + amrex::Periodicity const& period = m_geom->periodicity(); + const amrex::Vector mf = {pml_Er, pml_Et}; ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); } } void -PML_RZ::FillBoundaryB () +PML_RZ::FillBoundaryB (ablastr::fields::MultiFabRegister& fields, PatchType patch_type, std::optional nodal_sync) { - FillBoundaryB(PatchType::fine); -} - -void -PML_RZ::FillBoundaryB (PatchType patch_type, std::optional nodal_sync) -{ - if (patch_type == PatchType::fine && pml_B_fp[0]) + if (patch_type == PatchType::fine) { - const amrex::Periodicity& period = m_geom->periodicity(); - const Vector mf{pml_B_fp[0].get(),pml_B_fp[1].get()}; + amrex::MultiFab * pml_Br = fields.get(FieldType::pml_B_fp, Direction{0}, 0); + amrex::MultiFab * pml_Bt = fields.get(FieldType::pml_B_fp, Direction{1}, 0); + + amrex::Periodicity const& period = m_geom->periodicity(); + const amrex::Vector mf = {pml_Br, pml_Bt}; ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); } } void -PML_RZ::CheckPoint (const std::string& dir) const +PML_RZ::CheckPoint (ablastr::fields::MultiFabRegister& fields, std::string const& dir) const { - if (pml_E_fp[0]) - { - VisMF::AsyncWrite(*pml_E_fp[0], dir+"_Er_fp"); - VisMF::AsyncWrite(*pml_E_fp[1], dir+"_Et_fp"); - VisMF::AsyncWrite(*pml_B_fp[0], dir+"_Br_fp"); - VisMF::AsyncWrite(*pml_B_fp[1], dir+"_Bt_fp"); + if (fields.has(FieldType::pml_E_fp, Direction{0}, 0)) { + amrex::VisMF::AsyncWrite(*fields.get(FieldType::pml_E_fp, Direction{0}, 0), dir+"_Er_fp"); + amrex::VisMF::AsyncWrite(*fields.get(FieldType::pml_E_fp, Direction{1}, 0), dir+"_Et_fp"); + amrex::VisMF::AsyncWrite(*fields.get(FieldType::pml_B_fp, Direction{0}, 0), dir+"_Br_fp"); + amrex::VisMF::AsyncWrite(*fields.get(FieldType::pml_B_fp, Direction{1}, 0), dir+"_Bt_fp"); } } void -PML_RZ::Restart (const std::string& dir) +PML_RZ::Restart (ablastr::fields::MultiFabRegister& fields, std::string const& dir) { - if (pml_E_fp[0]) - { - VisMF::Read(*pml_E_fp[0], dir+"_Er_fp"); - VisMF::Read(*pml_E_fp[1], dir+"_Et_fp"); - VisMF::Read(*pml_B_fp[0], dir+"_Br_fp"); - VisMF::Read(*pml_B_fp[1], dir+"_Bt_fp"); + if (fields.has(FieldType::pml_E_fp, Direction{0}, 0)) { + amrex::VisMF::Read(*fields.get(FieldType::pml_E_fp, Direction{0}, 0), dir+"_Er_fp"); + amrex::VisMF::Read(*fields.get(FieldType::pml_E_fp, Direction{1}, 0), dir+"_Et_fp"); + amrex::VisMF::Read(*fields.get(FieldType::pml_B_fp, Direction{0}, 0), dir+"_Br_fp"); + amrex::VisMF::Read(*fields.get(FieldType::pml_B_fp, Direction{1}, 0), dir+"_Bt_fp"); } } #ifdef WARPX_USE_FFT void -PML_RZ::PushPSATD (const int lev) +PML_RZ::PushPSATD (int lev) { // Update the fields on the fine and coarse patch WarpX& warpx = WarpX::GetInstance(); SpectralSolverRZ& solver = warpx.get_spectral_solver_fp(lev); - PushPMLPSATDSinglePatchRZ(lev, solver, pml_E_fp, pml_B_fp); + PushPMLPSATDSinglePatchRZ(lev, solver, warpx.m_fields); } void PML_RZ::PushPMLPSATDSinglePatchRZ ( - const int lev, + int lev, SpectralSolverRZ& solver, - std::array,2>& pml_E, - std::array,2>& pml_B) + ablastr::fields::MultiFabRegister& fields) { - const SpectralFieldIndex& Idx = solver.m_spectral_index; + SpectralFieldIndex const& Idx = solver.m_spectral_index; + 
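// The RZ PML push below follows the same three-step pattern as the Cartesian
// PushPMLPSATDSinglePatch: (1) forward-transform the split PML components to
// spectral space, (2) advance them with the PML branch of the spectral update
// (pushSpectralFields with doing_pml = true), (3) backward-transform the
// updated components onto the grid.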
amrex::MultiFab * pml_Er = fields.get(FieldType::pml_E_fp, Direction{0}, 0); + amrex::MultiFab * pml_Et = fields.get(FieldType::pml_E_fp, Direction{1}, 0); + amrex::MultiFab * pml_Br = fields.get(FieldType::pml_B_fp, Direction{0}, 0); + amrex::MultiFab * pml_Bt = fields.get(FieldType::pml_B_fp, Direction{1}, 0); // Perform forward Fourier transforms - solver.ForwardTransform(lev, *pml_E[0], Idx.Er_pml, *pml_E[1], Idx.Et_pml); - solver.ForwardTransform(lev, *pml_B[0], Idx.Br_pml, *pml_B[1], Idx.Bt_pml); + solver.ForwardTransform(lev, *pml_Er, Idx.Er_pml, *pml_Et, Idx.Et_pml); + solver.ForwardTransform(lev, *pml_Br, Idx.Br_pml, *pml_Bt, Idx.Bt_pml); // Advance fields in spectral space - const bool doing_pml = true; + bool const doing_pml = true; solver.pushSpectralFields(doing_pml); // Perform backward Fourier transforms - solver.BackwardTransform(lev, *pml_E[0], Idx.Er_pml, *pml_E[1], Idx.Et_pml); - solver.BackwardTransform(lev, *pml_B[0], Idx.Br_pml, *pml_B[1], Idx.Bt_pml); + solver.BackwardTransform(lev, *pml_Er, Idx.Er_pml, *pml_Et, Idx.Et_pml); + solver.BackwardTransform(lev, *pml_Br, Idx.Br_pml, *pml_Bt, Idx.Bt_pml); } #endif diff --git a/Source/BoundaryConditions/WarpXEvolvePML.cpp b/Source/BoundaryConditions/WarpXEvolvePML.cpp index c6a89d80c07..cfde83dcf5b 100644 --- a/Source/BoundaryConditions/WarpXEvolvePML.cpp +++ b/Source/BoundaryConditions/WarpXEvolvePML.cpp @@ -12,10 +12,13 @@ # include "BoundaryConditions/PML_RZ.H" #endif #include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" #include "PML_current.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX_PML_kernels.H" +#include + #ifdef AMREX_USE_SENSEI_INSITU # include #endif @@ -63,9 +66,13 @@ WarpX::DampPML (const int lev, PatchType patch_type) WARPX_PROFILE("WarpX::DampPML()"); #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) if (pml_rz[lev]) { - pml_rz[lev]->ApplyDamping(Efield_fp[lev][1].get(), Efield_fp[lev][2].get(), - Bfield_fp[lev][1].get(), Bfield_fp[lev][2].get(), - dt[lev]); + using ablastr::fields::Direction; + using warpx::fields::FieldType; + pml_rz[lev]->ApplyDamping( m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + m_fields.get(FieldType::Efield_fp, Direction{2}, lev), + m_fields.get(FieldType::Bfield_fp, Direction{1}, lev), + m_fields.get(FieldType::Bfield_fp, Direction{2}, lev), + dt[lev], m_fields); } #endif if (pml[lev]) { @@ -81,12 +88,11 @@ WarpX::DampPML_Cartesian (const int lev, PatchType patch_type) if (pml[lev]->ok()) { - const auto& pml_E = (patch_type == PatchType::fine) ? pml[lev]->GetE_fp() : pml[lev]->GetE_cp(); - const auto& pml_B = (patch_type == PatchType::fine) ? pml[lev]->GetB_fp() : pml[lev]->GetB_cp(); - const auto& pml_F = (patch_type == PatchType::fine) ? pml[lev]->GetF_fp() : pml[lev]->GetF_cp(); - const auto& pml_G = (patch_type == PatchType::fine) ? pml[lev]->GetG_fp() : pml[lev]->GetG_cp(); - const auto& sigba = (patch_type == PatchType::fine) ? pml[lev]->GetMultiSigmaBox_fp() - : pml[lev]->GetMultiSigmaBox_cp(); + using warpx::fields::FieldType; + + const auto& pml_E = (patch_type == PatchType::fine) ? m_fields.get_alldirs(FieldType::pml_E_fp, lev) : m_fields.get_alldirs(FieldType::pml_E_cp, lev); + const auto& pml_B = (patch_type == PatchType::fine) ? m_fields.get_alldirs(FieldType::pml_B_fp, lev) : m_fields.get_alldirs(FieldType::pml_B_cp, lev); + const auto& sigba = (patch_type == PatchType::fine) ? 
pml[lev]->GetMultiSigmaBox_fp() : pml[lev]->GetMultiSigmaBox_cp(); const amrex::IntVect Ex_stag = pml_E[0]->ixType().toIntVect(); const amrex::IntVect Ey_stag = pml_E[1]->ixType().toIntVect(); @@ -97,12 +103,16 @@ WarpX::DampPML_Cartesian (const int lev, PatchType patch_type) const amrex::IntVect Bz_stag = pml_B[2]->ixType().toIntVect(); amrex::IntVect F_stag; - if (pml_F) { + if (m_fields.has(FieldType::pml_F_fp, lev)) { + amrex::MultiFab* pml_F = (patch_type == PatchType::fine) ? + m_fields.get(FieldType::pml_F_fp, lev) : m_fields.get(FieldType::pml_F_cp, lev); F_stag = pml_F->ixType().toIntVect(); } amrex::IntVect G_stag; - if (pml_G) { + if (m_fields.has(FieldType::pml_G_fp, lev)) { + amrex::MultiFab* pml_G = (patch_type == PatchType::fine) ? + m_fields.get(FieldType::pml_G_fp, lev) : m_fields.get(FieldType::pml_G_cp, lev); G_stag = pml_G->ixType().toIntVect(); } @@ -193,7 +203,9 @@ WarpX::DampPML_Cartesian (const int lev, PatchType patch_type) // For warpx_damp_pml_F(), mfi.nodaltilebox is used in the ParallelFor loop and here we // use mfi.tilebox. However, it does not matter because in damp_pml, where nodaltilebox // is used, only a simple multiplication is performed. - if (pml_F) { + if (m_fields.has(FieldType::pml_F_fp, lev)) { + amrex::MultiFab* pml_F = (patch_type == PatchType::fine) ? + m_fields.get(FieldType::pml_F_fp, lev) : m_fields.get(FieldType::pml_F_cp, lev); const Box& tnd = mfi.nodaltilebox(); auto const& pml_F_fab = pml_F->array(mfi); amrex::ParallelFor(tnd, [=] AMREX_GPU_DEVICE (int i, int j, int k) @@ -204,7 +216,10 @@ WarpX::DampPML_Cartesian (const int lev, PatchType patch_type) } // Damp G when WarpX::do_divb_cleaning = true - if (pml_G) { + if (m_fields.has(FieldType::pml_G_fp, lev)) { + amrex::MultiFab* pml_G = (patch_type == PatchType::fine) ? + m_fields.get(FieldType::pml_G_fp, lev) : m_fields.get(FieldType::pml_G_cp, lev); + const Box& tb = mfi.tilebox(G_stag); auto const& pml_G_fab = pml_G->array(mfi); amrex::ParallelFor(tb, [=] AMREX_GPU_DEVICE (int i, int j, int k) @@ -243,8 +258,9 @@ WarpX::DampJPML (int lev, PatchType patch_type) if (pml[lev]->ok()) { + using warpx::fields::FieldType; - const auto& pml_j = (patch_type == PatchType::fine) ? pml[lev]->Getj_fp() : pml[lev]->Getj_cp(); + const auto& pml_j = (patch_type == PatchType::fine) ? m_fields.get_alldirs(FieldType::pml_j_fp, lev) : m_fields.get_alldirs(FieldType::pml_j_cp, lev); const auto& sigba = (patch_type == PatchType::fine) ? 
pml[lev]->GetMultiSigmaBox_fp() : pml[lev]->GetMultiSigmaBox_cp(); @@ -273,7 +289,7 @@ WarpX::DampJPML (int lev, PatchType patch_type) // Skip the field update if this gridpoint is inside the embedded boundary amrex::Array4 eb_lxfab, eb_lyfab, eb_lzfab; if (EB::enabled()) { - const auto &pml_edge_lenghts = pml[lev]->Get_edge_lengths(); + const auto &pml_edge_lenghts = m_fields.get_alldirs(FieldType::pml_edge_lengths, lev); eb_lxfab = pml_edge_lenghts[0]->array(mfi); eb_lyfab = pml_edge_lenghts[1]->array(mfi); @@ -338,15 +354,12 @@ WarpX::DampJPML (int lev, PatchType patch_type) void WarpX::CopyJPML () { + using ablastr::fields::Direction; + for (int lev = 0; lev <= finest_level; ++lev) { if (pml[lev] && pml[lev]->ok()){ - pml[lev]->CopyJtoPMLs({ current_fp[lev][0].get(), - current_fp[lev][1].get(), - current_fp[lev][2].get() }, - { current_cp[lev][0].get(), - current_cp[lev][1].get(), - current_cp[lev][2].get() }); + pml[lev]->CopyJtoPMLs(m_fields, lev); } } } diff --git a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp index 6d2525bc724..dc41e95f40f 100644 --- a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp +++ b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp @@ -18,7 +18,7 @@ using namespace amrex; using namespace amrex::literals; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -50,12 +50,14 @@ namespace void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) { + using ablastr::fields::Direction; + if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { if (patch_type == PatchType::fine) { PEC::ApplyPECtoEfield( - {getFieldPointer(FieldType::Efield_fp, lev, 0), - getFieldPointer(FieldType::Efield_fp, lev, 1), - getFieldPointer(FieldType::Efield_fp, lev, 2)}, + {m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + m_fields.get(FieldType::Efield_fp, Direction{2}, lev)}, field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio); @@ -63,7 +65,7 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) // apply pec on split E-fields in PML region const bool split_pml_field = true; PEC::ApplyPECtoEfield( - pml[lev]->GetE_fp(), + m_fields.get_alldirs(FieldType::pml_E_fp, lev), field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio, @@ -71,9 +73,9 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) } } else { PEC::ApplyPECtoEfield( - {getFieldPointer(FieldType::Efield_cp, lev, 0), - getFieldPointer(FieldType::Efield_cp, lev, 1), - getFieldPointer(FieldType::Efield_cp, lev, 2)}, + {m_fields.get(FieldType::Efield_cp,Direction{0},lev), + m_fields.get(FieldType::Efield_cp,Direction{1},lev), + m_fields.get(FieldType::Efield_cp,Direction{2},lev)}, field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio); @@ -81,7 +83,7 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) // apply pec on split E-fields in PML region const bool split_pml_field = true; PEC::ApplyPECtoEfield( - pml[lev]->GetE_cp(), + m_fields.get_alldirs(FieldType::pml_E_cp, lev), field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio, @@ -92,33 +94,35 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) #ifdef WARPX_DIM_RZ if (patch_type == PatchType::fine) { - ApplyFieldBoundaryOnAxis(getFieldPointer(FieldType::Efield_fp, lev, 
0), - getFieldPointer(FieldType::Efield_fp, lev, 1), - getFieldPointer(FieldType::Efield_fp, lev, 2), lev); + ApplyFieldBoundaryOnAxis(m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + m_fields.get(FieldType::Efield_fp, Direction{2}, lev), lev); } else { - ApplyFieldBoundaryOnAxis(getFieldPointer(FieldType::Efield_cp, lev, 0), - getFieldPointer(FieldType::Efield_cp, lev, 1), - getFieldPointer(FieldType::Efield_cp, lev, 2), lev); + ApplyFieldBoundaryOnAxis(m_fields.get(FieldType::Efield_cp, Direction{0}, lev), + m_fields.get(FieldType::Efield_cp, Direction{1}, lev), + m_fields.get(FieldType::Efield_cp, Direction{2}, lev), lev); } #endif } void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_dt_type) { + using ablastr::fields::Direction; + if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { if (patch_type == PatchType::fine) { PEC::ApplyPECtoBfield( { - getFieldPointer(FieldType::Bfield_fp, lev, 0), - getFieldPointer(FieldType::Bfield_fp, lev, 1), - getFieldPointer(FieldType::Bfield_fp, lev, 2) }, + m_fields.get(FieldType::Bfield_fp,Direction{0},lev), + m_fields.get(FieldType::Bfield_fp,Direction{1},lev), + m_fields.get(FieldType::Bfield_fp,Direction{2},lev) }, field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio); } else { PEC::ApplyPECtoBfield( { - getFieldPointer(FieldType::Bfield_cp, lev, 0), - getFieldPointer(FieldType::Bfield_cp, lev, 1), - getFieldPointer(FieldType::Bfield_cp, lev, 2)}, + m_fields.get(FieldType::Bfield_cp,Direction{0},lev), + m_fields.get(FieldType::Bfield_cp,Direction{1},lev), + m_fields.get(FieldType::Bfield_cp,Direction{2},lev) }, field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio); @@ -131,6 +135,8 @@ void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_d if (lev == 0) { if (a_dt_type == DtType::FirstHalf) { if(::isAnyBoundary(field_boundary_lo, field_boundary_hi)){ + auto Efield_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, max_level); + auto Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, max_level); m_fdtd_solver_fp[0]->ApplySilverMuellerBoundary( Efield_fp[lev], Bfield_fp[lev], Geom(lev).Domain(), dt[lev], @@ -141,13 +147,13 @@ void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_d #ifdef WARPX_DIM_RZ if (patch_type == PatchType::fine) { - ApplyFieldBoundaryOnAxis(getFieldPointer(FieldType::Bfield_fp, lev, 0), - getFieldPointer(FieldType::Bfield_fp, lev, 1), - getFieldPointer(FieldType::Bfield_fp, lev, 2), lev); + ApplyFieldBoundaryOnAxis(m_fields.get(FieldType::Bfield_fp,Direction{0},lev), + m_fields.get(FieldType::Bfield_fp,Direction{1},lev), + m_fields.get(FieldType::Bfield_fp,Direction{2},lev), lev); } else { - ApplyFieldBoundaryOnAxis(getFieldPointer(FieldType::Bfield_cp, lev, 0), - getFieldPointer(FieldType::Bfield_cp, lev, 1), - getFieldPointer(FieldType::Bfield_cp, lev, 2), lev); + ApplyFieldBoundaryOnAxis(m_fields.get(FieldType::Bfield_cp,Direction{0},lev), + m_fields.get(FieldType::Bfield_cp,Direction{1},lev), + m_fields.get(FieldType::Bfield_cp,Direction{2},lev), lev); } #endif } @@ -268,8 +274,9 @@ void WarpX::ApplyElectronPressureBoundary (const int lev, PatchType patch_type) { if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { if (patch_type == PatchType::fine) { + ablastr::fields::ScalarField electron_pressure_fp = m_fields.get(FieldType::hybrid_electron_pressure_fp, 
lev); PEC::ApplyPECtoElectronPressure( - m_hybrid_pic_model->get_pointer_electron_pressure_fp(lev), + electron_pressure_fp, field_boundary_lo, field_boundary_hi, Geom(lev), lev, patch_type, ref_ratio); } else { diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 6fdb605f8dc..e00c30aa78e 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -14,13 +14,14 @@ #include "Diagnostics/Diagnostics.H" #include "Diagnostics/FlushFormats/FlushFormat.H" #include "ComputeDiagFunctors/BackTransformParticleFunctor.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Utils/Algorithms/IsIn.H" #include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "WarpX.H" +#include #include #include #include @@ -47,7 +48,7 @@ #include using namespace amrex::literals; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -525,6 +526,8 @@ BTDiagnostics::DefineCellCenteredMultiFab(int lev) void BTDiagnostics::InitializeFieldFunctors (int lev) { + using ablastr::fields::Direction; + // Initialize fields functors only if do_back_transformed_fields is selected if (!m_do_back_transformed_fields) { return; } @@ -567,23 +570,23 @@ BTDiagnostics::InitializeFieldFunctors (int lev) m_cell_center_functors.at(lev).size()); for (int comp=0; comp(warpx.getFieldPointer(FieldType::Efield_aux, lev, 0), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "Ey" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 1), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "Ez" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 2), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "Bx" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 0), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "By" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 1), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "Bz" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 2), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "jx" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::current_fp, lev, 0), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::current_fp,Direction{0}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "jy" ){ 
- m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::current_fp, lev, 1), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::current_fp,Direction{1}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "jz" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::current_fp, lev, 2), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::current_fp,Direction{2}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "rho" ){ m_cell_center_functors[lev][comp] = std::make_unique(lev, m_crse_ratio); } @@ -598,7 +601,8 @@ BTDiagnostics::UpdateVarnamesForRZopenPMD () { #ifdef WARPX_DIM_RZ auto & warpx = WarpX::GetInstance(); - const int ncomp_multimodefab = warpx.getFieldPointer(FieldType::Efield_aux, 0,0)->nComp(); + using ablastr::fields::Direction; + const int ncomp_multimodefab = warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp(); const int ncomp = ncomp_multimodefab; @@ -656,8 +660,10 @@ void BTDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) { #ifdef WARPX_DIM_RZ + using ablastr::fields::Direction; + auto & warpx = WarpX::GetInstance(); - const int ncomp_multimodefab = warpx.getFieldPointer(FieldType::Efield_aux, 0,0)->nComp(); + const int ncomp_multimodefab = warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp(); const int ncomp = ncomp_multimodefab; // Clear any pre-existing vector to release stored data // This ensures that when domain is load-balanced, the functors point @@ -683,23 +689,23 @@ BTDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) const auto m_cell_center_functors_at_lev_size = static_cast(m_cell_center_functors.at(lev).size()); for (int comp=0; comp(warpx.getFieldPointer(FieldType::Efield_aux, lev, 0), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Et" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 1), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Ez" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 2), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Br" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 0), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Bt" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 1), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Bz" ){ - 
m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 2), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "jr" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::current_fp, lev, 0), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::current_fp, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "jt" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::current_fp, lev, 1), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::current_fp, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "jz" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::current_fp, lev, 2), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::current_fp, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "rho" ){ m_cell_center_functors[lev][comp] = std::make_unique(lev, m_crse_ratio, false, -1, false, ncomp); } diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H index 3d04a56742b..1d36b434ae2 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H @@ -3,6 +3,8 @@ #include "ComputeDiagFunctor.H" +#include + #include #include @@ -22,8 +24,13 @@ public: * (summing over modes) * \param[in] ncomp Number of component of mf_src to cell-center in dst multifab. */ - DivBFunctor(std::array arr_mf_src, int lev, amrex::IntVect crse_ratio, - bool convertRZmodes2cartesian=true, int ncomp=1); + DivBFunctor ( + ablastr::fields::VectorField const & arr_mf_src, + int lev, + amrex::IntVect crse_ratio, + bool convertRZmodes2cartesian=true, + int ncomp=1 + ); /** \brief Compute DivB directly into mf_dst. 
* @@ -34,7 +41,7 @@ public: void operator()(amrex::MultiFab& mf_dst, int dcomp, int /*i_buffer*/) const override; private: /** Vector of pointer to source multifab Bx, By, Bz */ - std::array m_arr_mf_src; + ablastr::fields::VectorField m_arr_mf_src; int const m_lev; /**< level on which mf_src is defined (used in cylindrical) */ /**< (for cylindrical) whether to average all modes into 1 comp */ bool m_convertRZmodes2cartesian; diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp index b5782e76ae6..224b74ba372 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp @@ -7,8 +7,13 @@ #include #include -DivBFunctor::DivBFunctor(const std::array arr_mf_src, const int lev, const amrex::IntVect crse_ratio, - bool convertRZmodes2cartesian, const int ncomp) +DivBFunctor::DivBFunctor ( + ablastr::fields::VectorField const & arr_mf_src, + const int lev, + const amrex::IntVect crse_ratio, + bool convertRZmodes2cartesian, + const int ncomp +) : ComputeDiagFunctor(ncomp, crse_ratio), m_arr_mf_src(arr_mf_src), m_lev(lev), m_convertRZmodes2cartesian(convertRZmodes2cartesian) {} diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.H index 312ccaa5cd6..e7691187f3a 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.H @@ -3,6 +3,8 @@ #include "ComputeDiagFunctor.H" +#include + #include #include @@ -21,8 +23,13 @@ public: * \param[in] convertRZmodes2cartesian if true, all RZ modes are averaged into one component * \param[in] ncomp Number of component of mf_src to cell-center in dst multifab. */ - DivEFunctor(std::array arr_mf_src, int lev, amrex::IntVect crse_ratio, - bool convertRZmodes2cartesian=true, int ncomp=1); + DivEFunctor ( + ablastr::fields::VectorField const & arr_mf_src, + int lev, + amrex::IntVect crse_ratio, + bool convertRZmodes2cartesian=true, + int ncomp=1 + ); /** \brief Compute DivE directly into mf_dst. 
* @@ -33,7 +40,7 @@ public: void operator()(amrex::MultiFab& mf_dst, int dcomp, int /*i_buffer=0*/) const override; private: /** Vector of pointer to source multifab Bx, By, Bz */ - std::array m_arr_mf_src; + ablastr::fields::VectorField m_arr_mf_src; int const m_lev; /**< level on which mf_src is defined (used in cylindrical) */ /**< (for cylindrical) whether to average all modes into 1 comp */ bool m_convertRZmodes2cartesian; diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp index 62801cd431a..e2c4d98c708 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp @@ -13,9 +13,13 @@ #include #include -DivEFunctor::DivEFunctor(const std::array arr_mf_src, const int lev, - const amrex::IntVect crse_ratio, - bool convertRZmodes2cartesian, const int ncomp) +DivEFunctor::DivEFunctor ( + ablastr::fields::VectorField const & arr_mf_src, + const int lev, + const amrex::IntVect crse_ratio, + bool convertRZmodes2cartesian, + const int ncomp +) : ComputeDiagFunctor(ncomp, crse_ratio), m_arr_mf_src(arr_mf_src), m_lev(lev), m_convertRZmodes2cartesian(convertRZmodes2cartesian) { diff --git a/Source/Diagnostics/ComputeDiagFunctors/JFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/JFunctor.cpp index ebaec47b2f1..df25bf7ff03 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/JFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/JFunctor.cpp @@ -6,16 +6,18 @@ #include "JFunctor.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/MultiParticleContainer.H" #include "WarpX.H" +#include + #include #include #include #include -using namespace warpx::fields; +using warpx::fields::FieldType; JFunctor::JFunctor (const int dir, int lev, amrex::IntVect crse_ratio, @@ -29,30 +31,19 @@ JFunctor::JFunctor (const int dir, int lev, void JFunctor::operator() (amrex::MultiFab& mf_dst, int dcomp, const int /*i_buffer*/) const { + using ablastr::fields::Direction; + auto& warpx = WarpX::GetInstance(); /** pointer to source multifab (can be multi-component) */ - amrex::MultiFab* m_mf_src = warpx.getFieldPointer(FieldType::current_fp, m_lev, m_dir); + amrex::MultiFab* m_mf_src = warpx.m_fields.get(FieldType::current_fp,Direction{m_dir},m_lev); // Deposit current if no solver or the electrostatic solver is being used if (m_deposit_current) { // allocate temporary multifab to deposit current density into - amrex::Vector, 3 > > current_fp_temp; - current_fp_temp.resize(1); - - const auto& current_fp_x = warpx.getField(FieldType::current_fp, m_lev,0); - current_fp_temp[0][0] = std::make_unique( - current_fp_x, amrex::make_alias, 0, current_fp_x.nComp() - ); - - const auto& current_fp_y = warpx.getField(FieldType::current_fp, m_lev,1); - current_fp_temp[0][1] = std::make_unique( - current_fp_y, amrex::make_alias, 0, current_fp_y.nComp() - ); - const auto& current_fp_z = warpx.getField(FieldType::current_fp, m_lev,2); - current_fp_temp[0][2] = std::make_unique( - current_fp_z, amrex::make_alias, 0, current_fp_z.nComp() - ); + ablastr::fields::MultiLevelVectorField current_fp_temp { + warpx.m_fields.get_alldirs(FieldType::current_fp, m_lev) + }; auto& mypc = warpx.GetPartContainer(); mypc.DepositCurrent(current_fp_temp, warpx.getdt(m_lev), 0.0); diff --git a/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp index aac5869da65..b4f286506a8 100644 --- 
a/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp @@ -6,7 +6,7 @@ #include "JdispFunctor.H" #include "WarpX.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" #include "Particles/MultiParticleContainer.H" @@ -16,7 +16,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; JdispFunctor::JdispFunctor (int dir, int lev, amrex::IntVect crse_ratio, bool convertRZmodes2cartesian, int ncomp) @@ -27,18 +27,20 @@ JdispFunctor::JdispFunctor (int dir, int lev, void JdispFunctor::operator() (amrex::MultiFab& mf_dst, int dcomp, const int /*i_buffer*/) const { + using ablastr::fields::Direction; + auto& warpx = WarpX::GetInstance(); auto* hybrid_pic_model = warpx.get_pointer_HybridPICModel(); /** pointer to total simulation current (J) multifab */ - amrex::MultiFab* mf_j = warpx.getFieldPointer(FieldType::current_fp, m_lev, m_dir); + amrex::MultiFab* mf_j = warpx.m_fields.get(FieldType::current_fp, Direction{m_dir}, m_lev); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(hybrid_pic_model, "Displacement current diagnostic is only implemented for the HybridPICModel."); AMREX_ASSUME(hybrid_pic_model != nullptr); /** pointer to current calculated from Ampere's Law (Jamp) multifab */ - amrex::MultiFab* mf_curlB = hybrid_pic_model->get_pointer_current_fp_ampere(m_lev, m_dir);; + amrex::MultiFab* mf_curlB = warpx.m_fields.get(FieldType::hybrid_current_fp_ampere, Direction{m_dir}, m_lev); //if (!hybrid_pic_model) { // To finish this implementation, we need to implement a method to @@ -64,7 +66,7 @@ JdispFunctor::operator() (amrex::MultiFab& mf_dst, int dcomp, const int /*i_buff if (hybrid_pic_model) { // Subtract the interpolated j_external value from j_displacement. 
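// (Illustrative sketch only; the code below first interpolates Jext from its
// own staggering onto the J_displacement nodes.) Once the components are
// co-located, the subtraction is a plain component-wise MultiFab operation:
//
//   amrex::MultiFab::Subtract(mf_dst, *mf_j_external, 0, dcomp, 1, 0);
//
// i.e. subtract component 0 of Jext from component dcomp of the destination,
// over valid cells only (0 ghost cells).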
/** pointer to external currents (Jext) multifab */ - amrex::MultiFab* mf_j_external = hybrid_pic_model->get_pointer_current_fp_external(m_lev, m_dir); + amrex::MultiFab* mf_j_external = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{m_dir}, m_lev); // Index type required for interpolating Jext from their respective // staggering (nodal) to the Jx_displacement, Jy_displacement, Jz_displacement diff --git a/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp index 32e11903778..e7f572dd681 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp @@ -47,7 +47,7 @@ RhoFunctor::operator() ( amrex::MultiFab& mf_dst, const int dcomp, const int /*i rho = mypc.GetChargeDensity(m_lev, true); if (warpx.DoFluidSpecies()) { auto& myfl = warpx.GetFluidContainer(); - myfl.DepositCharge(m_lev, *rho); + myfl.DepositCharge(warpx.m_fields, *rho, m_lev); } } // Dump rho per species diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp index 1a3318ae0d8..4d721dd6abe 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp @@ -5,12 +5,14 @@ # include "BoundaryConditions/PML_RZ.H" #endif #include "Diagnostics/ParticleDiag/ParticleDiag.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/WarpXParticleContainer.H" #include "Utils/TextMsg.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" +#include + #include #include #include @@ -20,7 +22,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -43,6 +45,8 @@ FlushFormatCheckpoint::WriteToFile ( const amrex::Geometry& /*full_BTD_snapshot*/, bool /*isLastBTDFlush*/) const { + using ablastr::fields::Direction; + WARPX_PROFILE("FlushFormatCheckpoint::WriteToFile()"); auto & warpx = WarpX::GetInstance(); @@ -64,85 +68,85 @@ FlushFormatCheckpoint::WriteToFile ( for (int lev = 0; lev < nlev; ++lev) { - VisMF::Write(warpx.getField(FieldType::Efield_fp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ex_fp")); - VisMF::Write(warpx.getField(FieldType::Efield_fp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ey_fp")); - VisMF::Write(warpx.getField(FieldType::Efield_fp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ez_fp")); - VisMF::Write(warpx.getField(FieldType::Bfield_fp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bx_fp")); - VisMF::Write(warpx.getField(FieldType::Bfield_fp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "By_fp")); - VisMF::Write(warpx.getField(FieldType::Bfield_fp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bz_fp")); if (WarpX::fft_do_time_averaging) { - 
VisMF::Write(warpx.getField(FieldType::Efield_avg_fp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_avg_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ex_avg_fp")); - VisMF::Write(warpx.getField(FieldType::Efield_avg_fp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_avg_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ey_avg_fp")); - VisMF::Write(warpx.getField(FieldType::Efield_avg_fp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_avg_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ez_avg_fp")); - VisMF::Write(warpx.getField(FieldType::Bfield_avg_fp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_avg_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bx_avg_fp")); - VisMF::Write(warpx.getField(FieldType::Bfield_avg_fp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_avg_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "By_avg_fp")); - VisMF::Write(warpx.getField(FieldType::Bfield_avg_fp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_avg_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bz_avg_fp")); } if (warpx.getis_synchronized()) { // Need to save j if synchronized because after restart we need j to evolve E by dt/2. - VisMF::Write(warpx.getField(FieldType::current_fp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::current_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jx_fp")); - VisMF::Write(warpx.getField(FieldType::current_fp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::current_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jy_fp")); - VisMF::Write(warpx.getField(FieldType::current_fp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::current_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jz_fp")); } if (lev > 0) { - VisMF::Write(warpx.getField(FieldType::Efield_cp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ex_cp")); - VisMF::Write(warpx.getField(FieldType::Efield_cp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ey_cp")); - VisMF::Write(warpx.getField(FieldType::Efield_cp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ez_cp")); - VisMF::Write(warpx.getField(FieldType::Bfield_cp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bx_cp")); - VisMF::Write(warpx.getField(FieldType::Bfield_cp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "By_cp")); - VisMF::Write(warpx.getField(FieldType::Bfield_cp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bz_cp")); if 
(WarpX::fft_do_time_averaging) { - VisMF::Write(warpx.getField(FieldType::Efield_avg_cp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_avg_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ex_avg_cp")); - VisMF::Write(warpx.getField(FieldType::Efield_avg_cp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_avg_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ey_avg_cp")); - VisMF::Write(warpx.getField(FieldType::Efield_avg_cp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_avg_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ez_avg_cp")); - VisMF::Write(warpx.getField(FieldType::Bfield_avg_cp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_avg_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bx_avg_cp")); - VisMF::Write(warpx.getField(FieldType::Bfield_avg_cp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_avg_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "By_avg_cp")); - VisMF::Write(warpx.getField(FieldType::Bfield_avg_cp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_avg_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bz_avg_cp")); } if (warpx.getis_synchronized()) { // Need to save j if synchronized because after restart we need j to evolve E by dt/2. - VisMF::Write(warpx.getField(FieldType::current_cp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::current_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jx_cp")); - VisMF::Write(warpx.getField(FieldType::current_cp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::current_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jy_cp")); - VisMF::Write(warpx.getField(FieldType::current_cp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::current_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jz_cp")); } } @@ -150,11 +154,13 @@ FlushFormatCheckpoint::WriteToFile ( if (warpx.DoPML()) { if (warpx.GetPML(lev)) { warpx.GetPML(lev)->CheckPoint( + warpx.m_fields, amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "pml")); } #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) if (warpx.GetPML_RZ(lev)) { warpx.GetPML_RZ(lev)->CheckPoint( + warpx.m_fields, amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "pml_rz")); } #endif diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp index f6c73d9fa7e..0f05496e4c0 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp @@ -1,6 +1,6 @@ #include "FlushFormatPlotfile.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ParticleDiag/ParticleDiag.H" #include "Particles/Filter/FilterFunctors.H" @@ -13,6 +13,8 @@ #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" +#include + #include #include #include @@ -48,7 +50,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -554,6 +556,8 @@ FlushFormatPlotfile::WriteAllRawFields( const bool 
plot_raw_fields, const int nlevels, const std::string& plotfilename, const bool plot_raw_fields_guards) const { + using ablastr::fields::Direction; + if (!plot_raw_fields) { return; } auto & warpx = WarpX::GetInstance(); for (int lev = 0; lev < nlevels; ++lev) @@ -564,84 +568,103 @@ FlushFormatPlotfile::WriteAllRawFields( // Auxiliary patch - WriteRawMF( warpx.getField(FieldType::Efield_aux, lev, 0), dm, raw_pltname, default_level_prefix, "Ex_aux", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Efield_aux, lev, 1), dm, raw_pltname, default_level_prefix, "Ey_aux", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Efield_aux, lev, 2), dm, raw_pltname, default_level_prefix, "Ez_aux", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Bfield_aux, lev, 0), dm, raw_pltname, default_level_prefix, "Bx_aux", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Bfield_aux, lev, 1), dm, raw_pltname, default_level_prefix, "By_aux", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Bfield_aux, lev, 2), dm, raw_pltname, default_level_prefix, "Bz_aux", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), dm, raw_pltname, default_level_prefix, "Ex_aux", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), dm, raw_pltname, default_level_prefix, "Ey_aux", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), dm, raw_pltname, default_level_prefix, "Ez_aux", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), dm, raw_pltname, default_level_prefix, "Bx_aux", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), dm, raw_pltname, default_level_prefix, "By_aux", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), dm, raw_pltname, default_level_prefix, "Bz_aux", lev, plot_raw_fields_guards); // fine patch - WriteRawMF( warpx.getField(FieldType::Efield_fp, lev, 0), dm, raw_pltname, default_level_prefix, "Ex_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Efield_fp, lev, 1), dm, raw_pltname, default_level_prefix, "Ey_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Efield_fp, lev, 2), dm, raw_pltname, default_level_prefix, "Ez_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::current_fp, lev, 0), dm, raw_pltname, default_level_prefix, "jx_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::current_fp, lev, 1), dm, raw_pltname, default_level_prefix, "jy_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::current_fp, lev, 2), dm, raw_pltname, default_level_prefix, "jz_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Bfield_fp, lev, 0), dm, raw_pltname, default_level_prefix, "Bx_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Bfield_fp, lev, 1), dm, raw_pltname, default_level_prefix, "By_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Bfield_fp, lev, 2), dm, raw_pltname, default_level_prefix, "Bz_fp", lev, plot_raw_fields_guards); - if (warpx.isFieldInitialized(FieldType::F_fp, lev)) + WriteRawMF( *warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), dm, raw_pltname, + default_level_prefix, 
"Ex_fp", lev, plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev), dm, raw_pltname, + default_level_prefix, "Ey_fp", lev, plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev), dm, raw_pltname, + default_level_prefix, "Ez_fp", lev, plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::current_fp,Direction{0}, lev), dm, raw_pltname, + default_level_prefix, "jx_fp", lev,plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::current_fp,Direction{1}, lev), dm, raw_pltname, + default_level_prefix, "jy_fp", lev,plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::current_fp,Direction{2}, lev), dm, raw_pltname, + default_level_prefix, "jz_fp", lev,plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::Bfield_fp, Direction{0}, lev), dm, raw_pltname, + default_level_prefix, "Bx_fp", lev, plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::Bfield_fp, Direction{1}, lev), dm, raw_pltname, + default_level_prefix, "By_fp", lev, plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::Bfield_fp, Direction{2}, lev), dm, raw_pltname, + default_level_prefix, "Bz_fp", lev, plot_raw_fields_guards ); + if (warpx.m_fields.has(FieldType::F_fp, lev)) { - WriteRawMF(warpx.getField(FieldType::F_fp, lev), dm, raw_pltname, default_level_prefix, "F_fp", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::F_fp, lev), dm, raw_pltname, + default_level_prefix, "F_fp", lev, plot_raw_fields_guards ); } - if (warpx.isFieldInitialized(FieldType::rho_fp, lev)) + if (warpx.m_fields.has(FieldType::rho_fp, lev)) { // rho_fp will have either ncomps or 2*ncomps (2 being the old and new). When 2, return the new so // there is time synchronization. 
- const int nstart = warpx.getField(FieldType::rho_fp, lev).nComp() - WarpX::ncomps; - const MultiFab rho_new(warpx.getField(FieldType::rho_fp, lev), amrex::make_alias, nstart, WarpX::ncomps); + const int nstart = warpx.m_fields.get(FieldType::rho_fp, lev)->nComp() - WarpX::ncomps; + const MultiFab rho_new(*warpx.m_fields.get(FieldType::rho_fp, lev), amrex::make_alias, nstart, WarpX::ncomps); WriteRawMF(rho_new, dm, raw_pltname, default_level_prefix, "rho_fp", lev, plot_raw_fields_guards); } - if (warpx.isFieldInitialized(FieldType::phi_fp, lev)) { - WriteRawMF(warpx.getField(FieldType::phi_fp, lev), dm, raw_pltname, default_level_prefix, "phi_fp", lev, plot_raw_fields_guards); + if (warpx.m_fields.has(FieldType::phi_fp, lev)) { + WriteRawMF( *warpx.m_fields.get(FieldType::phi_fp, lev), dm, raw_pltname, + default_level_prefix, "phi_fp", lev, plot_raw_fields_guards ); } // Averaged fields on fine patch if (WarpX::fft_do_time_averaging) { - WriteRawMF(warpx.getField(FieldType::Efield_avg_fp, lev, 0) , dm, raw_pltname, default_level_prefix, + WriteRawMF(*warpx.m_fields.get(FieldType::Efield_avg_fp, Direction{0}, lev) , dm, raw_pltname, default_level_prefix, "Ex_avg_fp", lev, plot_raw_fields_guards); - WriteRawMF(warpx.getField(FieldType::Efield_avg_fp, lev, 1) , dm, raw_pltname, default_level_prefix, + WriteRawMF(*warpx.m_fields.get(FieldType::Efield_avg_fp, Direction{1}, lev) , dm, raw_pltname, default_level_prefix, "Ey_avg_fp", lev, plot_raw_fields_guards); - WriteRawMF(warpx.getField(FieldType::Efield_avg_fp, lev, 2) , dm, raw_pltname, default_level_prefix, + WriteRawMF(*warpx.m_fields.get(FieldType::Efield_avg_fp, Direction{2}, lev) , dm, raw_pltname, default_level_prefix, "Ez_avg_fp", lev, plot_raw_fields_guards); - WriteRawMF(warpx.getField(FieldType::Bfield_avg_fp, lev, 0) , dm, raw_pltname, default_level_prefix, + WriteRawMF(*warpx.m_fields.get(FieldType::Bfield_avg_fp, Direction{0}, lev) , dm, raw_pltname, default_level_prefix, "Bx_avg_fp", lev, plot_raw_fields_guards); - WriteRawMF(warpx.getField(FieldType::Bfield_avg_fp, lev, 1) , dm, raw_pltname, default_level_prefix, + WriteRawMF(*warpx.m_fields.get(FieldType::Bfield_avg_fp, Direction{1}, lev) , dm, raw_pltname, default_level_prefix, "By_avg_fp", lev, plot_raw_fields_guards); - WriteRawMF(warpx.getField(FieldType::Bfield_avg_fp, lev, 2) , dm, raw_pltname, default_level_prefix, + WriteRawMF(*warpx.m_fields.get(FieldType::Bfield_avg_fp, Direction{2}, lev) , dm, raw_pltname, default_level_prefix, "Bz_avg_fp", lev, plot_raw_fields_guards); } // Coarse patch if (lev > 0) { WriteCoarseVector( "E", - warpx.getFieldPointer(FieldType::Efield_cp, lev, 0), warpx.getFieldPointer(FieldType::Efield_cp, lev, 1), warpx.getFieldPointer(FieldType::Efield_cp, lev, 2), - warpx.getFieldPointer(FieldType::Efield_fp, lev, 0), warpx.getFieldPointer(FieldType::Efield_fp, lev, 1), warpx.getFieldPointer(FieldType::Efield_fp, lev, 2), + warpx.m_fields.get(FieldType::Efield_cp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Efield_cp, Direction{1}, lev), + warpx.m_fields.get(FieldType::Efield_cp, Direction{2}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev), dm, raw_pltname, default_level_prefix, lev, plot_raw_fields_guards); WriteCoarseVector( "B", - warpx.getFieldPointer(FieldType::Bfield_cp, lev, 0), warpx.getFieldPointer(FieldType::Bfield_cp, lev, 1), warpx.getFieldPointer(FieldType::Bfield_cp, lev, 2), -
warpx.getFieldPointer(FieldType::Bfield_fp, lev, 0), warpx.getFieldPointer(FieldType::Bfield_fp, lev, 1), warpx.getFieldPointer(FieldType::Bfield_fp, lev, 2), + warpx.m_fields.get(FieldType::Bfield_cp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Bfield_cp, Direction{1}, lev), + warpx.m_fields.get(FieldType::Bfield_cp, Direction{2}, lev), + warpx.m_fields.get(FieldType::Bfield_fp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Bfield_fp, Direction{1}, lev), + warpx.m_fields.get(FieldType::Bfield_fp, Direction{2}, lev), dm, raw_pltname, default_level_prefix, lev, plot_raw_fields_guards); WriteCoarseVector( "j", - warpx.getFieldPointer(FieldType::current_cp, lev, 0), warpx.getFieldPointer(FieldType::current_cp, lev, 1), warpx.getFieldPointer(FieldType::current_cp, lev, 2), - warpx.getFieldPointer(FieldType::current_fp, lev, 0), warpx.getFieldPointer(FieldType::current_fp, lev, 1), warpx.getFieldPointer(FieldType::current_fp, lev, 2), + warpx.m_fields.get(FieldType::current_cp, Direction{0}, lev), warpx.m_fields.get(FieldType::current_cp, Direction{1}, lev), warpx.m_fields.get(FieldType::current_cp, Direction{2}, lev), + warpx.m_fields.get(FieldType::current_fp, Direction{0}, lev), warpx.m_fields.get(FieldType::current_fp, Direction{1}, lev), warpx.m_fields.get(FieldType::current_fp, Direction{2}, lev), dm, raw_pltname, default_level_prefix, lev, plot_raw_fields_guards); - if (warpx.isFieldInitialized(FieldType::F_fp, lev) && warpx.isFieldInitialized(FieldType::F_cp, lev)) + if (warpx.m_fields.has(FieldType::F_fp, lev) && warpx.m_fields.has(FieldType::F_cp, lev)) { - WriteCoarseScalar("F", warpx.getFieldPointer(FieldType::F_cp, lev), warpx.getFieldPointer(FieldType::F_fp, lev), + WriteCoarseScalar("F", warpx.m_fields.get(FieldType::F_cp, lev), warpx.m_fields.get(FieldType::F_fp, lev), dm, raw_pltname, default_level_prefix, lev, plot_raw_fields_guards, 0); } - if (warpx.isFieldInitialized(FieldType::rho_fp, lev) && warpx.isFieldInitialized(FieldType::rho_cp, lev)) + if (warpx.m_fields.has(FieldType::rho_fp, lev) && warpx.m_fields.has(FieldType::rho_cp, lev)) { // Use the component 1 of `rho_cp`, i.e. 
rho_new for time synchronization - WriteCoarseScalar("rho", warpx.getFieldPointer(FieldType::rho_cp, lev), warpx.getFieldPointer(FieldType::rho_fp, lev), + WriteCoarseScalar("rho", warpx.m_fields.get(FieldType::rho_cp, lev), warpx.m_fields.get(FieldType::rho_fp, lev), dm, raw_pltname, default_level_prefix, lev, plot_raw_fields_guards, 1); } } diff --git a/Source/Diagnostics/FullDiagnostics.cpp b/Source/Diagnostics/FullDiagnostics.cpp index bcf613f49b0..e5eefc82de5 100644 --- a/Source/Diagnostics/FullDiagnostics.cpp +++ b/Source/Diagnostics/FullDiagnostics.cpp @@ -12,7 +12,7 @@ #include "ComputeDiagFunctors/RhoFunctor.H" #include "Diagnostics/Diagnostics.H" #include "Diagnostics/ParticleDiag/ParticleDiag.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "FlushFormats/FlushFormat.H" #include "Particles/MultiParticleContainer.H" #include "Utils/Algorithms/IsIn.H" @@ -20,6 +20,8 @@ #include "Utils/WarpXAlgorithmSelection.H" #include "WarpX.H" +#include + #include #include #include @@ -43,7 +45,7 @@ #include using namespace amrex::literals; -using namespace warpx::fields; +using warpx::fields::FieldType; FullDiagnostics::FullDiagnostics (int i, const std::string& name): Diagnostics{i, name}, @@ -172,17 +174,18 @@ void FullDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) { #ifdef WARPX_DIM_RZ + using ablastr::fields::Direction; auto & warpx = WarpX::GetInstance(); - const int ncomp_multimodefab = warpx.getFieldPointer(FieldType::Efield_aux, 0, 0)->nComp(); + const int ncomp_multimodefab = warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp(); // Make sure all multifabs have the same number of components for (int dim=0; dim<3; dim++){ AMREX_ALWAYS_ASSERT( - warpx.getFieldPointer(FieldType::Efield_aux, lev, dim)->nComp() == ncomp_multimodefab ); + warpx.m_fields.get(FieldType::Efield_aux, Direction{dim}, lev)->nComp() == ncomp_multimodefab ); AMREX_ALWAYS_ASSERT( - warpx.getFieldPointer(FieldType::Bfield_aux, lev, dim)->nComp() == ncomp_multimodefab ); + warpx.m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev)->nComp() == ncomp_multimodefab ); AMREX_ALWAYS_ASSERT( - warpx.getFieldPointer(FieldType::current_fp, lev, dim)->nComp() == ncomp_multimodefab ); + warpx.m_fields.get(FieldType::current_fp, Direction{dim}, lev)->nComp() == ncomp_multimodefab ); } // Species index to loop over species that dump rho per species @@ -217,37 +220,37 @@ FullDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) const auto m_varname_fields_size = static_cast(m_varnames_fields.size()); for (int comp=0; comp(warpx.getFieldPointer(FieldType::Efield_aux, lev, 0), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("Er"), ncomp); } } else if ( m_varnames_fields[comp] == "Et" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 1), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("Et"), ncomp); } } else if ( m_varnames_fields[comp] == "Ez" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 2), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), 
lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("Ez"), ncomp); } } else if ( m_varnames_fields[comp] == "Br" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 0), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("Br"), ncomp); } } else if ( m_varnames_fields[comp] == "Bt" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 1), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("Bt"), ncomp); } } else if ( m_varnames_fields[comp] == "Bz" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 2), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("Bz"), ncomp); @@ -314,19 +317,19 @@ FullDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) } i_T_species++; } else if ( m_varnames_fields[comp] == "F" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::F_fp, lev), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::F_fp, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("F"), ncomp); } } else if ( m_varnames_fields[comp] == "G" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::G_fp, lev), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique( warpx.m_fields.get(FieldType::G_fp, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("G"), ncomp); } } else if ( m_varnames_fields[comp] == "phi" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::phi_fp, lev), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::phi_fp, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("phi"), ncomp); @@ -343,14 +346,14 @@ FullDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) } } else if ( m_varnames_fields[comp] == "divB" ){ m_all_field_functors[lev][comp] = std::make_unique( - warpx.getFieldPointerArray(FieldType::Bfield_aux, lev), + warpx.m_fields.get_alldirs(FieldType::Bfield_aux, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("divB"), ncomp); } } else if ( m_varnames_fields[comp] == "divE" ){ m_all_field_functors[lev][comp] = std::make_unique( - warpx.getFieldPointerArray(FieldType::Efield_aux, lev), + warpx.m_fields.get_alldirs(FieldType::Efield_aux, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("divE"), ncomp); @@ -393,19 +396,20 @@ void FullDiagnostics::AddRZModesToDiags (int lev) { #ifdef WARPX_DIM_RZ + using ablastr::fields::Direction; if (!m_dump_rz_modes) { return; } auto & warpx = WarpX::GetInstance(); - const int ncomp_multimodefab = warpx.getFieldPointer(FieldType::Efield_aux, 0, 0)->nComp(); + const 
int ncomp_multimodefab = warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp(); // Make sure all multifabs have the same number of components for (int dim=0; dim<3; dim++){ AMREX_ALWAYS_ASSERT( - warpx.getFieldPointer(FieldType::Efield_aux, lev, dim)->nComp() == ncomp_multimodefab ); + warpx.m_fields.get(FieldType::Efield_aux, Direction{dim}, lev)->nComp() == ncomp_multimodefab ); AMREX_ALWAYS_ASSERT( - warpx.getFieldPointer(FieldType::Bfield_aux, lev, dim)->nComp() == ncomp_multimodefab ); + warpx.m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev)->nComp() == ncomp_multimodefab ); AMREX_ALWAYS_ASSERT( - warpx.getFieldPointer(FieldType::current_fp, lev, dim)->nComp() == ncomp_multimodefab ); + warpx.m_fields.get(FieldType::current_fp, Direction{dim}, lev)->nComp() == ncomp_multimodefab ); } // Check if divE is requested @@ -440,19 +444,19 @@ FullDiagnostics::AddRZModesToDiags (int lev) for (int dim=0; dim<3; dim++){ // 3 components, r theta z m_all_field_functors[lev].push_back(std::make_unique( - warpx.getFieldPointer(FieldType::Efield_aux, lev, dim), lev, + warpx.m_fields.get(FieldType::Efield_aux, Direction{dim}, lev), lev, m_crse_ratio, false, ncomp_multimodefab)); AddRZModesToOutputNames(std::string("E") + coord[dim], - warpx.getFieldPointer(FieldType::Efield_aux, 0, 0)->nComp()); + warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp()); } // B for (int dim=0; dim<3; dim++){ // 3 components, r theta z m_all_field_functors[lev].push_back(std::make_unique( - warpx.getFieldPointer(FieldType::Bfield_aux, lev, dim), lev, + warpx.m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev), lev, m_crse_ratio, false, ncomp_multimodefab)); AddRZModesToOutputNames(std::string("B") + coord[dim], - warpx.getFieldPointer(FieldType::Bfield_aux, 0, 0)->nComp()); + warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, 0)->nComp()); } // j for (int dim=0; dim<3; dim++){ @@ -461,12 +465,12 @@ FullDiagnostics::AddRZModesToDiags (int lev) dim, lev, m_crse_ratio, false, deposit_current, ncomp_multimodefab)); deposit_current = false; AddRZModesToOutputNames(std::string("J") + coord[dim], - warpx.getFieldPointer(FieldType::current_fp, 0, 0)->nComp()); + warpx.m_fields.get(FieldType::current_fp,Direction{0},0)->nComp()); } // divE if (divE_requested) { m_all_field_functors[lev].push_back(std::make_unique( - warpx.getFieldPointerArray(FieldType::Efield_aux, lev), + warpx.m_fields.get_alldirs(FieldType::Efield_aux, lev), lev, m_crse_ratio, false, ncomp_multimodefab)); AddRZModesToOutputNames(std::string("divE"), ncomp_multimodefab); } @@ -658,20 +662,22 @@ FullDiagnostics::InitializeFieldFunctors (int lev) // diagnostic output bool deposit_current = !m_solver_deposits_current; + using ablastr::fields::Direction; + m_all_field_functors[lev].resize(ntot); // Fill vector of functors for all components except individual cylindrical modes. 
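Every hunk in this file repeats one mechanical translation, so it is worth spelling out once: the old accessors took the component as a bare int (warpx.getField(type, lev, dir) returning a reference, warpx.getFieldPointer(...) a pointer), while the registry keys vector components with a strongly typed Direction and always returns pointers; m_fields.has(...) stands in for isFieldInitialized(...) and m_fields.get_alldirs(...) for getFieldPointerArray(...). A condensed sketch of the mapping, using only calls that appear in the hunks themselves:

    using warpx::fields::FieldType;
    using ablastr::fields::Direction;

    auto& warpx = WarpX::GetInstance();
    const int lev = 0;

    // Vector fields: one component per Direction{0,1,2}.
    amrex::MultiFab* Ex = warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev);

    // Scalar fields: no Direction argument.
    amrex::MultiFab* phi = warpx.m_fields.get(FieldType::phi_fp, lev);

    // Existence check, replacing warpx.isFieldInitialized(...).
    if (warpx.m_fields.has(FieldType::F_fp, lev)) { /* field is registered */ }

    // All three components at once, replacing warpx.getFieldPointerArray(...).
    auto E_all = warpx.m_fields.get_alldirs(FieldType::Efield_aux, lev);
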
for (int comp=0; comp(warpx.getFieldPointer(FieldType::Efield_aux, lev, 2), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "Bz" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 2), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "jz" ){ m_all_field_functors[lev][comp] = std::make_unique(2, lev, m_crse_ratio, true, deposit_current); deposit_current = false; } else if ( m_varnames[comp] == "jz_displacement" ) { m_all_field_functors[lev][comp] = std::make_unique(2, lev, m_crse_ratio, true); } else if ( m_varnames[comp] == "Az" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::vector_potential_fp, lev, 2), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{2}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "rho" ){ // Initialize rho functor to dump total rho m_all_field_functors[lev][comp] = std::make_unique(lev, m_crse_ratio, true); @@ -684,31 +690,31 @@ FullDiagnostics::InitializeFieldFunctors (int lev) m_all_field_functors[lev][comp] = std::make_unique(lev, m_crse_ratio, m_T_per_species_index[i_T_species]); i_T_species++; } else if ( m_varnames[comp] == "F" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::F_fp, lev), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::F_fp, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "G" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::G_fp, lev), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::G_fp, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "phi" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::phi_fp, lev), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::phi_fp, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "part_per_cell" ){ m_all_field_functors[lev][comp] = std::make_unique(nullptr, lev, m_crse_ratio); } else if ( m_varnames[comp] == "part_per_grid" ){ m_all_field_functors[lev][comp] = std::make_unique(nullptr, lev, m_crse_ratio); } else if ( m_varnames[comp] == "divB" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointerArray(FieldType::Bfield_aux, lev), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get_alldirs(FieldType::Bfield_aux, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "divE" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointerArray(FieldType::Efield_aux, lev), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get_alldirs(FieldType::Efield_aux, lev), lev, m_crse_ratio); } else { #ifdef WARPX_DIM_RZ if ( m_varnames[comp] == "Er" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 0), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == 
"Et" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 1), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "Br" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 0), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "Bt" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 1), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "jr" ){ m_all_field_functors[lev][comp] = std::make_unique(0, lev, m_crse_ratio, true, deposit_current); deposit_current = false; @@ -720,22 +726,22 @@ FullDiagnostics::InitializeFieldFunctors (int lev) } else if (m_varnames[comp] == "jt_displacement" ){ m_all_field_functors[lev][comp] = std::make_unique(1, lev, m_crse_ratio, true); } else if ( m_varnames[comp] == "Ar" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::vector_potential_fp, lev, 0), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "At" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::vector_potential_fp, lev, 1), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{1}, lev), lev, m_crse_ratio); } else { WARPX_ABORT_WITH_MESSAGE(m_varnames[comp] + " is not a known field output type for RZ geometry"); } #else // Valid transverse fields in Cartesian coordinates if ( m_varnames[comp] == "Ex" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 0), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "Ey" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 1), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "Bx" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 0), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "By" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 1), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "jx" ){ m_all_field_functors[lev][comp] = std::make_unique(0, lev, m_crse_ratio, true, deposit_current); deposit_current = false; @@ -747,9 +753,9 @@ FullDiagnostics::InitializeFieldFunctors (int lev) } else if ( m_varnames[comp] == "jy_displacement" ){ 
m_all_field_functors[lev][comp] = std::make_unique(1, lev, m_crse_ratio); } else if ( m_varnames[comp] == "Ax" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::vector_potential_fp, lev, 0), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "Ay" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::vector_potential_fp, lev, 1), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{1}, lev), lev, m_crse_ratio); } else { std::cout << "Error on component " << m_varnames[comp] << std::endl; WARPX_ABORT_WITH_MESSAGE(m_varnames[comp] + " is not a known field output type for this geometry"); diff --git a/Source/Diagnostics/ParticleIO.cpp b/Source/Diagnostics/ParticleIO.cpp index bf67b51bbeb..e94039ec079 100644 --- a/Source/Diagnostics/ParticleIO.cpp +++ b/Source/Diagnostics/ParticleIO.cpp @@ -7,7 +7,7 @@ * License: BSD-3-Clause-LBNL */ -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/ParticleIO.H" #include "Particles/MultiParticleContainer.H" #include "Particles/PhysicalParticleContainer.H" @@ -43,7 +43,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; void LaserParticleContainer::ReadHeader (std::istream& is) @@ -268,7 +268,7 @@ storePhiOnParticles ( PinnedMemoryParticleContainer& tmp, const amrex::Geometry& geom = warpx.Geom(lev); auto plo = geom.ProbLoArray(); auto dxi = geom.InvCellSizeArray(); - amrex::MultiFab const& phi = warpx.getField( FieldType::phi_fp, lev, 0 ); + amrex::MultiFab const& phi = *warpx.m_fields.get(FieldType::phi_fp, lev); for (PinnedParIter pti(tmp, lev); pti.isValid(); ++pti) { diff --git a/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp b/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp index 975fed6d74c..050b18d3b9d 100644 --- a/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp +++ b/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp @@ -9,7 +9,7 @@ #include "Diagnostics/ReducedDiags/ReducedDiags.H" #include "EmbeddedBoundary/Enabled.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "Utils/Parser/ParserUtils.H" @@ -29,7 +29,6 @@ #include using namespace amrex; -using namespace warpx::fields; // constructor @@ -97,6 +96,8 @@ void ChargeOnEB::ComputeDiags (const int step) throw std::runtime_error("ChargeOnEB::ComputeDiags only works when EBs are enabled at runtime"); } #if ((defined WARPX_DIM_3D) && (defined AMREX_USE_EB)) + using ablastr::fields::Direction; + // get a reference to WarpX instance auto & warpx = WarpX::GetInstance(); @@ -104,9 +105,10 @@ void ChargeOnEB::ComputeDiags (const int step) int const lev = 0; // get MultiFab data at lev - const amrex::MultiFab & Ex = warpx.getField(FieldType::Efield_fp, lev,0); - const amrex::MultiFab & Ey = warpx.getField(FieldType::Efield_fp, lev,1); - const amrex::MultiFab & Ez = warpx.getField(FieldType::Efield_fp, lev,2); + using warpx::fields::FieldType; + const amrex::MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev); + const amrex::MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev); + const amrex::MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev); // get EB structures amrex::EBFArrayBoxFactory 
const& eb_box_factory = warpx.fieldEBFactory(lev); diff --git a/Source/Diagnostics/ReducedDiags/ColliderRelevant.cpp b/Source/Diagnostics/ReducedDiags/ColliderRelevant.cpp index fb683e25319..dfd64fe5af9 100644 --- a/Source/Diagnostics/ReducedDiags/ColliderRelevant.cpp +++ b/Source/Diagnostics/ReducedDiags/ColliderRelevant.cpp @@ -8,7 +8,7 @@ #include "ColliderRelevant.H" #include "Diagnostics/ReducedDiags/ReducedDiags.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #if (defined WARPX_QED) # include "Particles/ElementaryProcess/QEDInternals/QedChiFunctions.H" #endif @@ -59,7 +59,7 @@ #include using namespace amrex; -using namespace warpx::fields; + ColliderRelevant::ColliderRelevant (const std::string& rd_name) : ReducedDiags{rd_name} @@ -429,6 +429,8 @@ void ColliderRelevant::ComputeDiags (int step) amrex::Real chimax_f = 0.0_rt; amrex::Real chiave_f = 0.0_rt; + using ablastr::fields::Direction; + if (myspc.DoQED()) { // define variables in preparation for field gatheeduce_data.value()ring @@ -441,13 +443,14 @@ void ColliderRelevant::ComputeDiags (int step) const int lev = 0; // define variables in preparation for field gathering + using warpx::fields::FieldType; const amrex::XDim3 dinv = WarpX::InvCellSize(std::max(lev, 0)); - const amrex::MultiFab & Ex = warpx.getField(FieldType::Efield_aux, lev,0); - const amrex::MultiFab & Ey = warpx.getField(FieldType::Efield_aux, lev,1); - const amrex::MultiFab & Ez = warpx.getField(FieldType::Efield_aux, lev,2); - const amrex::MultiFab & Bx = warpx.getField(FieldType::Bfield_aux, lev,0); - const amrex::MultiFab & By = warpx.getField(FieldType::Bfield_aux, lev,1); - const amrex::MultiFab & Bz = warpx.getField(FieldType::Bfield_aux, lev,2); + const amrex::MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const amrex::MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const amrex::MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const amrex::MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const amrex::MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const amrex::MultiFab & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); // declare reduce_op ReduceOps reduce_op; diff --git a/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp b/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp index fbfdaf11017..1a984368b4e 100644 --- a/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp @@ -7,12 +7,14 @@ #include "FieldEnergy.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Diagnostics/ReducedDiags/ReducedDiags.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "WarpX.H" +#include + #include #include #include @@ -29,7 +31,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; // constructor FieldEnergy::FieldEnergy (const std::string& rd_name) @@ -87,16 +89,18 @@ void FieldEnergy::ComputeDiags (int step) // get number of level const auto nLevel = warpx.finestLevel() + 1; + using ablastr::fields::Direction; + // loop over refinement levels for (int lev = 0; lev < nLevel; ++lev) { // get MultiFab data at lev - const MultiFab & Ex = warpx.getField(FieldType::Efield_aux, lev,0); - const MultiFab & Ey = warpx.getField(FieldType::Efield_aux, lev,1); - const MultiFab & Ez = warpx.getField(FieldType::Efield_aux, lev,2); - const MultiFab & Bx = 
warpx.getField(FieldType::Bfield_aux, lev,0); - const MultiFab & By = warpx.getField(FieldType::Bfield_aux, lev,1); - const MultiFab & Bz = warpx.getField(FieldType::Bfield_aux, lev,2); + const MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const MultiFab & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); // get cell volume const std::array &dx = WarpX::CellSize(lev); diff --git a/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp b/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp index 4fbbd1ec82c..8c7eb6b4dec 100644 --- a/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp @@ -7,10 +7,11 @@ #include "FieldMaximum.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Utils/TextMsg.H" #include "WarpX.H" +#include #include #include @@ -39,7 +40,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; // constructor FieldMaximum::FieldMaximum (const std::string& rd_name) @@ -112,16 +113,18 @@ void FieldMaximum::ComputeDiags (int step) // get number of levels const auto nLevel = warpx.finestLevel() + 1; + using ablastr::fields::Direction; + // loop over refinement levels for (int lev = 0; lev < nLevel; ++lev) { // get MultiFab data at lev - const MultiFab & Ex = warpx.getField(FieldType::Efield_aux, lev,0); - const MultiFab & Ey = warpx.getField(FieldType::Efield_aux, lev,1); - const MultiFab & Ez = warpx.getField(FieldType::Efield_aux, lev,2); - const MultiFab & Bx = warpx.getField(FieldType::Bfield_aux, lev,0); - const MultiFab & By = warpx.getField(FieldType::Bfield_aux, lev,1); - const MultiFab & Bz = warpx.getField(FieldType::Bfield_aux, lev,2); + const MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const MultiFab & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); constexpr int noutputs = 8; // max of Ex,Ey,Ez,|E|,Bx,By,Bz and |B| constexpr int index_Ex = 0; diff --git a/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp b/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp index 72bece9265b..764e9874c39 100644 --- a/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp @@ -7,11 +7,12 @@ #include "FieldMomentum.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "WarpX.H" +#include #include #include @@ -38,7 +39,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; FieldMomentum::FieldMomentum (const std::string& rd_name) : ReducedDiags{rd_name} @@ -104,16 +105,18 @@ void FieldMomentum::ComputeDiags (int step) // Get number of refinement levels const auto nLevel = warpx.finestLevel() + 1; + using ablastr::fields::Direction; + // 
Loop over refinement levels for (int lev = 0; lev < nLevel; ++lev) { // Get MultiFab data at given refinement level - const amrex::MultiFab & Ex = warpx.getField(FieldType::Efield_aux, lev, 0); - const amrex::MultiFab & Ey = warpx.getField(FieldType::Efield_aux, lev, 1); - const amrex::MultiFab & Ez = warpx.getField(FieldType::Efield_aux, lev, 2); - const amrex::MultiFab & Bx = warpx.getField(FieldType::Bfield_aux, lev, 0); - const amrex::MultiFab & By = warpx.getField(FieldType::Bfield_aux, lev, 1); - const amrex::MultiFab & Bz = warpx.getField(FieldType::Bfield_aux, lev, 2); + const amrex::MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const amrex::MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const amrex::MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const amrex::MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const amrex::MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const amrex::MultiFab & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); // Cell-centered index type const amrex::GpuArray cc{0,0,0}; diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp index f498cea7566..923ae727d08 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp @@ -7,7 +7,7 @@ #include "FieldProbe.H" #include "FieldProbeParticleContainer.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/Gather/FieldGather.H" #include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/Pusher/UpdatePosition.H" @@ -17,6 +17,7 @@ #include "Utils/WarpXConst.H" #include "WarpX.H" +#include #include #include @@ -45,7 +46,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; // constructor @@ -381,6 +382,8 @@ void FieldProbe::ComputeDiags (int step) // get number of mesh-refinement levels const auto nLevel = warpx.finestLevel() + 1; + using ablastr::fields::Direction; + // loop over refinement levels for (int lev = 0; lev < nLevel; ++lev) { @@ -398,12 +401,12 @@ void FieldProbe::ComputeDiags (int step) } // get MultiFab data at lev - const amrex::MultiFab &Ex = warpx.getField(FieldType::Efield_aux, lev, 0); - const amrex::MultiFab &Ey = warpx.getField(FieldType::Efield_aux, lev, 1); - const amrex::MultiFab &Ez = warpx.getField(FieldType::Efield_aux, lev, 2); - const amrex::MultiFab &Bx = warpx.getField(FieldType::Bfield_aux, lev, 0); - const amrex::MultiFab &By = warpx.getField(FieldType::Bfield_aux, lev, 1); - const amrex::MultiFab &Bz = warpx.getField(FieldType::Bfield_aux, lev, 2); + const amrex::MultiFab &Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const amrex::MultiFab &Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const amrex::MultiFab &Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const amrex::MultiFab &Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const amrex::MultiFab &By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const amrex::MultiFab &Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); /* * Prepare interpolation of field components to probe_position diff --git a/Source/Diagnostics/ReducedDiags/FieldReduction.H b/Source/Diagnostics/ReducedDiags/FieldReduction.H index 9574caa3d5d..d2c6dc6f6da 100644 --- 
a/Source/Diagnostics/ReducedDiags/FieldReduction.H +++ b/Source/Diagnostics/ReducedDiags/FieldReduction.H @@ -9,7 +9,7 @@ #define WARPX_DIAGNOSTICS_REDUCEDDIAGS_FIELDREDUCTION_H_ #include "ReducedDiags.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "WarpX.H" #include @@ -87,7 +87,9 @@ public: template void ComputeFieldReduction() { + using ablastr::fields::Direction; using namespace amrex::literals; + using warpx::fields::FieldType; // get a reference to WarpX instance auto & warpx = WarpX::GetInstance(); @@ -99,15 +101,15 @@ public: const auto dx = geom.CellSizeArray(); // get MultiFab data - const amrex::MultiFab & Ex = warpx.getField(warpx::fields::FieldType::Efield_aux, lev,0); - const amrex::MultiFab & Ey = warpx.getField(warpx::fields::FieldType::Efield_aux, lev,1); - const amrex::MultiFab & Ez = warpx.getField(warpx::fields::FieldType::Efield_aux, lev,2); - const amrex::MultiFab & Bx = warpx.getField(warpx::fields::FieldType::Bfield_aux, lev,0); - const amrex::MultiFab & By = warpx.getField(warpx::fields::FieldType::Bfield_aux, lev,1); - const amrex::MultiFab & Bz = warpx.getField(warpx::fields::FieldType::Bfield_aux, lev,2); - const amrex::MultiFab & jx = warpx.getField(warpx::fields::FieldType::current_fp, lev,0); - const amrex::MultiFab & jy = warpx.getField(warpx::fields::FieldType::current_fp, lev,1); - const amrex::MultiFab & jz = warpx.getField(warpx::fields::FieldType::current_fp, lev,2); + const amrex::MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const amrex::MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const amrex::MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const amrex::MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const amrex::MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const amrex::MultiFab & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); + const amrex::MultiFab & jx = *warpx.m_fields.get(FieldType::current_fp, Direction{0},lev); + const amrex::MultiFab & jy = *warpx.m_fields.get(FieldType::current_fp, Direction{1},lev); + const amrex::MultiFab & jz = *warpx.m_fields.get(FieldType::current_fp, Direction{2},lev); // General preparation of interpolation and reduction operations diff --git a/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.cpp b/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.cpp index a11a8d35853..c496300c54e 100644 --- a/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.cpp +++ b/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.cpp @@ -7,12 +7,14 @@ #include "LoadBalanceCosts.H" #include "Diagnostics/ReducedDiags/ReducedDiags.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/MultiParticleContainer.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "WarpX.H" +#include + #include #include #include @@ -36,7 +38,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -123,11 +125,13 @@ void LoadBalanceCosts::ComputeDiags (int step) // shift index for m_data int shift_m_data = 0; + using ablastr::fields::Direction; + // save data for (int lev = 0; lev < nLevels; ++lev) { const amrex::DistributionMapping& dm = warpx.DistributionMap(lev); - const MultiFab & Ex = warpx.getField(FieldType::Efield_aux, lev,0); + const MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); for (MFIter mfi(Ex, false); 
mfi.isValid(); ++mfi) { const Box& tbx = mfi.tilebox(); diff --git a/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp b/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp index 842dd3c9efd..c82b060b67c 100644 --- a/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp +++ b/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp @@ -11,7 +11,7 @@ #if (defined WARPX_QED) # include "Particles/ElementaryProcess/QEDInternals/QedChiFunctions.H" #endif -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/Gather/FieldGather.H" #include "Particles/Gather/GetExternalFields.H" #include "Particles/MultiParticleContainer.H" @@ -21,6 +21,8 @@ #include "Utils/WarpXConst.H" #include "WarpX.H" +#include + #include #include #include @@ -52,7 +54,7 @@ #include using namespace amrex::literals; -using namespace warpx::fields; +using warpx::fields::FieldType; // constructor ParticleExtrema::ParticleExtrema (const std::string& rd_name) @@ -260,18 +262,20 @@ void ParticleExtrema::ComputeDiags (int step) const bool galerkin_interpolation = WarpX::galerkin_interpolation; const amrex::IntVect ngEB = warpx.getngEB(); + using ablastr::fields::Direction; + // loop over refinement levels for (int lev = 0; lev <= level_number; ++lev) { // define variables in preparation for field gathering const amrex::XDim3 dinv = WarpX::InvCellSize(std::max(lev, 0)); - const amrex::MultiFab & Ex = warpx.getField(FieldType::Efield_aux, lev,0); - const amrex::MultiFab & Ey = warpx.getField(FieldType::Efield_aux, lev,1); - const amrex::MultiFab & Ez = warpx.getField(FieldType::Efield_aux, lev,2); - const amrex::MultiFab & Bx = warpx.getField(FieldType::Bfield_aux, lev,0); - const amrex::MultiFab & By = warpx.getField(FieldType::Bfield_aux, lev,1); - const amrex::MultiFab & Bz = warpx.getField(FieldType::Bfield_aux, lev,2); + const amrex::MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const amrex::MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const amrex::MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const amrex::MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const amrex::MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const amrex::MultiFab & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); // declare reduce_op amrex::ReduceOps reduce_op; diff --git a/Source/Diagnostics/SliceDiagnostic.cpp b/Source/Diagnostics/SliceDiagnostic.cpp index 97af967f2be..bcb6070abdf 100644 --- a/Source/Diagnostics/SliceDiagnostic.cpp +++ b/Source/Diagnostics/SliceDiagnostic.cpp @@ -7,10 +7,11 @@ */ #include "SliceDiagnostic.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Utils/TextMsg.H" #include "WarpX.H" +#include #include #include @@ -41,7 +42,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; /* \brief * The function creates the slice for diagnostics based on the user input. 
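In the CreateSlice hunks that follow, the coarsening routine is chosen by staggering: the slice's index type is compared against that of a representative registered component, so data matching E's edge staggering goes through amrex_avgdown_edges and data matching B's face staggering through amrex_avgdown_faces. A sketch of one branch of that dispatch, assuming SliceType already holds the staggering of the MultiFab being sliced:

    using warpx::fields::FieldType;
    using ablastr::fields::Direction;

    auto& warpx = WarpX::GetInstance();
    // Does the slice carry Ex's (edge) staggering? Then coarsen along
    // direction 0 with the edge-aware average-down, as the hunk below does.
    if (SliceType == warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->ixType().toIntVect()) {
        // amrex::amrex_avgdown_edges(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp,
        //                            ncomp, slice_cr_ratio, 0);
    }
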
@@ -175,6 +176,10 @@ CreateSlice( const MultiFab& mf, const Vector &dom_geom, const MultiFab& mfSrc = *smf; MultiFab& mfDst = *cs_mf; + auto & warpx = WarpX::GetInstance(); + + using ablastr::fields::Direction; + MFIter mfi_dst(mfDst); for (MFIter mfi(mfSrc); mfi.isValid(); ++mfi) { @@ -196,27 +201,27 @@ CreateSlice( const MultiFab& mf, const Vector &dom_geom, amrex::amrex_avgdown_nodes(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio); } - if( SliceType == WarpX::GetInstance().getField(FieldType::Efield_aux, 0,0).ixType().toIntVect() ) { + if( SliceType == warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->ixType().toIntVect() ) { amrex::amrex_avgdown_edges(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio, 0); } - if( SliceType == WarpX::GetInstance().getField(FieldType::Efield_aux, 0,1).ixType().toIntVect() ) { + if( SliceType == warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, 0)->ixType().toIntVect() ) { amrex::amrex_avgdown_edges(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio, 1); } - if( SliceType == WarpX::GetInstance().getField(FieldType::Efield_aux, 0,2).ixType().toIntVect() ) { + if( SliceType == warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, 0)->ixType().toIntVect() ) { amrex::amrex_avgdown_edges(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio, 2); } - if( SliceType == WarpX::GetInstance().getField(FieldType::Bfield_aux, 0,0).ixType().toIntVect() ) { + if( SliceType == warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, 0)->ixType().toIntVect() ) { amrex::amrex_avgdown_faces(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio, 0); } - if( SliceType == WarpX::GetInstance().getField(FieldType::Bfield_aux, 0,1).ixType().toIntVect() ) { + if( SliceType == warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, 0)->ixType().toIntVect() ) { amrex::amrex_avgdown_faces(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio, 1); } - if( SliceType == WarpX::GetInstance().getField(FieldType::Bfield_aux, 0,2).ixType().toIntVect() ) { + if( SliceType == warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, 0)->ixType().toIntVect() ) { amrex::amrex_avgdown_faces(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio, 2); } diff --git a/Source/Diagnostics/WarpXIO.cpp b/Source/Diagnostics/WarpXIO.cpp index a3b902386f6..43415daf151 100644 --- a/Source/Diagnostics/WarpXIO.cpp +++ b/Source/Diagnostics/WarpXIO.cpp @@ -12,6 +12,7 @@ # include "BoundaryConditions/PML_RZ.H" #endif #include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" #include "FieldIO.H" #include "Particles/MultiParticleContainer.H" #include "Utils/TextMsg.H" @@ -88,6 +89,9 @@ WarpX::GetRestartDMap (const std::string& chkfile, const amrex::BoxArray& ba, in void WarpX::InitFromCheckpoint () { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + WARPX_PROFILE("WarpX::InitFromCheckpoint()"); amrex::Print()<< Utils::TextMsg::Info( @@ -279,101 +283,101 @@ WarpX::InitFromCheckpoint () for (int lev = 0; lev < nlevs; ++lev) { for (int i = 0; i < 3; ++i) { - current_fp[lev][i]->setVal(0.0); - Efield_fp[lev][i]->setVal(0.0); - Bfield_fp[lev][i]->setVal(0.0); + m_fields.get(FieldType::current_fp, Direction{i}, lev)->setVal(0.0); + m_fields.get(FieldType::Efield_fp, Direction{i}, lev)->setVal(0.0); + m_fields.get(FieldType::Bfield_fp, Direction{i}, lev)->setVal(0.0); } if (lev > 0) { for (int i = 0; i < 3; ++i) { - Efield_aux[lev][i]->setVal(0.0); - Bfield_aux[lev][i]->setVal(0.0); + 
m_fields.get(FieldType::Efield_aux, Direction{i}, lev)->setVal(0.0); + m_fields.get(FieldType::Bfield_aux, Direction{i}, lev)->setVal(0.0); - current_cp[lev][i]->setVal(0.0); - Efield_cp[lev][i]->setVal(0.0); - Bfield_cp[lev][i]->setVal(0.0); + m_fields.get(FieldType::current_cp, Direction{i}, lev)->setVal(0.0); + m_fields.get(FieldType::Efield_cp, Direction{i}, lev)->setVal(0.0); + m_fields.get(FieldType::Bfield_cp, Direction{i}, lev)->setVal(0.0); } } - VisMF::Read(*Efield_fp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Efield_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ex_fp")); - VisMF::Read(*Efield_fp[lev][1], + VisMF::Read(*m_fields.get(FieldType::Efield_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ey_fp")); - VisMF::Read(*Efield_fp[lev][2], + VisMF::Read(*m_fields.get(FieldType::Efield_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ez_fp")); - VisMF::Read(*Bfield_fp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Bfield_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bx_fp")); - VisMF::Read(*Bfield_fp[lev][1], + VisMF::Read(*m_fields.get(FieldType::Bfield_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "By_fp")); - VisMF::Read(*Bfield_fp[lev][2], + VisMF::Read(*m_fields.get(FieldType::Bfield_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bz_fp")); if (WarpX::fft_do_time_averaging) { - VisMF::Read(*Efield_avg_fp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Efield_avg_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ex_avg_fp")); - VisMF::Read(*Efield_avg_fp[lev][1], + VisMF::Read(*m_fields.get(FieldType::Efield_avg_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ey_avg_fp")); - VisMF::Read(*Efield_avg_fp[lev][2], + VisMF::Read(*m_fields.get(FieldType::Efield_avg_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ez_avg_fp")); - VisMF::Read(*Bfield_avg_fp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Bfield_avg_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bx_avg_fp")); - VisMF::Read(*Bfield_avg_fp[lev][1], + VisMF::Read(*m_fields.get(FieldType::Bfield_avg_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "By_avg_fp")); - VisMF::Read(*Bfield_avg_fp[lev][2], + VisMF::Read(*m_fields.get(FieldType::Bfield_avg_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bz_avg_fp")); } if (is_synchronized) { - VisMF::Read(*current_fp[lev][0], + VisMF::Read(*m_fields.get(FieldType::current_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jx_fp")); - VisMF::Read(*current_fp[lev][1], + VisMF::Read(*m_fields.get(FieldType::current_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jy_fp")); - VisMF::Read(*current_fp[lev][2], + VisMF::Read(*m_fields.get(FieldType::current_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jz_fp")); } if (lev > 0) { - VisMF::Read(*Efield_cp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Efield_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ex_cp")); - VisMF::Read(*Efield_cp[lev][1], + 
VisMF::Read(*m_fields.get(FieldType::Efield_cp, Direction{1}, lev),
                        amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ey_cp"));
-            VisMF::Read(*Efield_cp[lev][2],
+            VisMF::Read(*m_fields.get(FieldType::Efield_cp, Direction{2}, lev),
                         amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ez_cp"));
-            VisMF::Read(*Bfield_cp[lev][0],
+            VisMF::Read(*m_fields.get(FieldType::Bfield_cp, Direction{0}, lev),
                         amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bx_cp"));
-            VisMF::Read(*Bfield_cp[lev][1],
+            VisMF::Read(*m_fields.get(FieldType::Bfield_cp, Direction{1}, lev),
                         amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "By_cp"));
-            VisMF::Read(*Bfield_cp[lev][2],
+            VisMF::Read(*m_fields.get(FieldType::Bfield_cp, Direction{2}, lev),
                         amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bz_cp"));

             if (WarpX::fft_do_time_averaging)
             {
-                VisMF::Read(*Efield_avg_cp[lev][0],
+                VisMF::Read(*m_fields.get(FieldType::Efield_avg_cp, Direction{0}, lev),
                             amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ex_avg_cp"));
-                VisMF::Read(*Efield_avg_cp[lev][1],
+                VisMF::Read(*m_fields.get(FieldType::Efield_avg_cp, Direction{1}, lev),
                             amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ey_avg_cp"));
-                VisMF::Read(*Efield_avg_cp[lev][2],
+                VisMF::Read(*m_fields.get(FieldType::Efield_avg_cp, Direction{2}, lev),
                             amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ez_avg_cp"));
-                VisMF::Read(*Bfield_avg_cp[lev][0],
+                VisMF::Read(*m_fields.get(FieldType::Bfield_avg_cp, Direction{0}, lev),
                             amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bx_avg_cp"));
-                VisMF::Read(*Bfield_avg_cp[lev][1],
+                VisMF::Read(*m_fields.get(FieldType::Bfield_avg_cp, Direction{1}, lev),
                             amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "By_avg_cp"));
-                VisMF::Read(*Bfield_avg_cp[lev][2],
+                VisMF::Read(*m_fields.get(FieldType::Bfield_avg_cp, Direction{2}, lev),
                             amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bz_avg_cp"));
             }

             if (is_synchronized) {
-                VisMF::Read(*current_cp[lev][0],
+                VisMF::Read(*m_fields.get(FieldType::current_cp, Direction{0}, lev),
                             amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jx_cp"));
-                VisMF::Read(*current_cp[lev][1],
+                VisMF::Read(*m_fields.get(FieldType::current_cp, Direction{1}, lev),
                             amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jy_cp"));
-                VisMF::Read(*current_cp[lev][2],
+                VisMF::Read(*m_fields.get(FieldType::current_cp, Direction{2}, lev),
                             amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jz_cp"));
             }
         }
@@ -384,11 +388,11 @@ WarpX::InitFromCheckpoint ()
     {
         for (int lev = 0; lev < nlevs; ++lev) {
             if (pml[lev]) {
-                pml[lev]->Restart(amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "pml"));
+                pml[lev]->Restart(m_fields, amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "pml"));
             }
 #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT)
             if (pml_rz[lev]) {
-                pml_rz[lev]->Restart(amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "pml_rz"));
+                pml_rz[lev]->Restart(m_fields, amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "pml_rz"));
             }
 #endif
         }
diff --git a/Source/EmbeddedBoundary/ParticleScraper.H b/Source/EmbeddedBoundary/ParticleScraper.H
index c5d9cc68c60..860541542be 100644
--- a/Source/EmbeddedBoundary/ParticleScraper.H
+++ b/Source/EmbeddedBoundary/ParticleScraper.H
@@ -65,7 +65,7 @@
  */
 template <class PC, class F, std::enable_if_t<amrex::IsParticleContainer<PC>::value, int> foo = 0>
 void
-scrapeParticlesAtEB (PC& pc, const amrex::Vector<const amrex::MultiFab*>& distance_to_eb, int lev, F&& f)
+scrapeParticlesAtEB (PC& pc, ablastr::fields::MultiLevelScalarField const& distance_to_eb, int lev, F&& f)
 {
     scrapeParticlesAtEB(pc, distance_to_eb, lev, lev, std::forward<F>(f));
 }
@@ -108,7 +108,7 @@ scrapeParticlesAtEB (PC& pc, const amrex::Vector<const amrex::MultiFab*>& distan
  */
 template <class PC, class F, std::enable_if_t<amrex::IsParticleContainer<PC>::value, int> foo = 0>
 void
-scrapeParticlesAtEB (PC& pc, const amrex::Vector<const amrex::MultiFab*>& distance_to_eb, F&& f)
+scrapeParticlesAtEB (PC& pc, ablastr::fields::MultiLevelScalarField const& distance_to_eb, F&& f)
 {
     scrapeParticlesAtEB(pc, distance_to_eb, 0, pc.finestLevel(), std::forward<F>(f));
 }
@@ -153,7 +153,7 @@ scrapeParticlesAtEB (PC& pc, const amrex::Vector<const amrex::MultiFab*>& distan
  */
 template <class PC, class F, std::enable_if_t<amrex::IsParticleContainer<PC>::value, int> foo = 0>
 void
-scrapeParticlesAtEB (PC& pc, const amrex::Vector<const amrex::MultiFab*>& distance_to_eb,
+scrapeParticlesAtEB (PC& pc, ablastr::fields::MultiLevelScalarField const& distance_to_eb,
                      int lev_min, int lev_max, F&& f)
 {
     BL_PROFILE("scrapeParticlesAtEB");
diff --git a/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp b/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp
index 717aa26b021..61009fb46e0 100644
--- a/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp
+++ b/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp
@@ -7,15 +7,20 @@
 #include "WarpXFaceInfoBox.H"

 #include "EmbeddedBoundary/Enabled.H"
+#include "Fields.H"
 #include "Utils/TextMsg.H"
 #include "WarpX.H"

 #include
+#include
 #include
 #include
 #include

+using namespace ablastr::fields;
+using warpx::fields::FieldType;
+
 /**
  * \brief Get the value of arr in the neighbor (i_n, j_n) on the plane with normal 'dim'.
  *
@@ -283,7 +288,7 @@ WarpX::ComputeFaceExtensions ()
 void
 WarpX::InitBorrowing() {
     int idim = 0;
-    for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) {
+    for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) {
         amrex::Box const &box = mfi.validbox();
         auto &borrowing_x = (*m_borrowing[maxLevel()][idim])[mfi];
         borrowing_x.inds_pointer.resize(box);
@@ -299,7 +304,7 @@ WarpX::InitBorrowing() {
     }

     idim = 1;
-    for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) {
+    for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) {
         amrex::Box const &box = mfi.validbox();
         auto &borrowing_y = (*m_borrowing[maxLevel()][idim])[mfi];
         borrowing_y.inds_pointer.resize(box);
@@ -312,7 +317,7 @@ WarpX::InitBorrowing() {
     }

     idim = 2;
-    for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) {
+    for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) {
         amrex::Box const &box = mfi.validbox();
         auto &borrowing_z = (*m_borrowing[maxLevel()][idim])[mfi];
         borrowing_z.inds_pointer.resize(box);
@@ -453,11 +458,10 @@ WarpX::ComputeOneWayExtensions ()
     WARPX_ABORT_WITH_MESSAGE(
         "ComputeOneWayExtensions: Only implemented in 2D3V and 3D3V");
 #endif
-    for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) {
+    for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) {
         amrex::Box const &box = mfi.validbox();
-
-        auto const &S = m_face_areas[maxLevel()][idim]->array(mfi);
+        auto const &S = m_fields.get(FieldType::face_areas, Direction{idim}, maxLevel())->array(mfi);
         auto const &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi);
         auto const &flag_info_face = m_flag_info_face[maxLevel()][idim]->array(mfi);
         auto &borrowing =
(*m_borrowing[maxLevel()][idim])[mfi]; @@ -469,11 +473,11 @@ WarpX::ComputeOneWayExtensions () amrex::Real* borrowing_area = borrowing.area.data(); int& vecs_size = borrowing.vecs_size; - auto const &S_mod = m_area_mod[maxLevel()][idim]->array(mfi); + auto const &S_mod = m_fields.get(FieldType::area_mod, Direction{idim}, maxLevel())->array(mfi); - const auto &lx = m_edge_lengths[maxLevel()][0]->array(mfi); - const auto &ly = m_edge_lengths[maxLevel()][1]->array(mfi); - const auto &lz = m_edge_lengths[maxLevel()][2]->array(mfi); + const auto &lx = m_fields.get(FieldType::edge_lengths, Direction{0}, maxLevel())->array(mfi); + const auto &ly = m_fields.get(FieldType::edge_lengths, Direction{1}, maxLevel())->array(mfi); + const auto &lz = m_fields.get(FieldType::edge_lengths, Direction{2}, maxLevel())->array(mfi); vecs_size = amrex::Scan::PrefixSum(ncells, [=] AMREX_GPU_DEVICE (int icell) { @@ -581,11 +585,11 @@ WarpX::ComputeEightWaysExtensions () WARPX_ABORT_WITH_MESSAGE( "ComputeEightWaysExtensions: Only implemented in 2D3V and 3D3V"); #endif - for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) { + for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) { amrex::Box const &box = mfi.validbox(); - auto const &S = m_face_areas[maxLevel()][idim]->array(mfi); + auto const &S = m_fields.get(FieldType::face_areas, Direction{idim}, maxLevel())->array(mfi); auto const &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi); auto const &flag_info_face = m_flag_info_face[maxLevel()][idim]->array(mfi); auto &borrowing = (*m_borrowing[maxLevel()][idim])[mfi]; @@ -597,10 +601,11 @@ WarpX::ComputeEightWaysExtensions () amrex::Real* borrowing_area = borrowing.area.data(); int& vecs_size = borrowing.vecs_size; - auto const &S_mod = m_area_mod[maxLevel()][idim]->array(mfi); - const auto &lx = m_edge_lengths[maxLevel()][0]->array(mfi); - const auto &ly = m_edge_lengths[maxLevel()][1]->array(mfi); - const auto &lz = m_edge_lengths[maxLevel()][2]->array(mfi); + auto const &S_mod = m_fields.get(FieldType::area_mod, Direction{idim}, maxLevel())->array(mfi); + + const auto &lx = m_fields.get(FieldType::edge_lengths, Direction{0}, maxLevel())->array(mfi); + const auto &ly = m_fields.get(FieldType::edge_lengths, Direction{1}, maxLevel())->array(mfi); + const auto &lz = m_fields.get(FieldType::edge_lengths, Direction{2}, maxLevel())->array(mfi); vecs_size += amrex::Scan::PrefixSum(ncells, [=] AMREX_GPU_DEVICE (int icell){ @@ -732,15 +737,15 @@ WarpX::ApplyBCKCorrection (const int idim) const amrex::Real dy = cell_size[1]; const amrex::Real dz = cell_size[2]; - for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { + for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel()), amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { const amrex::Box &box = mfi.tilebox(); const amrex::Array4 &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi); const amrex::Array4 &flag_info_face = m_flag_info_face[maxLevel()][idim]->array(mfi); - const amrex::Array4 &S = m_face_areas[maxLevel()][idim]->array(mfi); - const amrex::Array4 &lx = m_face_areas[maxLevel()][0]->array(mfi); - const amrex::Array4 &ly = m_face_areas[maxLevel()][1]->array(mfi); - const amrex::Array4 &lz = m_face_areas[maxLevel()][2]->array(mfi); + const amrex::Array4 &S = m_fields.get(FieldType::face_areas, Direction{idim}, maxLevel())->array(mfi); + const amrex::Array4 &lx = 
m_fields.get(FieldType::face_areas, Direction{0}, maxLevel())->array(mfi);
+        const amrex::Array4<amrex::Real> &ly = m_fields.get(FieldType::face_areas, Direction{1}, maxLevel())->array(mfi);
+        const amrex::Array4<amrex::Real> &lz = m_fields.get(FieldType::face_areas, Direction{2}, maxLevel())->array(mfi);

         amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE(int i, int j, int k) {
             if (flag_ext_face(i, j, k)) {
@@ -760,7 +765,7 @@ void
 WarpX::ShrinkBorrowing ()
 {
     for(int idim = 0; idim < AMREX_SPACEDIM; idim++) {
-        for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) {
+        for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) {
             auto &borrowing = (*m_borrowing[maxLevel()][idim])[mfi];
             borrowing.inds.resize(borrowing.vecs_size);
             borrowing.neigh_faces.resize(borrowing.vecs_size);
diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp
index f63e4eb45d3..edbc97a8efe 100644
--- a/Source/EmbeddedBoundary/WarpXInitEB.cpp
+++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp
@@ -9,6 +9,7 @@
 #include "EmbeddedBoundary/Enabled.H"

 #ifdef AMREX_USE_EB
+#   include "Fields.H"
 #   include "Utils/Parser/ParserUtils.H"
 #   include "Utils/TextMsg.H"
@@ -41,6 +42,8 @@
 #   include
 #   include

+using namespace ablastr::fields;
+
 #endif

 #ifdef AMREX_USE_EB
@@ -122,7 +125,7 @@ WarpX::InitEB ()
 #ifdef AMREX_USE_EB
 void
-WarpX::ComputeEdgeLengths (std::array< std::unique_ptr<amrex::MultiFab>, 3 >& edge_lengths,
+WarpX::ComputeEdgeLengths (ablastr::fields::VectorField& edge_lengths,
                            const amrex::EBFArrayBoxFactory& eb_fact)
 {
     BL_PROFILE("ComputeEdgeLengths");
@@ -184,7 +187,7 @@ WarpX::ComputeEdgeLengths (std::array< std::unique_ptr<amrex::MultiFab>, 3 >& ed
 void
-WarpX::ComputeFaceAreas (std::array< std::unique_ptr<amrex::MultiFab>, 3 >& face_areas,
+WarpX::ComputeFaceAreas (VectorField& face_areas,
                          const amrex::EBFArrayBoxFactory& eb_fact)
 {
     BL_PROFILE("ComputeFaceAreas");
@@ -238,7 +241,7 @@ WarpX::ComputeFaceAreas (std::array< std::unique_ptr<amrex::MultiFab>, 3 >& face
 void
-WarpX::ScaleEdges (std::array< std::unique_ptr<amrex::MultiFab>, 3 >& edge_lengths,
+WarpX::ScaleEdges (ablastr::fields::VectorField& edge_lengths,
                    const std::array<amrex::Real,3>& cell_size)
 {
     BL_PROFILE("ScaleEdges");
@@ -262,8 +265,8 @@ WarpX::ScaleEdges (std::array< std::unique_ptr<amrex::MultiFab>, 3 >& edge_lengt
 }

 void
-WarpX::ScaleAreas (std::array< std::unique_ptr<amrex::MultiFab>, 3 >& face_areas,
-                   const std::array<amrex::Real,3>& cell_size) {
+WarpX::ScaleAreas (ablastr::fields::VectorField& face_areas,
+                   const std::array<amrex::Real,3>& cell_size) {
     BL_PROFILE("ScaleAreas");

 #if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ)
@@ -290,7 +293,11 @@ WarpX::ScaleAreas (std::array< std::unique_ptr<amrex::MultiFab>, 3 >& face_areas
 void
-WarpX::MarkCells () {
+WarpX::MarkCells ()
+{
+    using ablastr::fields::Direction;
+    using warpx::fields::FieldType;
+
 #ifndef WARPX_DIM_RZ
     auto const &cell_size = CellSize(maxLevel());

@@ -306,18 +313,20 @@ WarpX::MarkCells () {
             continue;
         }
 #endif
-        for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) {
-            //amrex::Box const &box = mfi.tilebox(m_face_areas[maxLevel()][idim]->ixType().toIntVect());
-            const amrex::Box& box = mfi.tilebox(m_face_areas[maxLevel()][idim]->ixType().toIntVect(),
-                                                m_face_areas[maxLevel()][idim]->nGrowVect() );
+        for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) {
+            auto* face_areas_idim_max_lev =
+                m_fields.get(FieldType::face_areas, Direction{idim}, maxLevel());
+
+            const amrex::Box& box = mfi.tilebox(face_areas_idim_max_lev->ixType().toIntVect(),
+
face_areas_idim_max_lev->nGrowVect() ); - auto const &S = m_face_areas[maxLevel()][idim]->array(mfi); + auto const &S = face_areas_idim_max_lev->array(mfi); auto const &flag_info_face = m_flag_info_face[maxLevel()][idim]->array(mfi); auto const &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi); - const auto &lx = m_edge_lengths[maxLevel()][0]->array(mfi); - const auto &ly = m_edge_lengths[maxLevel()][1]->array(mfi); - const auto &lz = m_edge_lengths[maxLevel()][2]->array(mfi); - auto const &mod_areas_dim = m_area_mod[maxLevel()][idim]->array(mfi); + const auto &lx = m_fields.get(FieldType::edge_lengths, Direction{0}, maxLevel())->array(mfi); + const auto &ly = m_fields.get(FieldType::edge_lengths, Direction{1}, maxLevel())->array(mfi); + const auto &lz = m_fields.get(FieldType::edge_lengths, Direction{2}, maxLevel())->array(mfi); + auto const &mod_areas_dim = m_fields.get(FieldType::area_mod, Direction{idim}, maxLevel())->array(mfi); const amrex::Real dx = cell_size[0]; const amrex::Real dy = cell_size[1]; @@ -381,11 +390,12 @@ WarpX::ComputeDistanceToEB () } #ifdef AMREX_USE_EB BL_PROFILE("ComputeDistanceToEB"); + using warpx::fields::FieldType; const amrex::EB2::IndexSpace& eb_is = amrex::EB2::IndexSpace::top(); for (int lev=0; lev<=maxLevel(); lev++) { const amrex::EB2::Level& eb_level = eb_is.getLevel(Geom(lev)); auto const eb_fact = fieldEBFactory(lev); - amrex::FillSignedDistance(*m_distance_to_eb[lev], eb_level, eb_fact, 1); + amrex::FillSignedDistance(*m_fields.get(FieldType::distance_to_eb, lev), eb_level, eb_fact, 1); } #endif } diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 9acbe734405..93d265d598f 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -15,6 +15,7 @@ #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "EmbeddedBoundary/Enabled.H" #include "Evolve/WarpXDtType.H" +#include "Fields.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" #ifdef WARPX_USE_FFT # ifdef WARPX_DIM_RZ @@ -62,6 +63,9 @@ using ablastr::utils::SignalHandling; void WarpX::Synchronize () { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + FillBoundaryE(guard_cells.ng_FieldGather); FillBoundaryB(guard_cells.ng_FieldGather); if (fft_do_time_averaging) @@ -72,11 +76,16 @@ WarpX::Synchronize () { UpdateAuxilaryData(); FillBoundaryAux(guard_cells.ng_UpdateAux); for (int lev = 0; lev <= finest_level; ++lev) { - mypc->PushP(lev, 0.5_rt*dt[lev], - *Efield_aux[lev][0],*Efield_aux[lev][1], - *Efield_aux[lev][2], - *Bfield_aux[lev][0],*Bfield_aux[lev][1], - *Bfield_aux[lev][2]); + mypc->PushP( + lev, + 0.5_rt*dt[lev], + *m_fields.get(FieldType::Efield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{2}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{2}, lev) + ); } is_synchronized = true; } @@ -87,6 +96,8 @@ WarpX::Evolve (int numsteps) WARPX_PROFILE_REGION("WarpX::Evolve()"); WARPX_PROFILE("WarpX::Evolve()"); + using ablastr::fields::Direction; + Real cur_time = t_new[0]; // Note that the default argument is numsteps = -1 @@ -464,6 +475,9 @@ void WarpX::ExplicitFillBoundaryEBUpdateAux () WARPX_ALWAYS_ASSERT_WITH_MESSAGE(evolve_scheme == EvolveScheme::Explicit, "Cannot call WarpX::ExplicitFillBoundaryEBUpdateAux without Explicit evolve scheme set!"); + using 
ablastr::fields::Direction; + using warpx::fields::FieldType; + // At the beginning, we have B^{n} and E^{n}. // Particles have p^{n} and x^{n}. // is_synchronized is true. @@ -478,9 +492,16 @@ void WarpX::ExplicitFillBoundaryEBUpdateAux () // on first step, push p by -0.5*dt for (int lev = 0; lev <= finest_level; ++lev) { - mypc->PushP(lev, -0.5_rt*dt[lev], - *Efield_aux[lev][0],*Efield_aux[lev][1],*Efield_aux[lev][2], - *Bfield_aux[lev][0],*Bfield_aux[lev][1],*Bfield_aux[lev][2]); + mypc->PushP( + lev, + -0.5_rt*dt[lev], + *m_fields.get(FieldType::Efield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{2}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{2}, lev) + ); } is_synchronized = false; @@ -544,9 +565,10 @@ void WarpX::HandleParticlesAtBoundaries (int step, amrex::Real cur_time, int num // interact the particles with EB walls (if present) if (EB::enabled()) { - mypc->ScrapeParticlesAtEB(amrex::GetVecOfConstPtrs(m_distance_to_eb)); + using warpx::fields::FieldType; + mypc->ScrapeParticlesAtEB(m_fields.get_mr_levels(FieldType::distance_to_eb, finest_level)); m_particle_boundary_buffer->gatherParticlesFromEmbeddedBoundaries( - *mypc, amrex::GetVecOfConstPtrs(m_distance_to_eb)); + *mypc, m_fields.get_mr_levels(FieldType::distance_to_eb, finest_level)); mypc->deleteInvalidParticles(); } @@ -560,23 +582,22 @@ void WarpX::HandleParticlesAtBoundaries (int step, amrex::Real cur_time, int num void WarpX::SyncCurrentAndRho () { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { if (fft_periodic_single_box) { // With periodic single box, synchronize J and rho here, // even with current correction or Vay deposition - if (current_deposition_algo == CurrentDepositionAlgo::Vay) - { - // TODO Replace current_cp with current_cp_vay once Vay deposition is implemented with MR - SyncCurrent(current_fp_vay, current_cp, current_buf); - SyncRho(rho_fp, rho_cp, charge_buf); - } - else - { - SyncCurrent(current_fp, current_cp, current_buf); - SyncRho(rho_fp, rho_cp, charge_buf); - } + std::string const current_fp_string = (current_deposition_algo == CurrentDepositionAlgo::Vay) + ? "current_fp_vay" : "current_fp"; + // TODO Replace current_cp with current_cp_vay once Vay deposition is implemented with MR + + SyncCurrent(current_fp_string); + SyncRho(); + } else // no periodic single box { @@ -586,42 +607,46 @@ void WarpX::SyncCurrentAndRho () if (!current_correction && current_deposition_algo != CurrentDepositionAlgo::Vay) { - SyncCurrent(current_fp, current_cp, current_buf); - SyncRho(rho_fp, rho_cp, charge_buf); + SyncCurrent("current_fp"); + SyncRho(); } if (current_deposition_algo == CurrentDepositionAlgo::Vay) { // TODO This works only without mesh refinement const int lev = 0; - if (use_filter) { ApplyFilterJ(current_fp_vay, lev); } + if (use_filter) { + ApplyFilterJ(m_fields.get_mr_levels_alldirs(FieldType::current_fp_vay, finest_level), lev); + } } } } else // FDTD { - SyncCurrent(current_fp, current_cp, current_buf); - SyncRho(rho_fp, rho_cp, charge_buf); + SyncCurrent("current_fp"); + SyncRho(); } // Reflect charge and current density over PEC boundaries, if needed. 
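    // Illustrative sketch (not part of the change): the register calls used
    // throughout this hunk follow one pattern. Assuming the MultiFabRegister
    // interface this series introduces, a vector-field component is fetched as
    //     using ablastr::fields::Direction;
    //     amrex::MultiFab* Jx = m_fields.get(FieldType::current_fp, Direction{0}, lev);
    // a scalar field as
    //     amrex::MultiFab* rho = m_fields.get(FieldType::rho_fp, lev);
    // and optional fields are guarded with m_fields.has(...), replacing the old
    // null-pointer checks such as `if (rho_fp[lev])`.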
for (int lev = 0; lev <= finest_level; ++lev) { - if (rho_fp[lev]) { - ApplyRhofieldBoundary(lev, rho_fp[lev].get(), PatchType::fine); + if (m_fields.has(FieldType::rho_fp, lev)) { + ApplyRhofieldBoundary(lev, m_fields.get(FieldType::rho_fp,lev), PatchType::fine); } - ApplyJfieldBoundary( - lev, current_fp[lev][0].get(), current_fp[lev][1].get(), - current_fp[lev][2].get(), PatchType::fine - ); + ApplyJfieldBoundary(lev, + m_fields.get(FieldType::current_fp, Direction{0}, lev), + m_fields.get(FieldType::current_fp, Direction{1}, lev), + m_fields.get(FieldType::current_fp, Direction{2}, lev), + PatchType::fine); if (lev > 0) { - if (rho_cp[lev]) { - ApplyRhofieldBoundary(lev, rho_cp[lev].get(), PatchType::coarse); + if (m_fields.has(FieldType::rho_cp, lev)) { + ApplyRhofieldBoundary(lev, m_fields.get(FieldType::rho_cp,lev), PatchType::coarse); } - ApplyJfieldBoundary( - lev, current_cp[lev][0].get(), current_cp[lev][1].get(), - current_cp[lev][2].get(), PatchType::coarse - ); + ApplyJfieldBoundary(lev, + m_fields.get(FieldType::current_cp, Direction{0}, lev), + m_fields.get(FieldType::current_cp, Direction{1}, lev), + m_fields.get(FieldType::current_cp, Direction{2}, lev), + PatchType::coarse); } } } @@ -636,6 +661,8 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) "multi-J algorithm not implemented for FDTD" ); + using warpx::fields::FieldType; + const int rho_mid = spectral_solver_fp[0]->m_spectral_index.rho_mid; const int rho_new = spectral_solver_fp[0]->m_spectral_index.rho_new; @@ -647,7 +674,7 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // Initialize multi-J loop: // 1) Prepare E,B,F,G fields in spectral space - PSATDForwardTransformEB(Efield_fp, Bfield_fp, Efield_cp, Bfield_cp); + PSATDForwardTransformEB(); if (WarpX::do_dive_cleaning) { PSATDForwardTransformF(); } if (WarpX::do_divb_cleaning) { PSATDForwardTransformG(); } @@ -656,31 +683,36 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // 3) Deposit rho (in rho_new, since it will be moved during the loop) // (after checking that pointer to rho_fp on MR level 0 is not null) - if (rho_fp[0] && rho_in_time == RhoInTime::Linear) + if (m_fields.has(FieldType::rho_fp, 0) && rho_in_time == RhoInTime::Linear) { + ablastr::fields::MultiLevelScalarField const rho_fp = m_fields.get_mr_levels(FieldType::rho_fp, finest_level); + + std::string const rho_fp_string = "rho_fp"; + std::string const rho_cp_string = "rho_cp"; + // Deposit rho at relative time -dt // (dt[0] denotes the time step on mesh refinement level 0) mypc->DepositCharge(rho_fp, -dt[0]); // Filter, exchange boundary, and interpolate across levels - SyncRho(rho_fp, rho_cp, charge_buf); + SyncRho(); // Forward FFT of rho - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_new); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_new); } // 4) Deposit J at relative time -dt with time step dt // (dt[0] denotes the time step on mesh refinement level 0) if (J_in_time == JInTime::Linear) { - auto& current = (do_current_centering) ? current_fp_nodal : current_fp; - mypc->DepositCurrent(current, dt[0], -dt[0]); + std::string const current_string = (do_current_centering) ? "current_fp_nodal" : "current_fp"; + mypc->DepositCurrent( m_fields.get_mr_levels_alldirs(current_string, finest_level), dt[0], -dt[0]); // Synchronize J: filter, exchange boundary, and interpolate across levels. 
// With current centering, the nodal current is deposited in 'current', // namely 'current_fp_nodal': SyncCurrent stores the result of its centering // into 'current_fp' and then performs both filtering, if used, and exchange // of guard cells. - SyncCurrent(current_fp, current_cp, current_buf); + SyncCurrent("current_fp"); // Forward FFT of J - PSATDForwardTransformJ(current_fp, current_cp); + PSATDForwardTransformJ("current_fp", "current_cp"); } // Number of depositions for multi-J scheme @@ -705,31 +737,36 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // Deposit new J at relative time t_deposit_current with time step dt // (dt[0] denotes the time step on mesh refinement level 0) - auto& current = (do_current_centering) ? current_fp_nodal : current_fp; - mypc->DepositCurrent(current, dt[0], t_deposit_current); + std::string const current_string = (do_current_centering) ? "current_fp_nodal" : "current_fp"; + mypc->DepositCurrent( m_fields.get_mr_levels_alldirs(current_string, finest_level), dt[0], t_deposit_current); // Synchronize J: filter, exchange boundary, and interpolate across levels. // With current centering, the nodal current is deposited in 'current', // namely 'current_fp_nodal': SyncCurrent stores the result of its centering // into 'current_fp' and then performs both filtering, if used, and exchange // of guard cells. - SyncCurrent(current_fp, current_cp, current_buf); + SyncCurrent("current_fp"); // Forward FFT of J - PSATDForwardTransformJ(current_fp, current_cp); + PSATDForwardTransformJ("current_fp", "current_cp"); // Deposit new rho // (after checking that pointer to rho_fp on MR level 0 is not null) - if (rho_fp[0]) + if (m_fields.has(FieldType::rho_fp, 0)) { + ablastr::fields::MultiLevelScalarField const rho_fp = m_fields.get_mr_levels(FieldType::rho_fp, finest_level); + + std::string const rho_fp_string = "rho_fp"; + std::string const rho_cp_string = "rho_cp"; + // Move rho from new to old if rho is linear in time if (rho_in_time == RhoInTime::Linear) { PSATDMoveRhoNewToRhoOld(); } // Deposit rho at relative time t_deposit_charge mypc->DepositCharge(rho_fp, t_deposit_charge); // Filter, exchange boundary, and interpolate across levels - SyncRho(rho_fp, rho_cp, charge_buf); + SyncRho(); // Forward FFT of rho const int rho_idx = (rho_in_time == RhoInTime::Linear) ? 
rho_new : rho_mid; - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_idx); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_idx); } if (WarpX::current_correction) @@ -745,7 +782,7 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // (the relative time reached here coincides with an integer full time step) if (i_deposit == n_deposit-1) { - PSATDBackwardTransformEB(Efield_fp, Bfield_fp, Efield_cp, Bfield_cp); + PSATDBackwardTransformEB(); if (WarpX::do_dive_cleaning) { PSATDBackwardTransformF(); } if (WarpX::do_divb_cleaning) { PSATDBackwardTransformG(); } } @@ -756,7 +793,12 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) { // We summed the integral of the field over 2*dt PSATDScaleAverageFields(1._rt / (2._rt*dt[0])); - PSATDBackwardTransformEBavg(Efield_avg_fp, Bfield_avg_fp, Efield_avg_cp, Bfield_avg_cp); + PSATDBackwardTransformEBavg( + m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level) + ); } // Evolve fields in PML @@ -764,7 +806,7 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) { if (do_pml && pml[lev]->ok()) { - pml[lev]->PushPSATD(lev); + pml[lev]->PushPSATD(m_fields, lev); } ApplyEfieldBoundary(lev, PatchType::fine); if (lev > 0) { ApplyEfieldBoundary(lev, PatchType::coarse); } @@ -824,13 +866,25 @@ WarpX::OneStep_sub1 (Real cur_time) const int fine_lev = 1; const int coarse_lev = 0; + using warpx::fields::FieldType; + // i) Push particles and fields on the fine patch (first fine step) PushParticlesandDeposit(fine_lev, cur_time, DtType::FirstHalf); - RestrictCurrentFromFineToCoarsePatch(current_fp, current_cp, fine_lev); - RestrictRhoFromFineToCoarsePatch(rho_fp, rho_cp, fine_lev); - if (use_filter) { ApplyFilterJ(current_fp, fine_lev); } - SumBoundaryJ(current_fp, fine_lev, Geom(fine_lev).periodicity()); - ApplyFilterandSumBoundaryRho(rho_fp, rho_cp, fine_lev, PatchType::fine, 0, 2*ncomps); + RestrictCurrentFromFineToCoarsePatch( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), fine_lev); + RestrictRhoFromFineToCoarsePatch(fine_lev); + if (use_filter) { + ApplyFilterJ( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev); + } + SumBoundaryJ( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + fine_lev, Geom(fine_lev).periodicity()); + + ApplyFilterandSumBoundaryRho( + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_cp, finest_level), + fine_lev, PatchType::fine, 0, 2*ncomps); EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); EvolveF(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); @@ -858,8 +912,15 @@ WarpX::OneStep_sub1 (Real cur_time) // by only half a coarse step (first half) PushParticlesandDeposit(coarse_lev, cur_time, DtType::Full); StoreCurrent(coarse_lev); - AddCurrentFromFineLevelandSumBoundary(current_fp, current_cp, current_buf, coarse_lev); - AddRhoFromFineLevelandSumBoundary(rho_fp, rho_cp, charge_buf, coarse_lev, 0, ncomps); + AddCurrentFromFineLevelandSumBoundary( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_buf, 
finest_level), coarse_lev); + AddRhoFromFineLevelandSumBoundary( + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_cp, finest_level), + m_fields.get_mr_levels(FieldType::rho_buf, finest_level), + coarse_lev, 0, ncomps); EvolveB(fine_lev, PatchType::coarse, dt[fine_lev], DtType::FirstHalf); EvolveF(fine_lev, PatchType::coarse, dt[fine_lev], DtType::FirstHalf); @@ -887,11 +948,18 @@ WarpX::OneStep_sub1 (Real cur_time) // iv) Push particles and fields on the fine patch (second fine step) PushParticlesandDeposit(fine_lev, cur_time + dt[fine_lev], DtType::SecondHalf); - RestrictCurrentFromFineToCoarsePatch(current_fp, current_cp, fine_lev); - RestrictRhoFromFineToCoarsePatch(rho_fp, rho_cp, fine_lev); - if (use_filter) { ApplyFilterJ(current_fp, fine_lev); } - SumBoundaryJ(current_fp, fine_lev, Geom(fine_lev).periodicity()); - ApplyFilterandSumBoundaryRho(rho_fp, rho_cp, fine_lev, PatchType::fine, 0, ncomps); + RestrictCurrentFromFineToCoarsePatch( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), fine_lev); + RestrictRhoFromFineToCoarsePatch(fine_lev); + if (use_filter) { + ApplyFilterJ( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev); + } + SumBoundaryJ( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev, Geom(fine_lev).periodicity()); + ApplyFilterandSumBoundaryRho( + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_cp, finest_level), + fine_lev, PatchType::fine, 0, ncomps); EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); EvolveF(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); @@ -918,8 +986,16 @@ WarpX::OneStep_sub1 (Real cur_time) // v) Push the fields on the coarse patch and mother grid // by only half a coarse step (second half) RestoreCurrent(coarse_lev); - AddCurrentFromFineLevelandSumBoundary(current_fp, current_cp, current_buf, coarse_lev); - AddRhoFromFineLevelandSumBoundary(rho_fp, rho_cp, charge_buf, coarse_lev, ncomps, ncomps); + AddCurrentFromFineLevelandSumBoundary( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_buf, finest_level), + coarse_lev); + AddRhoFromFineLevelandSumBoundary( + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_cp, finest_level), + m_fields.get_mr_levels(FieldType::rho_buf, finest_level), + coarse_lev, ncomps, ncomps); EvolveE(fine_lev, PatchType::coarse, dt[fine_lev]); FillBoundaryE(fine_lev, PatchType::coarse, guard_cells.ng_FieldSolver, @@ -980,9 +1056,18 @@ WarpX::doFieldIonization () void WarpX::doFieldIonization (int lev) { - mypc->doFieldIonization(lev, - *Efield_aux[lev][0],*Efield_aux[lev][1],*Efield_aux[lev][2], - *Bfield_aux[lev][0],*Bfield_aux[lev][1],*Bfield_aux[lev][2]); + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + mypc->doFieldIonization( + lev, + *m_fields.get(FieldType::Efield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{2}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{2}, lev) + ); } #ifdef WARPX_QED @@ -997,9 
+1082,18 @@ WarpX::doQEDEvents () void WarpX::doQEDEvents (int lev) { - mypc->doQedEvents(lev, - *Efield_aux[lev][0],*Efield_aux[lev][1],*Efield_aux[lev][2], - *Bfield_aux[lev][0],*Bfield_aux[lev][1],*Bfield_aux[lev][2]); + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + mypc->doQedEvents( + lev, + *m_fields.get(FieldType::Efield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{2}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{2}, lev) + ); } #endif @@ -1017,50 +1111,53 @@ void WarpX::PushParticlesandDeposit (int lev, amrex::Real cur_time, DtType a_dt_type, bool skip_current, PushType push_type) { - amrex::MultiFab* current_x = nullptr; - amrex::MultiFab* current_y = nullptr; - amrex::MultiFab* current_z = nullptr; + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + std::string current_fp_string; if (WarpX::do_current_centering) { - current_x = current_fp_nodal[lev][0].get(); - current_y = current_fp_nodal[lev][1].get(); - current_z = current_fp_nodal[lev][2].get(); + current_fp_string = "current_fp_nodal"; } else if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) { - // Note that Vay deposition is supported only for PSATD and the code currently aborts otherwise - current_x = current_fp_vay[lev][0].get(); - current_y = current_fp_vay[lev][1].get(); - current_z = current_fp_vay[lev][2].get(); + current_fp_string = "current_fp_vay"; } else { - current_x = current_fp[lev][0].get(); - current_y = current_fp[lev][1].get(); - current_z = current_fp[lev][2].get(); + current_fp_string = "current_fp"; } - mypc->Evolve(lev, - *Efield_aux[lev][0], *Efield_aux[lev][1], *Efield_aux[lev][2], - *Bfield_aux[lev][0], *Bfield_aux[lev][1], *Bfield_aux[lev][2], - *current_x, *current_y, *current_z, - current_buf[lev][0].get(), current_buf[lev][1].get(), current_buf[lev][2].get(), - rho_fp[lev].get(), charge_buf[lev].get(), - Efield_cax[lev][0].get(), Efield_cax[lev][1].get(), Efield_cax[lev][2].get(), - Bfield_cax[lev][0].get(), Bfield_cax[lev][1].get(), Bfield_cax[lev][2].get(), - cur_time, dt[lev], a_dt_type, skip_current, push_type); + mypc->Evolve( + m_fields, + lev, + current_fp_string, + cur_time, + dt[lev], + a_dt_type, + skip_current, + push_type + ); if (! skip_current) { #ifdef WARPX_DIM_RZ // This is called after all particles have deposited their current and charge. 
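            // Illustrative sketch (not part of the change): in RZ geometry the
            // deposited J and rho are rescaled by the inverse cell volume below;
            // optional buffer fields are now guarded by presence queries, e.g.
            //     if (m_fields.has(FieldType::current_buf, Direction{0}, lev)) {
            //         /* coarse-level buffers exist for this refined level */
            //     }
            // instead of testing current_buf[lev][0].get() for null.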
- ApplyInverseVolumeScalingToCurrentDensity(current_fp[lev][0].get(), current_fp[lev][1].get(), current_fp[lev][2].get(), lev); - if (current_buf[lev][0].get()) { - ApplyInverseVolumeScalingToCurrentDensity(current_buf[lev][0].get(), current_buf[lev][1].get(), current_buf[lev][2].get(), lev-1); + ApplyInverseVolumeScalingToCurrentDensity( + m_fields.get(FieldType::current_fp, Direction{0}, lev), + m_fields.get(FieldType::current_fp, Direction{1}, lev), + m_fields.get(FieldType::current_fp, Direction{2}, lev), + lev); + if (m_fields.has(FieldType::current_buf, Direction{0}, lev)) { + ApplyInverseVolumeScalingToCurrentDensity( + m_fields.get(FieldType::current_buf, Direction{0}, lev), + m_fields.get(FieldType::current_buf, Direction{1}, lev), + m_fields.get(FieldType::current_buf, Direction{2}, lev), + lev-1); } - if (rho_fp[lev]) { - ApplyInverseVolumeScalingToChargeDensity(rho_fp[lev].get(), lev); - if (charge_buf[lev]) { - ApplyInverseVolumeScalingToChargeDensity(charge_buf[lev].get(), lev-1); + if (m_fields.has(FieldType::rho_fp, lev)) { + ApplyInverseVolumeScalingToChargeDensity(m_fields.get(FieldType::rho_fp, lev), lev); + if (m_fields.has(FieldType::rho_buf, lev)) { + ApplyInverseVolumeScalingToChargeDensity(m_fields.get(FieldType::rho_buf, lev), lev-1); } } // #else @@ -1072,10 +1169,12 @@ WarpX::PushParticlesandDeposit (int lev, amrex::Real cur_time, DtType a_dt_type, // Might this be related to issue #1943? #endif if (do_fluid_species) { - myfl->Evolve(lev, - *Efield_aux[lev][0], *Efield_aux[lev][1], *Efield_aux[lev][2], - *Bfield_aux[lev][0], *Bfield_aux[lev][1], *Bfield_aux[lev][2], - rho_fp[lev].get(), *current_x, *current_y, *current_z, cur_time, skip_current); + myfl->Evolve(m_fields, + lev, + current_fp_string, + cur_time, + skip_current + ); } } } @@ -1088,6 +1187,8 @@ WarpX::PushParticlesandDeposit (int lev, amrex::Real cur_time, DtType a_dt_type, void WarpX::applyMirrors (Real time) { + using ablastr::fields::Direction; + // something to do? 
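    // Illustrative sketch (not part of the change): each mirror zeroes the
    // fields in a z-interval. With the register, NullifyMF is addressed by name
    // (plus a Direction for vector components), e.g.
    //     NullifyMF(m_fields, "Efield_fp", Direction{0}, lev, z_min, z_max);
    // and the scalar overloads below appear to skip names that are not
    // registered, which is why the old `if (F_fp[lev])` guards could be dropped.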
if (num_mirrors == 0) { return; @@ -1114,47 +1215,31 @@ WarpX::applyMirrors (Real time) const amrex::Real dz = WarpX::CellSize(lev)[2]; const amrex::Real z_max = std::max(z_max_tmp, z_min+mirror_z_npoints[i_mirror]*dz); - // Get fine patch field MultiFabs - amrex::MultiFab& Ex = *Efield_fp[lev][0].get(); - amrex::MultiFab& Ey = *Efield_fp[lev][1].get(); - amrex::MultiFab& Ez = *Efield_fp[lev][2].get(); - amrex::MultiFab& Bx = *Bfield_fp[lev][0].get(); - amrex::MultiFab& By = *Bfield_fp[lev][1].get(); - amrex::MultiFab& Bz = *Bfield_fp[lev][2].get(); - - // Set each field to zero between z_min and z_max - NullifyMF(Ex, lev, z_min, z_max); - NullifyMF(Ey, lev, z_min, z_max); - NullifyMF(Ez, lev, z_min, z_max); - NullifyMF(Bx, lev, z_min, z_max); - NullifyMF(By, lev, z_min, z_max); - NullifyMF(Bz, lev, z_min, z_max); + // Set each field on the fine patch to zero between z_min and z_max + NullifyMF(m_fields, "Efield_fp", Direction{0}, lev, z_min, z_max); + NullifyMF(m_fields, "Efield_fp", Direction{1}, lev, z_min, z_max); + NullifyMF(m_fields, "Efield_fp", Direction{2}, lev, z_min, z_max); + NullifyMF(m_fields, "Bfield_fp", Direction{0}, lev, z_min, z_max); + NullifyMF(m_fields, "Bfield_fp", Direction{1}, lev, z_min, z_max); + NullifyMF(m_fields, "Bfield_fp", Direction{2}, lev, z_min, z_max); // If div(E)/div(B) cleaning are used, set F/G field to zero - if (F_fp[lev]) { NullifyMF(*F_fp[lev], lev, z_min, z_max); } - if (G_fp[lev]) { NullifyMF(*G_fp[lev], lev, z_min, z_max); } + NullifyMF(m_fields, "F_fp", lev, z_min, z_max); + NullifyMF(m_fields, "G_fp", lev, z_min, z_max); if (lev>0) { - // Get coarse patch field MultiFabs - amrex::MultiFab& cEx = *Efield_cp[lev][0].get(); - amrex::MultiFab& cEy = *Efield_cp[lev][1].get(); - amrex::MultiFab& cEz = *Efield_cp[lev][2].get(); - amrex::MultiFab& cBx = *Bfield_cp[lev][0].get(); - amrex::MultiFab& cBy = *Bfield_cp[lev][1].get(); - amrex::MultiFab& cBz = *Bfield_cp[lev][2].get(); - - // Set each field to zero between z_min and z_max - NullifyMF(cEx, lev, z_min, z_max); - NullifyMF(cEy, lev, z_min, z_max); - NullifyMF(cEz, lev, z_min, z_max); - NullifyMF(cBx, lev, z_min, z_max); - NullifyMF(cBy, lev, z_min, z_max); - NullifyMF(cBz, lev, z_min, z_max); + // Set each field on the coarse patch to zero between z_min and z_max + NullifyMF(m_fields, "Efield_cp", Direction{0}, lev, z_min, z_max); + NullifyMF(m_fields, "Efield_cp", Direction{1}, lev, z_min, z_max); + NullifyMF(m_fields, "Efield_cp", Direction{2}, lev, z_min, z_max); + NullifyMF(m_fields, "Bfield_cp", Direction{0}, lev, z_min, z_max); + NullifyMF(m_fields, "Bfield_cp", Direction{1}, lev, z_min, z_max); + NullifyMF(m_fields, "Bfield_cp", Direction{2}, lev, z_min, z_max); // If div(E)/div(B) cleaning are used, set F/G field to zero - if (F_cp[lev]) { NullifyMF(*F_cp[lev], lev, z_min, z_max); } - if (G_cp[lev]) { NullifyMF(*G_cp[lev], lev, z_min, z_max); } + NullifyMF(m_fields, "F_cp", lev, z_min, z_max); + NullifyMF(m_fields, "G_cp", lev, z_min, z_max); } } } diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H index 8d23088799f..e58af394a7a 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H +++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H @@ -50,15 +50,10 @@ public: * This function must be defined in the derived classes. 
*/ virtual void ComputeSpaceChargeField ( - [[maybe_unused]] amrex::Vector< std::unique_ptr >& rho_fp, - [[maybe_unused]] amrex::Vector< std::unique_ptr >& rho_cp, - [[maybe_unused]] amrex::Vector< std::unique_ptr >& charge_buf, - [[maybe_unused]] amrex::Vector< std::unique_ptr >& phi_fp, - [[maybe_unused]] MultiParticleContainer& mpc, - [[maybe_unused]] MultiFluidContainer* mfl, - [[maybe_unused]] amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, - [[maybe_unused]] amrex::Vector< std::array< std::unique_ptr, 3> >& Bfield_fp - ) = 0; + ablastr::fields::MultiFabRegister& fields, + MultiParticleContainer& mpc, + MultiFluidContainer* mfl, + int max_level) = 0; /** * \brief Set Dirichlet boundary conditions for the electrostatic solver. @@ -69,7 +64,7 @@ public: * \param[in] idim The dimension for which the Dirichlet boundary condition is set */ void setPhiBC ( - amrex::Vector>& phi, + ablastr::fields::MultiLevelScalarField const& phi, amrex::Real t ) const; @@ -91,8 +86,8 @@ public: * \param[in] verbosity The verbosity setting for the MLMG solver */ void computePhi ( - const amrex::Vector >& rho, - amrex::Vector >& phi, + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi, std::array beta, amrex::Real required_precision, amrex::Real absolute_tolerance, @@ -116,8 +111,8 @@ public: * \param[in] beta Represents the velocity of the source of `phi` */ void computeE ( - amrex::Vector, 3> >& E, - const amrex::Vector >& phi, + ablastr::fields::MultiLevelVectorField const& E, + ablastr::fields::MultiLevelScalarField const& phi, std::array beta ) const; @@ -136,8 +131,8 @@ public: *\param[in] beta Represents the velocity of the source of `phi` */ void computeB ( - amrex::Vector, 3> >& B, - const amrex::Vector >& phi, + ablastr::fields::MultiLevelVectorField const& B, + ablastr::fields::MultiLevelScalarField const& phi, std::array beta ) const; diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp index 895615a5b21..1ced0a07152 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp @@ -8,10 +8,14 @@ */ #include "ElectrostaticSolver.H" -#include #include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" + +#include + using namespace amrex; +using warpx::fields::FieldType; ElectrostaticSolver::ElectrostaticSolver (int nlevs_max) : num_levels{nlevs_max} { @@ -39,7 +43,7 @@ void ElectrostaticSolver::ReadParameters () { void ElectrostaticSolver::setPhiBC ( - amrex::Vector>& phi, + ablastr::fields::MultiLevelScalarField const& phi, amrex::Real t ) const { @@ -110,19 +114,23 @@ ElectrostaticSolver::setPhiBC ( void -ElectrostaticSolver::computePhi (const amrex::Vector >& rho, - amrex::Vector >& phi, - std::array const beta, - Real const required_precision, - Real absolute_tolerance, - int const max_iters, - int const verbosity) const { +ElectrostaticSolver::computePhi ( + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi, + std::array const beta, + Real const required_precision, + Real absolute_tolerance, + int const max_iters, + int const verbosity) const +{ + using ablastr::fields::Direction; + // create a vector to our fields, sorted by level amrex::Vector sorted_rho; amrex::Vector sorted_phi; for (int lev = 0; lev < num_levels; ++lev) { - sorted_rho.emplace_back(rho[lev].get()); - 
sorted_phi.emplace_back(phi[lev].get()); + sorted_rho.emplace_back(rho[lev]); + sorted_phi.emplace_back(phi[lev]); } std::optional post_phi_calculation; @@ -149,18 +157,18 @@ ElectrostaticSolver::computePhi (const amrex::Vector{ - warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 2) + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev) } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) amrex::Array{ - warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 0), - warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 2) + warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev) } #elif defined(WARPX_DIM_3D) amrex::Array{ - warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 0), - warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 1), - warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 2) + warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev) } #endif ); @@ -206,9 +214,10 @@ ElectrostaticSolver::computePhi (const amrex::Vector, 3> >& E, - const amrex::Vector >& phi, - std::array const beta ) const +ElectrostaticSolver::computeE ( + ablastr::fields::MultiLevelVectorField const& E, + ablastr::fields::MultiLevelScalarField const& phi, + std::array beta ) const { auto & warpx = WarpX::GetInstance(); for (int lev = 0; lev < num_levels; lev++) { @@ -369,9 +378,10 @@ ElectrostaticSolver::computeE (amrex::Vector, 3> >& B, - const amrex::Vector >& phi, - std::array const beta ) const +void ElectrostaticSolver::computeB ( + ablastr::fields::MultiLevelVectorField const& B, + ablastr::fields::MultiLevelScalarField const& phi, + std::array beta) const { // return early if beta is 0 since there will be no B-field if ((beta[0] == 0._rt) && (beta[1] == 0._rt) && (beta[2] == 0._rt)) { return; } diff --git a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H index 7dc41f0a056..5606ebef2f1 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H +++ b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H @@ -22,19 +22,14 @@ public: void InitData () override; void ComputeSpaceChargeField ( - amrex::Vector< std::unique_ptr >& rho_fp, - amrex::Vector< std::unique_ptr >& rho_cp, - amrex::Vector< std::unique_ptr >& charge_buf, - amrex::Vector< std::unique_ptr >& phi_fp, + ablastr::fields::MultiFabRegister& fields, MultiParticleContainer& mpc, MultiFluidContainer* mfl, - amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, - amrex::Vector< std::array< std::unique_ptr, 3> >& Bfield_fp - ) override; + int max_level) override; void computePhiTriDiagonal ( - const amrex::Vector >& rho, - amrex::Vector >& phi + const ablastr::fields::MultiLevelScalarField& rho, + const ablastr::fields::MultiLevelScalarField& phi ); }; diff --git a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp index d14abd1848a..e973ae66975 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp +++ b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp @@ -9,6 +9,7 @@ #include "LabFrameExplicitES.H" #include "Fluids/MultiFluidContainer_fwd.H" #include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" #include "Particles/MultiParticleContainer_fwd.H" #include 
"Python/callbacks.H" #include "WarpX.H" @@ -21,35 +22,35 @@ void LabFrameExplicitES::InitData() { } void LabFrameExplicitES::ComputeSpaceChargeField ( - amrex::Vector< std::unique_ptr >& rho_fp, - amrex::Vector< std::unique_ptr >& rho_cp, - amrex::Vector< std::unique_ptr >& charge_buf, - amrex::Vector< std::unique_ptr >& phi_fp, + ablastr::fields::MultiFabRegister& fields, MultiParticleContainer& mpc, MultiFluidContainer* mfl, - amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, - amrex::Vector< std::array< std::unique_ptr, 3> >& /*Bfield_fp*/ -) { + int max_level) +{ + using ablastr::fields::MultiLevelScalarField; + using ablastr::fields::MultiLevelVectorField; + using warpx::fields::FieldType; + + const MultiLevelScalarField rho_fp = fields.get_mr_levels(FieldType::rho_fp, max_level); + const MultiLevelScalarField rho_cp = fields.get_mr_levels(FieldType::rho_cp, max_level); + const MultiLevelScalarField phi_fp = fields.get_mr_levels(FieldType::phi_fp, max_level); + const MultiLevelVectorField Efield_fp = fields.get_mr_levels_alldirs(FieldType::Efield_fp, max_level); + mpc.DepositCharge(rho_fp, 0.0_rt); if (mfl) { const int lev = 0; - mfl->DepositCharge(lev, *rho_fp[lev]); + mfl->DepositCharge(fields, *rho_fp[lev], lev); } + // Apply filter, perform MPI exchange, interpolate across levels + const Vector > rho_buf(num_levels); auto & warpx = WarpX::GetInstance(); - for (int lev = 0; lev < num_levels; lev++) { - if (lev > 0) { - if (charge_buf[lev]) { - charge_buf[lev]->setVal(0.); - } - } - } - warpx.SyncRho(rho_fp, rho_cp, charge_buf); // Apply filter, perform MPI exchange, interpolate across levels + warpx.SyncRho( rho_fp, rho_cp, amrex::GetVecOfPtrs(rho_buf) ); #ifndef WARPX_DIM_RZ for (int lev = 0; lev < num_levels; lev++) { // Reflect density over PEC boundaries, if needed. 
- warpx.ApplyRhofieldBoundary(lev, rho_fp[lev].get(), PatchType::fine); + warpx.ApplyRhofieldBoundary(lev, rho_fp[lev], PatchType::fine); } #endif // beta is zero in lab frame @@ -94,8 +95,8 @@ void LabFrameExplicitES::ComputeSpaceChargeField ( \param[out] phi The potential to be computed by this function */ void LabFrameExplicitES::computePhiTriDiagonal ( - const amrex::Vector >& rho, - amrex::Vector >& phi) + const ablastr::fields::MultiLevelScalarField& rho, + const ablastr::fields::MultiLevelScalarField& phi) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE(num_levels == 1, "The tridiagonal solver cannot be used with mesh refinement"); diff --git a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H index 70382d7ced5..cf831a7ab10 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H +++ b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H @@ -44,15 +44,10 @@ public: * \param[in,out] Bfield Field contribution from phi computed from each species' charge density is added */ void ComputeSpaceChargeField ( - [[maybe_unused]] amrex::Vector< std::unique_ptr >& rho_fp, - [[maybe_unused]] amrex::Vector< std::unique_ptr >& rho_cp, - amrex::Vector< std::unique_ptr >& charge_buf, - amrex::Vector< std::unique_ptr >& phi_fp, - MultiParticleContainer& mpc, - [[maybe_unused]] MultiFluidContainer* mfl, - amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, - amrex::Vector< std::array< std::unique_ptr, 3> >& Bfield_fp - ) override; + ablastr::fields::MultiFabRegister& fields, + MultiParticleContainer& mpc, + MultiFluidContainer* mfl, + int max_level) override; /** * Compute the charge density of the species paricle container, pc, @@ -65,10 +60,9 @@ public: * \param[in] Bfield Bfield updated to include potential computed for selected species charge density as source */ void AddSpaceChargeField ( - amrex::Vector >& charge_buf, WarpXParticleContainer& pc, - amrex::Vector, 3>>& Efield, - amrex::Vector, 3>>& Bfield + ablastr::fields::MultiLevelVectorField& Efield_fp, + ablastr::fields::MultiLevelVectorField& Bfield_fp ); /** Compute the potential `phi` by solving the Poisson equation with the @@ -77,7 +71,7 @@ public: * \param[in] Efield Efield updated to include potential gradient from boundary condition */ void AddBoundaryField ( - amrex::Vector, 3>>& Efield + ablastr::fields::MultiLevelVectorField& Efield ); }; diff --git a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp index 1660efd48c2..69647da1702 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp +++ b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp @@ -6,12 +6,13 @@ * * License: BSD-3-Clause-LBNL */ -#include "WarpX.H" - #include "RelativisticExplicitES.H" +#include "Fields.H" #include "Particles/MultiParticleContainer.H" #include "Particles/WarpXParticleContainer.H" +#include "WarpX.H" + using namespace amrex; @@ -30,26 +31,27 @@ void RelativisticExplicitES::InitData () { } void RelativisticExplicitES::ComputeSpaceChargeField ( - amrex::Vector< std::unique_ptr >& rho_fp, - amrex::Vector< std::unique_ptr >& rho_cp, - amrex::Vector< std::unique_ptr >& charge_buf, - amrex::Vector< std::unique_ptr >& phi_fp, + ablastr::fields::MultiFabRegister& fields, MultiParticleContainer& mpc, - MultiFluidContainer* mfl, - amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, - amrex::Vector< std::array< 
std::unique_ptr, 3> >& Bfield_fp -) { + [[maybe_unused]] MultiFluidContainer* mfl, + int max_level) +{ WARPX_PROFILE("RelativisticExplicitES::ComputeSpaceChargeField"); - amrex::ignore_unused(rho_fp, rho_cp, phi_fp, mfl); + + using ablastr::fields::MultiLevelVectorField; + using warpx::fields::FieldType; const bool always_run_solve = (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic); + MultiLevelVectorField Efield_fp = fields.get_mr_levels_alldirs(FieldType::Efield_fp, max_level); + MultiLevelVectorField Bfield_fp = fields.get_mr_levels_alldirs(FieldType::Bfield_fp, max_level); + // Loop over the species and add their space-charge contribution to E and B. // Note that the fields calculated here does not include the E field // due to simulation boundary potentials for (auto const& species : mpc) { if (always_run_solve || (species->initialize_self_fields)) { - AddSpaceChargeField(charge_buf, *species, Efield_fp, Bfield_fp); + AddSpaceChargeField(*species, Efield_fp, Bfield_fp); } } @@ -61,10 +63,9 @@ void RelativisticExplicitES::ComputeSpaceChargeField ( } void RelativisticExplicitES::AddSpaceChargeField ( - amrex::Vector >& charge_buf, WarpXParticleContainer& pc, - amrex::Vector, 3>>& Efield_fp, - amrex::Vector, 3>>& Bfield_fp) + ablastr::fields::MultiLevelVectorField& Efield_fp, + ablastr::fields::MultiLevelVectorField& Bfield_fp) { WARPX_PROFILE("RelativisticExplicitES::AddSpaceChargeField"); @@ -78,9 +79,9 @@ void RelativisticExplicitES::AddSpaceChargeField ( auto & warpx = WarpX::GetInstance(); // Allocate fields for charge and potential - Vector > rho(num_levels); - Vector > rho_coarse(num_levels); // Used in order to interpolate between levels - Vector > phi(num_levels); + Vector> rho(num_levels); + Vector> rho_coarse(num_levels); // Used in order to interpolate between levels + Vector> phi(num_levels); // Use number of guard cells used for local deposition of rho const amrex::IntVect ng = warpx.get_ng_depos_rho(); for (int lev = 0; lev < num_levels; lev++) { @@ -96,9 +97,6 @@ void RelativisticExplicitES::AddSpaceChargeField ( cba.coarsen(warpx.refRatio(lev-1)); rho_coarse[lev] = std::make_unique(cba, warpx.DistributionMap(lev), 1, ng); rho_coarse[lev]->setVal(0.); - if (charge_buf[lev]) { - charge_buf[lev]->setVal(0.); - } } } // Deposit particle charge density (source of Poisson solver) @@ -108,10 +106,17 @@ void RelativisticExplicitES::AddSpaceChargeField ( bool const apply_boundary_and_scale_volume = true; bool const interpolate_across_levels = false; if ( !pc.do_not_deposit) { - pc.DepositCharge(rho, local, reset, apply_boundary_and_scale_volume, - interpolate_across_levels); + pc.DepositCharge(amrex::GetVecOfPtrs(rho), + local, reset, apply_boundary_and_scale_volume, + interpolate_across_levels); } - warpx.SyncRho(rho, rho_coarse, charge_buf); // Apply filter, perform MPI exchange, interpolate across levels + + // Apply filter, perform MPI exchange, interpolate across levels + const Vector> rho_buf(num_levels); + warpx.SyncRho( + amrex::GetVecOfPtrs(rho), + amrex::GetVecOfPtrs(rho_coarse), + amrex::GetVecOfPtrs(rho_buf)); // Get the particle beta vector bool const local_average = false; // Average across all MPI ranks @@ -122,25 +127,26 @@ void RelativisticExplicitES::AddSpaceChargeField ( } // Compute the potential phi, by solving the Poisson equation - computePhi( rho, phi, beta, pc.self_fields_required_precision, + computePhi( amrex::GetVecOfPtrs(rho), amrex::GetVecOfPtrs(phi), + beta, pc.self_fields_required_precision, 
diff --git a/Source/FieldSolver/Fields.H b/Source/FieldSolver/Fields.H
deleted file mode 100644
index 9e4ce5a71a7..00000000000
--- a/Source/FieldSolver/Fields.H
+++ /dev/null
@@ -1,60 +0,0 @@
-/* Copyright 2024 Luca Fedeli
- *
- * This file is part of WarpX.
- *
- * License: BSD-3-Clause-LBNL
- */
-#ifndef WARPX_FIELDS_H_
-#define WARPX_FIELDS_H_
-
-#include <algorithm>
-#include <iterator>
-
-namespace warpx::fields
-{
-    enum struct FieldType : int
-    {
-        None,
-        Efield_aux,
-        Bfield_aux,
-        Efield_fp,
-        Bfield_fp,
-        Efield_fp_external,
-        Bfield_fp_external,
-        current_fp,
-        current_fp_nodal,
-        rho_fp,
-        F_fp,
-        G_fp,
-        phi_fp,
-        vector_potential_fp,
-        Efield_cp,
-        Bfield_cp,
-        current_cp,
-        rho_cp,
-        F_cp,
-        G_cp,
-        edge_lengths,
-        face_areas,
-        Efield_avg_fp,
-        Bfield_avg_fp,
-        Efield_avg_cp,
-        Bfield_avg_cp
-    };
-
-    constexpr FieldType ArrayFieldTypes[] = {
-        FieldType::Efield_aux, FieldType::Bfield_aux, FieldType::Efield_fp, FieldType::Bfield_fp,
-        FieldType::current_fp, FieldType::current_fp_nodal, FieldType::vector_potential_fp,
-        FieldType::Efield_cp, FieldType::Bfield_cp, FieldType::current_cp,
-        FieldType::Efield_avg_fp, FieldType::Bfield_avg_fp, FieldType::Efield_avg_cp, FieldType::Bfield_avg_cp};
-
-    inline bool
-    isFieldArray (const FieldType field_type)
-    {
-        return std::any_of( std::begin(ArrayFieldTypes), std::end(ArrayFieldTypes),
-                            [field_type](const FieldType& f) { return f == field_type; });
-    }
-
-}
-
-#endif //WARPX_FIELDS_H_
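The header removed above is self-contained apart from its two standard includes (reconstructed here as <algorithm> and <iterator>, which std::any_of, std::begin, and std::end require). For readers tracking where the helper goes, its membership test can be reproduced standalone; a sketch using a trimmed stand-in enum rather than the real warpx::fields one:

    #include <algorithm>
    #include <iterator>

    enum struct FieldType : int { None, Efield_fp, Bfield_fp, rho_fp };

    // Same shape as the deleted ArrayFieldTypes table: the vector-valued entries.
    constexpr FieldType ArrayFieldTypes[] = { FieldType::Efield_fp, FieldType::Bfield_fp };

    // True if the field has one MultiFab per direction rather than a single scalar one.
    inline bool isFieldArray (const FieldType field_type)
    {
        return std::any_of(std::begin(ArrayFieldTypes), std::end(ArrayFieldTypes),
                           [field_type](const FieldType& f) { return f == field_type; });
    }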
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp
index e6f010e6f44..e72260fcf4f 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp
@@ -35,13 +35,15 @@ using namespace amrex;
  * \brief Update the B field at the boundary, using the Silver-Mueller condition
  */
 void FiniteDifferenceSolver::ApplySilverMuellerBoundary (
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Efield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Bfield,
+    ablastr::fields::VectorField& Efield,
+    ablastr::fields::VectorField& Bfield,
     amrex::Box domain_box,
     amrex::Real const dt,
     amrex::Array<FieldBoundaryType,AMREX_SPACEDIM> field_boundary_lo,
     amrex::Array<FieldBoundaryType,AMREX_SPACEDIM> field_boundary_hi)
 {
+    using ablastr::fields::Direction;
+
     // Ensure that we are using the Yee solver
     WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
         m_fdtd_algo == ElectromagneticSolverAlgo::Yee,
@@ -79,14 +81,14 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary (
     // tiling is usually set by TilingIfNotGPU()
     // but here, we set it to false because of potential race condition,
     // since we grow the tiles by one guard cell after creating them.
-    for ( MFIter mfi(*Efield[0], false); mfi.isValid(); ++mfi ) {
+    for ( MFIter mfi(*Efield[Direction{0}], false); mfi.isValid(); ++mfi ) {
 
         // Extract field data for this grid/tile
-        Array4<Real> const& Er = Efield[0]->array(mfi);
-        Array4<Real> const& Et = Efield[1]->array(mfi);
-        Array4<Real> const& Ez = Efield[2]->array(mfi);
-        Array4<Real> const& Br = Bfield[0]->array(mfi);
-        Array4<Real> const& Bt = Bfield[1]->array(mfi);
-        Array4<Real> const& Bz = Bfield[2]->array(mfi);
+        Array4<Real> const& Er = Efield[Direction{0}]->array(mfi);
+        Array4<Real> const& Et = Efield[Direction{1}]->array(mfi);
+        Array4<Real> const& Ez = Efield[Direction{2}]->array(mfi);
+        Array4<Real> const& Br = Bfield[Direction{0}]->array(mfi);
+        Array4<Real> const& Bt = Bfield[Direction{1}]->array(mfi);
+        Array4<Real> const& Bz = Bfield[Direction{2}]->array(mfi);
 
         // Extract tileboxes for which to loop
         Box tbr = mfi.tilebox(Bfield[0]->ixType().toIntVect());
@@ -203,18 +205,18 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary (
     // tiling is usually set by TilingIfNotGPU()
     // but here, we set it to false because of potential race condition,
     // since we grow the tiles by one guard cell after creating them.
-    for ( MFIter mfi(*Efield[0], false); mfi.isValid(); ++mfi ) {
+    for ( MFIter mfi(*Efield[Direction{0}], false); mfi.isValid(); ++mfi ) {
 
         // Extract field data for this grid/tile
-        Array4<Real> const& Ex = Efield[0]->array(mfi);
-        Array4<Real> const& Ey = Efield[1]->array(mfi);
+        Array4<Real> const& Ex = Efield[Direction{0}]->array(mfi);
+        Array4<Real> const& Ey = Efield[Direction{1}]->array(mfi);
 #ifndef WARPX_DIM_1D_Z
-        Array4<Real> const& Ez = Efield[2]->array(mfi);
+        Array4<Real> const& Ez = Efield[Direction{2}]->array(mfi);
 #endif
-        Array4<Real> const& Bx = Bfield[0]->array(mfi);
-        Array4<Real> const& By = Bfield[1]->array(mfi);
+        Array4<Real> const& Bx = Bfield[Direction{0}]->array(mfi);
+        Array4<Real> const& By = Bfield[Direction{1}]->array(mfi);
 #ifndef WARPX_DIM_1D_Z
-        Array4<Real> const& Bz = Bfield[2]->array(mfi);
+        Array4<Real> const& Bz = Bfield[Direction{2}]->array(mfi);
 #endif
 
         // Extract the tileboxes for which to loop
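The mechanical change in this file is index type only: ablastr::fields::VectorField is indexed per component, and the hunks above replace the bare integers 0, 1, 2 with the tagged Direction{0..2}. A short sketch of the access pattern, assuming only that a VectorField holds one amrex::MultiFab pointer per direction, as the hunks imply (scaleComponents is an illustrative name):

    using ablastr::fields::Direction;

    void scaleComponents (ablastr::fields::VectorField& Efield, amrex::Real factor)
    {
        // Direction{} makes the component index a distinct type rather than a bare int,
        // so a level index cannot silently be used where a component index is expected.
        for (int d = 0; d < 3; ++d) {
            Efield[Direction{d}]->mult(factor);
        }
    }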
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp
index 0702b264874..3f757603845 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp
@@ -16,6 +16,8 @@
 #   include "FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H"
 #endif
 
+#include <ablastr/fields/MultiFabRegister.H>
+
 #include
 #include
 #include
@@ -40,9 +42,10 @@ using namespace amrex;
  * \brief Update the F field, over one timestep
  */
 void FiniteDifferenceSolver::ComputeDivE (
-    const std::array<std::unique_ptr<amrex::MultiFab>,3>& Efield,
-    amrex::MultiFab& divEfield ) {
-
+    ablastr::fields::VectorField const & Efield,
+    amrex::MultiFab& divEfield
+)
+{
     // Select algorithm (The choice of algorithm is a runtime option,
     // but we compile code for each algorithm, using templates)
 #ifdef WARPX_DIM_RZ
@@ -77,7 +80,7 @@ void FiniteDifferenceSolver::ComputeDivE (
 
 template<typename T_Algo>
 void FiniteDifferenceSolver::ComputeDivECartesian (
-    const std::array<std::unique_ptr<amrex::MultiFab>,3>& Efield,
+    ablastr::fields::VectorField const & Efield,
     amrex::MultiFab& divEfield ) {
 
     // Loop through the grids, and over the tiles within each grid
@@ -123,9 +126,10 @@ void FiniteDifferenceSolver::ComputeDivECartesian (
 
 template<typename T_Algo>
 void FiniteDifferenceSolver::ComputeDivECylindrical (
-    const std::array<std::unique_ptr<amrex::MultiFab>,3>& Efield,
-    amrex::MultiFab& divEfield ) {
-
+    ablastr::fields::VectorField const & Efield,
+    amrex::MultiFab& divEfield
+)
+{
     // Loop through the grids, and over the tiles within each grid
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
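With the signature above, callers no longer own the E field they pass in. A sketch of the expected call site, assuming the get_alldirs accessor used elsewhere in this series (writeDivE is an illustrative wrapper, not part of the patch):

    void writeDivE (ablastr::fields::MultiFabRegister& fields,
                    FiniteDifferenceSolver& solver,
                    amrex::MultiFab& divE, int lev)
    {
        using warpx::fields::FieldType;
        // A VectorField is just three non-owning MultiFab pointers,
        // so borrowing it from the registry and passing by const reference is cheap.
        ablastr::fields::VectorField const Efield =
            fields.get_alldirs(FieldType::Efield_fp, lev);
        solver.ComputeDivE(Efield, divE);
    }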
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp
index 4a71afda671..63b51cb8416 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp
@@ -7,6 +7,7 @@
 #include "FiniteDifferenceSolver.H"
 
 #include "EmbeddedBoundary/WarpXFaceInfoBox.H"
+#include "Fields.H"
 #ifndef WARPX_DIM_RZ
 #   include "FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H"
 #   include "FiniteDifferenceAlgorithms/CartesianCKCAlgorithm.H"
@@ -48,17 +49,21 @@ using namespace amrex;
  * \brief Update the B field, over one timestep
  */
 void FiniteDifferenceSolver::EvolveB (
-    [[maybe_unused]] std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Bfield,
-    [[maybe_unused]] std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-    [[maybe_unused]] std::unique_ptr<amrex::MultiFab> const& Gfield,
-    [[maybe_unused]] std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& face_areas,
-    [[maybe_unused]] std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& area_mod,
-    [[maybe_unused]] std::array< std::unique_ptr<amrex::MultiFab>, 3 >& ECTRhofield,
-    [[maybe_unused]] std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Venl,
+    ablastr::fields::MultiFabRegister& fields,
+    int lev,
+    PatchType patch_type,
     [[maybe_unused]] std::array< std::unique_ptr<amrex::iMultiFab>, 3 >& flag_info_cell,
     [[maybe_unused]] std::array< std::unique_ptr<amrex::LayoutData<FaceInfoBox> >, 3 >& borrowing,
-    [[maybe_unused]] int lev,
-    [[maybe_unused]] amrex::Real const dt ) {
+    [[maybe_unused]] amrex::Real const dt )
+{
+
+    using ablastr::fields::Direction;
+    using warpx::fields::FieldType;
+
+    const ablastr::fields::VectorField Bfield = patch_type == PatchType::fine ?
+        fields.get_alldirs(FieldType::Bfield_fp, lev) : fields.get_alldirs(FieldType::Bfield_cp, lev);
+    const ablastr::fields::VectorField Efield = patch_type == PatchType::fine ?
+        fields.get_alldirs(FieldType::Efield_fp, lev) : fields.get_alldirs(FieldType::Efield_cp, lev);
 
     // Select algorithm (The choice of algorithm is a runtime option,
     // but we compile code for each algorithm, using templates)
@@ -68,6 +73,28 @@ void FiniteDifferenceSolver::EvolveB (
     EvolveBCylindrical <CylindricalYeeAlgorithm> ( Bfield, Efield, lev, dt );
 #else
+    amrex::MultiFab const * Gfield = nullptr;
+    if (fields.has(FieldType::G_fp, lev)) {
+        Gfield = patch_type == PatchType::fine ?
+            fields.get(FieldType::G_fp, lev) : fields.get(FieldType::G_cp, lev);
+    }
+    ablastr::fields::VectorField face_areas;
+    if (fields.has(FieldType::face_areas, Direction{0}, lev)) {
+        face_areas = fields.get_alldirs(FieldType::face_areas, lev);
+    }
+    ablastr::fields::VectorField area_mod;
+    if (fields.has(FieldType::area_mod, Direction{0}, lev)) {
+        area_mod = fields.get_alldirs(FieldType::area_mod, lev);
+    }
+    ablastr::fields::VectorField ECTRhofield;
+    if (fields.has(FieldType::ECTRhofield, Direction{0}, lev)) {
+        ECTRhofield = fields.get_alldirs(FieldType::ECTRhofield, lev);
+    }
+    ablastr::fields::VectorField Venl;
+    if (fields.has(FieldType::Venl, Direction{0}, lev)) {
+        Venl = fields.get_alldirs(FieldType::Venl, lev);
+    }
+
     if (m_grid_type == GridType::Collocated) {
 
         EvolveBCartesian <CartesianNodalAlgorithm> ( Bfield, Efield, Gfield, lev, dt );
@@ -94,9 +121,9 @@ void FiniteDifferenceSolver::EvolveB (
 
 template<typename T_Algo>
 void FiniteDifferenceSolver::EvolveBCartesian (
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Bfield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-    std::unique_ptr<amrex::MultiFab> const& Gfield,
+    ablastr::fields::VectorField const& Bfield,
+    ablastr::fields::VectorField const& Efield,
+    amrex::MultiFab const * Gfield,
     int lev, amrex::Real const dt ) {
 
     amrex::LayoutData<amrex::Real>* cost = WarpX::getCosts(lev);
@@ -162,7 +189,7 @@ void FiniteDifferenceSolver::EvolveBCartesian (
         if (Gfield) {
 
             // Extract field data for this grid/tile
-            const Array4<Real> G = Gfield->array(mfi);
+            Array4<Real const> const G = Gfield->array(mfi);
 
             // Loop over cells and update G
             amrex::ParallelFor(tbx, tby, tbz,
@@ -193,11 +220,11 @@ void FiniteDifferenceSolver::EvolveBCartesian (
 
 void FiniteDifferenceSolver::EvolveBCartesianECT (
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Bfield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& face_areas,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& area_mod,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& ECTRhofield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Venl,
+    ablastr::fields::VectorField const& Bfield,
+    ablastr::fields::VectorField const& face_areas,
+    ablastr::fields::VectorField const& area_mod,
+    ablastr::fields::VectorField const& ECTRhofield,
+    ablastr::fields::VectorField const& Venl,
     std::array< std::unique_ptr<amrex::iMultiFab>, 3 >& flag_info_cell,
     std::array< std::unique_ptr<amrex::LayoutData<FaceInfoBox> >, 3 >& borrowing,
     const int lev, amrex::Real const dt ) {
@@ -359,8 +386,8 @@ void FiniteDifferenceSolver::EvolveBCartesianECT (
 
 template<typename T_Algo>
 void FiniteDifferenceSolver::EvolveBCylindrical (
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Bfield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
+    ablastr::fields::VectorField const& Bfield,
+    ablastr::fields::VectorField const& Efield,
     int lev, amrex::Real const dt ) {
 
     amrex::LayoutData<amrex::Real>* cost = WarpX::getCosts(lev);
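EvolveB now resolves its own optional inputs instead of receiving possibly-null arguments. The idiom introduced above, has() followed by get()/get_alldirs(), generalizes; a minimal sketch under the assumption (consistent with the hunks) that a default-constructed VectorField holds null pointers and that absent fields are simply never registered:

    #include "Fields.H"
    #include <ablastr/fields/MultiFabRegister.H>

    void fetchOptionalInputs (ablastr::fields::MultiFabRegister& fields, int lev)
    {
        using ablastr::fields::Direction;
        using warpx::fields::FieldType;
        // Scalar optional field: a raw pointer that stays null when absent.
        amrex::MultiFab const * Gfield = nullptr;
        if (fields.has(FieldType::G_fp, lev)) {
            Gfield = fields.get(FieldType::G_fp, lev);
        }
        // Vector optional field: default-constructed (empty) unless registered.
        ablastr::fields::VectorField face_areas;
        if (fields.has(FieldType::face_areas, Direction{0}, lev)) {
            face_areas = fields.get_alldirs(FieldType::face_areas, lev);
        }
        // Downstream code keeps its existing `if (Gfield) { ... }` null checks.
        amrex::ignore_unused(Gfield, face_areas);
    }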
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp
index 0ad2c8d6802..e3289d52cfe 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp
@@ -7,6 +7,7 @@
 #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H"
 
 #include "BoundaryConditions/PMLComponent.H"
+#include "Fields.H"
 
 #ifndef WARPX_DIM_RZ
 #   include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H"
@@ -41,18 +42,27 @@ using namespace amrex;
  * \brief Update the B field, over one timestep
  */
 void FiniteDifferenceSolver::EvolveBPML (
-    std::array< amrex::MultiFab*, 3 > Bfield,
-    std::array< amrex::MultiFab*, 3 > const Efield,
+    ablastr::fields::MultiFabRegister& fields,
+    PatchType patch_type,
+    int level,
     amrex::Real const dt,
-    const bool dive_cleaning) {
+    const bool dive_cleaning
+)
+{
+    using warpx::fields::FieldType;
 
     // Select algorithm (The choice of algorithm is a runtime option,
     // but we compile code for each algorithm, using templates)
 #ifdef WARPX_DIM_RZ
-    amrex::ignore_unused(Bfield, Efield, dt, dive_cleaning);
+    amrex::ignore_unused(fields, patch_type, level, dt, dive_cleaning);
     WARPX_ABORT_WITH_MESSAGE(
         "PML are not implemented in cylindrical geometry.");
 #else
+    const ablastr::fields::VectorField Bfield = (patch_type == PatchType::fine) ?
+        fields.get_alldirs(FieldType::pml_B_fp, level) : fields.get_alldirs(FieldType::pml_B_cp, level);
+    const ablastr::fields::VectorField Efield = (patch_type == PatchType::fine) ?
+        fields.get_alldirs(FieldType::pml_E_fp, level) : fields.get_alldirs(FieldType::pml_E_cp, level);
+
     if (m_grid_type == ablastr::utils::enums::GridType::Collocated) {
 
         EvolveBPMLCartesian <CartesianNodalAlgorithm> (Bfield, Efield, dt, dive_cleaning);
@@ -78,7 +88,7 @@ void FiniteDifferenceSolver::EvolveBPML (
 template<typename T_Algo>
 void FiniteDifferenceSolver::EvolveBPMLCartesian (
     std::array< amrex::MultiFab*, 3 > Bfield,
-    std::array< amrex::MultiFab*, 3 > const Efield,
+    ablastr::fields::VectorField const Efield,
     amrex::Real const dt,
     const bool dive_cleaning) {
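The PML update keys fine versus coarse patch data off PatchType instead of receiving two separate pointer sets; the same two-way select recurs in EvolveE and EvolveEPML below. A sketch of the idiom (pickPmlB is an illustrative name):

    ablastr::fields::VectorField pickPmlB (ablastr::fields::MultiFabRegister& fields,
                                           PatchType patch_type, int level)
    {
        using warpx::fields::FieldType;
        // Each registered field comes as an _fp (fine) / _cp (coarse) pair;
        // the caller's PatchType picks which member of the pair to borrow.
        return (patch_type == PatchType::fine)
            ? fields.get_alldirs(FieldType::pml_B_fp, level)
            : fields.get_alldirs(FieldType::pml_B_cp, level);
    }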
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp
index 566a81da021..db8e80cc972 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp
@@ -6,6 +6,7 @@
  */
 #include "FiniteDifferenceSolver.H"
 
+#include "Fields.H"
 #ifndef WARPX_DIM_RZ
 #   include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H"
 #   include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianCKCAlgorithm.H"
@@ -19,6 +20,8 @@
 #include "Utils/WarpXConst.H"
 #include "WarpX.H"
 
+#include <ablastr/fields/MultiFabRegister.H>
+
 #include
 #include
 #include
@@ -42,22 +45,48 @@
 #include
 
 using namespace amrex;
+using namespace ablastr::fields;
 
 /**
  * \brief Update the E field, over one timestep
  */
 void FiniteDifferenceSolver::EvolveE (
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Efield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Bfield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Jfield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& face_areas,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& ECTRhofield,
-    std::unique_ptr<amrex::MultiFab> const& Ffield,
-    int lev, amrex::Real const dt ) {
+    ablastr::fields::MultiFabRegister & fields,
+    int lev,
+    PatchType patch_type,
+    ablastr::fields::VectorField const& Efield,
+    amrex::Real const dt
+)
+{
+    using ablastr::fields::Direction;
+    using warpx::fields::FieldType;
+
+    const ablastr::fields::VectorField Bfield = patch_type == PatchType::fine ?
+        fields.get_alldirs(FieldType::Bfield_fp, lev) : fields.get_alldirs(FieldType::Bfield_cp, lev);
+    const ablastr::fields::VectorField Jfield = patch_type == PatchType::fine ?
+        fields.get_alldirs(FieldType::current_fp, lev) : fields.get_alldirs(FieldType::current_cp, lev);
+
+    amrex::MultiFab* Ffield = nullptr;
+    if (fields.has(FieldType::F_fp, lev)) {
+        Ffield = patch_type == PatchType::fine ?
+            fields.get(FieldType::F_fp, lev) : fields.get(FieldType::F_cp, lev);
+    }
 
-    if (m_fdtd_algo != ElectromagneticSolverAlgo::ECT) {
-        amrex::ignore_unused(face_areas, ECTRhofield);
+    ablastr::fields::VectorField edge_lengths;
+    if (fields.has(FieldType::edge_lengths, Direction{0}, lev)) {
+        edge_lengths = fields.get_alldirs(FieldType::edge_lengths, lev);
+    }
+    ablastr::fields::VectorField face_areas;
+    if (fields.has(FieldType::face_areas, Direction{0}, lev)) {
+        face_areas = fields.get_alldirs(FieldType::face_areas, lev);
+    }
+    ablastr::fields::VectorField area_mod;
+    if (fields.has(FieldType::area_mod, Direction{0}, lev)) {
+        area_mod = fields.get_alldirs(FieldType::area_mod, lev);
+    }
+    ablastr::fields::VectorField ECTRhofield;
+    if (fields.has(FieldType::ECTRhofield, Direction{0}, lev)) {
+        ECTRhofield = fields.get_alldirs(FieldType::ECTRhofield, lev);
     }
 
     // Select algorithm (The choice of algorithm is a runtime option,
@@ -90,11 +119,11 @@ void FiniteDifferenceSolver::EvolveE (
 
 template<typename T_Algo>
 void FiniteDifferenceSolver::EvolveECartesian (
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Efield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Bfield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Jfield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
-    std::unique_ptr<amrex::MultiFab> const& Ffield,
+    ablastr::fields::VectorField const& Efield,
+    ablastr::fields::VectorField const& Bfield,
+    ablastr::fields::VectorField const& Jfield,
+    VectorField const& edge_lengths,
+    amrex::MultiFab const* Ffield,
     int lev, amrex::Real const dt ) {
 
 #ifndef AMREX_USE_EB
@@ -191,7 +220,7 @@ void FiniteDifferenceSolver::EvolveECartesian (
         if (Ffield) {
 
             // Extract field data for this grid/tile
-            const Array4<Real> F = Ffield->array(mfi);
+            const Array4<Real const> F = Ffield->array(mfi);
 
             // Loop over the cells and update the fields
             amrex::ParallelFor(tex, tey, tez,
@@ -224,11 +253,11 @@ void FiniteDifferenceSolver::EvolveECartesian (
 
 template<typename T_Algo>
 void FiniteDifferenceSolver::EvolveECylindrical (
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Efield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Bfield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Jfield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
-    std::unique_ptr<amrex::MultiFab> const& Ffield,
+    ablastr::fields::VectorField const& Efield,
+    ablastr::fields::VectorField const& Bfield,
+    ablastr::fields::VectorField const& Jfield,
+    ablastr::fields::VectorField const& edge_lengths,
+    amrex::MultiFab const* Ffield,
    int lev, amrex::Real const dt ) {
 
 #ifndef AMREX_USE_EB
@@ -391,7 +420,7 @@ void FiniteDifferenceSolver::EvolveECylindrical (
         if (Ffield) {
 
             // Extract field data for this grid/tile
-            const Array4<Real> F = Ffield->array(mfi);
+            const Array4<Real const> F = Ffield->array(mfi);
 
             // Loop over the cells and update the fields
             amrex::ParallelFor(ter, tet, tez,
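Note what disappears in the hunk above: the guard `if (m_fdtd_algo != ElectromagneticSolverAlgo::ECT) { amrex::ignore_unused(face_areas, ECTRhofield); }` is no longer needed, because ECT-only data is fetched only when it exists and is touched only on the ECT code path. A condensed sketch of the resulting control flow, under the same assumptions as the hunk:

    // Fetch only what exists; absent fields stay empty.
    ablastr::fields::VectorField ECTRhofield;
    if (fields.has(FieldType::ECTRhofield, Direction{0}, lev)) {
        ECTRhofield = fields.get_alldirs(FieldType::ECTRhofield, lev);
    }
    // The ECT-only data is used exclusively by the ECT solver branch,
    // so no ignore_unused bookkeeping is required for the other algorithms.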
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp
index 8abdab71300..0740a190bec 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp
@@ -42,15 +42,16 @@
 #include
 
 using namespace amrex;
+using namespace ablastr::fields;
 
 /**
  * \brief Update the B field, over one timestep
 */
 void FiniteDifferenceSolver::EvolveECTRho (
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& face_areas,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& ECTRhofield,
+    ablastr::fields::VectorField const& Efield,
+    ablastr::fields::VectorField const& edge_lengths,
+    ablastr::fields::VectorField const& face_areas,
+    ablastr::fields::VectorField const& ECTRhofield,
     const int lev) {
 
 #if !defined(WARPX_DIM_RZ) and defined(AMREX_USE_EB)
@@ -67,10 +68,10 @@ void FiniteDifferenceSolver::EvolveECTRho (
 // If we implement ECT in 1D we will need to take care of this #ifndef differently
 #ifndef WARPX_DIM_RZ
 void FiniteDifferenceSolver::EvolveRhoCartesianECT (
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& face_areas,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 >& ECTRhofield, const int lev ) {
+    ablastr::fields::VectorField const& Efield,
+    ablastr::fields::VectorField const& edge_lengths,
+    ablastr::fields::VectorField const& face_areas,
+    ablastr::fields::VectorField const& ECTRhofield, const int lev ) {
 
 #ifdef AMREX_USE_EB
 #if !(defined(WARPX_DIM_3D) || defined(WARPX_DIM_XZ))
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp
index a1ba6e44a8c..9ecae05516d 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp
@@ -17,6 +17,7 @@
 #   include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H"
 #endif
 #include "EmbeddedBoundary/Enabled.H"
+#include "Fields.H"
 #include "Utils/TextMsg.H"
 #include "Utils/WarpXAlgorithmSelection.H"
 #include "Utils/WarpXConst.H"
@@ -45,21 +46,38 @@ using namespace amrex;
  * \brief Update the E field, over one timestep
 */
 void FiniteDifferenceSolver::EvolveEPML (
-    std::array< amrex::MultiFab*, 3 > Efield,
-    std::array< amrex::MultiFab*, 3 > const Bfield,
-    std::array< amrex::MultiFab*, 3 > const Jfield,
-    std::array< amrex::MultiFab*, 3 > const edge_lengths,
-    amrex::MultiFab* const Ffield,
+    ablastr::fields::MultiFabRegister& fields,
+    PatchType patch_type,
+    int level,
     MultiSigmaBox const& sigba,
     amrex::Real const dt, bool pml_has_particles ) {
 
     // Select algorithm (The choice of algorithm is a runtime option,
     // but we compile code for each algorithm, using templates)
 #ifdef WARPX_DIM_RZ
-    amrex::ignore_unused(Efield, Bfield, Jfield, Ffield, sigba, dt, pml_has_particles, edge_lengths);
+    amrex::ignore_unused(fields, patch_type, level, sigba, dt, pml_has_particles);
     WARPX_ABORT_WITH_MESSAGE(
         "PML are not implemented in cylindrical geometry.");
 #else
+    using ablastr::fields::Direction;
+    using warpx::fields::FieldType;
+
+    const ablastr::fields::VectorField Efield = (patch_type == PatchType::fine) ?
+        fields.get_alldirs(FieldType::pml_E_fp, level) : fields.get_alldirs(FieldType::pml_E_cp, level);
+    const ablastr::fields::VectorField Bfield = (patch_type == PatchType::fine) ?
+        fields.get_alldirs(FieldType::pml_B_fp, level) : fields.get_alldirs(FieldType::pml_B_cp, level);
+    const ablastr::fields::VectorField Jfield = (patch_type == PatchType::fine) ?
+        fields.get_alldirs(FieldType::pml_j_fp, level) : fields.get_alldirs(FieldType::pml_j_cp, level);
+    ablastr::fields::VectorField edge_lengths;
+    if (fields.has(FieldType::pml_edge_lengths, Direction{0}, level)) {
+        edge_lengths = fields.get_alldirs(FieldType::pml_edge_lengths, level);
+    }
+    amrex::MultiFab * Ffield = nullptr;
+    if (fields.has(FieldType::pml_F_fp, level)) {
+        Ffield = (patch_type == PatchType::fine) ?
+            fields.get(FieldType::pml_F_fp, level) : fields.get(FieldType::pml_F_cp, level);
+    }
+
     if (m_grid_type == GridType::Collocated) {
 
         EvolveEPMLCartesian <CartesianNodalAlgorithm> (
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp
index 8ce578bb52a..c7f836e47ec 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp
@@ -44,9 +44,9 @@ using namespace amrex;
  * \brief Update the F field, over one timestep
 */
 void FiniteDifferenceSolver::EvolveF (
-    std::unique_ptr<amrex::MultiFab>& Ffield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-    std::unique_ptr<amrex::MultiFab> const& rhofield,
+    amrex::MultiFab* Ffield,
+    ablastr::fields::VectorField const& Efield,
+    amrex::MultiFab* const rhofield,
     int const rhocomp,
     amrex::Real const dt )
 {
@@ -82,9 +82,9 @@ void FiniteDifferenceSolver::EvolveF (
 
 template<typename T_Algo>
 void FiniteDifferenceSolver::EvolveFCartesian (
-    std::unique_ptr<amrex::MultiFab>& Ffield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-    std::unique_ptr<amrex::MultiFab> const& rhofield,
+    amrex::MultiFab* Ffield,
+    ablastr::fields::VectorField const Efield,
+    amrex::MultiFab* const rhofield,
     int const rhocomp,
     amrex::Real const dt )
 {
@@ -135,9 +135,9 @@ void FiniteDifferenceSolver::EvolveFCartesian (
 
 template<typename T_Algo>
 void FiniteDifferenceSolver::EvolveFCylindrical (
-    std::unique_ptr<amrex::MultiFab>& Ffield,
-    std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-    std::unique_ptr<amrex::MultiFab> const& rhofield,
+    amrex::MultiFab* Ffield,
+    ablastr::fields::VectorField const & Efield,
+    amrex::MultiFab* const rhofield,
     int const rhocomp,
     amrex::Real const dt )
 {
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp
index 4ef056c937a..f14a42f451b 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp
@@ -41,7 +41,7 @@ using namespace amrex;
 */
 void FiniteDifferenceSolver::EvolveFPML (
     amrex::MultiFab* Ffield,
-    std::array< amrex::MultiFab*, 3 > const Efield,
+    ablastr::fields::VectorField const Efield,
     amrex::Real const dt )
 {
     // Select algorithm (The choice of algorithm is a runtime option,
@@ -75,7 +75,7 @@ void FiniteDifferenceSolver::EvolveFPML (
 template<typename T_Algo>
 void FiniteDifferenceSolver::EvolveFPMLCartesian (
     amrex::MultiFab* Ffield,
-    std::array< amrex::MultiFab*, 3 > const Efield,
+    ablastr::fields::VectorField const Efield,
     amrex::Real const dt )
 {
     // Loop through the grids, and over the tiles within each grid
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp
index b6bc8fdca7f..759644201bc 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp
@@ -38,8 +38,8 @@ using namespace amrex;
 
 void FiniteDifferenceSolver::EvolveG (
-    std::unique_ptr<amrex::MultiFab>& Gfield,
-    std::array<std::unique_ptr<amrex::MultiFab>,3> const& Bfield,
+    amrex::MultiFab* Gfield,
+    ablastr::fields::VectorField const& Bfield,
     amrex::Real const dt)
 {
 #ifdef WARPX_DIM_RZ
@@ -70,8 +70,8 @@ void FiniteDifferenceSolver::EvolveG (
 
 template<typename T_Algo>
 void FiniteDifferenceSolver::EvolveGCartesian (
-    std::unique_ptr<amrex::MultiFab>& Gfield,
-    std::array<std::unique_ptr<amrex::MultiFab>,3> const& Bfield,
+    amrex::MultiFab* Gfield,
+    ablastr::fields::VectorField const& Bfield,
     amrex::Real const dt)
 {
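Across EvolveF, EvolveFPML, and EvolveG the conventions are now uniform: scalar fields that may be absent travel as raw amrex::MultiFab* (null meaning absent), and component triples travel as ablastr::fields::VectorField. A sketch of a caller honoring both conventions (evolveScalars is an illustrative wrapper name):

    void evolveScalars (FiniteDifferenceSolver& solver,
                        ablastr::fields::MultiFabRegister& fields,
                        ablastr::fields::VectorField const& Efield,
                        ablastr::fields::VectorField const& Bfield,
                        amrex::Real dt, int lev)
    {
        using warpx::fields::FieldType;
        amrex::MultiFab* Ffield = fields.has(FieldType::F_fp, lev)
            ? fields.get(FieldType::F_fp, lev) : nullptr;
        amrex::MultiFab* Gfield = fields.has(FieldType::G_fp, lev)
            ? fields.get(FieldType::G_fp, lev) : nullptr;
        // Null pointers mean "this optional update is disabled in this run".
        if (Ffield) { solver.EvolveF(Ffield, Efield, fields.get(FieldType::rho_fp, lev), 0, dt); }
        if (Gfield) { solver.EvolveG(Gfield, Bfield, dt); }
    }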
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H
index 0a9f21e6863..03f51f7ba62 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H
+++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H
@@ -18,6 +18,7 @@
 #include "MacroscopicProperties/MacroscopicProperties_fwd.H"
 
 #include
+#include <ablastr/fields/MultiFabRegister.H>
 #include
 #include
 
@@ -51,52 +52,47 @@ class FiniteDifferenceSolver
         std::array<amrex::Real,3> cell_size,
         ablastr::utils::enums::GridType grid_type );
 
-    void EvolveB ( std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Bfield,
-                   std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-                   std::unique_ptr<amrex::MultiFab> const& Gfield,
-                   std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& face_areas,
-                   std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& area_mod,
-                   std::array< std::unique_ptr<amrex::MultiFab>, 3 >& ECTRhofield,
-                   std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Venl,
+    void EvolveB ( ablastr::fields::MultiFabRegister& fields,
+                   int lev,
+                   PatchType patch_type,
                    std::array< std::unique_ptr<amrex::iMultiFab>, 3 >& flag_info_cell,
                    std::array< std::unique_ptr<amrex::LayoutData<FaceInfoBox> >, 3 >& borrowing,
-                   int lev, amrex::Real dt );
-
-    void EvolveE ( std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Efield,
-                   std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Bfield,
-                   std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Jfield,
-                   std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
-                   std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& face_areas,
-                   std::array< std::unique_ptr<amrex::MultiFab>, 3 >& ECTRhofield,
-                   std::unique_ptr<amrex::MultiFab> const& Ffield,
-                   int lev, amrex::Real dt );
-
-    void EvolveF ( std::unique_ptr<amrex::MultiFab>& Ffield,
-                   std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-                   std::unique_ptr<amrex::MultiFab> const& rhofield,
+                   amrex::Real dt );
+
+    void EvolveE ( ablastr::fields::MultiFabRegister & fields,
+                   int lev,
+                   PatchType patch_type,
+                   ablastr::fields::VectorField const& Efield,
+                   amrex::Real dt );
+
+    void EvolveF ( amrex::MultiFab* Ffield,
+                   ablastr::fields::VectorField const& Efield,
+                   amrex::MultiFab* rhofield,
                    int rhocomp, amrex::Real dt );
 
-    void EvolveG (std::unique_ptr<amrex::MultiFab>& Gfield,
-                  std::array<std::unique_ptr<amrex::MultiFab>,3> const& Bfield,
+    void EvolveG (amrex::MultiFab* Gfield,
+                  ablastr::fields::VectorField const& Bfield,
                   amrex::Real dt);
 
-    void EvolveECTRho ( std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-                        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
-                        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& face_areas,
-                        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& ECTRhofield,
+    void EvolveECTRho ( ablastr::fields::VectorField const& Efield,
+                        ablastr::fields::VectorField const& edge_lengths,
+                        ablastr::fields::VectorField const& face_areas,
+                        ablastr::fields::VectorField const& ECTRhofield,
                         int lev );
 
-    void ApplySilverMuellerBoundary(
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Efield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Bfield,
+    void ApplySilverMuellerBoundary (
+        ablastr::fields::VectorField & Efield,
+        ablastr::fields::VectorField & Bfield,
         amrex::Box domain_box,
         amrex::Real dt,
         amrex::Array<FieldBoundaryType,AMREX_SPACEDIM> field_boundary_lo,
         amrex::Array<FieldBoundaryType,AMREX_SPACEDIM> field_boundary_hi);
 
-    void ComputeDivE ( const std::array<std::unique_ptr<amrex::MultiFab>,3>& Efield,
-                       amrex::MultiFab& divE );
+    void ComputeDivE (
+        ablastr::fields::VectorField const & Efield,
+        amrex::MultiFab& divE
+    );
 
     /**
      * \brief Macroscopic E-update for non-vacuum medium using the user-selected
      *
@@ -110,29 +106,34 @@ class FiniteDifferenceSolver
      * \param[in] dt timestep of the simulation
      * \param[in] macroscopic_properties contains user-defined properties of the medium.
      */
-    void MacroscopicEvolveE ( std::array< std::unique_ptr<amrex::MultiFab>, 3>& Efield,
-                              std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Bfield,
-                              std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Jfield,
-                              std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
+    void MacroscopicEvolveE (
+        ablastr::fields::VectorField const& Efield,
+        ablastr::fields::VectorField const& Bfield,
+        ablastr::fields::VectorField const& Jfield,
+        ablastr::fields::VectorField const& edge_lengths,
         amrex::Real dt,
         std::unique_ptr<MacroscopicProperties> const& macroscopic_properties);
 
-    void EvolveBPML ( std::array< amrex::MultiFab*, 3 > Bfield,
-                      std::array< amrex::MultiFab*, 3 > Efield,
-                      amrex::Real dt,
-                      bool dive_cleaning);
+    void EvolveBPML (
+        ablastr::fields::MultiFabRegister& fields,
+        PatchType patch_type,
+        int level,
+        amrex::Real dt,
+        bool dive_cleaning
+    );
 
-    void EvolveEPML ( std::array< amrex::MultiFab*, 3 > Efield,
-                      std::array< amrex::MultiFab*, 3 > Bfield,
-                      std::array< amrex::MultiFab*, 3 > Jfield,
-                      std::array< amrex::MultiFab*, 3 > edge_lengths,
-                      amrex::MultiFab* Ffield,
-                      MultiSigmaBox const& sigba,
-                      amrex::Real dt, bool pml_has_particles );
+    void EvolveEPML (
+        ablastr::fields::MultiFabRegister& fields,
+        PatchType patch_type,
+        int level,
+        MultiSigmaBox const& sigba,
+        amrex::Real dt,
+        bool pml_has_particles
+    );
 
     void EvolveFPML ( amrex::MultiFab* Ffield,
-                      std::array< amrex::MultiFab*, 3 > Efield,
-                      amrex::Real dt );
+                      ablastr::fields::VectorField Efield,
+                      amrex::Real dt );
 
     /**
      * \brief E-update in the hybrid PIC algorithm as described in
@@ -151,14 +152,14 @@ class FiniteDifferenceSolver
      * \param[in] hybrid_model instance of the hybrid-PIC model
      * \param[in] solve_for_Faraday boolean flag for whether the E-field is solved to be used in Faraday's equation
      */
-    void HybridPICSolveE ( std::array< std::unique_ptr<amrex::MultiFab>, 3>& Efield,
-                           std::array< std::unique_ptr<amrex::MultiFab>, 3>& Jfield,
-                           std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Jifield,
-                           std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Jextfield,
-                           std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Bfield,
-                           std::unique_ptr<amrex::MultiFab> const& rhofield,
-                           std::unique_ptr<amrex::MultiFab> const& Pefield,
-                           std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
+    void HybridPICSolveE ( ablastr::fields::VectorField const& Efield,
+                           ablastr::fields::VectorField & Jfield,
+                           ablastr::fields::VectorField const& Jifield,
+                           ablastr::fields::VectorField const& Jextfield,
+                           ablastr::fields::VectorField const& Bfield,
+                           amrex::MultiFab const& rhofield,
+                           amrex::MultiFab const& Pefield,
+                           ablastr::fields::VectorField const& edge_lengths,
                            int lev, HybridPICModel const* hybrid_model,
                            bool solve_for_Faraday );
 
@@ -172,9 +173,9 @@ class FiniteDifferenceSolver
      * \param[in] lev  level number for the calculation
      */
     void CalculateCurrentAmpere (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3>& Jfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Bfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
+        ablastr::fields::VectorField& Jfield,
+        ablastr::fields::VectorField const& Bfield,
+        ablastr::fields::VectorField const& edge_lengths,
         int lev );
 
 private:
@@ -206,98 +207,99 @@ class FiniteDifferenceSolver
 #ifdef WARPX_DIM_RZ
     template< typename T_Algo >
     void EvolveBCylindrical (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Bfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
+        ablastr::fields::VectorField const& Bfield,
+        ablastr::fields::VectorField const& Efield,
         int lev,
         amrex::Real dt );
 
     template< typename T_Algo >
     void EvolveECylindrical (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Efield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Bfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Jfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
-        std::unique_ptr<amrex::MultiFab> const& Ffield,
+        ablastr::fields::VectorField const& Efield,
+        ablastr::fields::VectorField const& Bfield,
+        ablastr::fields::VectorField const& Jfield,
+        ablastr::fields::VectorField const& edge_lengths,
+        amrex::MultiFab const* Ffield,
        int lev,
         amrex::Real dt );
 
     template< typename T_Algo >
     void EvolveFCylindrical (
-        std::unique_ptr<amrex::MultiFab>& Ffield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-        std::unique_ptr<amrex::MultiFab> const& rhofield,
+        amrex::MultiFab* Ffield,
+        ablastr::fields::VectorField const & Efield,
+        amrex::MultiFab* rhofield,
         int rhocomp,
         amrex::Real dt );
 
     template< typename T_Algo >
     void ComputeDivECylindrical (
-        const std::array<std::unique_ptr<amrex::MultiFab>,3>& Efield,
-        amrex::MultiFab& divE );
+        ablastr::fields::VectorField const & Efield,
+        amrex::MultiFab& divE
+    );
 
     template<typename T_Algo>
     void HybridPICSolveECylindrical (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3>& Efield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Jfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Jifield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Jextfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Bfield,
-        std::unique_ptr<amrex::MultiFab> const& rhofield,
-        std::unique_ptr<amrex::MultiFab> const& Pefield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
+        ablastr::fields::VectorField const& Efield,
+        ablastr::fields::VectorField const& Jfield,
+        ablastr::fields::VectorField const& Jifield,
+        ablastr::fields::VectorField const& Jextfield,
+        ablastr::fields::VectorField const& Bfield,
+        amrex::MultiFab const& rhofield,
+        amrex::MultiFab const& Pefield,
+        ablastr::fields::VectorField const& edge_lengths,
        int lev, HybridPICModel const* hybrid_model,
         bool solve_for_Faraday );
 
     template<typename T_Algo>
     void CalculateCurrentAmpereCylindrical (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Jfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Bfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
+        ablastr::fields::VectorField& Jfield,
+        ablastr::fields::VectorField const& Bfield,
+        ablastr::fields::VectorField const& edge_lengths,
         int lev );
 
 #else
     template< typename T_Algo >
     void EvolveBCartesian (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Bfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-        std::unique_ptr<amrex::MultiFab> const& Gfield,
+        ablastr::fields::VectorField const& Bfield,
+        ablastr::fields::VectorField const& Efield,
+        amrex::MultiFab const * Gfield,
         int lev, amrex::Real dt );
 
     template< typename T_Algo >
     void EvolveECartesian (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Efield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Bfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Jfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
-        std::unique_ptr<amrex::MultiFab> const& Ffield,
+        ablastr::fields::VectorField const& Efield,
+        ablastr::fields::VectorField const& Bfield,
+        ablastr::fields::VectorField const& Jfield,
+        ablastr::fields::VectorField const& edge_lengths,
+        amrex::MultiFab const* Ffield,
        int lev, amrex::Real dt );
 
     template< typename T_Algo >
     void EvolveFCartesian (
-        std::unique_ptr<amrex::MultiFab>& Ffield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-        std::unique_ptr<amrex::MultiFab> const& rhofield,
+        amrex::MultiFab* Ffield,
+        ablastr::fields::VectorField Efield,
+        amrex::MultiFab* rhofield,
        int rhocomp, amrex::Real dt );
 
     template< typename T_Algo >
     void EvolveGCartesian (
-        std::unique_ptr<amrex::MultiFab>& Gfield,
-        std::array<std::unique_ptr<amrex::MultiFab>,3> const& Bfield,
+        amrex::MultiFab* Gfield,
+        ablastr::fields::VectorField const& Bfield,
         amrex::Real dt);
 
     void EvolveRhoCartesianECT (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& face_areas,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& ECTRhofield, int lev);
+        ablastr::fields::VectorField const& Efield,
+        ablastr::fields::VectorField const& edge_lengths,
+        ablastr::fields::VectorField const& face_areas,
+        ablastr::fields::VectorField const& ECTRhofield, int lev);
 
     void EvolveBCartesianECT (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Bfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& face_areas,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& area_mod,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& ECTRhofield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Venl,
+        ablastr::fields::VectorField const& Bfield,
+        ablastr::fields::VectorField const& face_areas,
+        ablastr::fields::VectorField const& area_mod,
+        ablastr::fields::VectorField const& ECTRhofield,
+        ablastr::fields::VectorField const& Venl,
         std::array< std::unique_ptr<amrex::iMultiFab>, 3 >& flag_info_cell,
         std::array< std::unique_ptr<amrex::LayoutData<FaceInfoBox> >, 3 >& borrowing,
         int lev, amrex::Real dt
@@ -305,28 +307,28 @@ class FiniteDifferenceSolver
 
     template< typename T_Algo >
     void ComputeDivECartesian (
-        const std::array<std::unique_ptr<amrex::MultiFab>,3>& Efield,
+        ablastr::fields::VectorField const & Efield,
         amrex::MultiFab& divE );
 
     template< typename T_Algo, typename T_MacroAlgo >
     void MacroscopicEvolveECartesian (
-        std::array< std::unique_ptr< amrex::MultiFab>, 3>& Efield,
-        std::array< std::unique_ptr< amrex::MultiFab>, 3> const& Bfield,
-        std::array< std::unique_ptr< amrex::MultiFab>, 3> const& Jfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
+        ablastr::fields::VectorField const& Efield,
+        ablastr::fields::VectorField const& Bfield,
+        ablastr::fields::VectorField const& Jfield,
+        ablastr::fields::VectorField const& edge_lengths,
         amrex::Real dt,
         std::unique_ptr<MacroscopicProperties> const& macroscopic_properties);
 
     template< typename T_Algo >
     void EvolveBPMLCartesian (
         std::array< amrex::MultiFab*, 3 > Bfield,
-        std::array< amrex::MultiFab*, 3 > Efield,
+        ablastr::fields::VectorField Efield,
         amrex::Real dt,
         bool dive_cleaning);
 
     template< typename T_Algo >
     void EvolveEPMLCartesian (
-        std::array< amrex::MultiFab*, 3 > Efield,
+        ablastr::fields::VectorField Efield,
         std::array< amrex::MultiFab*, 3 > Bfield,
         std::array< amrex::MultiFab*, 3 > Jfield,
         std::array< amrex::MultiFab*, 3 > edge_lengths,
@@ -336,27 +338,27 @@ class FiniteDifferenceSolver
 
     template< typename T_Algo >
     void EvolveFPMLCartesian ( amrex::MultiFab* Ffield,
-                               std::array< amrex::MultiFab*, 3 > Efield,
-                               amrex::Real dt );
+                               ablastr::fields::VectorField Efield,
+                               amrex::Real dt );
 
     template<typename T_Algo>
     void HybridPICSolveECartesian (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3>& Efield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Jfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Jifield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Jextfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Bfield,
-        std::unique_ptr<amrex::MultiFab> const& rhofield,
-        std::unique_ptr<amrex::MultiFab> const& Pefield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
+        ablastr::fields::VectorField const& Efield,
+        ablastr::fields::VectorField const& Jfield,
+        ablastr::fields::VectorField const& Jifield,
+        ablastr::fields::VectorField const& Jextfield,
+        ablastr::fields::VectorField const& Bfield,
+        amrex::MultiFab const& rhofield,
+        amrex::MultiFab const& Pefield,
+        ablastr::fields::VectorField const& edge_lengths,
        int lev, HybridPICModel const* hybrid_model,
         bool solve_for_Faraday );
 
     template<typename T_Algo>
     void CalculateCurrentAmpereCartesian (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Jfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Bfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths,
+        ablastr::fields::VectorField& Jfield,
+        ablastr::fields::VectorField const& Bfield,
+        ablastr::fields::VectorField const& edge_lengths,
         int lev );
 #endif
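Taken together, the header now exposes one entry point per field update with MultiFabRegister as the single source of field state; ownership never crosses the interface. A sketch of what a time-step caller reduces to, assuming the register member warpx.m_fields used later in this series (flag_info_cell and borrowing are the EB bookkeeping arguments that remain explicit):

    // One registry reference replaces a dozen unique_ptr-vector arguments.
    fdtd_solver->EvolveB(warpx.m_fields, lev, PatchType::fine,
                         flag_info_cell, borrowing, dt);
    fdtd_solver->EvolveE(warpx.m_fields, lev, PatchType::fine,
                         warpx.m_fields.get_alldirs(FieldType::Efield_fp, lev), dt);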
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H
index 3a49d5fad4b..b0f63dd8018 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H
+++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H
@@ -19,6 +19,8 @@
 #include "Utils/WarpXConst.H"
 #include "Utils/WarpXProfilerWrapper.H"
 
+#include <ablastr/fields/MultiFabRegister.H>
+
 #include
 #include
 
@@ -31,21 +33,18 @@ class HybridPICModel
 {
 public:
 
-    HybridPICModel (int nlevs_max); // constructor
+    HybridPICModel ();
 
     /** Read user-defined model parameters. Called in constructor. */
     void ReadParameters ();
 
     /** Allocate hybrid-PIC specific multifabs. Called in constructor. */
-    void AllocateMFs (int nlevs_max);
-    void AllocateLevelMFs (int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm,
+    void AllocateLevelMFs (ablastr::fields::MultiFabRegister & fields,
+                           int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm,
                            int ncomps, const amrex::IntVect& ngJ, const amrex::IntVect& ngRho,
                            const amrex::IntVect& jx_nodal_flag, const amrex::IntVect& jy_nodal_flag,
                            const amrex::IntVect& jz_nodal_flag, const amrex::IntVect& rho_nodal_flag);
 
-    /** Helper function to clear values from hybrid-PIC specific multifabs. */
-    void ClearLevel (int lev);
-
     void InitData ();
 
     /**
@@ -55,10 +54,10 @@ public:
     * of time and therefore this should be re-evaluated at every step.
     */
     void GetCurrentExternal (
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> const& edge_lengths
+        ablastr::fields::MultiLevelVectorField const& edge_lengths
     );
     void GetCurrentExternal (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& edge_lengths,
+        ablastr::fields::VectorField const& edge_lengths,
         int lev
     );
 
@@ -72,12 +71,12 @@ public:
     * \param[in] edge_lengths  Length of cell edges taking embedded boundaries into account
     */
     void CalculateCurrentAmpere (
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> const& Bfield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> const& edge_lengths
+        ablastr::fields::MultiLevelVectorField const& Bfield,
+        ablastr::fields::MultiLevelVectorField const& edge_lengths
     );
     void CalculateCurrentAmpere (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Bfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& edge_lengths,
+        ablastr::fields::VectorField const& Bfield,
+        ablastr::fields::VectorField const& edge_lengths,
         int lev
     );
 
@@ -86,53 +85,53 @@ public:
     * Function to update the E-field using Ohm's law (hybrid-PIC model).
     */
     void HybridPICSolveE (
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Efield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> const& Jfield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> const& Bfield,
-        amrex::Vector<std::unique_ptr<amrex::MultiFab>> const& rhofield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> const& edge_lengths,
-        bool solve_for_Faraday);
+        ablastr::fields::MultiLevelVectorField const& Efield,
+        ablastr::fields::MultiLevelVectorField const& Jfield,
+        ablastr::fields::MultiLevelVectorField const& Bfield,
+        ablastr::fields::MultiLevelScalarField const& rhofield,
+        ablastr::fields::MultiLevelVectorField const& edge_lengths,
+        bool solve_for_Faraday) const;
 
     void HybridPICSolveE (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3>& Efield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Jfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Bfield,
-        std::unique_ptr<amrex::MultiFab> const& rhofield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& edge_lengths,
-        int lev, bool solve_for_Faraday);
+        ablastr::fields::VectorField const& Efield,
+        ablastr::fields::VectorField const& Jfield,
+        ablastr::fields::VectorField const& Bfield,
+        amrex::MultiFab const& rhofield,
+        ablastr::fields::VectorField const& edge_lengths,
+        int lev, bool solve_for_Faraday) const;
 
     void HybridPICSolveE (
-        std::array< std::unique_ptr<amrex::MultiFab>, 3>& Efield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Jfield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Bfield,
-        std::unique_ptr<amrex::MultiFab> const& rhofield,
-        std::array< std::unique_ptr<amrex::MultiFab>, 3> const& edge_lengths,
-        int lev, PatchType patch_type, bool solve_for_Faraday);
+        ablastr::fields::VectorField const& Efield,
+        ablastr::fields::VectorField const& Jfield,
+        ablastr::fields::VectorField const& Bfield,
+        amrex::MultiFab const& rhofield,
+        ablastr::fields::VectorField const& edge_lengths,
+        int lev, PatchType patch_type, bool solve_for_Faraday) const;
 
     void BfieldEvolveRK (
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Bfield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Efield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> const& Jfield,
-        amrex::Vector<std::unique_ptr<amrex::MultiFab>> const& rhofield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> const& edge_lengths,
+        ablastr::fields::MultiLevelVectorField const& Bfield,
+        ablastr::fields::MultiLevelVectorField const& Efield,
+        ablastr::fields::MultiLevelVectorField const& Jfield,
+        ablastr::fields::MultiLevelScalarField const& rhofield,
+        ablastr::fields::MultiLevelVectorField const& edge_lengths,
         amrex::Real dt, DtType a_dt_type,
         amrex::IntVect ng, std::optional<int> nodal_sync);
 
     void BfieldEvolveRK (
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Bfield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Efield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> const& Jfield,
-        amrex::Vector<std::unique_ptr<amrex::MultiFab>> const& rhofield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> const& edge_lengths,
+        ablastr::fields::MultiLevelVectorField const& Bfield,
+        ablastr::fields::MultiLevelVectorField const& Efield,
+        ablastr::fields::MultiLevelVectorField const& Jfield,
+        ablastr::fields::MultiLevelScalarField const& rhofield,
+        ablastr::fields::MultiLevelVectorField const& edge_lengths,
         amrex::Real dt, int lev, DtType dt_type,
         amrex::IntVect ng, std::optional<int> nodal_sync);
 
     void FieldPush (
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Bfield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>>& Efield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> const& Jfield,
-        amrex::Vector<std::unique_ptr<amrex::MultiFab>> const& rhofield,
-        amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> const& edge_lengths,
+        ablastr::fields::MultiLevelVectorField const& Bfield,
+        ablastr::fields::MultiLevelVectorField const& Efield,
+        ablastr::fields::MultiLevelVectorField const& Jfield,
+        ablastr::fields::MultiLevelScalarField const& rhofield,
+        ablastr::fields::MultiLevelVectorField const& edge_lengths,
         amrex::Real dt, DtType dt_type,
         amrex::IntVect ng, std::optional<int> nodal_sync);
 
@@ -141,8 +140,8 @@ public:
     * Function to calculate the electron pressure using the simulation charge
     * density. Used in the Ohm's law solver (kinetic-fluid hybrid model).
     */
-    void CalculateElectronPressure ();
-    void CalculateElectronPressure (int lev);
+    void CalculateElectronPressure () const;
+    void CalculateElectronPressure (int lev) const;
 
     /**
     * \brief Fill the electron pressure multifab given the kinetic particle
@@ -153,8 +152,8 @@ public:
     * \param[in] rho_field  scalar ion charge density Multifab at a given level
     */
     void FillElectronPressureMF (
-        std::unique_ptr<amrex::MultiFab> const& Pe_field,
-        amrex::MultiFab* const& rho_field ) const;
+        amrex::MultiFab& Pe_field,
+        amrex::MultiFab const& rho_field ) const;
 
     // Declare variables to hold hybrid-PIC model parameters
     /** Number of substeps to take when evolving B */
@@ -187,32 +186,6 @@ public:
     std::array< amrex::ParserExecutor<4>, 3> m_J_external;
     bool m_external_field_has_time_dependence = false;
 
-    // Declare multifabs specifically needed for the hybrid-PIC model
-    amrex::Vector< std::unique_ptr<amrex::MultiFab> > rho_fp_temp;
-    amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > > current_fp_temp;
-    amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > > current_fp_ampere;
-    amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > > current_fp_external;
-    amrex::Vector< std::unique_ptr<amrex::MultiFab> > electron_pressure_fp;
-
-    // Helper functions to retrieve hybrid-PIC multifabs
-    [[nodiscard]] amrex::MultiFab*
-    get_pointer_current_fp_ampere (int lev, int direction) const
-    {
-        return current_fp_ampere[lev][direction].get();
-    }
-
-    [[nodiscard]] amrex::MultiFab*
-    get_pointer_current_fp_external (int lev, int direction) const
-    {
-        return current_fp_external[lev][direction].get();
-    }
-
-    [[nodiscard]] amrex::MultiFab*
-    get_pointer_electron_pressure_fp (int lev) const
-    {
-        return electron_pressure_fp[lev].get();
-    }
-
     /** Gpu Vector with index type of the Jx multifab */
     amrex::GpuArray<int, 3> Jx_IndexType;
     /** Gpu Vector with index type of the Jy multifab */
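The removed get_pointer_* helpers have no replacement inside this class; callers now go through the registry using the hybrid_-prefixed field names introduced in the .cpp changes below. A sketch of the equivalent lookups, using the warpx.m_fields accessors that appear in those changes:

    using ablastr::fields::Direction;
    using warpx::fields::FieldType;
    auto& warpx = WarpX::GetInstance();

    // Was: hybrid_model->get_pointer_electron_pressure_fp(lev)
    amrex::MultiFab* Pe =
        warpx.m_fields.get(FieldType::hybrid_electron_pressure_fp, lev);
    // Was: hybrid_model->get_pointer_current_fp_ampere(lev, 2)
    amrex::MultiFab* Jz =
        warpx.m_fields.get(FieldType::hybrid_current_fp_ampere, Direction{2}, lev);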
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp
index 70efc04e259..dbf56a0e899 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp
@@ -10,16 +10,15 @@
 #include "HybridPICModel.H"
 
 #include "EmbeddedBoundary/Enabled.H"
-#include "FieldSolver/Fields.H"
+#include "Fields.H"
 #include "WarpX.H"
 
 using namespace amrex;
-using namespace warpx::fields;
+using warpx::fields::FieldType;
 
-HybridPICModel::HybridPICModel ( int nlevs_max )
+HybridPICModel::HybridPICModel ()
 {
     ReadParameters();
-    AllocateMFs(nlevs_max);
 }
 
 void HybridPICModel::ReadParameters ()
@@ -56,59 +55,62 @@ void HybridPICModel::ReadParameters ()
     pp_hybrid.query("Jz_external_grid_function(x,y,z,t)", m_Jz_ext_grid_function);
 }
 
-void HybridPICModel::AllocateMFs (int nlevs_max)
-{
-    electron_pressure_fp.resize(nlevs_max);
-    rho_fp_temp.resize(nlevs_max);
-    current_fp_temp.resize(nlevs_max);
-    current_fp_ampere.resize(nlevs_max);
-    current_fp_external.resize(nlevs_max);
-}
-
-void HybridPICModel::AllocateLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm,
+void HybridPICModel::AllocateLevelMFs (ablastr::fields::MultiFabRegister & fields,
+                                       int lev, const BoxArray& ba, const DistributionMapping& dm,
                                        const int ncomps, const IntVect& ngJ, const IntVect& ngRho,
                                        const IntVect& jx_nodal_flag, const IntVect& jy_nodal_flag,
                                        const IntVect& jz_nodal_flag, const IntVect& rho_nodal_flag)
 {
+    using ablastr::fields::Direction;
+
-    // The "electron_pressure_fp" multifab stores the electron pressure calculated
+    // The "hybrid_electron_pressure_fp" multifab stores the electron pressure calculated
     // from the specified equation of state.
-    // The "rho_fp_temp" multifab is used to store the ion charge density
+    // The "hybrid_rho_fp_temp" multifab is used to store the ion charge density
     // interpolated or extrapolated to appropriate timesteps.
-    // The "current_fp_temp" multifab is used to store the ion current density
+    // The "hybrid_current_fp_temp" multifab is used to store the ion current density
     // interpolated or extrapolated to appropriate timesteps.
-    // The "current_fp_ampere" multifab stores the total current calculated as
+    // The "hybrid_current_fp_ampere" multifab stores the total current calculated as
     // the curl of B.
-    WarpX::AllocInitMultiFab(electron_pressure_fp[lev], amrex::convert(ba, rho_nodal_flag),
-        dm, ncomps, ngRho, lev, "electron_pressure_fp", 0.0_rt);
-
-    WarpX::AllocInitMultiFab(rho_fp_temp[lev], amrex::convert(ba, rho_nodal_flag),
-        dm, ncomps, ngRho, lev, "rho_fp_temp", 0.0_rt);
-
-    WarpX::AllocInitMultiFab(current_fp_temp[lev][0], amrex::convert(ba, jx_nodal_flag),
-        dm, ncomps, ngJ, lev, "current_fp_temp[x]", 0.0_rt);
-    WarpX::AllocInitMultiFab(current_fp_temp[lev][1], amrex::convert(ba, jy_nodal_flag),
-        dm, ncomps, ngJ, lev, "current_fp_temp[y]", 0.0_rt);
-    WarpX::AllocInitMultiFab(current_fp_temp[lev][2], amrex::convert(ba, jz_nodal_flag),
-        dm, ncomps, ngJ, lev, "current_fp_temp[z]", 0.0_rt);
-
-    WarpX::AllocInitMultiFab(current_fp_ampere[lev][0], amrex::convert(ba, jx_nodal_flag),
-        dm, ncomps, ngJ, lev, "current_fp_ampere[x]", 0.0_rt);
-    WarpX::AllocInitMultiFab(current_fp_ampere[lev][1], amrex::convert(ba, jy_nodal_flag),
-        dm, ncomps, ngJ, lev, "current_fp_ampere[y]", 0.0_rt);
-    WarpX::AllocInitMultiFab(current_fp_ampere[lev][2], amrex::convert(ba, jz_nodal_flag),
-        dm, ncomps, ngJ, lev, "current_fp_ampere[z]", 0.0_rt);
+    fields.alloc_init(FieldType::hybrid_electron_pressure_fp,
+        lev, amrex::convert(ba, rho_nodal_flag),
+        dm, ncomps, ngRho, 0.0_rt);
+    fields.alloc_init(FieldType::hybrid_rho_fp_temp,
+        lev, amrex::convert(ba, rho_nodal_flag),
+        dm, ncomps, ngRho, 0.0_rt);
+    fields.alloc_init(FieldType::hybrid_current_fp_temp, Direction{0},
+        lev, amrex::convert(ba, jx_nodal_flag),
+        dm, ncomps, ngJ, 0.0_rt);
+    fields.alloc_init(FieldType::hybrid_current_fp_temp, Direction{1},
+        lev, amrex::convert(ba, jy_nodal_flag),
+        dm, ncomps, ngJ, 0.0_rt);
+    fields.alloc_init(FieldType::hybrid_current_fp_temp, Direction{2},
+        lev, amrex::convert(ba, jz_nodal_flag),
+        dm, ncomps, ngJ, 0.0_rt);
+
+    fields.alloc_init(FieldType::hybrid_current_fp_ampere, Direction{0},
+        lev, amrex::convert(ba, jx_nodal_flag),
+        dm, ncomps, ngJ, 0.0_rt);
+    fields.alloc_init(FieldType::hybrid_current_fp_ampere, Direction{1},
+        lev, amrex::convert(ba, jy_nodal_flag),
+        dm, ncomps, ngJ, 0.0_rt);
+    fields.alloc_init(FieldType::hybrid_current_fp_ampere, Direction{2},
+        lev, amrex::convert(ba, jz_nodal_flag),
+        dm, ncomps, ngJ, 0.0_rt);
 
     // the external current density multifab is made nodal to avoid needing to interpolate
     // to a nodal grid as has to be done for the ion and total current density multifabs
     // this also allows the external current multifab to not have any ghost cells
-    WarpX::AllocInitMultiFab(current_fp_external[lev][0], amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))),
-        dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), lev, "current_fp_external[x]", 0.0_rt);
-    WarpX::AllocInitMultiFab(current_fp_external[lev][1], amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))),
-        dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), lev, "current_fp_external[y]", 0.0_rt);
-    WarpX::AllocInitMultiFab(current_fp_external[lev][2], amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))),
-        dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), lev, "current_fp_external[z]", 0.0_rt);
+    fields.alloc_init(FieldType::hybrid_current_fp_external, Direction{0},
+        lev, amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))),
+        dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), 0.0_rt);
+    fields.alloc_init(FieldType::hybrid_current_fp_external, Direction{1},
+        lev, amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))),
+        dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), 0.0_rt);
+    fields.alloc_init(FieldType::hybrid_current_fp_external, Direction{2},
+        lev, amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))),
+        dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), 0.0_rt);
 
 #ifdef WARPX_DIM_RZ
     WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
@@ -117,17 +119,6 @@ void HybridPICModel::AllocateLevelMFs (int lev, const BoxArray& ba, const Distri
 #endif
 }
 
-void HybridPICModel::ClearLevel (int lev)
-{
-    electron_pressure_fp[lev].reset();
-    rho_fp_temp[lev].reset();
-    for (int i = 0; i < 3; ++i) {
-        current_fp_temp[lev][i].reset();
-        current_fp_ampere[lev][i].reset();
-        current_fp_external[lev][i].reset();
-    }
-}
-
lev, 0); - auto const & edge_lengths_y = warpx.getField(FieldType::edge_lengths, lev, 1); - auto const & edge_lengths_z = warpx.getField(FieldType::edge_lengths, lev, 2); + using ablastr::fields::Direction; + auto const & edge_lengths_x = *warpx.m_fields.get(FieldType::edge_lengths, Direction{0}, lev); + auto const & edge_lengths_y = *warpx.m_fields.get(FieldType::edge_lengths, Direction{1}, lev); + auto const & edge_lengths_z = *warpx.m_fields.get(FieldType::edge_lengths, Direction{2}, lev); edge_lengths = std::array< std::unique_ptr, 3 >{ std::make_unique( @@ -245,12 +238,12 @@ void HybridPICModel::InitData () }; } #endif - GetCurrentExternal(edge_lengths, lev); + GetCurrentExternal(ablastr::fields::a2m(edge_lengths), lev); } } void HybridPICModel::GetCurrentExternal ( - amrex::Vector, 3>> const& edge_lengths) + ablastr::fields::MultiLevelVectorField const& edge_lengths) { if (!m_external_field_has_time_dependence) { return; } @@ -263,7 +256,7 @@ void HybridPICModel::GetCurrentExternal ( void HybridPICModel::GetCurrentExternal ( - std::array< std::unique_ptr, 3> const& edge_lengths, + ablastr::fields::VectorField const& edge_lengths, int lev) { // This logic matches closely to WarpX::InitializeExternalFieldsOnGridUsingParser @@ -275,9 +268,10 @@ void HybridPICModel::GetCurrentExternal ( auto dx_lev = warpx.Geom(lev).CellSizeArray(); const RealBox& real_box = warpx.Geom(lev).ProbDomain(); - auto& mfx = current_fp_external[lev][0]; - auto& mfy = current_fp_external[lev][1]; - auto& mfz = current_fp_external[lev][2]; + using ablastr::fields::Direction; + amrex::MultiFab * mfx = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{0}, lev); + amrex::MultiFab * mfy = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{1}, lev); + amrex::MultiFab * mfz = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{2}, lev); const amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); @@ -391,8 +385,8 @@ void HybridPICModel::GetCurrentExternal ( } void HybridPICModel::CalculateCurrentAmpere ( - amrex::Vector, 3>> const& Bfield, - amrex::Vector, 3>> const& edge_lengths) + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelVectorField const& edge_lengths) { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) @@ -402,49 +396,50 @@ void HybridPICModel::CalculateCurrentAmpere ( } void HybridPICModel::CalculateCurrentAmpere ( - std::array< std::unique_ptr, 3> const& Bfield, - std::array< std::unique_ptr, 3> const& edge_lengths, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, const int lev) { WARPX_PROFILE("WarpX::CalculateCurrentAmpere()"); auto& warpx = WarpX::GetInstance(); + ablastr::fields::VectorField current_fp_ampere = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_ampere, lev); warpx.get_pointer_fdtd_solver_fp(lev)->CalculateCurrentAmpere( - current_fp_ampere[lev], Bfield, edge_lengths, lev + current_fp_ampere, Bfield, edge_lengths, lev ); // we shouldn't apply the boundary condition to J since J = J_i - J_e but // the boundary correction was already applied to J_i and the B-field // boundary ensures that J itself complies with the boundary conditions, right? 
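
// Editor's sketch (annotation, not part of the patch): what the
// ablastr::fields::a2m() adapter call above implies. The hybrid model still
// owns edge_lengths as std::array<std::unique_ptr<amrex::MultiFab>, 3>, while
// the per-level GetCurrentExternal() overload now takes a non-owning
// ablastr::fields::VectorField const&. Assuming VectorField is an array of
// raw amrex::MultiFab pointers, the adapter reduces to borrowing the three
// components without transferring ownership:
//
//     ablastr::fields::VectorField view {
//         edge_lengths[0].get(), edge_lengths[1].get(), edge_lengths[2].get()
//     };
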
// ApplyJfieldBoundary(lev, Jfield[0].get(), Jfield[1].get(), Jfield[2].get()); - for (int i=0; i<3; i++) { current_fp_ampere[lev][i]->FillBoundary(warpx.Geom(lev).periodicity()); } + for (int i=0; i<3; i++) { current_fp_ampere[i]->FillBoundary(warpx.Geom(lev).periodicity()); } } void HybridPICModel::HybridPICSolveE ( - amrex::Vector, 3>> & Efield, - amrex::Vector, 3>> const& Jfield, - amrex::Vector, 3>> const& Bfield, - amrex::Vector> const& rhofield, - amrex::Vector, 3>> const& edge_lengths, - const bool solve_for_Faraday) + ablastr::fields::MultiLevelVectorField const& Efield, + ablastr::fields::MultiLevelVectorField const& Jfield, + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelScalarField const& rhofield, + ablastr::fields::MultiLevelVectorField const& edge_lengths, + const bool solve_for_Faraday) const { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { HybridPICSolveE( - Efield[lev], Jfield[lev], Bfield[lev], rhofield[lev], + Efield[lev], Jfield[lev], Bfield[lev], *rhofield[lev], edge_lengths[lev], lev, solve_for_Faraday ); } } void HybridPICModel::HybridPICSolveE ( - std::array< std::unique_ptr, 3> & Efield, - std::array< std::unique_ptr, 3> const& Jfield, - std::array< std::unique_ptr, 3> const& Bfield, - std::unique_ptr const& rhofield, - std::array< std::unique_ptr, 3> const& edge_lengths, - const int lev, const bool solve_for_Faraday) + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + ablastr::fields::VectorField const& edge_lengths, + const int lev, const bool solve_for_Faraday) const { WARPX_PROFILE("WarpX::HybridPICSolveE()"); @@ -460,27 +455,32 @@ void HybridPICModel::HybridPICSolveE ( } void HybridPICModel::HybridPICSolveE ( - std::array< std::unique_ptr, 3> & Efield, - std::array< std::unique_ptr, 3> const& Jfield, - std::array< std::unique_ptr, 3> const& Bfield, - std::unique_ptr const& rhofield, - std::array< std::unique_ptr, 3> const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + ablastr::fields::VectorField const& edge_lengths, const int lev, PatchType patch_type, - const bool solve_for_Faraday) + const bool solve_for_Faraday) const { + auto& warpx = WarpX::GetInstance(); + ablastr::fields::VectorField current_fp_ampere = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_ampere, lev); + const ablastr::fields::VectorField current_fp_external = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_external, lev); + const ablastr::fields::ScalarField electron_pressure_fp = warpx.m_fields.get(FieldType::hybrid_electron_pressure_fp, lev); + // Solve E field in regular cells warpx.get_pointer_fdtd_solver_fp(lev)->HybridPICSolveE( - Efield, current_fp_ampere[lev], Jfield, current_fp_external[lev], + Efield, current_fp_ampere, Jfield, current_fp_external, Bfield, rhofield, - electron_pressure_fp[lev], + *electron_pressure_fp, edge_lengths, lev, this, solve_for_Faraday ); warpx.ApplyEfieldBoundary(lev, patch_type); } -void HybridPICModel::CalculateElectronPressure() +void HybridPICModel::CalculateElectronPressure() const { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) @@ -489,22 +489,27 @@ void HybridPICModel::CalculateElectronPressure() } } -void 
HybridPICModel::CalculateElectronPressure(const int lev) +void HybridPICModel::CalculateElectronPressure(const int lev) const { WARPX_PROFILE("WarpX::CalculateElectronPressure()"); auto& warpx = WarpX::GetInstance(); + ablastr::fields::ScalarField electron_pressure_fp = warpx.m_fields.get(FieldType::hybrid_electron_pressure_fp, lev); + ablastr::fields::ScalarField rho_fp = warpx.m_fields.get(FieldType::rho_fp, lev); + // Calculate the electron pressure using rho^{n+1}. FillElectronPressureMF( - electron_pressure_fp[lev], warpx.getFieldPointer(FieldType::rho_fp, lev) + *electron_pressure_fp, + *rho_fp ); warpx.ApplyElectronPressureBoundary(lev, PatchType::fine); - electron_pressure_fp[lev]->FillBoundary(warpx.Geom(lev).periodicity()); + electron_pressure_fp->FillBoundary(warpx.Geom(lev).periodicity()); } void HybridPICModel::FillElectronPressureMF ( - std::unique_ptr const& Pe_field, - amrex::MultiFab* const& rho_field ) const + amrex::MultiFab& Pe_field, + amrex::MultiFab const& rho_field +) const { const auto n0_ref = m_n0_ref; const auto elec_temp = m_elec_temp; @@ -514,11 +519,11 @@ void HybridPICModel::FillElectronPressureMF ( #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for ( MFIter mfi(*Pe_field, TilingIfNotGPU()); mfi.isValid(); ++mfi ) + for ( MFIter mfi(Pe_field, TilingIfNotGPU()); mfi.isValid(); ++mfi ) { // Extract field data for this grid/tile - Array4 const& rho = rho_field->const_array(mfi); - Array4 const& Pe = Pe_field->array(mfi); + Array4 const& rho = rho_field.const_array(mfi); + Array4 const& Pe = Pe_field.array(mfi); // Extract tileboxes for which to loop const Box& tilebox = mfi.tilebox(); @@ -532,11 +537,11 @@ void HybridPICModel::FillElectronPressureMF ( } void HybridPICModel::BfieldEvolveRK ( - amrex::Vector, 3>>& Bfield, - amrex::Vector, 3>>& Efield, - amrex::Vector, 3>> const& Jfield, - amrex::Vector> const& rhofield, - amrex::Vector, 3>> const& edge_lengths, + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelVectorField const& Efield, + ablastr::fields::MultiLevelVectorField const& Jfield, + ablastr::fields::MultiLevelScalarField const& rhofield, + ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, DtType dt_type, IntVect ng, std::optional nodal_sync ) { @@ -551,11 +556,11 @@ void HybridPICModel::BfieldEvolveRK ( } void HybridPICModel::BfieldEvolveRK ( - amrex::Vector, 3>>& Bfield, - amrex::Vector, 3>>& Efield, - amrex::Vector, 3>> const& Jfield, - amrex::Vector> const& rhofield, - amrex::Vector, 3>> const& edge_lengths, + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelVectorField const& Efield, + ablastr::fields::MultiLevelVectorField const& Jfield, + ablastr::fields::MultiLevelScalarField const& rhofield, + ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, int lev, DtType dt_type, IntVect ng, std::optional nodal_sync ) { @@ -664,11 +669,11 @@ void HybridPICModel::BfieldEvolveRK ( } void HybridPICModel::FieldPush ( - amrex::Vector, 3>>& Bfield, - amrex::Vector, 3>>& Efield, - amrex::Vector, 3>> const& Jfield, - amrex::Vector> const& rhofield, - amrex::Vector, 3>> const& edge_lengths, + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelVectorField const& Efield, + ablastr::fields::MultiLevelVectorField const& Jfield, + ablastr::fields::MultiLevelScalarField const& rhofield, + ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, DtType dt_type, 
IntVect ng, std::optional nodal_sync ) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index baeaf7a6c18..34a84756203 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -24,9 +24,9 @@ using namespace amrex; void FiniteDifferenceSolver::CalculateCurrentAmpere ( - std::array< std::unique_ptr, 3>& Jfield, - std::array< std::unique_ptr, 3> const& Bfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField & Jfield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, int lev ) { // Select algorithm (The choice of algorithm is a runtime option, @@ -59,9 +59,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpere ( #ifdef WARPX_DIM_RZ template void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( - std::array< std::unique_ptr, 3 >& Jfield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField& Jfield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, int lev ) { @@ -242,9 +242,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( template void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( - std::array< std::unique_ptr, 3 >& Jfield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField& Jfield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, int lev ) { @@ -351,14 +351,14 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( void FiniteDifferenceSolver::HybridPICSolveE ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 >& Jfield, - std::array< std::unique_ptr, 3 > const& Jifield, - std::array< std::unique_ptr, 3 > const& Jextfield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::unique_ptr const& rhofield, - std::unique_ptr const& Pefield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField& Jfield, + ablastr::fields::VectorField const& Jifield, + ablastr::fields::VectorField const& Jextfield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + amrex::MultiFab const& Pefield, + ablastr::fields::VectorField const& edge_lengths, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday) { @@ -389,14 +389,14 @@ void FiniteDifferenceSolver::HybridPICSolveE ( #ifdef WARPX_DIM_RZ template void FiniteDifferenceSolver::HybridPICSolveECylindrical ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& Jifield, - std::array< std::unique_ptr, 3 > const& Jextfield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::unique_ptr const& rhofield, - std::unique_ptr const& Pefield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& Jifield, + ablastr::fields::VectorField const& Jextfield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + amrex::MultiFab const& Pefield, + ablastr::fields::VectorField const& edge_lengths, int lev, 
HybridPICModel const* hybrid_model, const bool solve_for_Faraday ) { @@ -449,8 +449,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Also note that enE_nodal_mf does not need to have any guard cells since // these values will be interpolated to the Yee mesh which is contained // by the nodal mesh. - auto const& ba = convert(rhofield->boxArray(), IntVect::TheNodeVector()); - MultiFab enE_nodal_mf(ba, rhofield->DistributionMap(), 3, IntVect::TheZeroVector()); + auto const& ba = convert(rhofield.boxArray(), IntVect::TheNodeVector()); + MultiFab enE_nodal_mf(ba, rhofield.DistributionMap(), 3, IntVect::TheZeroVector()); // Loop through the grids, and over the tiles within each grid for the // initial, nodal calculation of E @@ -539,8 +539,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& Jt = Jfield[1]->const_array(mfi); Array4 const& Jz = Jfield[2]->const_array(mfi); Array4 const& enE = enE_nodal_mf.const_array(mfi); - Array4 const& rho = rhofield->const_array(mfi); - Array4 const& Pe = Pefield->array(mfi); + Array4 const& rho = rhofield.const_array(mfi); + Array4 const& Pe = Pefield.const_array(mfi); amrex::Array4 lr, lz; if (EB::enabled()) { @@ -704,14 +704,14 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( template void FiniteDifferenceSolver::HybridPICSolveECartesian ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& Jifield, - std::array< std::unique_ptr, 3 > const& Jextfield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::unique_ptr const& rhofield, - std::unique_ptr const& Pefield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& Jifield, + ablastr::fields::VectorField const& Jextfield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + amrex::MultiFab const& Pefield, + ablastr::fields::VectorField const& edge_lengths, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday ) { @@ -758,8 +758,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Also note that enE_nodal_mf does not need to have any guard cells since // these values will be interpolated to the Yee mesh which is contained // by the nodal mesh. 
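
// Editor's note (sketch, not part of the patch), expanding the guard-cell
// remark above: converting a BoxArray to the nodal index type grows each box
// to include its high-side nodes, so the nodal mesh covers every Yee-staggered
// location of the same cells. For a single cell-centered box:
//
//     amrex::Box cc ({0,0,0}, {15,15,15});                          // cells 0..15
//     amrex::Box nd = amrex::convert(cc, IntVect::TheNodeVector()); // nodes 0..16
//
// Any face- or edge-staggered point of those cells lies inside nd, which is
// why enE_nodal_mf below can be built with zero guard cells.
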
- auto const& ba = convert(rhofield->boxArray(), IntVect::TheNodeVector()); - MultiFab enE_nodal_mf(ba, rhofield->DistributionMap(), 3, IntVect::TheZeroVector()); + auto const& ba = convert(rhofield.boxArray(), IntVect::TheNodeVector()); + MultiFab enE_nodal_mf(ba, rhofield.DistributionMap(), 3, IntVect::TheZeroVector()); // Loop through the grids, and over the tiles within each grid for the // initial, nodal calculation of E @@ -848,8 +848,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& Jy = Jfield[1]->const_array(mfi); Array4 const& Jz = Jfield[2]->const_array(mfi); Array4 const& enE = enE_nodal_mf.const_array(mfi); - Array4 const& rho = rhofield->const_array(mfi); - Array4 const& Pe = Pefield->array(mfi); + Array4 const& rho = rhofield.const_array(mfi); + Array4 const& Pe = Pefield.array(mfi); amrex::Array4 lx, ly, lz; if (EB::enabled()) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp index 46e4d3efa06..708728c4e5b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp @@ -34,12 +34,13 @@ #include using namespace amrex; +using namespace ablastr::fields; void FiniteDifferenceSolver::MacroscopicEvolveE ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Jfield, + VectorField const& edge_lengths, amrex::Real const dt, std::unique_ptr const& macroscopic_properties) { @@ -99,10 +100,10 @@ void FiniteDifferenceSolver::MacroscopicEvolveE ( template void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& edge_lengths, amrex::Real const dt, std::unique_ptr const& macroscopic_properties) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp index a6a389fe056..18c010d9385 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp @@ -1,6 +1,6 @@ #include "MacroscopicProperties.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" @@ -23,7 +23,6 @@ #include using namespace amrex; -using namespace warpx::fields; MacroscopicProperties::MacroscopicProperties () { diff --git a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp index 33e2ced7b53..2236118a30c 100644 --- a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp @@ -7,7 +7,7 @@ #include "SemiImplicitEM.H" #include "WarpX.H" -using namespace warpx::fields; +using warpx::fields::FieldType; using namespace 
amrex::literals; void SemiImplicitEM::Define ( WarpX* a_WarpX ) @@ -20,7 +20,7 @@ void SemiImplicitEM::Define ( WarpX* a_WarpX ) m_WarpX = a_WarpX; // Define E and Eold vectors - m_E.Define( m_WarpX, FieldType::Efield_fp ); + m_E.Define( m_WarpX, "Efield_fp" ); m_Eold.Define( m_E ); // Parse implicit solver parameters diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H index 009c2c7e546..aba66782154 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H @@ -98,14 +98,6 @@ private: */ WarpXSolverVec m_E, m_Eold; - /** - * \brief B is a derived variable from E. Need to save Bold to update B during - * the iterative nonlinear solve for E. Bold is owned here, but only used by WarpX. - * It is not used directly by the nonlinear solver, nor is it the same size as the - * solver vector (size E), and so it should not be WarpXSolverVec type. - */ - amrex::Vector, 3 > > m_Bold; - /** * \brief Update the E and B fields owned by WarpX */ diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp index 3d74ddfde69..e62ced29f6d 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp @@ -4,11 +4,11 @@ * * License: BSD-3-Clause-LBNL */ -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "ThetaImplicitEM.H" #include "WarpX.H" -using namespace warpx::fields; +using warpx::fields::FieldType; using namespace amrex::literals; void ThetaImplicitEM::Define ( WarpX* const a_WarpX ) @@ -21,20 +21,21 @@ void ThetaImplicitEM::Define ( WarpX* const a_WarpX ) m_WarpX = a_WarpX; // Define E and Eold vectors - m_E.Define( m_WarpX, FieldType::Efield_fp ); + m_E.Define( m_WarpX, "Efield_fp" ); m_Eold.Define( m_E ); - // Define Bold MultiFab + // Define Bold MultiFabs + using ablastr::fields::Direction; const int num_levels = 1; - m_Bold.resize(num_levels); // size is number of levels for (int lev = 0; lev < num_levels; ++lev) { - for (int n=0; n<3; n++) { - const amrex::MultiFab& Bfp = m_WarpX->getField( FieldType::Bfield_fp,lev,n); - m_Bold[lev][n] = std::make_unique( Bfp.boxArray(), - Bfp.DistributionMap(), - Bfp.nComp(), - Bfp.nGrowVect() ); - } + const auto& ba_Bx = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->boxArray(); + const auto& ba_By = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{1}, lev)->boxArray(); + const auto& ba_Bz = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{2}, lev)->boxArray(); + const auto& dm = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->DistributionMap(); + const amrex::IntVect ngb = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->nGrowVect(); + m_WarpX->m_fields.alloc_init(FieldType::Bold, Direction{0}, lev, ba_Bx, dm, 1, ngb, 0.0_rt); + m_WarpX->m_fields.alloc_init(FieldType::Bold, Direction{1}, lev, ba_By, dm, 1, ngb, 0.0_rt); + m_WarpX->m_fields.alloc_init(FieldType::Bold, Direction{2}, lev, ba_Bz, dm, 1, ngb, 0.0_rt); } // Parse theta-implicit solver specific parameters @@ -88,12 +89,13 @@ void ThetaImplicitEM::OneStep ( const amrex::Real a_time, // Save Eg at the start of the time step m_Eold.Copy( FieldType::Efield_fp ); - const int num_levels = static_cast(m_Bold.size()); + const int num_levels = 1; for (int lev = 0; lev < num_levels; ++lev) { - for (int n=0; n<3; n++) { - const amrex::MultiFab& Bfp = 
m_WarpX->getField(FieldType::Bfield_fp,lev,n); - amrex::MultiFab& Bold = *m_Bold[lev][n]; - amrex::MultiFab::Copy(Bold, Bfp, 0, 0, 1, Bold.nGrowVect()); + const ablastr::fields::VectorField Bfp = m_WarpX->m_fields.get_alldirs(FieldType::Bfield_fp, lev); + ablastr::fields::VectorField Bold = m_WarpX->m_fields.get_alldirs(FieldType::Bold, lev); + for (int n = 0; n < 3; ++n) { + amrex::MultiFab::Copy( *Bold[n], *Bfp[n], 0, 0, Bold[n]->nComp(), + Bold[n]->nGrowVect() ); } } @@ -145,7 +147,8 @@ void ThetaImplicitEM::UpdateWarpXFields ( const WarpXSolverVec& a_E, m_WarpX->SetElectricFieldAndApplyBCs( a_E ); // Update Bfield_fp owned by WarpX - m_WarpX->UpdateMagneticFieldAndApplyBCs( m_Bold, m_theta*a_dt ); + ablastr::fields::MultiLevelVectorField const& Bold = m_WarpX->m_fields.get_mr_levels_alldirs(FieldType::Bold, 0); + m_WarpX->UpdateMagneticFieldAndApplyBCs( Bold, m_theta*a_dt ); } @@ -160,6 +163,7 @@ void ThetaImplicitEM::FinishFieldUpdate ( amrex::Real a_new_time ) const amrex::Real c1 = 1._rt - c0; m_E.linComb( c0, m_E, c1, m_Eold ); m_WarpX->SetElectricFieldAndApplyBCs( m_E ); - m_WarpX->FinishMagneticFieldAndApplyBCs( m_Bold, m_theta ); + ablastr::fields::MultiLevelVectorField const & Bold = m_WarpX->m_fields.get_mr_levels_alldirs(FieldType::Bold, 0); + m_WarpX->FinishMagneticFieldAndApplyBCs( Bold, m_theta ); } diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp index 8dd97ed5525..806c3412990 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp @@ -11,7 +11,7 @@ #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "Evolve/WarpXDtType.H" #include "Evolve/WarpXPushType.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" #include "Parallelization/GuardCellManager.H" #include "Particles/MultiParticleContainer.H" @@ -73,7 +73,10 @@ WarpX::SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E ) a_E.getArrayVecType()==warpx::fields::FieldType::Efield_fp, "WarpX::SetElectricFieldAndApplyBCs() must be called with Efield_fp type"); - const amrex::Vector, 3 > >& Evec = a_E.getArrayVec(); + using warpx::fields::FieldType; + + ablastr::fields::MultiLevelVectorField Efield_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level); + const ablastr::fields::MultiLevelVectorField& Evec = a_E.getArrayVec(); amrex::MultiFab::Copy(*Efield_fp[0][0], *Evec[0][0], 0, 0, ncomps, Evec[0][0]->nGrowVect()); amrex::MultiFab::Copy(*Efield_fp[0][1], *Evec[0][1], 0, 0, ncomps, Evec[0][1]->nGrowVect()); amrex::MultiFab::Copy(*Efield_fp[0][2], *Evec[0][2], 0, 0, ncomps, Evec[0][2]->nGrowVect()); @@ -82,21 +85,29 @@ WarpX::SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E ) } void -WarpX::UpdateMagneticFieldAndApplyBCs( const amrex::Vector, 3 > >& a_Bn, - amrex::Real a_thetadt ) +WarpX::UpdateMagneticFieldAndApplyBCs( ablastr::fields::MultiLevelVectorField const& a_Bn, + amrex::Real a_thetadt ) { - amrex::MultiFab::Copy(*Bfield_fp[0][0], *a_Bn[0][0], 0, 0, ncomps, a_Bn[0][0]->nGrowVect()); - amrex::MultiFab::Copy(*Bfield_fp[0][1], *a_Bn[0][1], 0, 0, ncomps, a_Bn[0][1]->nGrowVect()); - amrex::MultiFab::Copy(*Bfield_fp[0][2], *a_Bn[0][2], 0, 0, ncomps, a_Bn[0][2]->nGrowVect()); + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + for (int lev = 0; lev <= finest_level; ++lev) { + ablastr::fields::VectorField Bfp = 
m_fields.get_alldirs(FieldType::Bfield_fp, lev); + amrex::MultiFab::Copy(*Bfp[0], *a_Bn[lev][0], 0, 0, ncomps, a_Bn[lev][0]->nGrowVect()); + amrex::MultiFab::Copy(*Bfp[1], *a_Bn[lev][1], 0, 0, ncomps, a_Bn[lev][1]->nGrowVect()); + amrex::MultiFab::Copy(*Bfp[2], *a_Bn[lev][2], 0, 0, ncomps, a_Bn[lev][2]->nGrowVect()); + } EvolveB(a_thetadt, DtType::Full); ApplyMagneticFieldBCs(); } void -WarpX::FinishMagneticFieldAndApplyBCs( const amrex::Vector, 3 > >& a_Bn, - amrex::Real a_theta ) +WarpX::FinishMagneticFieldAndApplyBCs( ablastr::fields::MultiLevelVectorField const& a_Bn, + amrex::Real a_theta ) { - FinishImplicitField(Bfield_fp, a_Bn, a_theta); + using warpx::fields::FieldType; + + FinishImplicitField(m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, 0), a_Bn, a_theta); ApplyMagneticFieldBCs(); } @@ -248,9 +259,9 @@ WarpX::FinishImplicitParticleUpdate () } void -WarpX::FinishImplicitField( amrex::Vector, 3 > >& Field_fp, - const amrex::Vector, 3 > >& Field_n, - amrex::Real theta ) +WarpX::FinishImplicitField( ablastr::fields::MultiLevelVectorField const& Field_fp, + ablastr::fields::MultiLevelVectorField const& Field_n, + amrex::Real theta ) { using namespace amrex::literals; @@ -335,15 +346,17 @@ WarpX::ImplicitComputeRHSE (int lev, PatchType patch_type, amrex::Real a_dt, War // a_Erhs_vec storing only the RHS of the update equation. I.e., // c^2*dt*(curl(B^{n+theta} - mu0*J^{n+1/2}) if (patch_type == PatchType::fine) { - m_fdtd_solver_fp[lev]->EvolveE( a_Erhs_vec.getArrayVec()[lev], Bfield_fp[lev], - current_fp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], - F_fp[lev], lev, a_dt ); + m_fdtd_solver_fp[lev]->EvolveE( m_fields, + lev, + patch_type, + a_Erhs_vec.getArrayVec()[lev], + a_dt ); } else { - m_fdtd_solver_cp[lev]->EvolveE( a_Erhs_vec.getArrayVec()[lev], Bfield_cp[lev], - current_cp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], - F_cp[lev], lev, a_dt ); + m_fdtd_solver_cp[lev]->EvolveE( m_fields, + lev, + patch_type, + a_Erhs_vec.getArrayVec()[lev], + a_dt ); } // Compute Efield_rhs in PML cells by calling EvolveEPML diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H index f884f5fa623..29c808b48cd 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H +++ b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H @@ -8,10 +8,11 @@ #define WarpXSolverVec_H_ #include "Utils/TextMsg.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include #include +#include #include #include @@ -59,7 +60,7 @@ public: WarpXSolverVec(const WarpXSolverVec&) = delete; - ~WarpXSolverVec() = default; + ~WarpXSolverVec(); using value_type = amrex::Real; using RT = value_type; @@ -67,16 +68,16 @@ public: [[nodiscard]] inline bool IsDefined () const { return m_is_defined; } void Define ( WarpX* a_WarpX, - warpx::fields::FieldType a_array_type, - warpx::fields::FieldType a_scalar_type = warpx::fields::FieldType::None ); + const std::string& a_vector_type_name, + const std::string& a_scalar_type_name = "none" ); inline void Define ( const WarpXSolverVec& a_solver_vec ) { assertIsDefined( a_solver_vec ); Define( WarpXSolverVec::m_WarpX, - a_solver_vec.getArrayVecType(), - a_solver_vec.getScalarVecType() ); + a_solver_vec.getVectorType(), + a_solver_vec.getScalarType() ); } [[nodiscard]] RT dotProduct( const WarpXSolverVec& a_X ) const; @@ -94,13 +95,13 @@ public: for (int lev = 0; lev < m_num_amr_levels; ++lev) { if (m_array_type != warpx::fields::FieldType::None) { for (int n = 0; n < 3; 
++n) { - const std::unique_ptr& this_field = a_solver_vec.getArrayVec()[lev][n]; + const amrex::MultiFab* this_field = a_solver_vec.getArrayVec()[lev][n]; amrex::MultiFab::Copy( *m_array_vec[lev][n], *this_field, 0, 0, m_ncomp, amrex::IntVect::TheZeroVector() ); } } if (m_scalar_type != warpx::fields::FieldType::None) { - const std::unique_ptr& this_scalar = a_solver_vec.getScalarVec()[lev]; + const amrex::MultiFab* this_scalar = a_solver_vec.getScalarVec()[lev]; amrex::MultiFab::Copy( *m_scalar_vec[lev], *this_scalar, 0, 0, m_ncomp, amrex::IntVect::TheZeroVector() ); } } @@ -270,26 +271,34 @@ public: return std::sqrt(norm); } - [[nodiscard]] const amrex::Vector,3>>& getArrayVec() const {return m_array_vec;} - amrex::Vector,3>>& getArrayVec() {return m_array_vec;} + [[nodiscard]] const ablastr::fields::MultiLevelVectorField& getArrayVec() const {return m_array_vec;} + ablastr::fields::MultiLevelVectorField& getArrayVec() {return m_array_vec;} - [[nodiscard]] const amrex::Vector>& getScalarVec() const {return m_scalar_vec;} - amrex::Vector>& getScalarVec() {return m_scalar_vec;} + [[nodiscard]] const ablastr::fields::MultiLevelScalarField& getScalarVec() const {return m_scalar_vec;} + ablastr::fields::MultiLevelScalarField& getScalarVec() {return m_scalar_vec;} // solver vector types are type warpx::fields::FieldType [[nodiscard]] warpx::fields::FieldType getArrayVecType () const { return m_array_type; } [[nodiscard]] warpx::fields::FieldType getScalarVecType () const { return m_scalar_type; } + // solver vector type names + [[nodiscard]] std::string getVectorType () const { return m_vector_type_name; } + [[nodiscard]] std::string getScalarType () const { return m_scalar_type_name; } + + private: bool m_is_defined = false; - amrex::Vector,3>> m_array_vec; - amrex::Vector> m_scalar_vec; + ablastr::fields::MultiLevelVectorField m_array_vec; + ablastr::fields::MultiLevelScalarField m_scalar_vec; warpx::fields::FieldType m_array_type = warpx::fields::FieldType::None; warpx::fields::FieldType m_scalar_type = warpx::fields::FieldType::None; + std::string m_vector_type_name = "none"; + std::string m_scalar_type_name = "none"; + static constexpr int m_ncomp = 1; static constexpr int m_num_amr_levels = 1; diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp index f2a88d82d42..6a0e6bb8a91 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp @@ -7,11 +7,26 @@ #include "FieldSolver/ImplicitSolvers/WarpXSolverVec.H" #include "WarpX.H" -using namespace warpx::fields; +using warpx::fields::FieldType; -void WarpXSolverVec::Define ( WarpX* a_WarpX, - FieldType a_array_type, - FieldType a_scalar_type ) +WarpXSolverVec::~WarpXSolverVec () +{ + for (auto & lvl : m_array_vec) + { + for (int i = 0; i < 3; ++i) + { + delete lvl[i]; + } + } + for (auto & sca : m_scalar_vec) + { + delete sca; + } +} + +void WarpXSolverVec::Define ( WarpX* a_WarpX, + const std::string& a_vector_type_name, + const std::string& a_scalar_type_name ) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !IsDefined(), @@ -23,8 +34,33 @@ void WarpXSolverVec::Define ( WarpX* a_WarpX, m_warpx_ptr_defined = true; } - m_array_type = a_array_type; - m_scalar_type = a_scalar_type; + m_vector_type_name = a_vector_type_name; + m_scalar_type_name = a_scalar_type_name; + + if (m_vector_type_name=="Efield_fp") { + m_array_type = FieldType::Efield_fp; + } + else if (m_vector_type_name=="Bfield_fp") { + m_array_type = FieldType::Bfield_fp; + } + else if 
(m_vector_type_name=="vector_potential_fp_nodal") { m_array_type = FieldType::vector_potential_fp; } + else if (m_vector_type_name!="none") { + WARPX_ABORT_WITH_MESSAGE(a_vector_type_name +" is not a valid option for array type used in defining" +" a WarpXSolverVec. Valid array types are: Efield_fp, Bfield_fp," +" and vector_potential_fp_nodal"); + } + + if (m_scalar_type_name=="phi_fp") { + m_scalar_type = FieldType::phi_fp; + } + else if (m_scalar_type_name!="none") { + WARPX_ABORT_WITH_MESSAGE(a_scalar_type_name +" is not a valid option for scalar type used in defining" +" a WarpXSolverVec. Valid scalar types are: phi_fp"); + } m_array_vec.resize(m_num_amr_levels); m_scalar_vec.resize(m_num_amr_levels); @@ -37,13 +73,12 @@ void WarpXSolverVec::Define ( WarpX* a_WarpX, "WarpXSolverVec::Define() called with array_type not an array field"); for (int lev = 0; lev < m_num_amr_levels; ++lev) { - using arr_mf_type = std::array; - const arr_mf_type this_array = m_WarpX->getFieldPointerArray(m_array_type, lev); + const ablastr::fields::VectorField this_array = m_WarpX->m_fields.get_alldirs(m_vector_type_name, lev); for (int n = 0; n < 3; n++) { - m_array_vec[lev][n] = std::make_unique( this_array[n]->boxArray(), - this_array[n]->DistributionMap(), - this_array[n]->nComp(), - amrex::IntVect::TheZeroVector() ); + m_array_vec[lev][n] = new amrex::MultiFab( this_array[n]->boxArray(), + this_array[n]->DistributionMap(), + this_array[n]->nComp(), + amrex::IntVect::TheZeroVector() ); } } @@ -57,11 +92,11 @@ void WarpXSolverVec::Define ( WarpX* a_WarpX, "WarpXSolverVec::Define() called with scalar_type not a scalar field "); for (int lev = 0; lev < m_num_amr_levels; ++lev) { - const amrex::MultiFab* this_mf = m_WarpX->getFieldPointer(m_scalar_type,lev,0); - m_scalar_vec[lev] = std::make_unique( this_mf->boxArray(), - this_mf->DistributionMap(), - this_mf->nComp(), - amrex::IntVect::TheZeroVector() ); + const amrex::MultiFab* this_mf = m_WarpX->m_fields.get(m_scalar_type_name,lev); + m_scalar_vec[lev] = new amrex::MultiFab( this_mf->boxArray(), + this_mf->DistributionMap(), + this_mf->nComp(), + amrex::IntVect::TheZeroVector() ); } } @@ -87,16 +122,15 @@ void WarpXSolverVec::Copy ( FieldType a_array_type, for (int lev = 0; lev < m_num_amr_levels; ++lev) { if (m_array_type != FieldType::None) { - using arr_mf_type = std::array; - const arr_mf_type this_array = m_WarpX->getFieldPointerArray(m_array_type, lev); + const ablastr::fields::VectorField this_array = m_WarpX->m_fields.get_alldirs(m_vector_type_name, lev); for (int n = 0; n < 3; ++n) { amrex::MultiFab::Copy( *m_array_vec[lev][n], *this_array[n], 0, 0, m_ncomp, amrex::IntVect::TheZeroVector() ); } } if (m_scalar_type != FieldType::None) { - const amrex::MultiFab* this_scalar = m_WarpX->getFieldPointer(m_scalar_type,lev,0); - amrex::MultiFab::Copy( *m_scalar_vec[lev], *this_scalar, 0, 0, m_ncomp, + const amrex::MultiFab* this_mf = m_WarpX->m_fields.get(m_scalar_type_name,lev); + amrex::MultiFab::Copy( *m_scalar_vec[lev], *this_mf, 0, 0, m_ncomp, amrex::IntVect::TheZeroVector() ); } } diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H index a8bbc954e29..c07551c165c 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H @@ -7,6 +7,8 @@ #ifndef WARPX_MAGNETOSTATICSOLVER_H_ #define WARPX_MAGNETOSTATICSOLVER_H_ +#include + #include #include #include @@ -34,23 +36,23 @@ 
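
// Editor's sketch (annotation, not part of the patch): how the string-based
// WarpXSolverVec::Define() above is used by the implicit solvers elsewhere in
// this patch (SemiImplicitEM.cpp, ThetaImplicitEM.cpp):
//
//     WarpXSolverVec m_E, m_Eold;
//     m_E.Define( m_WarpX, "Efield_fp" );  // name resolves to FieldType::Efield_fp
//     m_Eold.Define( m_E );                // clones the layout of m_E
//
// Unrecognized names abort through WARPX_ABORT_WITH_MESSAGE, so the valid
// options remain discoverable at run time.
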
namespace MagnetostaticSolver { */ class EBCalcBfromVectorPotentialPerLevel { private: - const amrex::Vector, 3>>& m_b_field; - const amrex::Vector, 3>>& m_grad_buf_e_stag; - const amrex::Vector, 3>>& m_grad_buf_b_stag; + ablastr::fields::MultiLevelVectorField m_b_field; + ablastr::fields::MultiLevelVectorField m_grad_buf_e_stag; + ablastr::fields::MultiLevelVectorField m_grad_buf_b_stag; public: - EBCalcBfromVectorPotentialPerLevel(const amrex::Vector, 3>>& b_field, - const amrex::Vector, 3>>& grad_buf_e_stag, - const amrex::Vector, 3>>& grad_buf_b_stag) + EBCalcBfromVectorPotentialPerLevel (ablastr::fields::MultiLevelVectorField const & b_field, + ablastr::fields::MultiLevelVectorField const & grad_buf_e_stag, + ablastr::fields::MultiLevelVectorField const & grad_buf_b_stag) : m_b_field(b_field), m_grad_buf_e_stag(grad_buf_e_stag), m_grad_buf_b_stag(grad_buf_b_stag) {} - void operator()(amrex::Array,3> & mlmg, int lev); + void operator() (amrex::Array, 3> & mlmg, int lev); // Function to perform interpolation from cell edges to cell faces - void doInterp(const std::unique_ptr &src, const std::unique_ptr &dst); + void doInterp (amrex::MultiFab & src, amrex::MultiFab & dst); }; } // namespace MagnetostaticSolver diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp index 031bc915afc..84efe8bf45a 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp @@ -6,6 +6,7 @@ */ #include "WarpX.H" +#include "Fields.H" #include "FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H" #include "EmbeddedBoundary/Enabled.H" #include "Parallelization/GuardCellManager.H" @@ -19,6 +20,7 @@ #include "Utils/WarpXProfilerWrapper.H" #include "Parallelization/WarpXComm_K.H" +#include #include #include #include @@ -71,6 +73,9 @@ WarpX::ComputeMagnetostaticField() void WarpX::AddMagnetostaticFieldLabFrame() { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + WARPX_PROFILE("WarpX::AddMagnetostaticFieldLabFrame"); // Store the boundary conditions for the field solver if they haven't been @@ -87,7 +92,7 @@ WarpX::AddMagnetostaticFieldLabFrame() // reset current_fp before depositing current density for this step for (int lev = 0; lev <= max_level; lev++) { for (int dim=0; dim < 3; dim++) { - current_fp[lev][dim]->setVal(0.); + m_fields.get(FieldType::current_fp, Direction{dim}, lev)->setVal(0.); } } @@ -95,22 +100,26 @@ WarpX::AddMagnetostaticFieldLabFrame() for (int ispecies=0; ispeciesnSpecies(); ispecies++){ WarpXParticleContainer& species = mypc->GetParticleContainer(ispecies); if (!species.do_not_deposit) { - species.DepositCurrent(current_fp, dt[0], 0.); + species.DepositCurrent( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + dt[0], 0.); } } #ifdef WARPX_DIM_RZ for (int lev = 0; lev <= max_level; lev++) { - ApplyInverseVolumeScalingToCurrentDensity(current_fp[lev][0].get(), - current_fp[lev][1].get(), - current_fp[lev][2].get(), lev); + ApplyInverseVolumeScalingToCurrentDensity( + m_fields.get(FieldType::current_fp, Direction{0}, lev), + m_fields.get(FieldType::current_fp, Direction{1}, lev), + m_fields.get(FieldType::current_fp, Direction{2}, lev), + lev ); } #endif - SyncCurrent(current_fp, current_cp, current_buf); // Apply filter, perform MPI exchange, interpolate across levels + SyncCurrent("current_fp"); // set the boundary and current density potentials - 
setVectorPotentialBC(vector_potential_fp_nodal); + setVectorPotentialBC(m_fields.get_mr_levels_alldirs(FieldType::vector_potential_fp_nodal, finest_level)); // Compute the vector potential A, by solving the Poisson equation WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !IsPythonCallbackInstalled("poissonsolver"), @@ -123,9 +132,11 @@ WarpX::AddMagnetostaticFieldLabFrame() const int self_fields_max_iters = 200; const int self_fields_verbosity = 2; - computeVectorPotential( current_fp, vector_potential_fp_nodal, self_fields_required_precision, - magnetostatic_absolute_tolerance, self_fields_max_iters, - self_fields_verbosity ); + computeVectorPotential( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::vector_potential_fp_nodal, finest_level), + self_fields_required_precision, magnetostatic_absolute_tolerance, self_fields_max_iters, + self_fields_verbosity); } /* Compute the vector potential `A` by solving the Poisson equation with `J` as @@ -145,29 +156,36 @@ WarpX::AddMagnetostaticFieldLabFrame() \param[in] verbosity The verbosity setting for the MLMG solver */ void -WarpX::computeVectorPotential (const amrex::Vector,3> >& curr, - amrex::Vector,3> >& A, - Real const required_precision, - Real absolute_tolerance, - int const max_iters, - int const verbosity) const +WarpX::computeVectorPotential (ablastr::fields::MultiLevelVectorField const& curr, + ablastr::fields::MultiLevelVectorField const& A, + Real const required_precision, + Real absolute_tolerance, + int const max_iters, + int const verbosity) // const // This breaks non-const m_fields.get_mr_levels_alldirs { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + // create a vector to our fields, sorted by level amrex::Vector> sorted_curr; amrex::Vector> sorted_A; for (int lev = 0; lev <= finest_level; ++lev) { - sorted_curr.emplace_back(amrex::Array ({curr[lev][0].get(), - curr[lev][1].get(), - curr[lev][2].get()})); - sorted_A.emplace_back(amrex::Array ({A[lev][0].get(), - A[lev][1].get(), - A[lev][2].get()})); + sorted_curr.emplace_back(amrex::Array ({curr[lev][Direction{0}], + curr[lev][Direction{1}], + curr[lev][Direction{2}]})); + sorted_A.emplace_back(amrex::Array ({A[lev][Direction{0}], + A[lev][Direction{1}], + A[lev][Direction{2}]})); } #if defined(AMREX_USE_EB) - const std::optional post_A_calculation({Bfield_fp, - vector_potential_grad_buf_e_stag, - vector_potential_grad_buf_b_stag}); + const ablastr::fields::MultiLevelVectorField Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level); + const std::optional post_A_calculation( + { + Bfield_fp, + m_fields.get_mr_levels_alldirs(FieldType::vector_potential_grad_buf_e_stag, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::vector_potential_grad_buf_b_stag, finest_level) + }); amrex::Vector factories; for (int lev = 0; lev <= finest_level; ++lev) { @@ -210,8 +228,10 @@ WarpX::computeVectorPotential (const amrex::Vector,3>>& A ) const +WarpX::setVectorPotentialBC (ablastr::fields::MultiLevelVectorField const& A) const { + using ablastr::fields::Direction; + // check if any dimension has non-periodic boundary conditions if (!m_vector_poisson_boundary_handler.has_non_periodic) { return; } @@ -226,11 +246,11 @@ WarpX::setVectorPotentialBC ( amrex::Vectorarray(mfi); + auto A_arr = A[lev][Direction{adim}]->array(mfi); // Extract tileboxes for which to loop - const Box& tb = mfi.tilebox( A[lev][adim]->ixType().toIntVect()); + const Box& tb = mfi.tilebox( 
A[lev][Direction{adim}]->ixType().toIntVect()); // loop over dimensions for (int idim=0; idim &src, - const std::unique_ptr &dst) +void MagnetostaticSolver::EBCalcBfromVectorPotentialPerLevel::doInterp (amrex::MultiFab & src, + amrex::MultiFab & dst) { WarpX &warpx = WarpX::GetInstance(); @@ -373,20 +393,20 @@ void MagnetostaticSolver::EBCalcBfromVectorPotentialPerLevel::doInterp(const std amrex::Real const * stencil_coeffs_z = warpx.device_field_centering_stencil_coeffs_z.data(); // Synchronize the ghost cells, do halo exchange - ablastr::utils::communication::FillBoundary(*src, - src->nGrowVect(), + ablastr::utils::communication::FillBoundary(src, + src.nGrowVect(), WarpX::do_single_precision_comms); #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif - for (MFIter mfi(*dst, TilingIfNotGPU()); mfi.isValid(); ++mfi) + for (MFIter mfi(dst, TilingIfNotGPU()); mfi.isValid(); ++mfi) { - IntVect const src_stag = src->ixType().toIntVect(); - IntVect const dst_stag = dst->ixType().toIntVect(); + IntVect const src_stag = src.ixType().toIntVect(); + IntVect const dst_stag = dst.ixType().toIntVect(); - Array4 const& src_arr = src->const_array(mfi); - Array4 const& dst_arr = dst->array(mfi); + Array4 const& src_arr = src.const_array(mfi); + Array4 const& dst_arr = dst.array(mfi); const Box bx = mfi.tilebox(); @@ -408,12 +428,12 @@ void MagnetostaticSolver::EBCalcBfromVectorPotentialPerLevel::operator()(amrex:: const amrex::Array buf_ptr = { #if defined(WARPX_DIM_3D) - m_grad_buf_e_stag[lev][0].get(), - m_grad_buf_e_stag[lev][1].get(), - m_grad_buf_e_stag[lev][2].get() + m_grad_buf_e_stag[lev][0], + m_grad_buf_e_stag[lev][1], + m_grad_buf_e_stag[lev][2] #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - m_grad_buf_e_stag[lev][0].get(), - m_grad_buf_e_stag[lev][2].get() + m_grad_buf_e_stag[lev][0], + m_grad_buf_e_stag[lev][2] #endif }; @@ -421,13 +441,13 @@ void MagnetostaticSolver::EBCalcBfromVectorPotentialPerLevel::operator()(amrex:: mlmg[0]->getGradSolution({buf_ptr}); // Interpolate dAx/dz to By grid buffer, then add to By - this->doInterp(m_grad_buf_e_stag[lev][2], - m_grad_buf_b_stag[lev][1]); + this->doInterp(*m_grad_buf_e_stag[lev][2], + *m_grad_buf_b_stag[lev][1]); MultiFab::Add(*(m_b_field[lev][1]), *(m_grad_buf_b_stag[lev][1]), 0, 0, 1, 0 ); // Interpolate dAx/dy to Bz grid buffer, then subtract from Bz - this->doInterp(m_grad_buf_e_stag[lev][1], - m_grad_buf_b_stag[lev][2]); + this->doInterp(*m_grad_buf_e_stag[lev][1], + *m_grad_buf_b_stag[lev][2]); m_grad_buf_b_stag[lev][2]->mult(-1._rt); MultiFab::Add(*(m_b_field[lev][2]), *(m_grad_buf_b_stag[lev][2]), 0, 0, 1, 0 ); @@ -435,13 +455,13 @@ void MagnetostaticSolver::EBCalcBfromVectorPotentialPerLevel::operator()(amrex:: mlmg[1]->getGradSolution({buf_ptr}); // Interpolate dAy/dx to Bz grid buffer, then add to Bz - this->doInterp(m_grad_buf_e_stag[lev][0], - m_grad_buf_b_stag[lev][2]); + this->doInterp(*m_grad_buf_e_stag[lev][0], + *m_grad_buf_b_stag[lev][2]); MultiFab::Add(*(m_b_field[lev][2]), *(m_grad_buf_b_stag[lev][2]), 0, 0, 1, 0 ); // Interpolate dAy/dz to Bx grid buffer, then subtract from Bx - this->doInterp(m_grad_buf_e_stag[lev][2], - m_grad_buf_b_stag[lev][0]); + this->doInterp(*m_grad_buf_e_stag[lev][2], + *m_grad_buf_b_stag[lev][0]); m_grad_buf_b_stag[lev][0]->mult(-1._rt); MultiFab::Add(*(m_b_field[lev][0]), *(m_grad_buf_b_stag[lev][0]), 0, 0, 1, 0 ); @@ -449,13 +469,13 @@ void MagnetostaticSolver::EBCalcBfromVectorPotentialPerLevel::operator()(amrex:: mlmg[2]->getGradSolution({buf_ptr}); // 
Interpolate dAz/dy to Bx grid buffer, then add to Bx - this->doInterp(m_grad_buf_e_stag[lev][1], - m_grad_buf_b_stag[lev][0]); + this->doInterp(*m_grad_buf_e_stag[lev][1], + *m_grad_buf_b_stag[lev][0]); MultiFab::Add(*(m_b_field[lev][0]), *(m_grad_buf_b_stag[lev][0]), 0, 0, 1, 0 ); // Interpolate dAz/dx to By grid buffer, then subtract from By - this->doInterp(m_grad_buf_e_stag[lev][0], - m_grad_buf_b_stag[lev][1]); + this->doInterp(*m_grad_buf_e_stag[lev][0], + *m_grad_buf_b_stag[lev][1]); m_grad_buf_b_stag[lev][1]->mult(-1._rt); MultiFab::Add(*(m_b_field[lev][1]), *(m_grad_buf_b_stag[lev][1]), 0, 0, 1, 0 ); } diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H index c72e7db250d..462bce23c23 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H @@ -13,6 +13,7 @@ #include "FieldSolver/SpectralSolver/SpectralFieldData_fwd.H" #include "FieldSolver/SpectralSolver/SpectralFieldData.H" +#include #include #include @@ -74,7 +75,7 @@ class SpectralBaseAlgorithm */ void ComputeSpectralDivE ( int lev, SpectralFieldData& field_data, - const std::array,3>& Efield, + ablastr::fields::VectorField const& Efield, amrex::MultiFab& divE ); protected: // Meant to be used in the subclasses diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp index b3f18dd6912..069b724f96c 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp @@ -9,6 +9,8 @@ #include "FieldSolver/SpectralSolver/SpectralFieldData.H" #include "Utils/WarpX_Complex.H" +#include + #include #include #include @@ -58,8 +60,9 @@ void SpectralBaseAlgorithm::ComputeSpectralDivE ( const int lev, SpectralFieldData& field_data, - const std::array,3>& Efield, - amrex::MultiFab& divE ) + ablastr::fields::VectorField const & Efield, + amrex::MultiFab& divE +) { const SpectralFieldIndex& Idx = m_spectral_index; diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H index 8e03a2a2559..9f6b5b09219 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H @@ -10,6 +10,7 @@ #include "FieldSolver/SpectralSolver/SpectralKSpaceRZ.H" #include "FieldSolver/SpectralSolver/SpectralFieldDataRZ.H" +#include #include @@ -66,7 +67,7 @@ class SpectralBaseAlgorithmRZ */ void ComputeSpectralDivE ( int lev, SpectralFieldDataRZ& field_data, - const std::array,3>& Efield, + ablastr::fields::VectorField const & Efield, amrex::MultiFab& divE ); /** diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.cpp index f8ef0ef4730..3e556363a6f 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.cpp @@ -6,6 +6,8 @@ */ #include "SpectralBaseAlgorithmRZ.H" +#include + #include using namespace amrex; @@ -18,7 +20,7 @@ void 
SpectralBaseAlgorithmRZ::ComputeSpectralDivE ( const int lev, SpectralFieldDataRZ& field_data, - const std::array,3>& Efield, + ablastr::fields::VectorField const & Efield, amrex::MultiFab& divE ) { using amrex::operator""_rt; diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.H b/Source/FieldSolver/SpectralSolver/SpectralSolver.H index 1aa1e540711..bcd80e421a8 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.H @@ -11,9 +11,9 @@ #include "SpectralAlgorithms/SpectralBaseAlgorithm.H" #include "SpectralFieldData.H" - #include "Utils/WarpXAlgorithmSelection.H" +#include #include #include @@ -127,9 +127,12 @@ class SpectralSolver * \brief Public interface to call the member function ComputeSpectralDivE * of the base class SpectralBaseAlgorithm from objects of class SpectralSolver */ - void ComputeSpectralDivE ( int lev, - const std::array,3>& Efield, - amrex::MultiFab& divE ) { + void ComputeSpectralDivE ( + int lev, + ablastr::fields::VectorField const & Efield, + amrex::MultiFab& divE + ) + { algorithm->ComputeSpectralDivE( lev, field_data, Efield, divE ); } diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H index 004255e4d72..61cf64036eb 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H @@ -12,6 +12,7 @@ #include "SpectralAlgorithms/SpectralBaseAlgorithmRZ.H" #include "SpectralFieldDataRZ.H" +#include #include @@ -95,7 +96,8 @@ class SpectralSolverRZ * \brief Public interface to call the member function ComputeSpectralDivE * of the base class SpectralBaseAlgorithmRZ from objects of class SpectralSolverRZ */ - void ComputeSpectralDivE (int lev, const std::array,3>& Efield, + void ComputeSpectralDivE (int lev, + ablastr::fields::VectorField const & Efield, amrex::MultiFab& divE); /** diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp index 7eb3f2c3ae6..9a8cff9f1f3 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp @@ -142,8 +142,10 @@ SpectralSolverRZ::pushSpectralFields (const bool doing_pml) { */ void SpectralSolverRZ::ComputeSpectralDivE (const int lev, - const std::array,3>& Efield, - amrex::MultiFab& divE) { + ablastr::fields::VectorField const & Efield, + amrex::MultiFab& divE +) +{ algorithm->ComputeSpectralDivE(lev, field_data, Efield, divE); } diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index 602a2666b27..fd786dc65ba 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -10,6 +10,7 @@ #include "BoundaryConditions/PML.H" #include "Evolve/WarpXDtType.H" +#include "Fields.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" #if defined(WARPX_USE_FFT) # include "FieldSolver/SpectralSolver/SpectralFieldData.H" @@ -53,6 +54,7 @@ #include using namespace amrex; +using warpx::fields::FieldType; #ifdef WARPX_USE_FFT namespace { @@ -64,7 +66,7 @@ namespace { #else SpectralSolver& solver, #endif - const std::array,3>& vector_field, + const ablastr::fields::VectorField& vector_field, const int compx, const int compy, const int compz) { #ifdef WARPX_DIM_RZ @@ -84,7 +86,7 @@ namespace { #else SpectralSolver& solver, #endif - const std::array,3>& vector_field, + const ablastr::fields::VectorField& 
vector_field, const int compx, const int compy, const int compz, const amrex::IntVect& fill_guards) { @@ -100,63 +102,93 @@ namespace { } } -void WarpX::PSATDForwardTransformEB ( - const amrex::Vector,3>>& E_fp, - const amrex::Vector,3>>& B_fp, - const amrex::Vector,3>>& E_cp, - const amrex::Vector,3>>& B_cp) +void WarpX::PSATDForwardTransformEB () { const SpectralFieldIndex& Idx = spectral_solver_fp[0]->m_spectral_index; + const std::string Efield_fp_string = "Efield_fp"; + const std::string Efield_cp_string = "Efield_cp"; + const std::string Bfield_fp_string = "Bfield_fp"; + const std::string Bfield_cp_string = "Bfield_cp"; + for (int lev = 0; lev <= finest_level; ++lev) { - ForwardTransformVect(lev, *spectral_solver_fp[lev], E_fp[lev], Idx.Ex, Idx.Ey, Idx.Ez); - ForwardTransformVect(lev, *spectral_solver_fp[lev], B_fp[lev], Idx.Bx, Idx.By, Idx.Bz); + if (m_fields.has_vector(Efield_fp_string, lev)) { + ablastr::fields::VectorField const E_fp = m_fields.get_alldirs(Efield_fp_string, lev); + ForwardTransformVect(lev, *spectral_solver_fp[lev], E_fp, Idx.Ex, Idx.Ey, Idx.Ez); + } + if (m_fields.has_vector(Bfield_fp_string, lev)) { + ablastr::fields::VectorField const B_fp = m_fields.get_alldirs(Bfield_fp_string, lev); + ForwardTransformVect(lev, *spectral_solver_fp[lev], B_fp, Idx.Bx, Idx.By, Idx.Bz); + } if (spectral_solver_cp[lev]) { - ForwardTransformVect(lev, *spectral_solver_cp[lev], E_cp[lev], Idx.Ex, Idx.Ey, Idx.Ez); - ForwardTransformVect(lev, *spectral_solver_cp[lev], B_cp[lev], Idx.Bx, Idx.By, Idx.Bz); + if (m_fields.has_vector(Efield_cp_string, lev)) { + ablastr::fields::VectorField const E_cp = m_fields.get_alldirs(Efield_cp_string, lev); + ForwardTransformVect(lev, *spectral_solver_cp[lev], E_cp, Idx.Ex, Idx.Ey, Idx.Ez); + } + if (m_fields.has_vector(Bfield_cp_string, lev)) { + ablastr::fields::VectorField const B_cp = m_fields.get_alldirs(Bfield_cp_string, lev); + ForwardTransformVect(lev, *spectral_solver_cp[lev], B_cp, Idx.Bx, Idx.By, Idx.Bz); + } } } } -void WarpX::PSATDBackwardTransformEB ( - const amrex::Vector,3>>& E_fp, - const amrex::Vector,3>>& B_fp, - const amrex::Vector,3>>& E_cp, - const amrex::Vector,3>>& B_cp) +void WarpX::PSATDBackwardTransformEB () { const SpectralFieldIndex& Idx = spectral_solver_fp[0]->m_spectral_index; + const std::string Efield_fp_string = "Efield_fp"; + const std::string Efield_cp_string = "Efield_cp"; + const std::string Bfield_fp_string = "Bfield_fp"; + const std::string Bfield_cp_string = "Bfield_cp"; + for (int lev = 0; lev <= finest_level; ++lev) { - BackwardTransformVect(lev, *spectral_solver_fp[lev], E_fp[lev], - Idx.Ex, Idx.Ey, Idx.Ez, m_fill_guards_fields); - BackwardTransformVect(lev, *spectral_solver_fp[lev], B_fp[lev], - Idx.Bx, Idx.By, Idx.Bz, m_fill_guards_fields); + if (m_fields.has_vector(Efield_fp_string, lev)) { + ablastr::fields::VectorField const E_fp = m_fields.get_alldirs(Efield_fp_string, lev); + BackwardTransformVect(lev, *spectral_solver_fp[lev], E_fp, + Idx.Ex, Idx.Ey, Idx.Ez, m_fill_guards_fields); + } + if (m_fields.has_vector(Bfield_fp_string, lev)) { + ablastr::fields::VectorField const B_fp = m_fields.get_alldirs(Bfield_fp_string, lev); + BackwardTransformVect(lev, *spectral_solver_fp[lev], B_fp, + Idx.Bx, Idx.By, Idx.Bz, m_fill_guards_fields); + } if (spectral_solver_cp[lev]) { - BackwardTransformVect(lev, *spectral_solver_cp[lev], E_cp[lev], - Idx.Ex, Idx.Ey, Idx.Ez, m_fill_guards_fields); - BackwardTransformVect(lev, *spectral_solver_cp[lev], B_cp[lev], - Idx.Bx, Idx.By, Idx.Bz, m_fill_guards_fields); + if 
(m_fields.has_vector(Efield_cp_string, lev)) { + ablastr::fields::VectorField const E_cp = m_fields.get_alldirs(Efield_cp_string, lev); + BackwardTransformVect(lev, *spectral_solver_cp[lev], E_cp, + Idx.Ex, Idx.Ey, Idx.Ez, m_fill_guards_fields); + } + if (m_fields.has_vector(Bfield_cp_string, lev)) { + ablastr::fields::VectorField const B_cp = m_fields.get_alldirs(Bfield_cp_string, lev); + BackwardTransformVect(lev, *spectral_solver_cp[lev], B_cp, + Idx.Bx, Idx.By, Idx.Bz, m_fill_guards_fields); + } } } // Damp the fields in the guard cells for (int lev = 0; lev <= finest_level; ++lev) { - DampFieldsInGuards(lev, E_fp[lev], B_fp[lev]); + if (m_fields.has_vector(Efield_fp_string, lev) && m_fields.has_vector(Bfield_fp_string, lev)) { + ablastr::fields::VectorField const E_fp = m_fields.get_alldirs(Efield_fp_string, lev); + ablastr::fields::VectorField const B_fp = m_fields.get_alldirs(Bfield_fp_string, lev); + DampFieldsInGuards(lev, E_fp, B_fp); + } } } void WarpX::PSATDBackwardTransformEBavg ( - const amrex::Vector,3>>& E_avg_fp, - const amrex::Vector,3>>& B_avg_fp, - const amrex::Vector,3>>& E_avg_cp, - const amrex::Vector,3>>& B_avg_cp) + ablastr::fields::MultiLevelVectorField const& E_avg_fp, + ablastr::fields::MultiLevelVectorField const& B_avg_fp, + ablastr::fields::MultiLevelVectorField const& E_avg_cp, + ablastr::fields::MultiLevelVectorField const& B_avg_cp) { const SpectralFieldIndex& Idx = spectral_solver_fp[0]->m_spectral_index; @@ -184,11 +216,15 @@ WarpX::PSATDForwardTransformF () for (int lev = 0; lev <= finest_level; ++lev) { - if (F_fp[lev]) { spectral_solver_fp[lev]->ForwardTransform(lev, *F_fp[lev], Idx.F); } + if (m_fields.has(FieldType::F_fp, lev)) { + spectral_solver_fp[lev]->ForwardTransform(lev, *m_fields.get(FieldType::F_fp, lev), Idx.F); + } if (spectral_solver_cp[lev]) { - if (F_cp[lev]) { spectral_solver_cp[lev]->ForwardTransform(lev, *F_cp[lev], Idx.F); } + if (m_fields.has(FieldType::F_cp, lev)) { + spectral_solver_cp[lev]->ForwardTransform(lev, *m_fields.get(FieldType::F_cp, lev), Idx.F); + } } } } @@ -201,17 +237,17 @@ WarpX::PSATDBackwardTransformF () for (int lev = 0; lev <= finest_level; ++lev) { #ifdef WARPX_DIM_RZ - if (F_fp[lev]) { spectral_solver_fp[lev]->BackwardTransform(lev, *F_fp[lev], Idx.F); } + if (m_fields.has(FieldType::F_fp, lev)) { spectral_solver_fp[lev]->BackwardTransform(lev, *m_fields.get(FieldType::F_fp, lev), Idx.F); } #else - if (F_fp[lev]) { spectral_solver_fp[lev]->BackwardTransform(lev, *F_fp[lev], Idx.F, m_fill_guards_fields); } + if (m_fields.has(FieldType::F_fp, lev)) { spectral_solver_fp[lev]->BackwardTransform(lev, *m_fields.get(FieldType::F_fp, lev), Idx.F, m_fill_guards_fields); } #endif if (spectral_solver_cp[lev]) { #ifdef WARPX_DIM_RZ - if (F_cp[lev]) { spectral_solver_cp[lev]->BackwardTransform(lev, *F_cp[lev], Idx.F); } + if (m_fields.has(FieldType::F_cp, lev)) { spectral_solver_cp[lev]->BackwardTransform(lev, *m_fields.get(FieldType::F_cp, lev), Idx.F); } #else - if (F_cp[lev]) { spectral_solver_cp[lev]->BackwardTransform(lev, *F_cp[lev], Idx.F, m_fill_guards_fields); } + if (m_fields.has(FieldType::F_cp, lev)) { spectral_solver_cp[lev]->BackwardTransform(lev, *m_fields.get(FieldType::F_cp, lev), Idx.F, m_fill_guards_fields); } #endif } } @@ -219,7 +255,7 @@ WarpX::PSATDBackwardTransformF () // Damp the field in the guard cells for (int lev = 0; lev <= finest_level; ++lev) { - DampFieldsInGuards(lev, F_fp[lev]); + DampFieldsInGuards(lev, m_fields.get(FieldType::F_fp, lev)); } } @@ -230,11 +266,15 @@ 
WarpX::PSATDForwardTransformG () for (int lev = 0; lev <= finest_level; ++lev) { - if (G_fp[lev]) { spectral_solver_fp[lev]->ForwardTransform(lev, *G_fp[lev], Idx.G); } + if (m_fields.has(FieldType::G_fp, lev)) { + spectral_solver_fp[lev]->ForwardTransform(lev, *m_fields.get(FieldType::G_fp, lev), Idx.G); + } if (spectral_solver_cp[lev]) { - if (G_cp[lev]) { spectral_solver_cp[lev]->ForwardTransform(lev, *G_cp[lev], Idx.G); } + if (m_fields.has(FieldType::G_cp, lev)) { + spectral_solver_fp[lev]->ForwardTransform(lev, *m_fields.get(FieldType::G_cp, lev), Idx.G); + } } } } @@ -246,34 +286,38 @@ WarpX::PSATDBackwardTransformG () for (int lev = 0; lev <= finest_level; ++lev) { + if (m_fields.has(FieldType::G_fp, lev)) { + MultiFab* G_fp = m_fields.get(FieldType::G_fp, lev); #ifdef WARPX_DIM_RZ - if (G_fp[lev]) { spectral_solver_fp[lev]->BackwardTransform(lev, *G_fp[lev], Idx.G); } + spectral_solver_fp[lev]->BackwardTransform(lev, *G_fp, Idx.G); #else - if (G_fp[lev]) { spectral_solver_fp[lev]->BackwardTransform(lev, *G_fp[lev], Idx.G, m_fill_guards_fields); } + spectral_solver_fp[lev]->BackwardTransform(lev, *G_fp, Idx.G, m_fill_guards_fields); #endif + DampFieldsInGuards(lev, G_fp); + } + if (spectral_solver_cp[lev]) { + if (m_fields.has(FieldType::G_cp, lev)) { + MultiFab* G_cp = m_fields.get(FieldType::G_cp, lev); #ifdef WARPX_DIM_RZ - if (G_cp[lev]) { spectral_solver_cp[lev]->BackwardTransform(lev, *G_cp[lev], Idx.G); } + spectral_solver_fp[lev]->BackwardTransform(lev, *G_cp, Idx.G); #else - if (G_cp[lev]) { spectral_solver_cp[lev]->BackwardTransform(lev, *G_cp[lev], Idx.G, m_fill_guards_fields); } + spectral_solver_fp[lev]->BackwardTransform(lev, *G_cp, Idx.G, m_fill_guards_fields); #endif + } } } - - // Damp the field in the guard cells - for (int lev = 0; lev <= finest_level; ++lev) - { - DampFieldsInGuards(lev, G_fp[lev]); - } } void WarpX::PSATDForwardTransformJ ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, + std::string const & J_fp_string, + std::string const & J_cp_string, const bool apply_kspace_filter) { + if (!m_fields.has_vector(J_fp_string, 0)) { return; } + SpectralFieldIndex Idx; int idx_jx, idx_jy, idx_jz; @@ -285,7 +329,10 @@ void WarpX::PSATDForwardTransformJ ( idx_jy = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy_mid); idx_jz = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jz_new) : static_cast(Idx.Jz_mid); - ForwardTransformVect(lev, *spectral_solver_fp[lev], J_fp[lev], idx_jx, idx_jy, idx_jz); + if (m_fields.has_vector(J_fp_string, lev)) { + ablastr::fields::VectorField const J_fp = m_fields.get_alldirs(J_fp_string, lev); + ForwardTransformVect(lev, *spectral_solver_fp[lev], J_fp, idx_jx, idx_jy, idx_jz); + } if (spectral_solver_cp[lev]) { @@ -295,7 +342,10 @@ void WarpX::PSATDForwardTransformJ ( idx_jy = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy_mid); idx_jz = (J_in_time == JInTime::Linear) ? 
static_cast<int>(Idx.Jz_new) : static_cast<int>(Idx.Jz_mid);

-            ForwardTransformVect(lev, *spectral_solver_cp[lev], J_cp[lev], idx_jx, idx_jy, idx_jz);
+            if (m_fields.has_vector(J_cp_string, lev)) {
+                ablastr::fields::VectorField const J_cp = m_fields.get_alldirs(J_cp_string, lev);
+                ForwardTransformVect(lev, *spectral_solver_cp[lev], J_cp, idx_jx, idx_jy, idx_jz);
+            }
         }
     }
@@ -331,9 +381,11 @@ void WarpX::PSATDForwardTransformJ (
 }

 void WarpX::PSATDBackwardTransformJ (
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_fp,
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_cp)
+    std::string const & J_fp_string,
+    std::string const & J_cp_string)
 {
+    if (!m_fields.has_vector(J_fp_string, 0)) { return; }
+
     SpectralFieldIndex Idx;
     int idx_jx, idx_jy, idx_jz;
@@ -347,8 +399,11 @@ void WarpX::PSATDBackwardTransformJ (
         idx_jy = static_cast<int>(Idx.Jy_mid);
         idx_jz = static_cast<int>(Idx.Jz_mid);

-        BackwardTransformVect(lev, *spectral_solver_fp[lev], J_fp[lev],
-                              idx_jx, idx_jy, idx_jz, m_fill_guards_current);
+        if (m_fields.has_vector(J_fp_string, lev)) {
+            ablastr::fields::VectorField const J_fp = m_fields.get_alldirs(J_fp_string, lev);
+            BackwardTransformVect(lev, *spectral_solver_fp[lev], J_fp,
+                                  idx_jx, idx_jy, idx_jz, m_fill_guards_current);
+        }

         if (spectral_solver_cp[lev])
         {
@@ -360,26 +415,35 @@ void WarpX::PSATDBackwardTransformJ (
             idx_jy = static_cast<int>(Idx.Jy_mid);
             idx_jz = static_cast<int>(Idx.Jz_mid);

-            BackwardTransformVect(lev, *spectral_solver_cp[lev], J_cp[lev],
-                                  idx_jx, idx_jy, idx_jz, m_fill_guards_current);
+            if (m_fields.has_vector(J_cp_string, lev)) {
+                ablastr::fields::VectorField const J_cp = m_fields.get_alldirs(J_cp_string, lev);
+                BackwardTransformVect(lev, *spectral_solver_cp[lev], J_cp,
+                                      idx_jx, idx_jy, idx_jz, m_fill_guards_current);
+            }
         }
     }
 }

 void WarpX::PSATDForwardTransformRho (
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_fp,
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_cp,
+    std::string const & charge_fp_string,
+    std::string const & charge_cp_string,
     const int icomp, const int dcomp, const bool apply_kspace_filter)
 {
-    if (charge_fp[0] == nullptr) { return; }
+    if (!m_fields.has(charge_fp_string, 0)) { return; }

     for (int lev = 0; lev <= finest_level; ++lev)
     {
-        if (charge_fp[lev]) { spectral_solver_fp[lev]->ForwardTransform(lev, *charge_fp[lev], dcomp, icomp); }
+        if (m_fields.has(charge_fp_string, lev)) {
+            amrex::MultiFab const & charge_fp = *m_fields.get(charge_fp_string, lev);
+            spectral_solver_fp[lev]->ForwardTransform(lev, charge_fp, dcomp, icomp);
+        }

         if (spectral_solver_cp[lev])
         {
-            if (charge_cp[lev]) { spectral_solver_cp[lev]->ForwardTransform(lev, *charge_cp[lev], dcomp, icomp); }
+            if (m_fields.has(charge_cp_string, lev)) {
+                amrex::MultiFab const & charge_cp = *m_fields.get(charge_cp_string, lev);
+                spectral_solver_cp[lev]->ForwardTransform(lev, charge_cp, dcomp, icomp);
+            }
         }
     }
@@ -430,6 +494,8 @@ void WarpX::PSATDVayDeposition ()

 void WarpX::PSATDSubtractCurrentPartialSumsAvg ()
 {
+    using ablastr::fields::Direction;
+
     // Subtraction of cumulative sum for Vay deposition
     // implemented only in 2D and 3D Cartesian geometry
 #if !defined (WARPX_DIM_1D_Z) && !defined (WARPX_DIM_RZ)
@@ -441,15 +507,15 @@ void WarpX::PSATDSubtractCurrentPartialSumsAvg ()
     {
         const std::array<Real,3>& dx = WarpX::CellSize(lev);

-        amrex::MultiFab const& Dx = *current_fp_vay[lev][0];
-        amrex::MultiFab const& Dy = *current_fp_vay[lev][1];
-        amrex::MultiFab const& Dz = *current_fp_vay[lev][2];
+        amrex::MultiFab const& Dx = *m_fields.get(FieldType::current_fp_vay, Direction{0}, lev);
+        amrex::MultiFab const& Dy = *m_fields.get(FieldType::current_fp_vay, Direction{1}, lev);
+
amrex::MultiFab const& Dz = *m_fields.get(FieldType::current_fp_vay, Direction{2}, lev); #if defined (WARPX_DIM_XZ) amrex::ignore_unused(Dy); #endif - amrex::MultiFab& Jx = *current_fp[lev][0]; + amrex::MultiFab& Jx = *m_fields.get(FieldType::current_fp, Direction{0}, lev); #ifdef AMREX_USE_OMP @@ -480,7 +546,7 @@ void WarpX::PSATDSubtractCurrentPartialSumsAvg () #if defined (WARPX_DIM_3D) // Subtract average of cumulative sum from Jy - amrex::MultiFab& Jy = *current_fp[lev][1]; + amrex::MultiFab& Jy = *m_fields.get(FieldType::current_fp, Direction{1}, lev);; for (amrex::MFIter mfi(Jy); mfi.isValid(); ++mfi) { const amrex::Box& bx = mfi.fabbox(); @@ -505,7 +571,7 @@ void WarpX::PSATDSubtractCurrentPartialSumsAvg () #endif // Subtract average of cumulative sum from Jz - amrex::MultiFab& Jz = *current_fp[lev][2]; + amrex::MultiFab& Jz = *m_fields.get(FieldType::current_fp, Direction{2}, lev); for (amrex::MFIter mfi(Jz); mfi.isValid(); ++mfi) { const amrex::Box& bx = mfi.fabbox(); @@ -658,46 +724,55 @@ WarpX::PushPSATD () const int rho_old = spectral_solver_fp[0]->m_spectral_index.rho_old; const int rho_new = spectral_solver_fp[0]->m_spectral_index.rho_new; + std::string const rho_fp_string = "rho_fp"; + std::string const rho_cp_string = "rho_cp"; + + const ablastr::fields::MultiLevelVectorField current_fp = m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level); + std::string current_fp_string = "current_fp"; + std::string const current_cp_string = "current_cp"; + if (fft_periodic_single_box) { if (current_correction) { // FFT of J and rho - PSATDForwardTransformJ(current_fp, current_cp); - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_old); - PSATDForwardTransformRho(rho_fp, rho_cp, 1, rho_new); + PSATDForwardTransformJ(current_fp_string, current_cp_string); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_old); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new); // Correct J in k-space PSATDCurrentCorrection(); // Inverse FFT of J - PSATDBackwardTransformJ(current_fp, current_cp); + PSATDBackwardTransformJ(current_fp_string, current_cp_string); } else if (current_deposition_algo == CurrentDepositionAlgo::Vay) { // FFT of D and rho (if used) // TODO Replace current_cp with current_cp_vay once Vay deposition is implemented with MR - PSATDForwardTransformJ(current_fp_vay, current_cp); - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_old); - PSATDForwardTransformRho(rho_fp, rho_cp, 1, rho_new); + current_fp_string = "current_fp_vay"; + PSATDForwardTransformJ(current_fp_string, current_cp_string); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_old); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new); // Compute J from D in k-space PSATDVayDeposition(); // Inverse FFT of J, subtract cumulative sums of D - PSATDBackwardTransformJ(current_fp, current_cp); + current_fp_string = "current_fp"; + PSATDBackwardTransformJ(current_fp_string, current_cp_string); // TODO Cumulative sums need to be fixed with periodic single box PSATDSubtractCurrentPartialSumsAvg(); // FFT of J after subtraction of cumulative sums - PSATDForwardTransformJ(current_fp, current_cp); + PSATDForwardTransformJ(current_fp_string, current_cp_string); } else // no current correction, no Vay deposition { // FFT of J and rho (if used) - PSATDForwardTransformJ(current_fp, current_cp); - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_old); - PSATDForwardTransformRho(rho_fp, rho_cp, 1, rho_new); + PSATDForwardTransformJ(current_fp_string, 
current_cp_string); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_old); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new); } } else // no periodic single box @@ -709,35 +784,37 @@ WarpX::PushPSATD () // In RZ geometry, do not apply filtering here, since it is // applied in the subsequent calls to these functions (below) const bool apply_kspace_filter = false; - PSATDForwardTransformJ(current_fp, current_cp, apply_kspace_filter); - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_old, apply_kspace_filter); - PSATDForwardTransformRho(rho_fp, rho_cp, 1, rho_new, apply_kspace_filter); + PSATDForwardTransformJ(current_fp_string, current_cp_string, apply_kspace_filter); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_old, apply_kspace_filter); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new, apply_kspace_filter); #else - PSATDForwardTransformJ(current_fp, current_cp); - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_old); - PSATDForwardTransformRho(rho_fp, rho_cp, 1, rho_new); + PSATDForwardTransformJ(current_fp_string, current_cp_string); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_old); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new); #endif // Correct J in k-space PSATDCurrentCorrection(); // Inverse FFT of J - PSATDBackwardTransformJ(current_fp, current_cp); + PSATDBackwardTransformJ(current_fp_string, current_cp_string); // Synchronize J and rho - SyncCurrent(current_fp, current_cp, current_buf); - SyncRho(rho_fp, rho_cp, charge_buf); + SyncCurrent("current_fp"); + SyncRho(); } else if (current_deposition_algo == CurrentDepositionAlgo::Vay) { // FFT of D - PSATDForwardTransformJ(current_fp_vay, current_cp); + current_fp_string = "current_fp_vay"; + PSATDForwardTransformJ(current_fp_string, current_cp_string); // Compute J from D in k-space PSATDVayDeposition(); // Inverse FFT of J, subtract cumulative sums of D - PSATDBackwardTransformJ(current_fp, current_cp); + current_fp_string = "current_fp"; + PSATDBackwardTransformJ(current_fp_string, current_cp_string); PSATDSubtractCurrentPartialSumsAvg(); // Synchronize J and rho (if used). 
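// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch (the PushPSATD hunks continue at the
// next hunk below): the changes above stop passing MultiFab vectors around and
// instead pass registry keys such as "current_fp" or "current_fp_vay", so the
// same transform can be retargeted by swapping a string. Below is a
// hypothetical miniature register built on std::map; only the has()/get() call
// shape imitates the real ablastr::fields::MultiFabRegister.
#include <map>
#include <string>
#include <utility>

struct FabStub {};  // stand-in for amrex::MultiFab

class RegisterSketch {
    std::map<std::pair<std::string,int>, FabStub> m_mfs;  // key: (name, level)
public:
    void alloc (std::string const& name, int lev) { m_mfs[{name, lev}] = FabStub{}; }
    bool has (std::string const& name, int lev) const { return m_mfs.count({name, lev}) != 0; }
    FabStub* get (std::string const& name, int lev) {
        auto it = m_mfs.find({name, lev});
        return it == m_mfs.end() ? nullptr : &it->second;
    }
};

// Mirrors the guard-then-fetch pattern used by PSATDForwardTransformJ above.
void transformJ (RegisterSketch& fields, std::string const& key, int lev) {
    if (!fields.has(key, lev)) { return; }  // optional fields may be absent
    FabStub* J = fields.get(key, lev);      // non-owning; the register keeps ownership
    (void) J;                               // the FFT of J would act on *J here
}

int main () {
    RegisterSketch fields;
    fields.alloc("current_fp", 0);
    transformJ(fields, "current_fp_vay", 0); // absent: returns early, like the Vay branch
    transformJ(fields, "current_fp", 0);     // present: transformed
}
// ---------------------------------------------------------------------------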
@@ -747,17 +824,17 @@ WarpX::PushPSATD () // TODO This works only without mesh refinement const int lev = 0; SumBoundaryJ(current_fp, lev, Geom(lev).periodicity()); - SyncRho(rho_fp, rho_cp, charge_buf); + SyncRho(); } // FFT of J and rho (if used) - PSATDForwardTransformJ(current_fp, current_cp); - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_old); - PSATDForwardTransformRho(rho_fp, rho_cp, 1, rho_new); + PSATDForwardTransformJ(current_fp_string, current_cp_string); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_old); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new); } // FFT of E and B - PSATDForwardTransformEB(Efield_fp, Bfield_fp, Efield_cp, Bfield_cp); + PSATDForwardTransformEB(); #ifdef WARPX_DIM_RZ if (pml_rz[0]) { pml_rz[0]->PushPSATD(0); } @@ -771,8 +848,12 @@ WarpX::PushPSATD () PSATDPushSpectralFields(); // Inverse FFT of E, B, F, and G - PSATDBackwardTransformEB(Efield_fp, Bfield_fp, Efield_cp, Bfield_cp); + PSATDBackwardTransformEB(); if (WarpX::fft_do_time_averaging) { + auto Efield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level); + auto Bfield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level); + auto Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level); + auto Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level); PSATDBackwardTransformEBavg(Efield_avg_fp, Bfield_avg_fp, Efield_avg_cp, Bfield_avg_cp); } if (WarpX::do_dive_cleaning) { PSATDBackwardTransformF(); } @@ -783,7 +864,7 @@ WarpX::PushPSATD () { if (pml[lev] && pml[lev]->ok()) { - pml[lev]->PushPSATD(lev); + pml[lev]->PushPSATD(m_fields, lev); } ApplyEfieldBoundary(lev, PatchType::fine); if (lev > 0) { ApplyEfieldBoundary(lev, PatchType::coarse); } @@ -818,26 +899,27 @@ WarpX::EvolveB (int lev, amrex::Real a_dt, DtType a_dt_type) void WarpX::EvolveB (int lev, PatchType patch_type, amrex::Real a_dt, DtType a_dt_type) { - // Evolve B field in regular cells if (patch_type == PatchType::fine) { - m_fdtd_solver_fp[lev]->EvolveB(Bfield_fp[lev], Efield_fp[lev], G_fp[lev], - m_face_areas[lev], m_area_mod[lev], ECTRhofield[lev], Venl[lev], - m_flag_info_face[lev], m_borrowing[lev], lev, a_dt); + m_fdtd_solver_fp[lev]->EvolveB( m_fields, + lev, + patch_type, + m_flag_info_face[lev], m_borrowing[lev], a_dt ); } else { - m_fdtd_solver_cp[lev]->EvolveB(Bfield_cp[lev], Efield_cp[lev], G_cp[lev], - m_face_areas[lev], m_area_mod[lev], ECTRhofield[lev], Venl[lev], - m_flag_info_face[lev], m_borrowing[lev], lev, a_dt); + m_fdtd_solver_cp[lev]->EvolveB( m_fields, + lev, + patch_type, + m_flag_info_face[lev], m_borrowing[lev], a_dt ); } // Evolve B field in PML cells if (do_pml && pml[lev]->ok()) { if (patch_type == PatchType::fine) { m_fdtd_solver_fp[lev]->EvolveBPML( - pml[lev]->GetB_fp(), pml[lev]->GetE_fp(), a_dt, WarpX::do_dive_cleaning); + m_fields, patch_type, lev, a_dt, WarpX::do_dive_cleaning); } else { m_fdtd_solver_cp[lev]->EvolveBPML( - pml[lev]->GetB_cp(), pml[lev]->GetE_cp(), a_dt, WarpX::do_dive_cleaning); + m_fields, patch_type, lev, a_dt, WarpX::do_dive_cleaning); } } @@ -873,31 +955,33 @@ WarpX::EvolveE (int lev, PatchType patch_type, amrex::Real a_dt) { // Evolve E field in regular cells if (patch_type == PatchType::fine) { - m_fdtd_solver_fp[lev]->EvolveE(Efield_fp[lev], Bfield_fp[lev], - current_fp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], - F_fp[lev], lev, a_dt ); + m_fdtd_solver_fp[lev]->EvolveE( m_fields, + lev, + 
patch_type, + m_fields.get_alldirs(FieldType::Efield_fp, lev), + a_dt ); } else { - m_fdtd_solver_cp[lev]->EvolveE(Efield_cp[lev], Bfield_cp[lev], - current_cp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], - F_cp[lev], lev, a_dt ); + m_fdtd_solver_cp[lev]->EvolveE( m_fields, + lev, + patch_type, + m_fields.get_alldirs(FieldType::Efield_cp, lev), + a_dt ); } // Evolve E field in PML cells if (do_pml && pml[lev]->ok()) { if (patch_type == PatchType::fine) { m_fdtd_solver_fp[lev]->EvolveEPML( - pml[lev]->GetE_fp(), pml[lev]->GetB_fp(), - pml[lev]->Getj_fp(), pml[lev]->Get_edge_lengths(), - pml[lev]->GetF_fp(), + m_fields, + patch_type, + lev, pml[lev]->GetMultiSigmaBox_fp(), a_dt, pml_has_particles ); } else { m_fdtd_solver_cp[lev]->EvolveEPML( - pml[lev]->GetE_cp(), pml[lev]->GetB_cp(), - pml[lev]->Getj_cp(), pml[lev]->Get_edge_lengths(), - pml[lev]->GetF_cp(), + m_fields, + patch_type, + lev, pml[lev]->GetMultiSigmaBox_cp(), a_dt, pml_has_particles ); } @@ -910,11 +994,17 @@ WarpX::EvolveE (int lev, PatchType patch_type, amrex::Real a_dt) #ifdef AMREX_USE_EB if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { if (patch_type == PatchType::fine) { - m_fdtd_solver_fp[lev]->EvolveECTRho(Efield_fp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], lev); + m_fdtd_solver_fp[lev]->EvolveECTRho( m_fields.get_alldirs(FieldType::Efield_fp, lev), + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), + m_fields.get_alldirs(FieldType::ECTRhofield, lev), + lev ); } else { - m_fdtd_solver_cp[lev]->EvolveECTRho(Efield_cp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], lev); + m_fdtd_solver_cp[lev]->EvolveECTRho( m_fields.get_alldirs(FieldType::Efield_cp, lev), + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), + m_fields.get_alldirs(FieldType::ECTRhofield, lev), + lev); } } #endif @@ -952,21 +1042,27 @@ WarpX::EvolveF (int lev, PatchType patch_type, amrex::Real a_dt, DtType a_dt_typ // Evolve F field in regular cells if (patch_type == PatchType::fine) { - m_fdtd_solver_fp[lev]->EvolveF( F_fp[lev], Efield_fp[lev], - rho_fp[lev], rhocomp, a_dt ); + m_fdtd_solver_fp[lev]->EvolveF( m_fields.get(FieldType::F_fp, lev), + m_fields.get_alldirs(FieldType::Efield_fp, lev), + m_fields.get(FieldType::rho_fp,lev), rhocomp, a_dt ); } else { - m_fdtd_solver_cp[lev]->EvolveF( F_cp[lev], Efield_cp[lev], - rho_cp[lev], rhocomp, a_dt ); + m_fdtd_solver_cp[lev]->EvolveF( m_fields.get(FieldType::F_cp, lev), + m_fields.get_alldirs(FieldType::Efield_cp, lev), + m_fields.get(FieldType::rho_cp,lev), rhocomp, a_dt ); } // Evolve F field in PML cells if (do_pml && pml[lev]->ok()) { if (patch_type == PatchType::fine) { m_fdtd_solver_fp[lev]->EvolveFPML( - pml[lev]->GetF_fp(), pml[lev]->GetE_fp(), a_dt ); + m_fields.get(FieldType::pml_F_fp, lev), + m_fields.get_alldirs(FieldType::pml_E_fp, lev), + a_dt ); } else { m_fdtd_solver_cp[lev]->EvolveFPML( - pml[lev]->GetF_cp(), pml[lev]->GetE_cp(), a_dt ); + m_fields.get(FieldType::pml_F_cp, lev), + m_fields.get_alldirs(FieldType::pml_E_cp, lev), + a_dt ); } } } @@ -1005,11 +1101,17 @@ WarpX::EvolveG (int lev, PatchType patch_type, amrex::Real a_dt, DtType /*a_dt_t // Evolve G field in regular cells if (patch_type == PatchType::fine) { - m_fdtd_solver_fp[lev]->EvolveG(G_fp[lev], Bfield_fp[lev], a_dt); + ablastr::fields::MultiLevelVectorField const& Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, 
finest_level); + m_fdtd_solver_fp[lev]->EvolveG( + m_fields.get(FieldType::G_fp, lev), + Bfield_fp[lev], a_dt); } else // coarse patch { - m_fdtd_solver_cp[lev]->EvolveG(G_cp[lev], Bfield_cp[lev], a_dt); + ablastr::fields::MultiLevelVectorField const& Bfield_cp_new = m_fields.get_mr_levels_alldirs(FieldType::Bfield_cp, finest_level); + m_fdtd_solver_cp[lev]->EvolveG( + m_fields.get(FieldType::G_cp, lev), + Bfield_cp_new[lev], a_dt); } // TODO Evolution in PML cells will go here @@ -1045,23 +1147,25 @@ WarpX::MacroscopicEvolveE (int lev, PatchType patch_type, amrex::Real a_dt) { ); m_fdtd_solver_fp[lev]->MacroscopicEvolveE( - Efield_fp[lev], Bfield_fp[lev], - current_fp[lev], m_edge_lengths[lev], + m_fields.get_alldirs(FieldType::Efield_fp, lev), + m_fields.get_alldirs(FieldType::Bfield_fp, lev), + m_fields.get_alldirs(FieldType::current_fp, lev), + m_fields.get_alldirs(FieldType::edge_lengths, lev), a_dt, m_macroscopic_properties); if (do_pml && pml[lev]->ok()) { if (patch_type == PatchType::fine) { m_fdtd_solver_fp[lev]->EvolveEPML( - pml[lev]->GetE_fp(), pml[lev]->GetB_fp(), - pml[lev]->Getj_fp(), pml[lev]->Get_edge_lengths(), - pml[lev]->GetF_fp(), + m_fields, + patch_type, + lev, pml[lev]->GetMultiSigmaBox_fp(), a_dt, pml_has_particles ); } else { m_fdtd_solver_cp[lev]->EvolveEPML( - pml[lev]->GetE_cp(), pml[lev]->GetB_cp(), - pml[lev]->Getj_cp(), pml[lev]->Get_edge_lengths(), - pml[lev]->GetF_cp(), + m_fields, + patch_type, + lev, pml[lev]->GetMultiSigmaBox_cp(), a_dt, pml_has_particles ); } @@ -1072,8 +1176,8 @@ WarpX::MacroscopicEvolveE (int lev, PatchType patch_type, amrex::Real a_dt) { void WarpX::DampFieldsInGuards(const int lev, - const std::array,3>& Efield, - const std::array,3>& Bfield) { + const ablastr::fields::VectorField& Efield, + const ablastr::fields::VectorField& Bfield) { // Loop over dimensions for (int dampdir = 0 ; dampdir < AMREX_SPACEDIM ; dampdir++) @@ -1169,7 +1273,7 @@ WarpX::DampFieldsInGuards(const int lev, } } -void WarpX::DampFieldsInGuards(const int lev, std::unique_ptr& mf) +void WarpX::DampFieldsInGuards(const int lev, amrex::MultiFab* mf) { // Loop over dimensions for (int dampdir = 0; dampdir < AMREX_SPACEDIM; dampdir++) diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index c16f0193b8d..556b8f8fca4 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -7,6 +7,7 @@ * License: BSD-3-Clause-LBNL */ #include "Evolve/WarpXDtType.H" +#include "Fields.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" #include "Particles/MultiParticleContainer.H" #include "Utils/TextMsg.H" @@ -15,10 +16,16 @@ #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" +#include + + using namespace amrex; void WarpX::HybridPICEvolveFields () { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + WARPX_PROFILE("WarpX::HybridPICEvolveFields()"); // The below deposition is hard coded for a single level simulation @@ -28,15 +35,19 @@ void WarpX::HybridPICEvolveFields () // The particles have now been pushed to their t_{n+1} positions. // Perform charge deposition in component 0 of rho_fp at t_{n+1}. - mypc->DepositCharge(rho_fp, 0._rt); + mypc->DepositCharge(m_fields.get_mr_levels(FieldType::rho_fp, finest_level), 0._rt); // Perform current deposition at t_{n+1/2}. 
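// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch (the DepositCurrent hunk resumes
// just below): DampFieldsInGuards() above now takes a raw amrex::MultiFab*
// instead of std::unique_ptr<amrex::MultiFab>&, because ownership stays with
// the field register and callees only borrow. A minimal illustration of that
// idiom; Fab and Damp() are hypothetical stand-ins.
#include <memory>
#include <vector>

struct Fab { std::vector<double> guard; };  // stand-in for amrex::MultiFab

// A raw pointer documents "borrowed, possibly null (field not allocated)".
void Damp (Fab* mf) {
    if (mf == nullptr) { return; }             // e.g. F_fp absent when divE cleaning is off
    for (double& v : mf->guard) { v *= 0.5; }  // damp guard-cell values in place
}

int main () {
    auto owned = std::make_unique<Fab>();  // the register keeps ownership
    owned->guard = {1.0, 2.0};
    Damp(owned.get());                     // caller lends access only
    Damp(nullptr);                         // absent optional field: no-op
}
// ---------------------------------------------------------------------------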
- mypc->DepositCurrent(current_fp, dt[0], -0.5_rt * dt[0]); + mypc->DepositCurrent(m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), dt[0], -0.5_rt * dt[0]); // Deposit cold-relativistic fluid charge and current if (do_fluid_species) { int const lev = 0; - myfl->DepositCharge(lev, *rho_fp[lev]); - myfl->DepositCurrent(lev, *current_fp[lev][0], *current_fp[lev][1], *current_fp[lev][2]); + myfl->DepositCharge(m_fields, *m_fields.get(FieldType::rho_fp, lev), lev); + myfl->DepositCurrent(m_fields, + *m_fields.get(FieldType::current_fp, Direction{0}, lev), + *m_fields.get(FieldType::current_fp, Direction{1}, lev), + *m_fields.get(FieldType::current_fp, Direction{2}, lev), + lev); } // Synchronize J and rho: @@ -49,7 +60,7 @@ void WarpX::HybridPICEvolveFields () // a nodal grid for (int lev = 0; lev <= finest_level; ++lev) { for (int idim = 0; idim < 3; ++idim) { - current_fp[lev][idim]->FillBoundary(Geom(lev).periodicity()); + m_fields.get(FieldType::current_fp, Direction{idim}, lev)->FillBoundary(Geom(lev).periodicity()); } } @@ -57,11 +68,12 @@ void WarpX::HybridPICEvolveFields () const int sub_steps = m_hybrid_pic_model->m_substeps; // Get the external current - m_hybrid_pic_model->GetCurrentExternal(m_edge_lengths); + m_hybrid_pic_model->GetCurrentExternal( + m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level)); // Reference hybrid-PIC multifabs - auto& rho_fp_temp = m_hybrid_pic_model->rho_fp_temp; - auto& current_fp_temp = m_hybrid_pic_model->current_fp_temp; + ablastr::fields::MultiLevelScalarField rho_fp_temp = m_fields.get_mr_levels(FieldType::hybrid_rho_fp_temp, finest_level); + ablastr::fields::MultiLevelVectorField current_fp_temp = m_fields.get_mr_levels_alldirs(FieldType::hybrid_current_fp_temp, finest_level); // During the above deposition the charge and current density were updated // so that, at this time, we have rho^{n} in rho_fp_temp, rho{n+1} in the @@ -82,7 +94,7 @@ void WarpX::HybridPICEvolveFields () MultiFab::LinComb( *current_fp_temp[lev][idim], 0.5_rt, *current_fp_temp[lev][idim], 0, - 0.5_rt, *current_fp[lev][idim], 0, + 0.5_rt, *m_fields.get(FieldType::current_fp, Direction{idim}, lev), 0, 0, 1, current_fp_temp[lev][idim]->nGrowVect() ); } @@ -94,8 +106,11 @@ void WarpX::HybridPICEvolveFields () for (int sub_step = 0; sub_step < sub_steps; sub_step++) { m_hybrid_pic_model->BfieldEvolveRK( - Bfield_fp, Efield_fp, current_fp_temp, rho_fp_temp, - m_edge_lengths, 0.5_rt/sub_steps*dt[0], + m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), + current_fp_temp, rho_fp_temp, + m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), + 0.5_rt/sub_steps*dt[0], DtType::FirstHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points ); @@ -109,7 +124,7 @@ void WarpX::HybridPICEvolveFields () // the result into the 0'th index of `rho_fp_temp[lev]` MultiFab::LinComb( *rho_fp_temp[lev], 0.5_rt, *rho_fp_temp[lev], 0, - 0.5_rt, *rho_fp[lev], 0, 0, 1, rho_fp_temp[lev]->nGrowVect() + 0.5_rt, *m_fields.get(FieldType::rho_fp, lev), 0, 0, 1, rho_fp_temp[lev]->nGrowVect() ); } @@ -117,8 +132,12 @@ void WarpX::HybridPICEvolveFields () for (int sub_step = 0; sub_step < sub_steps; sub_step++) { m_hybrid_pic_model->BfieldEvolveRK( - Bfield_fp, Efield_fp, current_fp, rho_fp_temp, - m_edge_lengths, 0.5_rt/sub_steps*dt[0], + m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), 
+ m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + rho_fp_temp, + m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), + 0.5_rt/sub_steps*dt[0], DtType::SecondHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points ); @@ -136,7 +155,7 @@ void WarpX::HybridPICEvolveFields () MultiFab::LinComb( *current_fp_temp[lev][idim], -1._rt, *current_fp_temp[lev][idim], 0, - 2._rt, *current_fp[lev][idim], 0, + 2._rt, *m_fields.get(FieldType::current_fp, Direction{idim}, lev), 0, 0, 1, current_fp_temp[lev][idim]->nGrowVect() ); } @@ -146,9 +165,15 @@ void WarpX::HybridPICEvolveFields () m_hybrid_pic_model->CalculateElectronPressure(); // Update the E field to t=n+1 using the extrapolated J_i^n+1 value - m_hybrid_pic_model->CalculateCurrentAmpere(Bfield_fp, m_edge_lengths); + m_hybrid_pic_model->CalculateCurrentAmpere( + m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level)); m_hybrid_pic_model->HybridPICSolveE( - Efield_fp, current_fp_temp, Bfield_fp, rho_fp, m_edge_lengths, false + m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), + current_fp_temp, + m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), false ); FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); @@ -158,10 +183,10 @@ void WarpX::HybridPICEvolveFields () for (int lev = 0; lev <= finest_level; ++lev) { // copy 1 component value starting at index 0 to index 0 - MultiFab::Copy(*rho_fp_temp[lev], *rho_fp[lev], + MultiFab::Copy(*rho_fp_temp[lev], *m_fields.get(FieldType::rho_fp, lev), 0, 0, 1, rho_fp_temp[lev]->nGrowVect()); for (int idim = 0; idim < 3; ++idim) { - MultiFab::Copy(*current_fp_temp[lev][idim], *current_fp[lev][idim], + MultiFab::Copy(*current_fp_temp[lev][idim], *m_fields.get(FieldType::current_fp, Direction{idim}, lev), 0, 0, 1, current_fp_temp[lev][idim]->nGrowVect()); } } @@ -169,12 +194,14 @@ void WarpX::HybridPICEvolveFields () void WarpX::HybridPICDepositInitialRhoAndJ () { - auto& rho_fp_temp = m_hybrid_pic_model->rho_fp_temp; - auto& current_fp_temp = m_hybrid_pic_model->current_fp_temp; + using warpx::fields::FieldType; + + ablastr::fields::MultiLevelScalarField rho_fp_temp = m_fields.get_mr_levels(FieldType::hybrid_rho_fp_temp, finest_level); + ablastr::fields::MultiLevelVectorField current_fp_temp = m_fields.get_mr_levels_alldirs(FieldType::hybrid_current_fp_temp, finest_level); mypc->DepositCharge(rho_fp_temp, 0._rt); mypc->DepositCurrent(current_fp_temp, dt[0], 0._rt); - SyncRho(rho_fp_temp, rho_cp, charge_buf); - SyncCurrent(current_fp_temp, current_cp, current_buf); + SyncRho(rho_fp_temp, m_fields.get_mr_levels(FieldType::rho_cp, finest_level), m_fields.get_mr_levels(FieldType::rho_buf, finest_level)); + SyncCurrent("hybrid_current_fp_temp"); for (int lev=0; lev <= finest_level; ++lev) { // SyncCurrent does not include a call to FillBoundary, but it is needed // for the hybrid-PIC solver since current values are interpolated to @@ -183,12 +210,12 @@ void WarpX::HybridPICDepositInitialRhoAndJ () current_fp_temp[lev][1]->FillBoundary(Geom(lev).periodicity()); current_fp_temp[lev][2]->FillBoundary(Geom(lev).periodicity()); - ApplyRhofieldBoundary(lev, rho_fp_temp[lev].get(), PatchType::fine); + ApplyRhofieldBoundary(lev, rho_fp_temp[lev], PatchType::fine); // Set current density at PEC boundaries, if needed. 
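// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch (the ApplyJfieldBoundary call
// continues below): per-component access switches from container indexing like
// current_fp_temp[lev][idim] to Direction-keyed lookups. A hypothetical
// miniature register; only the get(name, Direction, lev) shape follows the
// real ablastr API.
#include <array>
#include <cassert>
#include <string>

struct Direction { int dir; };  // 0=x, 1=y, 2=z, as in ablastr::fields::Direction

struct Fab { double val = 0.0; };  // stand-in for amrex::MultiFab

class VectorFieldSketch {
    std::array<Fab,3> m_comp;  // one MultiFab per spatial component in the real code
public:
    Fab* get (std::string const& /*name*/, Direction d, int /*lev*/) {
        assert(d.dir >= 0 && d.dir < 3);
        return &m_comp[d.dir];
    }
};

int main () {
    VectorFieldSketch reg;
    // The FillBoundary loop above follows this component-by-component pattern:
    for (int idim = 0; idim < 3; ++idim) {
        Fab* J = reg.get("hybrid_current_fp_temp", Direction{idim}, 0);
        J->val += 1.0;  // stand-in for J->FillBoundary(Geom(lev).periodicity())
    }
}
// ---------------------------------------------------------------------------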
ApplyJfieldBoundary( - lev, current_fp_temp[lev][0].get(), - current_fp_temp[lev][1].get(), - current_fp_temp[lev][2].get(), + lev, current_fp_temp[lev][0], + current_fp_temp[lev][1], + current_fp_temp[lev][2], PatchType::fine ); } diff --git a/Source/FieldSolver/WarpXSolveFieldsES.cpp b/Source/FieldSolver/WarpXSolveFieldsES.cpp index 42a537b5c2a..6194570cd2d 100644 --- a/Source/FieldSolver/WarpXSolveFieldsES.cpp +++ b/Source/FieldSolver/WarpXSolveFieldsES.cpp @@ -7,24 +7,29 @@ * License: BSD-3-Clause-LBNL */ #include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" + +#include "Fields.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" + void WarpX::ComputeSpaceChargeField (bool const reset_fields) { WARPX_PROFILE("WarpX::ComputeSpaceChargeField"); + using ablastr::fields::Direction; + using warpx::fields::FieldType; + if (reset_fields) { // Reset all E and B fields to 0, before calculating space-charge fields WARPX_PROFILE("WarpX::ComputeSpaceChargeField::reset_fields"); for (int lev = 0; lev <= max_level; lev++) { for (int comp=0; comp<3; comp++) { - Efield_fp[lev][comp]->setVal(0); - Bfield_fp[lev][comp]->setVal(0); + m_fields.get(FieldType::Efield_fp, Direction{comp}, lev)->setVal(0); + m_fields.get(FieldType::Bfield_fp, Direction{comp}, lev)->setVal(0); } } } m_electrostatic_solver->ComputeSpaceChargeField( - rho_fp, rho_cp, charge_buf, phi_fp, *mypc, myfl.get(), Efield_fp, Bfield_fp - ); + m_fields, *mypc, myfl.get(), max_level ); } diff --git a/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp b/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp index 9741d9b667b..1ff1d1f866d 100644 --- a/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp +++ b/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp @@ -6,6 +6,7 @@ */ #include "WarpX.H" +#include "Fields.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXProfilerWrapper.H" @@ -69,36 +70,41 @@ WarpX::Hybrid_QED_Push (int lev, amrex::Real a_dt) void WarpX::Hybrid_QED_Push (int lev, PatchType patch_type, amrex::Real a_dt) { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + const int patch_level = (patch_type == PatchType::fine) ? 
lev : lev-1; const std::array& dx_vec= WarpX::CellSize(patch_level); const Real dx = dx_vec[0]; const Real dy = dx_vec[1]; const Real dz = dx_vec[2]; + using ablastr::fields::Direction; + MultiFab *Ex, *Ey, *Ez, *Bx, *By, *Bz, *Jx, *Jy, *Jz; if (patch_type == PatchType::fine) { - Ex = Efield_fp[lev][0].get(); - Ey = Efield_fp[lev][1].get(); - Ez = Efield_fp[lev][2].get(); - Bx = Bfield_fp[lev][0].get(); - By = Bfield_fp[lev][1].get(); - Bz = Bfield_fp[lev][2].get(); - Jx = current_fp[lev][0].get(); - Jy = current_fp[lev][1].get(); - Jz = current_fp[lev][2].get(); + Ex = m_fields.get(FieldType::Efield_fp, Direction{0}, lev); + Ey = m_fields.get(FieldType::Efield_fp, Direction{1}, lev); + Ez = m_fields.get(FieldType::Efield_fp, Direction{2}, lev); + Bx = m_fields.get(FieldType::Bfield_fp, Direction{0}, lev); + By = m_fields.get(FieldType::Bfield_fp, Direction{1}, lev); + Bz = m_fields.get(FieldType::Bfield_fp, Direction{2}, lev); + Jx = m_fields.get(FieldType::current_fp, Direction{0}, lev); + Jy = m_fields.get(FieldType::current_fp, Direction{1}, lev); + Jz = m_fields.get(FieldType::current_fp, Direction{2}, lev); } else { - Ex = Efield_cp[lev][0].get(); - Ey = Efield_cp[lev][1].get(); - Ez = Efield_cp[lev][2].get(); - Bx = Bfield_cp[lev][0].get(); - By = Bfield_cp[lev][1].get(); - Bz = Bfield_cp[lev][2].get(); - Jx = current_cp[lev][0].get(); - Jy = current_cp[lev][1].get(); - Jz = current_cp[lev][2].get(); + Ex = m_fields.get(FieldType::Efield_cp, Direction{0}, lev); + Ey = m_fields.get(FieldType::Efield_cp, Direction{1}, lev); + Ez = m_fields.get(FieldType::Efield_cp, Direction{2}, lev); + Bx = m_fields.get(FieldType::Bfield_cp, Direction{0}, lev); + By = m_fields.get(FieldType::Bfield_cp, Direction{1}, lev); + Bz = m_fields.get(FieldType::Bfield_cp, Direction{2}, lev); + Jx = m_fields.get(FieldType::current_cp, Direction{0}, lev); + Jy = m_fields.get(FieldType::current_cp, Direction{1}, lev); + Jz = m_fields.get(FieldType::current_cp, Direction{2}, lev); } amrex::LayoutData* cost = WarpX::getCosts(lev); diff --git a/Source/Fields.H b/Source/Fields.H new file mode 100644 index 00000000000..00d1872a049 --- /dev/null +++ b/Source/Fields.H @@ -0,0 +1,131 @@ +/* Copyright 2024 Luca Fedeli + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_FIELDS_H_ +#define WARPX_FIELDS_H_ + +#include + +#include + +#include +#include + + +namespace warpx::fields +{ + AMREX_ENUM(FieldType, + None, + Efield_aux, // Field that the particles gather from. Obtained from Efield_fp (and Efield_cp when using MR); see UpdateAuxilaryData + Bfield_aux, // Field that the particles gather from. Obtained from Bfield_fp (and Bfield_cp when using MR); see UpdateAuxilaryData + Efield_fp, // The field that is updated by the field solver at each timestep + Bfield_fp, // The field that is updated by the field solver at each timestep + Efield_fp_external, // Stores grid particle fields provided by the user as through an openPMD file + Bfield_fp_external, // Stores grid particle fields provided by the user as through an openPMD file + current_fp, // The current that is used as a source for the field solver + current_fp_nodal, // Only used when using nodal current deposition + current_fp_vay, // Only used when using Vay current deposition + current_buf, // Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. 
+ current_store, // Only used when doing subcycling with mesh refinement, for book-keeping of currents + rho_buf, // Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. + rho_fp, // The charge density that is used as a source for the field solver (mostly for labframe electrostatic and PSATD) + F_fp, // Used for divE cleaning + G_fp, // Used for divB cleaning + phi_fp, // Obtained by the Poisson solver, for labframe electrostatic + vector_potential_fp, // Obtained by the magnetostatic solver + vector_potential_fp_nodal, + vector_potential_grad_buf_e_stag, + vector_potential_grad_buf_b_stag, + hybrid_electron_pressure_fp, + hybrid_rho_fp_temp, + hybrid_current_fp_temp, + hybrid_current_fp_ampere, + hybrid_current_fp_external, + Efield_cp, // Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level + Bfield_cp, // Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level + current_cp, // Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level + rho_cp, // Only used with MR. The charge density that is used as a source for the field solver, on the coarse patch of each level + F_cp, // Only used with MR. Used for divE cleaning, on the coarse patch of each level + G_cp, // Only used with MR. Used for divB cleaning, on the coarse patch of each level + Efield_cax, // Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field + Bfield_cax, // Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field + E_external_particle_field, // Stores external particle fields provided by the user as through an openPMD file + B_external_particle_field, // Stores external particle fields provided by the user as through an openPMD file + distance_to_eb, // Only used with embedded boundaries (EB). Stores the distance to the nearest EB + edge_lengths, // Only used with embedded boundaries (EB). Indicates the length of the cell edge that is covered by the EB, in SI units + face_areas, // Only used with embedded boundaries (EB). 
Indicates the area of the cell face that is covered by the EB, in SI units + area_mod, + pml_E_fp, + pml_B_fp, + pml_j_fp, + pml_F_fp, + pml_G_fp, + pml_E_cp, + pml_B_cp, + pml_j_cp, + pml_F_cp, + pml_G_cp, + pml_edge_lengths, + Efield_avg_fp, + Bfield_avg_fp, + Efield_avg_cp, + Bfield_avg_cp, + Bold, // Stores the value of B at the beginning of the timestep, for the implicit solver + ECTRhofield, + Venl + ); + + /** these are vector fields */ + constexpr FieldType ArrayFieldTypes[] = { + FieldType::Efield_aux, + FieldType::Bfield_aux, + FieldType::Efield_fp, + FieldType::Bfield_fp, + FieldType::current_fp, + FieldType::current_fp_nodal, + FieldType::current_fp_vay, + FieldType::current_buf, + FieldType::current_store, + FieldType::vector_potential_fp, + FieldType::vector_potential_fp_nodal, + FieldType::vector_potential_grad_buf_e_stag, + FieldType::vector_potential_grad_buf_b_stag, + FieldType::hybrid_current_fp_temp, + FieldType::hybrid_current_fp_ampere, + FieldType::hybrid_current_fp_external, + FieldType::Efield_cp, + FieldType::Bfield_cp, + FieldType::current_cp, + FieldType::Efield_cax, + FieldType::Bfield_cax, + FieldType::E_external_particle_field, + FieldType::B_external_particle_field, + FieldType::pml_E_fp, + FieldType::pml_B_fp, + FieldType::pml_j_fp, + FieldType::pml_E_cp, + FieldType::pml_B_cp, + FieldType::pml_j_cp, + FieldType::Efield_avg_fp, + FieldType::Bfield_avg_fp, + FieldType::Efield_avg_cp, + FieldType::Bfield_avg_cp, + FieldType::Bold, + FieldType::ECTRhofield, + FieldType::Venl + }; + + /** Returns true if a FieldType represents a vector field */ + inline bool + isFieldArray (const FieldType field_type) + { + return std::any_of( std::begin(ArrayFieldTypes), std::end(ArrayFieldTypes), + [field_type](const FieldType& f) { return f == field_type; }); + } + +} + +#endif //WARPX_FIELDS_H_ diff --git a/Source/Fluids/MultiFluidContainer.H b/Source/Fluids/MultiFluidContainer.H index 23f0c46590b..c2cdfc3e19f 100644 --- a/Source/Fluids/MultiFluidContainer.H +++ b/Source/Fluids/MultiFluidContainer.H @@ -10,6 +10,8 @@ #include "WarpXFluidContainer_fwd.H" +#include + #include #include @@ -34,7 +36,7 @@ class MultiFluidContainer public: - MultiFluidContainer (int nlevs_max); + MultiFluidContainer (); ~MultiFluidContainer() = default; @@ -52,25 +54,26 @@ public: } #endif - void AllocateLevelMFs (int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm); + void AllocateLevelMFs (ablastr::fields::MultiFabRegister& m_fields, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, int lev); - void InitData (int lev, amrex::Box init_box, amrex::Real cur_time); + void InitData (ablastr::fields::MultiFabRegister& m_fields, amrex::Box init_box, amrex::Real cur_time, int lev); /// /// This evolves all the fluids by one PIC time step, including current deposition, the /// field solve, and pushing the fluids, for all the species in the MultiFluidContainer. 
 ///
-    void Evolve (int lev,
-                 const amrex::MultiFab& Ex, const amrex::MultiFab& Ey, const amrex::MultiFab& Ez,
-                 const amrex::MultiFab& Bx, const amrex::MultiFab& By, const amrex::MultiFab& Bz,
-                 amrex::MultiFab* rho, amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz,
-                 amrex::Real cur_time, bool skip_deposition=false);
+    void Evolve (ablastr::fields::MultiFabRegister& fields,
+                 int lev,
+                 std::string const& current_fp_string,
+                 amrex::Real cur_time,
+                 bool skip_deposition=false);

     [[nodiscard]] int nSpecies() const {return static_cast<int>(species_names.size());}

-    void DepositCharge (int lev, amrex::MultiFab &rho);
-    void DepositCurrent (int lev,
-                         amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz);
+    void DepositCharge (ablastr::fields::MultiFabRegister& m_fields, amrex::MultiFab &rho, int lev);
+    void DepositCurrent (ablastr::fields::MultiFabRegister& m_fields,
+                         amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz,
+                         int lev);

 private:
diff --git a/Source/Fluids/MultiFluidContainer.cpp b/Source/Fluids/MultiFluidContainer.cpp
index 234cefb4f07..b160817d886 100644
--- a/Source/Fluids/MultiFluidContainer.cpp
+++ b/Source/Fluids/MultiFluidContainer.cpp
@@ -13,7 +13,7 @@
 using namespace amrex;

-MultiFluidContainer::MultiFluidContainer (int nlevs_max)
+MultiFluidContainer::MultiFluidContainer ()
 {
     const ParmParse pp_fluids("fluids");
     pp_fluids.queryarr("species_names", species_names);
@@ -22,52 +22,52 @@ MultiFluidContainer::MultiFluidContainer (int nlevs_max)
     allcontainers.resize(nspecies);
     for (int i = 0; i < nspecies; ++i) {
-        allcontainers[i] = std::make_unique<WarpXFluidContainer>(nlevs_max, i, species_names[i]);
+        allcontainers[i] = std::make_unique<WarpXFluidContainer>(i, species_names[i]);
     }
 }

 void
-MultiFluidContainer::AllocateLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm)
+MultiFluidContainer::AllocateLevelMFs (ablastr::fields::MultiFabRegister& m_fields, const BoxArray& ba, const DistributionMapping& dm, int lev)
 {
     for (auto& fl : allcontainers) {
-        fl->AllocateLevelMFs(lev, ba, dm);
+        fl->AllocateLevelMFs(m_fields, ba, dm, lev);
     }
 }

 void
-MultiFluidContainer::InitData (int lev, amrex::Box init_box, amrex::Real cur_time)
+MultiFluidContainer::InitData (ablastr::fields::MultiFabRegister& m_fields, amrex::Box init_box, amrex::Real cur_time, int lev)
 {
     for (auto& fl : allcontainers) {
-        fl->InitData(lev, init_box, cur_time);
+        fl->InitData(m_fields, init_box, cur_time, lev);
     }
 }

 void
-MultiFluidContainer::DepositCharge (int lev, amrex::MultiFab &rho)
+MultiFluidContainer::DepositCharge (ablastr::fields::MultiFabRegister& m_fields, amrex::MultiFab &rho, int lev)
 {
     for (auto& fl : allcontainers) {
-        fl->DepositCharge(lev,rho);
+        fl->DepositCharge(m_fields,rho,lev);
     }
 }

 void
-MultiFluidContainer::DepositCurrent (int lev,
-                                     amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz)
+MultiFluidContainer::DepositCurrent (ablastr::fields::MultiFabRegister& m_fields,
+                                     amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz, int lev)
 {
     for (auto& fl : allcontainers) {
-        fl->DepositCurrent(lev,jx,jy,jz);
+        fl->DepositCurrent(m_fields,jx,jy,jz,lev);
     }
 }

 void
-MultiFluidContainer::Evolve (int lev,
-                             const MultiFab& Ex, const MultiFab& Ey, const MultiFab& Ez,
-                             const MultiFab& Bx, const MultiFab& By, const MultiFab& Bz,
-                             MultiFab* rho, MultiFab& jx, MultiFab& jy, MultiFab& jz,
-                             amrex::Real cur_time, bool skip_deposition)
+MultiFluidContainer::Evolve (ablastr::fields::MultiFabRegister& fields,
+                             int lev,
+                             std::string const& current_fp_string,
+                             amrex::Real cur_time,
+                             bool
skip_deposition) { for (auto& fl : allcontainers) { - fl->Evolve(lev, Ex, Ey, Ez, Bx, By, Bz, rho, jx, jy, jz, cur_time, skip_deposition); + fl->Evolve(fields, lev, current_fp_string, cur_time, skip_deposition); } } diff --git a/Source/Fluids/WarpXFluidContainer.H b/Source/Fluids/WarpXFluidContainer.H index 04ec4d9e80d..f3ea2d9a498 100644 --- a/Source/Fluids/WarpXFluidContainer.H +++ b/Source/Fluids/WarpXFluidContainer.H @@ -30,7 +30,7 @@ class WarpXFluidContainer public: friend MultiFluidContainer; - WarpXFluidContainer (int nlevs_max, int ispecies, const std::string& name); + WarpXFluidContainer (int ispecies, const std::string &name); ~WarpXFluidContainer() = default; WarpXFluidContainer (WarpXFluidContainer const &) = delete; @@ -38,20 +38,20 @@ public: WarpXFluidContainer(WarpXFluidContainer&& ) = default; WarpXFluidContainer& operator=(WarpXFluidContainer&& ) = default; - void AllocateLevelMFs (int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm); + void AllocateLevelMFs (ablastr::fields::MultiFabRegister& m_fields, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, int lev) const; - void InitData (int lev, amrex::Box init_box, amrex::Real cur_time); + void InitData (ablastr::fields::MultiFabRegister& m_fields, amrex::Box init_box, amrex::Real cur_time, int lev); void ReadParameters (); /** * Evolve updates a single timestep (dt) of the cold relativistic fluid equations */ - void Evolve (int lev, - const amrex::MultiFab& Ex, const amrex::MultiFab& Ey, const amrex::MultiFab& Ez, - const amrex::MultiFab& Bx, const amrex::MultiFab& By, const amrex::MultiFab& Bz, - amrex::MultiFab* rho, amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz, - amrex::Real cur_time, bool skip_deposition=false); + void Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, + amrex::Real cur_time, + bool skip_deposition=false); /** * AdvectivePush_Muscl takes a single timestep (dt) of the cold relativistic fluid equations @@ -61,7 +61,7 @@ public: * * \param[in] lev refinement level */ - void AdvectivePush_Muscl (int lev); + void AdvectivePush_Muscl (ablastr::fields::MultiFabRegister& m_fields, int lev); /** @@ -72,7 +72,7 @@ public: * * \param[in] lev refinement level */ - void ApplyBcFluidsAndComms (int lev); + void ApplyBcFluidsAndComms (ablastr::fields::MultiFabRegister& m_fields, int lev); #if defined(WARPX_DIM_RZ) /** @@ -83,7 +83,7 @@ public: * * \param[in] lev refinement level */ - void centrifugal_source_rz (int lev); + void centrifugal_source_rz (ablastr::fields::MultiFabRegister& m_fields, int lev); #endif /** @@ -101,10 +101,10 @@ public: * \param[in] Bz Yee magnetic field (z) * \param[in] t Current time */ - void GatherAndPush (int lev, + void GatherAndPush (ablastr::fields::MultiFabRegister& m_fields, const amrex::MultiFab& Ex, const amrex::MultiFab& Ey, const amrex::MultiFab& Ez, const amrex::MultiFab& Bx, const amrex::MultiFab& By, const amrex::MultiFab& Bz, - amrex::Real t); + amrex::Real t, int lev); /** * DepositCurrent interpolates the fluid current density comps. onto the Yee grid and @@ -117,8 +117,8 @@ public: * \param[in,out] jy current density MultiFab y comp. * \param[in,out] jz current density MultiFab z comp. 
*/ - void DepositCurrent (int lev, - amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz); + void DepositCurrent (ablastr::fields::MultiFabRegister& m_fields, + amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz, int lev); /** * DepositCharge interpolates the fluid charge density onto the Yee grid and @@ -129,7 +129,7 @@ public: * \param[in] lev refinement level * \param[in,out] rho charge density MultiFab. */ - void DepositCharge (int lev, amrex::MultiFab &rho, int icomp = 0); + void DepositCharge (ablastr::fields::MultiFabRegister& m_fields, amrex::MultiFab &rho, int lev, int icomp = 0); [[nodiscard]] amrex::Real getCharge () const {return charge;} [[nodiscard]] amrex::Real getMass () const {return mass;} @@ -185,9 +185,9 @@ protected: public: - // MultiFabs that contain the density (N) and momentum density (NU) of this fluid species, for each refinement level - amrex::Vector< std::unique_ptr > N; - amrex::Vector, 3 > > NU; + // Names of Multifabs that will be added to the mfs register + std::string name_mf_N = "fluid_density_"+species_name; + std::string name_mf_NU = "fluid_momentum_density_"+species_name; }; diff --git a/Source/Fluids/WarpXFluidContainer.cpp b/Source/Fluids/WarpXFluidContainer.cpp index 99a1212ac90..326ce30c844 100644 --- a/Source/Fluids/WarpXFluidContainer.cpp +++ b/Source/Fluids/WarpXFluidContainer.cpp @@ -4,22 +4,25 @@ * * License: BSD-3-Clause-LBNL */ -#include "ablastr/coarsen/sample.H" +#include "Fields.H" #include "Particles/Pusher/UpdateMomentumHigueraCary.H" #include "Utils/WarpXProfilerWrapper.H" #include "MusclHancockUtils.H" #include "Fluids/WarpXFluidContainer.H" -#include "WarpX.H" -#include #include "Utils/Parser/ParserUtils.H" #include "Utils/WarpXUtil.H" #include "Utils/SpeciesUtils.H" +#include "WarpX.H" + +#include +#include using namespace ablastr::utils::communication; using namespace amrex; -WarpXFluidContainer::WarpXFluidContainer(int nlevs_max, int ispecies, const std::string &name): + +WarpXFluidContainer::WarpXFluidContainer(int ispecies, const std::string &name): species_id{ispecies}, species_name{name} { @@ -50,9 +53,6 @@ WarpXFluidContainer::WarpXFluidContainer(int nlevs_max, int ispecies, const std: } amrex::Gpu::synchronize(); - // Resize the list of MultiFabs for the right number of levels - N.resize(nlevs_max); - NU.resize(nlevs_max); } void WarpXFluidContainer::ReadParameters() @@ -139,31 +139,33 @@ void WarpXFluidContainer::ReadParameters() } } -void WarpXFluidContainer::AllocateLevelMFs(int lev, const BoxArray &ba, const DistributionMapping &dm) +void WarpXFluidContainer::AllocateLevelMFs(ablastr::fields::MultiFabRegister& fields, const BoxArray &ba, const DistributionMapping &dm, int lev) const { + using ablastr::fields::Direction; const int ncomps = 1; const amrex::IntVect nguards(AMREX_D_DECL(2, 2, 2)); - // set human-readable tag for each MultiFab - auto const tag = [lev](std::string tagname) - { - tagname.append("[l=").append(std::to_string(lev)).append("]"); - return tagname; - }; - - WarpX::AllocInitMultiFab(N[lev], amrex::convert(ba, amrex::IntVect::TheNodeVector()), - dm, ncomps, nguards, lev, tag("fluid density"), 0.0_rt); - - WarpX::AllocInitMultiFab(NU[lev][0], amrex::convert(ba, amrex::IntVect::TheNodeVector()), - dm, ncomps, nguards, lev, tag("fluid momentum density [x]"), 0.0_rt); - WarpX::AllocInitMultiFab(NU[lev][1], amrex::convert(ba, amrex::IntVect::TheNodeVector()), - dm, ncomps, nguards, lev, tag("fluid momentum density [y]"), 0.0_rt); - WarpX::AllocInitMultiFab(NU[lev][2], amrex::convert(ba, 
amrex::IntVect::TheNodeVector()),
-        dm, ncomps, nguards, lev, tag("fluid momentum density [z]"), 0.0_rt);
+    fields.alloc_init(
+        name_mf_N, lev, amrex::convert(ba, amrex::IntVect::TheNodeVector()), dm,
+        ncomps, nguards, 0.0_rt);
+
+    fields.alloc_init(
+        name_mf_NU, Direction{0}, lev, amrex::convert(ba, amrex::IntVect::TheNodeVector()), dm,
+        ncomps, nguards, 0.0_rt);
+
+    fields.alloc_init(
+        name_mf_NU, Direction{1}, lev, amrex::convert(ba, amrex::IntVect::TheNodeVector()), dm,
+        ncomps, nguards, 0.0_rt);
+
+    fields.alloc_init(
+        name_mf_NU, Direction{2}, lev, amrex::convert(ba, amrex::IntVect::TheNodeVector()), dm,
+        ncomps, nguards, 0.0_rt);
+
 }

-void WarpXFluidContainer::InitData(int lev, amrex::Box init_box, amrex::Real cur_time)
+void WarpXFluidContainer::InitData(ablastr::fields::MultiFabRegister& fields, amrex::Box init_box, amrex::Real cur_time, int lev)
 {
+    using ablastr::fields::Direction;
     WARPX_PROFILE("WarpXFluidContainer::InitData");

     // Convert initialization box to nodal box
@@ -186,14 +188,14 @@ void WarpXFluidContainer::InitData(int lev, amrex::Box init_box, amrex::Real cur
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
+    for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi)
     {
-        amrex::Box const tile_box = mfi.tilebox(N[lev]->ixType().toIntVect());
-        amrex::Array4<Real> const &N_arr = N[lev]->array(mfi);
-        amrex::Array4<Real> const &NUx_arr = NU[lev][0]->array(mfi);
-        amrex::Array4<Real> const &NUy_arr = NU[lev][1]->array(mfi);
-        amrex::Array4<Real> const &NUz_arr = NU[lev][2]->array(mfi);
+        amrex::Box const tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect());
+        amrex::Array4<Real> const &N_arr = fields.get(name_mf_N, lev)->array(mfi);
+        amrex::Array4<Real> const &NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi);
+        amrex::Array4<Real> const &NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi);
+        amrex::Array4<Real> const &NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi);

         // Return the intersection of all cells and the ones we wish to update
         amrex::Box const init_box_intersection = init_box & tile_box;
@@ -253,54 +255,68 @@ void WarpXFluidContainer::InitData(int lev, amrex::Box init_box, amrex::Real cur

 void WarpXFluidContainer::Evolve(
+    ablastr::fields::MultiFabRegister& fields,
     int lev,
-    const amrex::MultiFab &Ex, const amrex::MultiFab &Ey, const amrex::MultiFab &Ez,
-    const amrex::MultiFab &Bx, const amrex::MultiFab &By, const amrex::MultiFab &Bz,
-    amrex::MultiFab* rho, amrex::MultiFab &jx, amrex::MultiFab &jy, amrex::MultiFab &jz,
-    amrex::Real cur_time, bool skip_deposition)
+    const std::string& current_fp_string,
+    amrex::Real cur_time,
+    bool skip_deposition)
 {
+    using ablastr::fields::Direction;
+    using warpx::fields::FieldType;
     WARPX_PROFILE("WarpXFluidContainer::Evolve");

-    if (rho && ! skip_deposition && ! do_not_deposit) {
+    if (fields.has(FieldType::rho_fp,lev) && ! skip_deposition && ! do_not_deposit) {
         // Deposit charge before particle push, in component 0 of MultiFab rho.
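// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch (the DepositCharge hunk resumes
// below): the nullable `amrex::MultiFab* rho` argument of Evolve() becomes a
// registry query, so "is this optional field allocated?" moves from pointer
// null checks into has(). Hypothetical enum-keyed register; the FieldType
// names follow Source/Fields.H above.
#include <map>
#include <utility>

enum class FieldType { rho_fp, current_fp };

struct Fab { double q = 0.0; };  // stand-in for amrex::MultiFab

class EnumRegisterSketch {
    std::map<std::pair<FieldType,int>, Fab> m_mfs;  // key: (field, level)
public:
    void alloc (FieldType f, int lev) { m_mfs[{f, lev}] = Fab{}; }
    bool has (FieldType f, int lev) const { return m_mfs.count({f, lev}) != 0; }
    Fab& get (FieldType f, int lev) { return m_mfs.at({f, lev}); }
};

void depositIfPresent (EnumRegisterSketch& fields, int lev) {
    // Same shape as: if (fields.has(FieldType::rho_fp,lev) && !skip) { DepositCharge(...); }
    if (fields.has(FieldType::rho_fp, lev)) {
        fields.get(FieldType::rho_fp, lev).q += 1.0;  // stand-in for the deposition
    }
}

int main () {
    EnumRegisterSketch fields;
    depositIfPresent(fields, 0);           // rho_fp not allocated: no-op
    fields.alloc(FieldType::rho_fp, 0);
    depositIfPresent(fields, 0);           // rho_fp allocated: deposits
}
// ---------------------------------------------------------------------------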
@@ -253,54 +255,68 @@

 void WarpXFluidContainer::Evolve(
+    ablastr::fields::MultiFabRegister& fields,
     int lev,
-    const amrex::MultiFab &Ex, const amrex::MultiFab &Ey, const amrex::MultiFab &Ez,
-    const amrex::MultiFab &Bx, const amrex::MultiFab &By, const amrex::MultiFab &Bz,
-    amrex::MultiFab* rho, amrex::MultiFab &jx, amrex::MultiFab &jy, amrex::MultiFab &jz,
-    amrex::Real cur_time, bool skip_deposition)
+    const std::string& current_fp_string,
+    amrex::Real cur_time,
+    bool skip_deposition)
 {
+    using ablastr::fields::Direction;
+    using warpx::fields::FieldType;
     WARPX_PROFILE("WarpXFluidContainer::Evolve");

-    if (rho && ! skip_deposition && ! do_not_deposit) {
+    if (fields.has(FieldType::rho_fp,lev) && ! skip_deposition && ! do_not_deposit) {
         // Deposit charge before particle push, in component 0 of MultiFab rho.
-        DepositCharge(lev, *rho, 0);
+        DepositCharge(fields, *fields.get(FieldType::rho_fp,lev), lev, 0);
     }

     // Step the Lorentz Term
     if(!do_not_gather){
-        GatherAndPush(lev, Ex, Ey, Ez, Bx, By, Bz, cur_time);
+        GatherAndPush(fields,
+            *fields.get(FieldType::Efield_aux, Direction{0}, lev),
+            *fields.get(FieldType::Efield_aux, Direction{1}, lev),
+            *fields.get(FieldType::Efield_aux, Direction{2}, lev),
+            *fields.get(FieldType::Bfield_aux, Direction{0}, lev),
+            *fields.get(FieldType::Bfield_aux, Direction{1}, lev),
+            *fields.get(FieldType::Bfield_aux, Direction{2}, lev),
+            cur_time, lev);
     }

     // Cylindrical centrifugal term
     if(!do_not_push){
 #if defined(WARPX_DIM_RZ)
-        centrifugal_source_rz(lev);
+        centrifugal_source_rz(fields, lev);
 #endif

         // Apply (non-periodic) BC on the fluids (needed for spatial derivative),
         // and communicate N, NU at boundaries
-        ApplyBcFluidsAndComms(lev);
+        ApplyBcFluidsAndComms(fields, lev);

         // Step the Advective term
-        AdvectivePush_Muscl(lev);
+        AdvectivePush_Muscl(fields, lev);
     }

     // Deposit rho to the simulation mesh
     // Deposit charge (end of the step)
-    if (rho && ! skip_deposition && ! do_not_deposit) {
-        DepositCharge(lev, *rho, 1);
+    if (fields.has(FieldType::rho_fp,lev) && ! skip_deposition && ! do_not_deposit) {
+        DepositCharge(fields, *fields.get(FieldType::rho_fp,lev), lev, 1);
     }

     // Deposit J to the simulation mesh
     if (!skip_deposition && ! do_not_deposit) {
-        DepositCurrent(lev, jx, jy, jz);
+        DepositCurrent(fields,
+            *fields.get(current_fp_string, Direction{0}, lev),
+            *fields.get(current_fp_string, Direction{1}, lev),
+            *fields.get(current_fp_string, Direction{2}, lev),
+            lev);
     }
 }

 // Momentum source due to curvature
-void WarpXFluidContainer::ApplyBcFluidsAndComms (int lev)
+void WarpXFluidContainer::ApplyBcFluidsAndComms (ablastr::fields::MultiFabRegister& fields, int lev)
 {
+    using ablastr::fields::Direction;
     WARPX_PROFILE("WarpXFluidContainer::ApplyBcFluidsAndComms");
     WarpX &warpx = WarpX::GetInstance();
@@ -315,15 +331,15 @@
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
+    for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi)
     {
-        amrex::Box tile_box = mfi.tilebox(N[lev]->ixType().toIntVect());
+        amrex::Box tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect());

-        const amrex::Array4<Real> N_arr = N[lev]->array(mfi);
-        const amrex::Array4<Real> NUx_arr = NU[lev][0]->array(mfi);
-        const amrex::Array4<Real> NUy_arr = NU[lev][1]->array(mfi);
-        const amrex::Array4<Real> NUz_arr = NU[lev][2]->array(mfi);
+        const amrex::Array4<Real> N_arr = fields.get(name_mf_N, lev)->array(mfi);
+        const amrex::Array4<Real> NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi);
+        const amrex::Array4<Real> NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi);
+        const amrex::Array4<Real> NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi);

         //Grow the tilebox
         tile_box.grow(1);
@@ -395,15 +411,16 @@
     }

     // Fill guard cells
-    FillBoundary(*N[lev], N[lev]->nGrowVect(), WarpX::do_single_precision_comms, period);
-    FillBoundary(*NU[lev][0], NU[lev][0]->nGrowVect(), WarpX::do_single_precision_comms, period);
-    FillBoundary(*NU[lev][1], NU[lev][1]->nGrowVect(), WarpX::do_single_precision_comms, period);
-    FillBoundary(*NU[lev][2], NU[lev][2]->nGrowVect(), WarpX::do_single_precision_comms, period);
+    FillBoundary(*fields.get(name_mf_N, 
lev), fields.get(name_mf_N, lev)->nGrowVect(), WarpX::do_single_precision_comms, period); + FillBoundary(*fields.get(name_mf_NU, Direction{0}, lev), fields.get(name_mf_NU, Direction{0}, lev)->nGrowVect(), WarpX::do_single_precision_comms, period); + FillBoundary(*fields.get(name_mf_NU, Direction{1}, lev), fields.get(name_mf_NU, Direction{1}, lev)->nGrowVect(), WarpX::do_single_precision_comms, period); + FillBoundary(*fields.get(name_mf_NU, Direction{2}, lev), fields.get(name_mf_NU, Direction{2}, lev)->nGrowVect(), WarpX::do_single_precision_comms, period); } // Muscl Advection Update -void WarpXFluidContainer::AdvectivePush_Muscl (int lev) +void WarpXFluidContainer::AdvectivePush_Muscl (ablastr::fields::MultiFabRegister& fields, int lev) { + using ablastr::fields::Direction; WARPX_PROFILE("WarpXFluidContainer::AdvectivePush_Muscl"); // Grab the grid spacing @@ -434,31 +451,31 @@ void WarpXFluidContainer::AdvectivePush_Muscl (int lev) const amrex::Real dt_over_dz_half = 0.5_rt*(dt/dx[0]); #endif - const amrex::BoxArray ba = N[lev]->boxArray(); + const amrex::BoxArray ba = fields.get(name_mf_N, lev)->boxArray(); // Temporary Half-step values #if defined(WARPX_DIM_3D) - amrex::MultiFab tmp_U_minus_x( amrex::convert(ba, IntVect(0,1,1)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_plus_x( amrex::convert(ba, IntVect(0,1,1)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_minus_y( amrex::convert(ba, IntVect(1,0,1)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_plus_y( amrex::convert(ba, IntVect(1,0,1)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_minus_z( amrex::convert(ba, IntVect(1,1,0)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_plus_z( amrex::convert(ba, IntVect(1,1,0)), N[lev]->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_minus_x( amrex::convert(ba, IntVect(0,1,1)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_plus_x( amrex::convert(ba, IntVect(0,1,1)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_minus_y( amrex::convert(ba, IntVect(1,0,1)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_plus_y( amrex::convert(ba, IntVect(1,0,1)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_minus_z( amrex::convert(ba, IntVect(1,1,0)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_plus_z( amrex::convert(ba, IntVect(1,1,0)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::MultiFab tmp_U_minus_x( amrex::convert(ba, IntVect(0,1)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_plus_x( amrex::convert(ba, IntVect(0,1)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_minus_z( amrex::convert(ba, IntVect(1,0)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_plus_z( amrex::convert(ba, IntVect(1,0)), N[lev]->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_minus_x( amrex::convert(ba, IntVect(0,1)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_plus_x( amrex::convert(ba, IntVect(0,1)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_minus_z( amrex::convert(ba, IntVect(1,0)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_plus_z( amrex::convert(ba, IntVect(1,0)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); #else - amrex::MultiFab tmp_U_minus_z( amrex::convert(ba, IntVect(0)), 
N[lev]->DistributionMap(), 4, 1);
-    amrex::MultiFab tmp_U_plus_z( amrex::convert(ba, IntVect(0)), N[lev]->DistributionMap(), 4, 1);
+    amrex::MultiFab tmp_U_minus_z( amrex::convert(ba, IntVect(0)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1);
+    amrex::MultiFab tmp_U_plus_z( amrex::convert(ba, IntVect(0)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1);
 #endif

     // Fill edge values of N and U at the half timestep for MUSCL
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
+    for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi)
     {
         // Loop over a box with one extra gridpoint in the ghost region to avoid
@@ -476,10 +493,10 @@ void WarpXFluidContainer::AdvectivePush_Muscl (int lev)
             return tt;
         }();

-        amrex::Array4<Real> const &N_arr = N[lev]->array(mfi);
-        amrex::Array4<Real> const &NUx_arr = NU[lev][0]->array(mfi);
-        amrex::Array4<Real> const &NUy_arr = NU[lev][1]->array(mfi);
-        amrex::Array4<Real> const &NUz_arr = NU[lev][2]->array(mfi);
+        amrex::Array4<Real> const &N_arr = fields.get(name_mf_N, lev)->array(mfi);
+        amrex::Array4<Real> const &NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi);
+        amrex::Array4<Real> const &NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi);
+        amrex::Array4<Real> const &NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi);

         // Boxes are computed to avoid going out of bounds.
         // Grow the entire domain
@@ -741,13 +758,13 @@
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
+    for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi)
     {
-        const amrex::Box tile_box = mfi.tilebox(N[lev]->ixType().toIntVect());
-        const amrex::Array4<Real> N_arr = N[lev]->array(mfi);
-        const amrex::Array4<Real> NUx_arr = NU[lev][0]->array(mfi);
-        const amrex::Array4<Real> NUy_arr = NU[lev][1]->array(mfi);
-        const amrex::Array4<Real> NUz_arr = NU[lev][2]->array(mfi);
+        const amrex::Box tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect());
+        const amrex::Array4<Real> N_arr = fields.get(name_mf_N, lev)->array(mfi);
+        const amrex::Array4<Real> NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi);
+        const amrex::Array4<Real> NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi);
+        const amrex::Array4<Real> NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi);

 #if defined(WARPX_DIM_3D)
         amrex::Array4<Real> const &U_minus_x = tmp_U_minus_x.array(mfi);
@@ -878,8 +895,9 @@

 // Momentum source due to curvature
 #if defined(WARPX_DIM_RZ)
-void WarpXFluidContainer::centrifugal_source_rz (int lev)
+void WarpXFluidContainer::centrifugal_source_rz (ablastr::fields::MultiFabRegister& fields, int lev)
 {
+    using ablastr::fields::Direction;
     WARPX_PROFILE("WarpXFluidContainer::centrifugal_source_rz");
     WarpX &warpx = WarpX::GetInstance();
@@ -894,15 +912,15 @@
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
+    for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi)
     {
-        amrex::Box const &tile_box = mfi.tilebox(N[lev]->ixType().toIntVect());
+        amrex::Box const &tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect());

-        amrex::Array4<Real> const &N_arr = N[lev]->array(mfi);
-        const amrex::Array4<Real> NUx_arr = NU[lev][0]->array(mfi);
-        const amrex::Array4<Real> NUy_arr = NU[lev][1]->array(mfi);
-        amrex::Array4<Real> const &NUz_arr = NU[lev][2]->array(mfi);
+        amrex::Array4<Real> const &N_arr = fields.get(name_mf_N, lev)->array(mfi);
+        const amrex::Array4<Real> NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi);
+        const amrex::Array4<Real> NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi);
+        amrex::Array4<Real> const &NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi);

         amrex::ParallelFor(tile_box,
             [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept
@@ -947,11 +965,13 @@

 // Momentum source from fields
 void WarpXFluidContainer::GatherAndPush (
-    int lev,
+    ablastr::fields::MultiFabRegister& fields,
     const amrex::MultiFab& Ex, const amrex::MultiFab& Ey, const amrex::MultiFab& Ez,
     const amrex::MultiFab& Bx, const amrex::MultiFab& By, const amrex::MultiFab& Bz,
-    Real t)
+    Real t,
+    int lev)
 {
+    using ablastr::fields::Direction;
     WARPX_PROFILE("WarpXFluidContainer::GatherAndPush");
     WarpX &warpx = WarpX::GetInstance();
@@ -978,7 +998,7 @@ void WarpXFluidContainer::GatherAndPush (
     auto Bz_type = amrex::GpuArray<int, 3>{0, 0, 0};
     for (int i = 0; i < AMREX_SPACEDIM; ++i)
     {
-        Nodal_type[i] = N[lev]->ixType()[i];
+        Nodal_type[i] = fields.get(name_mf_N, lev)->ixType()[i];
         Ex_type[i] = Ex.ixType()[i];
         Ey_type[i] = Ey.ixType()[i];
         Ez_type[i] = Ez.ixType()[i];
@@ -1015,15 +1035,15 @@
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
+    for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi)
     {
-        amrex::Box const &tile_box = mfi.tilebox(N[lev]->ixType().toIntVect());
+        amrex::Box const &tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect());

-        amrex::Array4<Real> const &N_arr = N[lev]->array(mfi);
-        const amrex::Array4<Real> NUx_arr = NU[lev][0]->array(mfi);
-        const amrex::Array4<Real> NUy_arr = NU[lev][1]->array(mfi);
-        const amrex::Array4<Real> NUz_arr = NU[lev][2]->array(mfi);
+        amrex::Array4<Real> const &N_arr = fields.get(name_mf_N, lev)->array(mfi);
+        const amrex::Array4<Real> NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi);
+        const amrex::Array4<Real> NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi);
+        const amrex::Array4<Real> NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi);

         amrex::Array4<Real const> const& Ex_arr = Ex.array(mfi);
         amrex::Array4<Real const> const& Ey_arr = Ey.array(mfi);
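// A minimal sketch (not part of this patch) of the staggering bookkeeping that
// GatherAndPush relies on above: the index type of each MultiFab is recorded
// per direction (1 = nodal, 0 = cell-centered) so the field gather can choose
// the matching interpolation; `fields`, `name_mf_N`, and `lev` as in this file.
//
//   auto Nodal_type = amrex::GpuArray<int, 3>{0, 0, 0};
//   for (int i = 0; i < AMREX_SPACEDIM; ++i) {
//       Nodal_type[i] = fields.get(name_mf_N, lev)->ixType()[i];
//   }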
@@ -1218,7 +1238,7 @@
     }
 }

-void WarpXFluidContainer::DepositCharge (int lev, amrex::MultiFab &rho, int icomp)
+void WarpXFluidContainer::DepositCharge (ablastr::fields::MultiFabRegister& fields, amrex::MultiFab &rho, int lev, int icomp)
 {
     WARPX_PROFILE("WarpXFluidContainer::DepositCharge");
@@ -1235,11 +1255,11 @@ void WarpXFluidContainer::DepositCharge (int lev, amrex::MultiFab &rho, int icom
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
+    for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi)
     {
-        amrex::Box const &tile_box = mfi.tilebox(N[lev]->ixType().toIntVect());
-        amrex::Array4<Real> const &N_arr = N[lev]->array(mfi);
+        amrex::Box const &tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect());
+        amrex::Array4<Real> const &N_arr = fields.get(name_mf_N, lev)->array(mfi);
         const amrex::Array4<Real> rho_arr = rho.array(mfi);
         const amrex::Array4<int> owner_mask_rho_arr = owner_mask_rho->array(mfi);
@@ -1255,15 +1275,17 @@ void WarpXFluidContainer::DepositCharge (int lev, amrex::MultiFab &rho, int icom
 }

 void WarpXFluidContainer::DepositCurrent(
-    int lev,
-    amrex::MultiFab &jx, amrex::MultiFab &jy, amrex::MultiFab &jz)
+    ablastr::fields::MultiFabRegister& fields,
+    amrex::MultiFab &jx, amrex::MultiFab &jy, amrex::MultiFab &jz,
+    int lev)
 {
+    using ablastr::fields::Direction;
     WARPX_PROFILE("WarpXFluidContainer::DepositCurrent");

     // Temporary nodal currents
-    amrex::MultiFab tmp_jx_fluid(N[lev]->boxArray(), N[lev]->DistributionMap(), 1, 0);
-    amrex::MultiFab tmp_jy_fluid(N[lev]->boxArray(), N[lev]->DistributionMap(), 1, 0);
-    amrex::MultiFab tmp_jz_fluid(N[lev]->boxArray(), N[lev]->DistributionMap(), 1, 0);
+    amrex::MultiFab tmp_jx_fluid(fields.get(name_mf_N, lev)->boxArray(), fields.get(name_mf_N, lev)->DistributionMap(), 1, 0);
+    amrex::MultiFab tmp_jy_fluid(fields.get(name_mf_N, lev)->boxArray(), fields.get(name_mf_N, lev)->DistributionMap(), 1, 0);
+    amrex::MultiFab tmp_jz_fluid(fields.get(name_mf_N, lev)->boxArray(), fields.get(name_mf_N, lev)->DistributionMap(), 1, 0);

     const amrex::Real inv_clight_sq = 1.0_prt / PhysConst::c / PhysConst::c;
     const amrex::Real q = getCharge();
@@ -1293,14 +1315,14 @@ void WarpXFluidContainer::DepositCurrent(
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
+    for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi)
     {
-        amrex::Box const &tile_box = mfi.tilebox(N[lev]->ixType().toIntVect());
+        amrex::Box const &tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect());

-        amrex::Array4<Real> const &N_arr = N[lev]->array(mfi);
-        amrex::Array4<Real> const &NUx_arr = NU[lev][0]->array(mfi);
-        amrex::Array4<Real> const &NUy_arr = NU[lev][1]->array(mfi);
-        amrex::Array4<Real> const &NUz_arr = NU[lev][2]->array(mfi);
+        amrex::Array4<Real> const &N_arr = fields.get(name_mf_N, lev)->array(mfi);
+        amrex::Array4<Real> const &NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi);
+        amrex::Array4<Real> const &NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi);
+        amrex::Array4<Real> const &NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi);

         const amrex::Array4<Real> tmp_jx_fluid_arr = tmp_jx_fluid.array(mfi);
         const amrex::Array4<Real> tmp_jy_fluid_arr = tmp_jy_fluid.array(mfi);
@@ -1328,7 +1350,7 @@ void WarpXFluidContainer::DepositCurrent(
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
+    for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi)
     {
         amrex::Box const &tile_box_x = mfi.tilebox(jx.ixType().toIntVect());
         amrex::Box const &tile_box_y = mfi.tilebox(jy.ixType().toIntVect());

diff --git a/Source/Initialization/DivCleaner/ProjectionDivCleaner.H b/Source/Initialization/DivCleaner/ProjectionDivCleaner.H
index 7ee1fe57048..2fedb83cd36 100644
--- a/Source/Initialization/DivCleaner/ProjectionDivCleaner.H
+++ b/Source/Initialization/DivCleaner/ProjectionDivCleaner.H
@@ -35,7 +35,7 @@
 #include
 #include
-#include
+#include "Fields.H"
 #include "Utils/Parser/ParserUtils.H"

 namespace warpx::initialization {
@@ -64,7 +64,7 @@ protected:
     amrex::Real m_rtol;
     amrex::Real m_atol;

-    warpx::fields::FieldType m_field_type;
+    std::string m_field_name;

 public:
     amrex::Vector< std::unique_ptr<amrex::MultiFab> > 
m_solution; @@ -83,7 +83,7 @@ public: amrex::Gpu::DeviceVector m_stencil_coefs_z; // Default Constructor - ProjectionDivCleaner (warpx::fields::FieldType a_field_type); + ProjectionDivCleaner (std::string const& a_field_name); void ReadParameters (); diff --git a/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp b/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp index 40326aadc3c..670f962f7c3 100644 --- a/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp +++ b/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp @@ -19,7 +19,7 @@ #else #include #endif -#include +#include "Fields.H" #include #include #include @@ -30,9 +30,10 @@ using namespace amrex; namespace warpx::initialization { -ProjectionDivCleaner::ProjectionDivCleaner(warpx::fields::FieldType a_field_type) : - m_field_type(a_field_type) +ProjectionDivCleaner::ProjectionDivCleaner(std::string const& a_field_name) : + m_field_name(a_field_name) { + using ablastr::fields::Direction; ReadParameters(); auto& warpx = WarpX::GetInstance(); @@ -48,7 +49,7 @@ ProjectionDivCleaner::ProjectionDivCleaner(warpx::fields::FieldType a_field_type m_source.resize(m_levels); const int ncomps = WarpX::ncomps; - auto const& ng = warpx.getFieldPointer(m_field_type, 0, 0)->nGrowVect(); + auto const& ng = warpx.m_fields.get(m_field_name, Direction{0}, 0)->nGrowVect(); for (int lev = 0; lev < m_levels; ++lev) { @@ -201,6 +202,8 @@ ProjectionDivCleaner::solve () void ProjectionDivCleaner::setSourceFromBfield () { + using ablastr::fields::Direction; + // Get WarpX object auto & warpx = WarpX::GetInstance(); const auto& geom = warpx.Geom(); @@ -211,7 +214,9 @@ ProjectionDivCleaner::setSourceFromBfield () WarpX::ComputeDivB( *m_source[ilev], 0, - warpx.getFieldPointerArray(m_field_type, ilev), + {warpx.m_fields.get(m_field_name, Direction{0}, ilev), + warpx.m_fields.get(m_field_name, Direction{1}, ilev), + warpx.m_fields.get(m_field_name, Direction{2}, ilev)}, WarpX::CellSize(0) ); @@ -228,6 +233,8 @@ ProjectionDivCleaner::setSourceFromBfield () void ProjectionDivCleaner::correctBfield () { + using ablastr::fields::Direction; + // Get WarpX object auto & warpx = WarpX::GetInstance(); const auto& geom = warpx.Geom(); @@ -236,9 +243,9 @@ ProjectionDivCleaner::correctBfield () for (int ilev = 0; ilev < m_levels; ++ilev) { // Grab B-field multifabs at this level - amrex::MultiFab* Bx = warpx.getFieldPointer(m_field_type, ilev, 0); - amrex::MultiFab* By = warpx.getFieldPointer(m_field_type, ilev, 1); - amrex::MultiFab* Bz = warpx.getFieldPointer(m_field_type, ilev, 2); + amrex::MultiFab* Bx = warpx.m_fields.get(m_field_name, Direction{0}, ilev); + amrex::MultiFab* By = warpx.m_fields.get(m_field_name, Direction{1}, ilev); + amrex::MultiFab* Bz = warpx.m_fields.get(m_field_name, Direction{2}, ilev); #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) @@ -337,8 +344,8 @@ WarpX::ProjectionCleanDivB() { ablastr::warn_manager::WarnPriority::low); } - warpx::initialization::ProjectionDivCleaner dc( - warpx::fields::FieldType::Bfield_fp_external); + warpx::initialization::ProjectionDivCleaner dc("Bfield_fp_external"); + dc.setSourceFromBfield(); dc.solve(); diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 0cf9496e63e..70bf20d0905 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -17,7 +17,7 @@ #include "Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include 
"EmbeddedBoundary/Enabled.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" @@ -36,6 +36,7 @@ #include "Utils/WarpXUtil.H" #include "Python/callbacks.H" +#include #include #include #include @@ -93,8 +94,15 @@ namespace * \brief Check that the number of guard cells is smaller than the number of valid cells, * for a given MultiFab, and abort otherwise. */ - void CheckGuardCells(amrex::MultiFab const& mf) + void CheckGuardCells ( + ablastr::fields::MultiFabRegister& fields, + const std::string& mf_name, + int lev + ) { + if (!fields.has(mf_name, lev)) { return; } + auto & mf = *fields.get(mf_name, lev); + for (amrex::MFIter mfi(mf); mfi.isValid(); ++mfi) { const amrex::IntVect vc = mfi.validbox().enclosedCells().size(); @@ -495,6 +503,10 @@ void WarpX::InitData () { WARPX_PROFILE("WarpX::InitData()"); + + using ablastr::fields::Direction; + using warpx::fields::FieldType; + ablastr::parallelization::check_mpi_thread_level(); #ifdef WARPX_QED @@ -546,9 +558,9 @@ WarpX::InitData () const int lev_zero = 0; m_macroscopic_properties->InitData( Geom(lev_zero), - getField(warpx::fields::FieldType::Efield_fp, lev_zero,0).ixType().toIntVect(), - getField(warpx::fields::FieldType::Efield_fp, lev_zero,1).ixType().toIntVect(), - getField(warpx::fields::FieldType::Efield_fp, lev_zero,2).ixType().toIntVect() + m_fields.get(FieldType::Efield_fp, Direction{0}, lev_zero)->ixType().toIntVect(), + m_fields.get(FieldType::Efield_fp, Direction{1}, lev_zero)->ixType().toIntVect(), + m_fields.get(FieldType::Efield_fp, Direction{2}, lev_zero)->ixType().toIntVect() ); } @@ -620,30 +632,36 @@ WarpX::InitData () } void -WarpX::AddExternalFields (int const lev) { +WarpX::AddExternalFields (int const lev) +{ + using ablastr::fields::Direction; + using warpx::fields::FieldType; + // FIXME: RZ multimode has more than one component for all these if (m_p_ext_field_params->E_ext_grid_type != ExternalFieldType::default_zero) { + ablastr::fields::MultiLevelVectorField Efield_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, max_level); if (m_p_ext_field_params->E_ext_grid_type == ExternalFieldType::constant) { Efield_fp[lev][0]->plus(m_p_ext_field_params->E_external_grid[0], guard_cells.ng_alloc_EB.min()); Efield_fp[lev][1]->plus(m_p_ext_field_params->E_external_grid[1], guard_cells.ng_alloc_EB.min()); Efield_fp[lev][2]->plus(m_p_ext_field_params->E_external_grid[2], guard_cells.ng_alloc_EB.min()); } else { - amrex::MultiFab::Add(*Efield_fp[lev][0], *Efield_fp_external[lev][0], 0, 0, 1, guard_cells.ng_alloc_EB); - amrex::MultiFab::Add(*Efield_fp[lev][1], *Efield_fp_external[lev][1], 0, 0, 1, guard_cells.ng_alloc_EB); - amrex::MultiFab::Add(*Efield_fp[lev][2], *Efield_fp_external[lev][2], 0, 0, 1, guard_cells.ng_alloc_EB); + amrex::MultiFab::Add(*Efield_fp[lev][0], *m_fields.get(FieldType::Efield_fp_external, Direction{0}, lev), 0, 0, 1, guard_cells.ng_alloc_EB); + amrex::MultiFab::Add(*Efield_fp[lev][1], *m_fields.get(FieldType::Efield_fp_external, Direction{1}, lev), 0, 0, 1, guard_cells.ng_alloc_EB); + amrex::MultiFab::Add(*Efield_fp[lev][2], *m_fields.get(FieldType::Efield_fp_external, Direction{2}, lev), 0, 0, 1, guard_cells.ng_alloc_EB); } } if (m_p_ext_field_params->B_ext_grid_type != ExternalFieldType::default_zero) { + ablastr::fields::MultiLevelVectorField const& Bfield_fp 
= m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, max_level); if (m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::constant) { Bfield_fp[lev][0]->plus(m_p_ext_field_params->B_external_grid[0], guard_cells.ng_alloc_EB.min()); Bfield_fp[lev][1]->plus(m_p_ext_field_params->B_external_grid[1], guard_cells.ng_alloc_EB.min()); Bfield_fp[lev][2]->plus(m_p_ext_field_params->B_external_grid[2], guard_cells.ng_alloc_EB.min()); } else { - amrex::MultiFab::Add(*Bfield_fp[lev][0], *Bfield_fp_external[lev][0], 0, 0, 1, guard_cells.ng_alloc_EB); - amrex::MultiFab::Add(*Bfield_fp[lev][1], *Bfield_fp_external[lev][1], 0, 0, 1, guard_cells.ng_alloc_EB); - amrex::MultiFab::Add(*Bfield_fp[lev][2], *Bfield_fp_external[lev][2], 0, 0, 1, guard_cells.ng_alloc_EB); + amrex::MultiFab::Add(*Bfield_fp[lev][0], *m_fields.get(FieldType::Bfield_fp_external, Direction{0}, lev), 0, 0, 1, guard_cells.ng_alloc_EB); + amrex::MultiFab::Add(*Bfield_fp[lev][1], *m_fields.get(FieldType::Bfield_fp_external, Direction{1}, lev), 0, 0, 1, guard_cells.ng_alloc_EB); + amrex::MultiFab::Add(*Bfield_fp[lev][2], *m_fields.get(FieldType::Bfield_fp_external, Direction{2}, lev), 0, 0, 1, guard_cells.ng_alloc_EB); } } } @@ -703,7 +721,7 @@ WarpX::InitPML () do_pml_Hi[0][idim] = 1; // on level 0 } } - if (finest_level > 0) { do_pml = 1; } + if (max_level > 0) { do_pml = 1; } if (do_pml) { bool const eb_enabled = EB::enabled(); @@ -728,7 +746,7 @@ WarpX::InitPML () do_pml_Lo[0], do_pml_Hi[0]); #endif - for (int lev = 1; lev <= finest_level; ++lev) + for (int lev = 1; lev <= max_level; ++lev) { do_pml_Lo[lev] = amrex::IntVect::TheUnitVector(); do_pml_Hi[lev] = amrex::IntVect::TheUnitVector(); @@ -775,7 +793,7 @@ WarpX::ComputePMLFactors () { if (do_pml) { - for (int lev = 0; lev <= finest_level; ++lev) + for (int lev = 0; lev <= max_level; ++lev) { if (pml[lev]) { pml[lev]->ComputePMLFactors(dt[lev]); @@ -892,6 +910,9 @@ WarpX::PostRestart () void WarpX::InitLevelData (int lev, Real /*time*/) { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + // initialize the averaged fields only if the averaged algorithm // is activated ('psatd.do_time_averaging=1') const ParmParse pp_psatd("psatd"); @@ -907,14 +928,14 @@ WarpX::InitLevelData (int lev, Real /*time*/) if ( is_B_ext_const && (lev <= maxlevel_extEMfield_init) ) { if (fft_do_time_averaging) { - Bfield_avg_fp[lev][i]->setVal(m_p_ext_field_params->B_external_grid[i]); + m_fields.get(FieldType::Bfield_avg_fp, Direction{i}, lev)->setVal(m_p_ext_field_params->B_external_grid[i]); } if (lev > 0) { - Bfield_aux[lev][i]->setVal(m_p_ext_field_params->B_external_grid[i]); - Bfield_cp[lev][i]->setVal(m_p_ext_field_params->B_external_grid[i]); + m_fields.get(FieldType::Bfield_aux, Direction{i}, lev)->setVal(m_p_ext_field_params->B_external_grid[i]); + m_fields.get(FieldType::Bfield_cp, Direction{i}, lev)->setVal(m_p_ext_field_params->B_external_grid[i]); if (fft_do_time_averaging) { - Bfield_avg_cp[lev][i]->setVal(m_p_ext_field_params->B_external_grid[i]); + m_fields.get(FieldType::Bfield_avg_cp, Direction{i}, lev)->setVal(m_p_ext_field_params->B_external_grid[i]); } } } @@ -927,14 +948,13 @@ WarpX::InitLevelData (int lev, Real /*time*/) if ( is_E_ext_const && (lev <= maxlevel_extEMfield_init) ) { if (fft_do_time_averaging) { - Efield_avg_fp[lev][i]->setVal(m_p_ext_field_params->E_external_grid[i]); + m_fields.get(FieldType::Efield_avg_fp, Direction{i}, lev)->setVal(m_p_ext_field_params->E_external_grid[i]); } - if (lev > 0) { - 
Efield_aux[lev][i]->setVal(m_p_ext_field_params->E_external_grid[i]); - Efield_cp[lev][i]->setVal(m_p_ext_field_params->E_external_grid[i]); + m_fields.get(FieldType::Efield_aux, Direction{i}, lev)->setVal(m_p_ext_field_params->E_external_grid[i]); + m_fields.get(FieldType::Efield_cp, Direction{i}, lev)->setVal(m_p_ext_field_params->E_external_grid[i]); if (fft_do_time_averaging) { - Efield_avg_cp[lev][i]->setVal(m_p_ext_field_params->E_external_grid[i]); + m_fields.get(FieldType::Efield_avg_cp, Direction{i}, lev)->setVal(m_p_ext_field_params->E_external_grid[i]); } } } @@ -954,26 +974,26 @@ WarpX::InitLevelData (int lev, Real /*time*/) && (lev > 0) && (lev <= maxlevel_extEMfield_init)) { InitializeExternalFieldsOnGridUsingParser( - Bfield_aux[lev][0].get(), - Bfield_aux[lev][1].get(), - Bfield_aux[lev][2].get(), + m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), + m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), + m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), m_p_ext_field_params->Bxfield_parser->compile<3>(), m_p_ext_field_params->Byfield_parser->compile<3>(), m_p_ext_field_params->Bzfield_parser->compile<3>(), - m_edge_lengths[lev], - m_face_areas[lev], + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), 'B', lev, PatchType::fine); InitializeExternalFieldsOnGridUsingParser( - Bfield_cp[lev][0].get(), - Bfield_cp[lev][1].get(), - Bfield_cp[lev][2].get(), + m_fields.get(FieldType::Bfield_cp, Direction{0}, lev), + m_fields.get(FieldType::Bfield_cp, Direction{1}, lev), + m_fields.get(FieldType::Bfield_cp, Direction{2}, lev), m_p_ext_field_params->Bxfield_parser->compile<3>(), m_p_ext_field_params->Byfield_parser->compile<3>(), m_p_ext_field_params->Bzfield_parser->compile<3>(), - m_edge_lengths[lev], - m_face_areas[lev], + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_mr_levels_alldirs(FieldType::face_areas, max_level)[lev], 'B', lev, PatchType::coarse); } @@ -991,43 +1011,49 @@ WarpX::InitLevelData (int lev, Real /*time*/) // We initialize ECTRhofield consistently with the Efield if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { m_fdtd_solver_fp[lev]->EvolveECTRho( - Efield_fp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], lev); + m_fields.get_alldirs(FieldType::Efield_fp, lev), + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_mr_levels_alldirs(FieldType::face_areas, max_level)[lev], + m_fields.get_alldirs(FieldType::ECTRhofield, lev), + lev); } } #endif if (lev > 0) { InitializeExternalFieldsOnGridUsingParser( - Efield_aux[lev][0].get(), - Efield_aux[lev][1].get(), - Efield_aux[lev][2].get(), + m_fields.get(FieldType::Efield_aux, Direction{0}, lev), + m_fields.get(FieldType::Efield_aux, Direction{1}, lev), + m_fields.get(FieldType::Efield_aux, Direction{2}, lev), m_p_ext_field_params->Exfield_parser->compile<3>(), m_p_ext_field_params->Eyfield_parser->compile<3>(), m_p_ext_field_params->Ezfield_parser->compile<3>(), - m_edge_lengths[lev], - m_face_areas[lev], + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), 'E', lev, PatchType::fine); InitializeExternalFieldsOnGridUsingParser( - Efield_cp[lev][0].get(), - Efield_cp[lev][1].get(), - Efield_cp[lev][2].get(), + m_fields.get(FieldType::Efield_cp, Direction{0}, lev), + m_fields.get(FieldType::Efield_cp, Direction{1}, lev), + m_fields.get(FieldType::Efield_cp, Direction{2}, lev), 
m_p_ext_field_params->Exfield_parser->compile<3>(), m_p_ext_field_params->Eyfield_parser->compile<3>(), m_p_ext_field_params->Ezfield_parser->compile<3>(), - m_edge_lengths[lev], - m_face_areas[lev], + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), 'E', lev, PatchType::coarse); #ifdef AMREX_USE_EB if (eb_enabled) { if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { // We initialize ECTRhofield consistently with the Efield - m_fdtd_solver_cp[lev]->EvolveECTRho(Efield_cp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], lev); - + m_fdtd_solver_cp[lev]->EvolveECTRho( + m_fields.get_alldirs(FieldType::Efield_cp, lev), + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_mr_levels_alldirs(FieldType::face_areas, max_level)[lev], + m_fields.get_alldirs(FieldType::ECTRhofield, lev), + lev); } } #endif @@ -1051,8 +1077,8 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( MultiFab *mfx, MultiFab *mfy, MultiFab *mfz, ParserExecutor<3> const& xfield_parser, ParserExecutor<3> const& yfield_parser, ParserExecutor<3> const& zfield_parser, - std::array< std::unique_ptr, 3 > const& edge_lengths, - std::array< std::unique_ptr, 3 > const& face_areas, + ablastr::fields::VectorField const& edge_lengths, + ablastr::fields::VectorField const& face_areas, [[maybe_unused]] const char field, const int lev, PatchType patch_type) { @@ -1209,66 +1235,44 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( void WarpX::CheckGuardCells() { - for (int lev = 0; lev <= finest_level; ++lev) + for (int lev = 0; lev <= max_level; ++lev) { for (int dim = 0; dim < 3; ++dim) { - ::CheckGuardCells(*Efield_fp[lev][dim]); - ::CheckGuardCells(*Bfield_fp[lev][dim]); - ::CheckGuardCells(*current_fp[lev][dim]); + ::CheckGuardCells(m_fields, "Efield_fp[" + std::to_string(dim) + "]", lev); + ::CheckGuardCells(m_fields, "Bfield_fp[" + std::to_string(dim) + "]", lev); + ::CheckGuardCells(m_fields, "current_fp[" + std::to_string(dim) + "]", lev); if (WarpX::fft_do_time_averaging) { - ::CheckGuardCells(*Efield_avg_fp[lev][dim]); - ::CheckGuardCells(*Bfield_avg_fp[lev][dim]); + ::CheckGuardCells(m_fields, "Efield_avg_fp[" + std::to_string(dim) + "]", lev); + ::CheckGuardCells(m_fields, "Bfield_avg_fp[" + std::to_string(dim) + "]", lev); } } - if (rho_fp[lev]) - { - ::CheckGuardCells(*rho_fp[lev]); - } - - if (F_fp[lev]) - { - ::CheckGuardCells(*F_fp[lev]); - } - - if (G_fp[lev]) - { - ::CheckGuardCells(*G_fp[lev]); - } + ::CheckGuardCells(m_fields, "rho_fp", lev); + ::CheckGuardCells(m_fields, "F_fp", lev); + ::CheckGuardCells(m_fields, "G_fp", lev); // MultiFabs on coarse patch if (lev > 0) { for (int dim = 0; dim < 3; ++dim) { - ::CheckGuardCells(*Efield_cp[lev][dim]); - ::CheckGuardCells(*Bfield_cp[lev][dim]); - ::CheckGuardCells(*current_cp[lev][dim]); + ::CheckGuardCells(m_fields, "Efield_cp[" + std::to_string(dim) + "]", lev); + ::CheckGuardCells(m_fields, "Bfield_cp[" + std::to_string(dim) + "]", lev); + ::CheckGuardCells(m_fields, "current_cp[" + std::to_string(dim) + "]", lev); if (WarpX::fft_do_time_averaging) { - ::CheckGuardCells(*Efield_avg_cp[lev][dim]); - ::CheckGuardCells(*Bfield_avg_cp[lev][dim]); + ::CheckGuardCells(m_fields, "Efield_avg_cp[" + std::to_string(dim) + "]", lev); + ::CheckGuardCells(m_fields, "Bfield_avg_cp[" + std::to_string(dim) + "]", lev); } } - if (rho_cp[lev]) - { - ::CheckGuardCells(*rho_cp[lev]); - } - - if (F_cp[lev]) - { - ::CheckGuardCells(*F_cp[lev]); - } - - if (G_cp[lev]) - { - 
::CheckGuardCells(*G_cp[lev]); - } + ::CheckGuardCells(m_fields, "rho_cp", lev); + ::CheckGuardCells(m_fields, "F_cp", lev); + ::CheckGuardCells(m_fields, "G_cp", lev); } } } @@ -1286,14 +1290,19 @@ void WarpX::InitializeEBGridData (int lev) "particles are close to embedded boundaries"); } - if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD ) { + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD ) + { + using warpx::fields::FieldType; auto const eb_fact = fieldEBFactory(lev); - ComputeEdgeLengths(m_edge_lengths[lev], eb_fact); - ScaleEdges(m_edge_lengths[lev], CellSize(lev)); - ComputeFaceAreas(m_face_areas[lev], eb_fact); - ScaleAreas(m_face_areas[lev], CellSize(lev)); + auto edge_lengths_lev = m_fields.get_alldirs(FieldType::edge_lengths, lev); + ComputeEdgeLengths(edge_lengths_lev, eb_fact); + ScaleEdges(edge_lengths_lev, CellSize(lev)); + + auto face_areas_lev = m_fields.get_alldirs(FieldType::face_areas, lev); + ComputeFaceAreas(face_areas_lev, eb_fact); + ScaleAreas(face_areas_lev, CellSize(lev)); if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { MarkCells(); @@ -1358,6 +1367,9 @@ void WarpX::CheckKnownIssues() void WarpX::LoadExternalFields (int const lev) { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + // External fields from file are currently not compatible with the moving window // In order to support the moving window, the MultiFab containing the external // fields should be updated every time the window moves. @@ -1375,14 +1387,14 @@ WarpX::LoadExternalFields (int const lev) if (m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::parse_ext_grid_function) { // Initialize Bfield_fp_external with external function InitializeExternalFieldsOnGridUsingParser( - Bfield_fp_external[lev][0].get(), - Bfield_fp_external[lev][1].get(), - Bfield_fp_external[lev][2].get(), + m_fields.get(FieldType::Bfield_fp_external, Direction{0}, lev), + m_fields.get(FieldType::Bfield_fp_external, Direction{1}, lev), + m_fields.get(FieldType::Bfield_fp_external, Direction{2}, lev), m_p_ext_field_params->Bxfield_parser->compile<3>(), m_p_ext_field_params->Byfield_parser->compile<3>(), m_p_ext_field_params->Bzfield_parser->compile<3>(), - m_edge_lengths[lev], - m_face_areas[lev], + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), 'B', lev, PatchType::fine); } @@ -1390,27 +1402,27 @@ WarpX::LoadExternalFields (int const lev) #if defined(WARPX_DIM_RZ) WARPX_ALWAYS_ASSERT_WITH_MESSAGE(n_rz_azimuthal_modes == 1, "External field reading is not implemented for more than one RZ mode (see #3829)"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Bfield_fp_external[lev][0].get(), "B", "r"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Bfield_fp_external[lev][1].get(), "B", "t"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Bfield_fp_external[lev][2].get(), "B", "z"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Bfield_fp_external,Direction{0},lev), "B", "r"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Bfield_fp_external,Direction{1},lev), "B", "t"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Bfield_fp_external,Direction{2},lev), "B", "z"); #else - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, 
Bfield_fp_external[lev][0].get(), "B", "x"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Bfield_fp_external[lev][1].get(), "B", "y"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Bfield_fp_external[lev][2].get(), "B", "z"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Bfield_fp_external, Direction{0}, lev), "B", "x"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Bfield_fp_external, Direction{1}, lev), "B", "y"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Bfield_fp_external, Direction{2}, lev), "B", "z"); #endif } if (m_p_ext_field_params->E_ext_grid_type == ExternalFieldType::parse_ext_grid_function) { // Initialize Efield_fp_external with external function InitializeExternalFieldsOnGridUsingParser( - Efield_fp_external[lev][0].get(), - Efield_fp_external[lev][1].get(), - Efield_fp_external[lev][2].get(), + m_fields.get(FieldType::Efield_fp_external, Direction{0}, lev), + m_fields.get(FieldType::Efield_fp_external, Direction{1}, lev), + m_fields.get(FieldType::Efield_fp_external, Direction{2}, lev), m_p_ext_field_params->Exfield_parser->compile<3>(), m_p_ext_field_params->Eyfield_parser->compile<3>(), m_p_ext_field_params->Ezfield_parser->compile<3>(), - m_edge_lengths[lev], - m_face_areas[lev], + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), 'E', lev, PatchType::fine); } @@ -1418,13 +1430,13 @@ WarpX::LoadExternalFields (int const lev) #if defined(WARPX_DIM_RZ) WARPX_ALWAYS_ASSERT_WITH_MESSAGE(n_rz_azimuthal_modes == 1, "External field reading is not implemented for more than one RZ mode (see #3829)"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Efield_fp_external[lev][0].get(), "E", "r"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Efield_fp_external[lev][1].get(), "E", "t"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Efield_fp_external[lev][2].get(), "E", "z"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Efield_fp_external,Direction{0},lev), "E", "r"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Efield_fp_external,Direction{1},lev), "E", "t"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Efield_fp_external,Direction{2},lev), "E", "z"); #else - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Efield_fp_external[lev][0].get(), "E", "x"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Efield_fp_external[lev][1].get(), "E", "y"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Efield_fp_external[lev][2].get(), "E", "z"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Efield_fp_external, Direction{0}, lev), "E", "x"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Efield_fp_external, Direction{1}, lev), "E", "y"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Efield_fp_external, Direction{2}, lev), "E", "z"); #endif } @@ -1441,13 +1453,25 @@ WarpX::LoadExternalFields (int const lev) #if defined(WARPX_DIM_RZ) WARPX_ALWAYS_ASSERT_WITH_MESSAGE(n_rz_azimuthal_modes == 1, "External 
field reading is not implemented for more than one RZ mode (see #3829)"); - ReadExternalFieldFromFile(external_fields_path, B_external_particle_field[lev][0].get(), "B", "r"); - ReadExternalFieldFromFile(external_fields_path, B_external_particle_field[lev][1].get(), "B", "t"); - ReadExternalFieldFromFile(external_fields_path, B_external_particle_field[lev][2].get(), "B", "z"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::B_external_particle_field, Direction{0}, lev), + "B", "r"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::B_external_particle_field, Direction{1}, lev), + "B", "t"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::B_external_particle_field, Direction{2}, lev), + "B", "z"); #else - ReadExternalFieldFromFile(external_fields_path, B_external_particle_field[lev][0].get(), "B", "x"); - ReadExternalFieldFromFile(external_fields_path, B_external_particle_field[lev][1].get(), "B", "y"); - ReadExternalFieldFromFile(external_fields_path, B_external_particle_field[lev][2].get(), "B", "z"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::B_external_particle_field, Direction{0}, lev), + "B", "x"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::B_external_particle_field, Direction{1}, lev), + "B", "y"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::B_external_particle_field, Direction{2}, lev), + "B", "z"); #endif } if (mypc->m_E_ext_particle_s == "read_from_file") { @@ -1457,13 +1481,25 @@ WarpX::LoadExternalFields (int const lev) #if defined(WARPX_DIM_RZ) WARPX_ALWAYS_ASSERT_WITH_MESSAGE(n_rz_azimuthal_modes == 1, "External field reading is not implemented for more than one RZ mode (see #3829)"); - ReadExternalFieldFromFile(external_fields_path, E_external_particle_field[lev][0].get(), "E", "r"); - ReadExternalFieldFromFile(external_fields_path, E_external_particle_field[lev][1].get(), "E", "t"); - ReadExternalFieldFromFile(external_fields_path, E_external_particle_field[lev][2].get(), "E", "z"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::E_external_particle_field, Direction{0}, lev), + "E", "r"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::E_external_particle_field, Direction{1}, lev), + "E", "t"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::E_external_particle_field, Direction{2}, lev), + "E", "z"); #else - ReadExternalFieldFromFile(external_fields_path, E_external_particle_field[lev][0].get(), "E", "x"); - ReadExternalFieldFromFile(external_fields_path, E_external_particle_field[lev][1].get(), "E", "y"); - ReadExternalFieldFromFile(external_fields_path, E_external_particle_field[lev][2].get(), "E", "z"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::E_external_particle_field, Direction{0}, lev), + "E", "x"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::E_external_particle_field, Direction{1}, lev), + "E", "y"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::E_external_particle_field, Direction{2}, lev), + "E", "z"); #endif } } diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index 6c44df061fd..ac797d1e706 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ b/Source/Parallelization/WarpXComm.cpp @@ -12,6 +12,7 @@ #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) # include 
"BoundaryConditions/PML_RZ.H" #endif +#include "Fields.H" #include "Filter/BilinearFilter.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" @@ -20,6 +21,7 @@ #include "WarpXSumGuardCells.H" #include "Particles/MultiParticleContainer.H" +#include #include #include @@ -49,13 +51,20 @@ #include using namespace amrex; +using warpx::fields::FieldType; void WarpX::UpdateAuxilaryData () { WARPX_PROFILE("WarpX::UpdateAuxilaryData()"); - if (Bfield_aux[0][0]->ixType() == Bfield_fp[0][0]->ixType()) { + using ablastr::fields::Direction; + + amrex::MultiFab *Bfield_aux_lvl0_0 = m_fields.get(FieldType::Bfield_aux, Direction{0}, 0); + + ablastr::fields::MultiLevelVectorField const& Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level); + + if (Bfield_aux_lvl0_0->ixType() == Bfield_fp[0][0]->ixType()) { UpdateAuxilaryDataSameType(); } else { UpdateAuxilaryDataStagToNodal(); @@ -64,14 +73,18 @@ WarpX::UpdateAuxilaryData () // When loading particle fields from file: add the external fields: for (int lev = 0; lev <= finest_level; ++lev) { if (mypc->m_E_ext_particle_s == "read_from_file") { - amrex::MultiFab::Add(*Efield_aux[lev][0], *E_external_particle_field[lev][0], 0, 0, E_external_particle_field[lev][0]->nComp(), guard_cells.ng_FieldGather); - amrex::MultiFab::Add(*Efield_aux[lev][1], *E_external_particle_field[lev][1], 0, 0, E_external_particle_field[lev][1]->nComp(), guard_cells.ng_FieldGather); - amrex::MultiFab::Add(*Efield_aux[lev][2], *E_external_particle_field[lev][2], 0, 0, E_external_particle_field[lev][2]->nComp(), guard_cells.ng_FieldGather); + ablastr::fields::VectorField Efield_aux = m_fields.get_alldirs(FieldType::Efield_aux, lev); + const auto& E_ext_lev = m_fields.get_alldirs(FieldType::E_external_particle_field, lev); + amrex::MultiFab::Add(*Efield_aux[0], *E_ext_lev[0], 0, 0, E_ext_lev[0]->nComp(), guard_cells.ng_FieldGather); + amrex::MultiFab::Add(*Efield_aux[1], *E_ext_lev[1], 0, 0, E_ext_lev[1]->nComp(), guard_cells.ng_FieldGather); + amrex::MultiFab::Add(*Efield_aux[2], *E_ext_lev[2], 0, 0, E_ext_lev[2]->nComp(), guard_cells.ng_FieldGather); } if (mypc->m_B_ext_particle_s == "read_from_file") { - amrex::MultiFab::Add(*Bfield_aux[lev][0], *B_external_particle_field[lev][0], 0, 0, B_external_particle_field[lev][0]->nComp(), guard_cells.ng_FieldGather); - amrex::MultiFab::Add(*Bfield_aux[lev][1], *B_external_particle_field[lev][1], 0, 0, B_external_particle_field[lev][0]->nComp(), guard_cells.ng_FieldGather); - amrex::MultiFab::Add(*Bfield_aux[lev][2], *B_external_particle_field[lev][2], 0, 0, B_external_particle_field[lev][0]->nComp(), guard_cells.ng_FieldGather); + ablastr::fields::VectorField Bfield_aux = m_fields.get_alldirs(FieldType::Bfield_aux, lev); + const auto& B_ext_lev = m_fields.get_alldirs(FieldType::B_external_particle_field, lev); + amrex::MultiFab::Add(*Bfield_aux[0], *B_ext_lev[0], 0, 0, B_ext_lev[0]->nComp(), guard_cells.ng_FieldGather); + amrex::MultiFab::Add(*Bfield_aux[1], *B_ext_lev[1], 0, 0, B_ext_lev[1]->nComp(), guard_cells.ng_FieldGather); + amrex::MultiFab::Add(*Bfield_aux[2], *B_ext_lev[2], 0, 0, B_ext_lev[2]->nComp(), guard_cells.ng_FieldGather); } } @@ -87,11 +100,21 @@ WarpX::UpdateAuxilaryDataStagToNodal () "WarpX build with spectral solver support."); } #endif - - amrex::Vector,3>> const & Bmf = WarpX::fft_do_time_averaging ? - Bfield_avg_fp : Bfield_fp; - amrex::Vector,3>> const & Emf = WarpX::fft_do_time_averaging ? 
-                                                                                Efield_avg_fp : Efield_fp;
+    using ablastr::fields::Direction;
+
+    ablastr::fields::MultiLevelVectorField const& Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level);
+    ablastr::fields::MultiLevelVectorField const& Efield_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level);
+    ablastr::fields::MultiLevelVectorField const& Efield_aux = m_fields.get_mr_levels_alldirs(FieldType::Efield_aux, finest_level);
+    ablastr::fields::MultiLevelVectorField const& Bfield_aux = m_fields.get_mr_levels_alldirs(FieldType::Bfield_aux, finest_level);
+
+    ablastr::fields::MultiLevelVectorField const & Bmf =
+        WarpX::fft_do_time_averaging ?
+        m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level) :
+        Bfield_fp;
+    ablastr::fields::MultiLevelVectorField const & Emf =
+        WarpX::fft_do_time_averaging ?
+        m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level) :
+        Efield_fp;

     const amrex::IntVect& Bx_stag = Bmf[0][0]->ixType().toIntVect();
     const amrex::IntVect& By_stag = Bmf[0][1]->ixType().toIntVect();
@@ -173,10 +196,10 @@
         {
             if (electromagnetic_solver_id != ElectromagneticSolverAlgo::None) {
                 Array<std::unique_ptr<MultiFab>,3> Btmp;
-                if (Bfield_cax[lev][0]) {
+                if (m_fields.has(FieldType::Bfield_cax, Direction{0}, lev)) {
                     for (int i = 0; i < 3; ++i) {
                         Btmp[i] = std::make_unique<MultiFab>(
-                            *Bfield_cax[lev][i], amrex::make_alias, 0, 1);
+                            *m_fields.get(FieldType::Bfield_cax, Direction{i}, lev), amrex::make_alias, 0, 1);
                     }
                 } else {
                     const IntVect ngtmp = Bfield_aux[lev-1][0]->nGrowVect();
@@ -200,13 +223,13 @@

             const amrex::IntVect& refinement_ratio = refRatio(lev-1);

-            const amrex::IntVect& Bx_fp_stag = Bfield_fp[lev][0]->ixType().toIntVect();
-            const amrex::IntVect& By_fp_stag = Bfield_fp[lev][1]->ixType().toIntVect();
-            const amrex::IntVect& Bz_fp_stag = Bfield_fp[lev][2]->ixType().toIntVect();
+            const amrex::IntVect& Bx_fp_stag = m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->ixType().toIntVect();
+            const amrex::IntVect& By_fp_stag = m_fields.get(FieldType::Bfield_fp, Direction{1}, lev)->ixType().toIntVect();
+            const amrex::IntVect& Bz_fp_stag = m_fields.get(FieldType::Bfield_fp, Direction{2}, lev)->ixType().toIntVect();

-            const amrex::IntVect& Bx_cp_stag = Bfield_cp[lev][0]->ixType().toIntVect();
-            const amrex::IntVect& By_cp_stag = Bfield_cp[lev][1]->ixType().toIntVect();
-            const amrex::IntVect& Bz_cp_stag = Bfield_cp[lev][2]->ixType().toIntVect();
+            const amrex::IntVect& Bx_cp_stag = m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->ixType().toIntVect();
+            const amrex::IntVect& By_cp_stag = m_fields.get(FieldType::Bfield_cp, Direction{1}, lev)->ixType().toIntVect();
+            const amrex::IntVect& Bz_cp_stag = m_fields.get(FieldType::Bfield_cp, Direction{2}, lev)->ixType().toIntVect();

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
@@ -216,12 +239,12 @@
                 Array4<Real> const& bx_aux = Bfield_aux[lev][0]->array(mfi);
                 Array4<Real> const& by_aux = Bfield_aux[lev][1]->array(mfi);
                 Array4<Real> const& bz_aux = Bfield_aux[lev][2]->array(mfi);
-                Array4<Real const> const& bx_fp = Bfield_fp[lev][0]->const_array(mfi);
-                Array4<Real const> const& by_fp = Bfield_fp[lev][1]->const_array(mfi);
-                Array4<Real const> const& bz_fp = Bfield_fp[lev][2]->const_array(mfi);
-                Array4<Real const> const& bx_cp = Bfield_cp[lev][0]->const_array(mfi);
-                Array4<Real const> const& by_cp = Bfield_cp[lev][1]->const_array(mfi);
-                Array4<Real const> const& bz_cp = Bfield_cp[lev][2]->const_array(mfi);
+                Array4<Real const> const& bx_fp = 
m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->const_array(mfi); + Array4 const& by_fp = m_fields.get(FieldType::Bfield_fp, Direction{1}, lev)->const_array(mfi); + Array4 const& bz_fp = m_fields.get(FieldType::Bfield_fp, Direction{2}, lev)->const_array(mfi); + Array4 const& bx_cp = m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->const_array(mfi); + Array4 const& by_cp = m_fields.get(FieldType::Bfield_cp, Direction{1}, lev)->const_array(mfi); + Array4 const& bz_cp = m_fields.get(FieldType::Bfield_cp, Direction{2}, lev)->const_array(mfi); Array4 const& bx_c = Btmp[0]->const_array(mfi); Array4 const& by_c = Btmp[1]->const_array(mfi); Array4 const& bz_c = Btmp[2]->const_array(mfi); @@ -267,10 +290,10 @@ WarpX::UpdateAuxilaryDataStagToNodal () { if (electromagnetic_solver_id != ElectromagneticSolverAlgo::None) { Array,3> Etmp; - if (Efield_cax[lev][0]) { + if (m_fields.has(FieldType::Efield_cax, Direction{0}, lev)) { for (int i = 0; i < 3; ++i) { Etmp[i] = std::make_unique( - *Efield_cax[lev][i], amrex::make_alias, 0, 1); + *m_fields.get(FieldType::Efield_cax, Direction{i}, lev), amrex::make_alias, 0, 1); } } else { const IntVect ngtmp = Efield_aux[lev-1][0]->nGrowVect(); @@ -295,13 +318,13 @@ WarpX::UpdateAuxilaryDataStagToNodal () const amrex::IntVect& refinement_ratio = refRatio(lev-1); - const amrex::IntVect& Ex_fp_stag = Efield_fp[lev][0]->ixType().toIntVect(); - const amrex::IntVect& Ey_fp_stag = Efield_fp[lev][1]->ixType().toIntVect(); - const amrex::IntVect& Ez_fp_stag = Efield_fp[lev][2]->ixType().toIntVect(); + const amrex::IntVect& Ex_fp_stag = m_fields.get(FieldType::Efield_fp, Direction{0}, lev)->ixType().toIntVect(); + const amrex::IntVect& Ey_fp_stag = m_fields.get(FieldType::Efield_fp, Direction{1}, lev)->ixType().toIntVect(); + const amrex::IntVect& Ez_fp_stag = m_fields.get(FieldType::Efield_fp, Direction{2}, lev)->ixType().toIntVect(); - const amrex::IntVect& Ex_cp_stag = Efield_cp[lev][0]->ixType().toIntVect(); - const amrex::IntVect& Ey_cp_stag = Efield_cp[lev][1]->ixType().toIntVect(); - const amrex::IntVect& Ez_cp_stag = Efield_cp[lev][2]->ixType().toIntVect(); + const amrex::IntVect& Ex_cp_stag = m_fields.get(FieldType::Efield_cp, Direction{0}, lev)->ixType().toIntVect(); + const amrex::IntVect& Ey_cp_stag = m_fields.get(FieldType::Efield_cp, Direction{1}, lev)->ixType().toIntVect(); + const amrex::IntVect& Ez_cp_stag = m_fields.get(FieldType::Efield_cp, Direction{2}, lev)->ixType().toIntVect(); #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) @@ -311,12 +334,12 @@ WarpX::UpdateAuxilaryDataStagToNodal () Array4 const& ex_aux = Efield_aux[lev][0]->array(mfi); Array4 const& ey_aux = Efield_aux[lev][1]->array(mfi); Array4 const& ez_aux = Efield_aux[lev][2]->array(mfi); - Array4 const& ex_fp = Efield_fp[lev][0]->const_array(mfi); - Array4 const& ey_fp = Efield_fp[lev][1]->const_array(mfi); - Array4 const& ez_fp = Efield_fp[lev][2]->const_array(mfi); - Array4 const& ex_cp = Efield_cp[lev][0]->const_array(mfi); - Array4 const& ey_cp = Efield_cp[lev][1]->const_array(mfi); - Array4 const& ez_cp = Efield_cp[lev][2]->const_array(mfi); + Array4 const& ex_fp = m_fields.get(FieldType::Efield_fp, Direction{0}, lev)->const_array(mfi); + Array4 const& ey_fp = m_fields.get(FieldType::Efield_fp, Direction{1}, lev)->const_array(mfi); + Array4 const& ez_fp = m_fields.get(FieldType::Efield_fp, Direction{2}, lev)->const_array(mfi); + Array4 const& ex_cp = m_fields.get(FieldType::Efield_cp, Direction{0}, lev)->const_array(mfi); + Array4 const& ey_cp = 
m_fields.get(FieldType::Efield_cp, Direction{1}, lev)->const_array(mfi); + Array4 const& ez_cp = m_fields.get(FieldType::Efield_cp, Direction{2}, lev)->const_array(mfi); Array4 const& ex_c = Etmp[0]->const_array(mfi); Array4 const& ey_c = Etmp[1]->const_array(mfi); Array4 const& ez_c = Etmp[2]->const_array(mfi); @@ -332,9 +355,9 @@ WarpX::UpdateAuxilaryDataStagToNodal () } } else { // electrostatic - const amrex::IntVect& Ex_fp_stag = Efield_fp[lev][0]->ixType().toIntVect(); - const amrex::IntVect& Ey_fp_stag = Efield_fp[lev][1]->ixType().toIntVect(); - const amrex::IntVect& Ez_fp_stag = Efield_fp[lev][2]->ixType().toIntVect(); + const amrex::IntVect& Ex_fp_stag = m_fields.get(FieldType::Efield_fp, Direction{0}, lev)->ixType().toIntVect(); + const amrex::IntVect& Ey_fp_stag = m_fields.get(FieldType::Efield_fp, Direction{1}, lev)->ixType().toIntVect(); + const amrex::IntVect& Ez_fp_stag = m_fields.get(FieldType::Efield_fp, Direction{2}, lev)->ixType().toIntVect(); #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -343,9 +366,9 @@ WarpX::UpdateAuxilaryDataStagToNodal () Array4 const& ex_aux = Efield_aux[lev][0]->array(mfi); Array4 const& ey_aux = Efield_aux[lev][1]->array(mfi); Array4 const& ez_aux = Efield_aux[lev][2]->array(mfi); - Array4 const& ex_fp = Efield_fp[lev][0]->const_array(mfi); - Array4 const& ey_fp = Efield_fp[lev][1]->const_array(mfi); - Array4 const& ez_fp = Efield_fp[lev][2]->const_array(mfi); + Array4 const& ex_fp = m_fields.get(FieldType::Efield_fp, Direction{0}, lev)->const_array(mfi); + Array4 const& ey_fp = m_fields.get(FieldType::Efield_fp, Direction{1}, lev)->const_array(mfi); + Array4 const& ez_fp = m_fields.get(FieldType::Efield_fp, Direction{2}, lev)->const_array(mfi); const Box& bx = mfi.growntilebox(); amrex::ParallelFor(bx, @@ -367,17 +390,23 @@ WarpX::UpdateAuxilaryDataSameType () // Update aux field, including guard cells, up to ng_FieldGather const amrex::IntVect& ng_src = guard_cells.ng_FieldGather; + using ablastr::fields::Direction; + ablastr::fields::MultiLevelVectorField Efield_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level); + ablastr::fields::MultiLevelVectorField Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level); + ablastr::fields::MultiLevelVectorField Efield_aux = m_fields.get_mr_levels_alldirs(FieldType::Efield_aux, finest_level); + ablastr::fields::MultiLevelVectorField Bfield_aux = m_fields.get_mr_levels_alldirs(FieldType::Bfield_aux, finest_level); + // Level 0: Copy from fine to aux // Note: in some configurations, Efield_aux/Bfield_aux and Efield_fp/Bfield_fp are simply aliases to the // same MultiFab object. MultiFab::Copy operation automatically detects this and does nothing in this case. 
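// A minimal sketch (not part of this patch) of the aliasing property the note
// above relies on: in some configurations the aux entry in the register points
// at the very same MultiFab as the fp entry, so MultiFab::Copy degenerates to a
// no-op. Names (m_fields, FieldType, Direction, Efield_aux, ng_src) are those
// used in this hunk.
//
//   amrex::MultiFab* dst = Efield_aux[0][0];
//   amrex::MultiFab* src = m_fields.get(FieldType::Efield_fp, Direction{0}, 0);
//   // if dst == src, the Copy below is detected as self-copy and does nothing
//   MultiFab::Copy(*dst, *src, 0, 0, dst->nComp(), ng_src);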
if (WarpX::fft_do_time_averaging) { - MultiFab::Copy(*Efield_aux[0][0], *Efield_avg_fp[0][0], 0, 0, Efield_aux[0][0]->nComp(), ng_src); - MultiFab::Copy(*Efield_aux[0][1], *Efield_avg_fp[0][1], 0, 0, Efield_aux[0][1]->nComp(), ng_src); - MultiFab::Copy(*Efield_aux[0][2], *Efield_avg_fp[0][2], 0, 0, Efield_aux[0][2]->nComp(), ng_src); - MultiFab::Copy(*Bfield_aux[0][0], *Bfield_avg_fp[0][0], 0, 0, Bfield_aux[0][0]->nComp(), ng_src); - MultiFab::Copy(*Bfield_aux[0][1], *Bfield_avg_fp[0][1], 0, 0, Bfield_aux[0][1]->nComp(), ng_src); - MultiFab::Copy(*Bfield_aux[0][2], *Bfield_avg_fp[0][2], 0, 0, Bfield_aux[0][2]->nComp(), ng_src); + MultiFab::Copy(*Efield_aux[0][0], *m_fields.get(FieldType::Efield_avg_fp, Direction{0}, 0), 0, 0, Efield_aux[0][0]->nComp(), ng_src); + MultiFab::Copy(*Efield_aux[0][1], *m_fields.get(FieldType::Efield_avg_fp, Direction{1}, 0), 0, 0, Efield_aux[0][1]->nComp(), ng_src); + MultiFab::Copy(*Efield_aux[0][2], *m_fields.get(FieldType::Efield_avg_fp, Direction{2}, 0), 0, 0, Efield_aux[0][2]->nComp(), ng_src); + MultiFab::Copy(*Bfield_aux[0][0], *m_fields.get(FieldType::Bfield_avg_fp, Direction{0}, 0), 0, 0, Bfield_aux[0][0]->nComp(), ng_src); + MultiFab::Copy(*Bfield_aux[0][1], *m_fields.get(FieldType::Bfield_avg_fp, Direction{1}, 0), 0, 0, Bfield_aux[0][1]->nComp(), ng_src); + MultiFab::Copy(*Bfield_aux[0][2], *m_fields.get(FieldType::Bfield_avg_fp, Direction{2}, 0), 0, 0, Bfield_aux[0][2]->nComp(), ng_src); } else { @@ -391,16 +420,19 @@ WarpX::UpdateAuxilaryDataSameType () for (int lev = 1; lev <= finest_level; ++lev) { const amrex::Periodicity& crse_period = Geom(lev-1).periodicity(); - const IntVect& ng = Bfield_cp[lev][0]->nGrowVect(); - const DistributionMapping& dm = Bfield_cp[lev][0]->DistributionMap(); + const IntVect& ng = m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->nGrowVect(); + const DistributionMapping& dm = m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->DistributionMap(); // B field { if (electromagnetic_solver_id != ElectromagneticSolverAlgo::None) { - MultiFab dBx(Bfield_cp[lev][0]->boxArray(), dm, Bfield_cp[lev][0]->nComp(), ng); - MultiFab dBy(Bfield_cp[lev][1]->boxArray(), dm, Bfield_cp[lev][1]->nComp(), ng); - MultiFab dBz(Bfield_cp[lev][2]->boxArray(), dm, Bfield_cp[lev][2]->nComp(), ng); + MultiFab dBx(m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->boxArray(), dm, + m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->nComp(), ng); + MultiFab dBy(m_fields.get(FieldType::Bfield_cp, Direction{1}, lev)->boxArray(), dm, + m_fields.get(FieldType::Bfield_cp, Direction{1}, lev)->nComp(), ng); + MultiFab dBz(m_fields.get(FieldType::Bfield_cp, Direction{2}, lev)->boxArray(), dm, + m_fields.get(FieldType::Bfield_cp, Direction{2}, lev)->nComp(), ng); dBx.setVal(0.0); dBy.setVal(0.0); dBz.setVal(0.0); @@ -418,15 +450,18 @@ WarpX::UpdateAuxilaryDataSameType () Bfield_aux[lev - 1][2]->nComp(), ng_src, ng, WarpX::do_single_precision_comms, crse_period); - if (Bfield_cax[lev][0]) + if (m_fields.has(FieldType::Bfield_cax, Direction{0}, lev)) { - MultiFab::Copy(*Bfield_cax[lev][0], dBx, 0, 0, Bfield_cax[lev][0]->nComp(), ng); - MultiFab::Copy(*Bfield_cax[lev][1], dBy, 0, 0, Bfield_cax[lev][1]->nComp(), ng); - MultiFab::Copy(*Bfield_cax[lev][2], dBz, 0, 0, Bfield_cax[lev][2]->nComp(), ng); + MultiFab::Copy(*m_fields.get(FieldType::Bfield_cax, Direction{0}, lev), dBx, 0, 0, m_fields.get(FieldType::Bfield_cax, Direction{0}, lev)->nComp(), ng); + MultiFab::Copy(*m_fields.get(FieldType::Bfield_cax, Direction{1}, lev), dBy, 0, 0, 
m_fields.get(FieldType::Bfield_cax, Direction{1}, lev)->nComp(), ng); + MultiFab::Copy(*m_fields.get(FieldType::Bfield_cax, Direction{2}, lev), dBz, 0, 0, m_fields.get(FieldType::Bfield_cax, Direction{2}, lev)->nComp(), ng); } - MultiFab::Subtract(dBx, *Bfield_cp[lev][0], 0, 0, Bfield_cp[lev][0]->nComp(), ng); - MultiFab::Subtract(dBy, *Bfield_cp[lev][1], 0, 0, Bfield_cp[lev][1]->nComp(), ng); - MultiFab::Subtract(dBz, *Bfield_cp[lev][2], 0, 0, Bfield_cp[lev][2]->nComp(), ng); + MultiFab::Subtract(dBx, *m_fields.get(FieldType::Bfield_cp, Direction{0}, lev), + 0, 0, m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->nComp(), ng); + MultiFab::Subtract(dBy, *m_fields.get(FieldType::Bfield_cp, Direction{1}, lev), + 0, 0, m_fields.get(FieldType::Bfield_cp, Direction{1}, lev)->nComp(), ng); + MultiFab::Subtract(dBz, *m_fields.get(FieldType::Bfield_cp, Direction{2}, lev), + 0, 0, m_fields.get(FieldType::Bfield_cp, Direction{2}, lev)->nComp(), ng); const amrex::IntVect& refinement_ratio = refRatio(lev-1); @@ -475,9 +510,12 @@ WarpX::UpdateAuxilaryDataSameType () { if (electromagnetic_solver_id != ElectromagneticSolverAlgo::None) { - MultiFab dEx(Efield_cp[lev][0]->boxArray(), dm, Efield_cp[lev][0]->nComp(), ng); - MultiFab dEy(Efield_cp[lev][1]->boxArray(), dm, Efield_cp[lev][1]->nComp(), ng); - MultiFab dEz(Efield_cp[lev][2]->boxArray(), dm, Efield_cp[lev][2]->nComp(), ng); + MultiFab dEx(m_fields.get(FieldType::Efield_cp, Direction{0}, lev)->boxArray(), dm, + m_fields.get(FieldType::Efield_cp, Direction{0}, lev)->nComp(), ng); + MultiFab dEy(m_fields.get(FieldType::Efield_cp, Direction{1}, lev)->boxArray(), dm, + m_fields.get(FieldType::Efield_cp, Direction{1}, lev)->nComp(), ng); + MultiFab dEz(m_fields.get(FieldType::Efield_cp, Direction{2}, lev)->boxArray(), dm, + m_fields.get(FieldType::Efield_cp, Direction{2}, lev)->nComp(), ng); dEx.setVal(0.0); dEy.setVal(0.0); dEz.setVal(0.0); @@ -497,15 +535,18 @@ WarpX::UpdateAuxilaryDataSameType () WarpX::do_single_precision_comms, crse_period); - if (Efield_cax[lev][0]) + if (m_fields.has(FieldType::Efield_cax, Direction{0}, lev)) { - MultiFab::Copy(*Efield_cax[lev][0], dEx, 0, 0, Efield_cax[lev][0]->nComp(), ng); - MultiFab::Copy(*Efield_cax[lev][1], dEy, 0, 0, Efield_cax[lev][1]->nComp(), ng); - MultiFab::Copy(*Efield_cax[lev][2], dEz, 0, 0, Efield_cax[lev][2]->nComp(), ng); + MultiFab::Copy(*m_fields.get(FieldType::Efield_cax, Direction{0}, lev), dEx, 0, 0, m_fields.get(FieldType::Efield_cax, Direction{0}, lev)->nComp(), ng); + MultiFab::Copy(*m_fields.get(FieldType::Efield_cax, Direction{1}, lev), dEy, 0, 0, m_fields.get(FieldType::Efield_cax, Direction{1}, lev)->nComp(), ng); + MultiFab::Copy(*m_fields.get(FieldType::Efield_cax, Direction{2}, lev), dEz, 0, 0, m_fields.get(FieldType::Efield_cax, Direction{2}, lev)->nComp(), ng); } - MultiFab::Subtract(dEx, *Efield_cp[lev][0], 0, 0, Efield_cp[lev][0]->nComp(), ng); - MultiFab::Subtract(dEy, *Efield_cp[lev][1], 0, 0, Efield_cp[lev][1]->nComp(), ng); - MultiFab::Subtract(dEz, *Efield_cp[lev][2], 0, 0, Efield_cp[lev][2]->nComp(), ng); + MultiFab::Subtract(dEx, *m_fields.get(FieldType::Efield_cp, Direction{0}, lev), + 0, 0, m_fields.get(FieldType::Efield_cp, Direction{0}, lev)->nComp(), ng); + MultiFab::Subtract(dEy, *m_fields.get(FieldType::Efield_cp, Direction{1}, lev), + 0, 0, m_fields.get(FieldType::Efield_cp, Direction{1}, lev)->nComp(), ng); + MultiFab::Subtract(dEz, *m_fields.get(FieldType::Efield_cp, Direction{2}, lev), + 0, 0, m_fields.get(FieldType::Efield_cp, Direction{2}, 
lev)->nComp(), ng);

             const amrex::IntVect& refinement_ratio = refRatio(lev-1);

@@ -521,9 +562,9 @@ WarpX::UpdateAuxilaryDataSameType ()
                     Array4<Real> const& ex_aux = Efield_aux[lev][0]->array(mfi);
                     Array4<Real> const& ey_aux = Efield_aux[lev][1]->array(mfi);
                     Array4<Real> const& ez_aux = Efield_aux[lev][2]->array(mfi);
-                    Array4<Real const> const& ex_fp = Efield_fp[lev][0]->const_array(mfi);
-                    Array4<Real const> const& ey_fp = Efield_fp[lev][1]->const_array(mfi);
-                    Array4<Real const> const& ez_fp = Efield_fp[lev][2]->const_array(mfi);
+                    Array4<Real const> const& ex_fp = m_fields.get(FieldType::Efield_fp, Direction{0}, lev)->const_array(mfi);
+                    Array4<Real const> const& ey_fp = m_fields.get(FieldType::Efield_fp, Direction{1}, lev)->const_array(mfi);
+                    Array4<Real const> const& ez_fp = m_fields.get(FieldType::Efield_fp, Direction{2}, lev)->const_array(mfi);
                     Array4<Real const> const& ex_c = dEx.const_array(mfi);
                     Array4<Real const> const& ey_c = dEy.const_array(mfi);
                     Array4<Real const> const& ez_c = dEz.const_array(mfi);
@@ -545,9 +586,9 @@ WarpX::UpdateAuxilaryDataSameType ()
             }
             else // electrostatic
             {
-                MultiFab::Copy(*Efield_aux[lev][0], *Efield_fp[lev][0], 0, 0, Efield_aux[lev][0]->nComp(), Efield_aux[lev][0]->nGrowVect());
-                MultiFab::Copy(*Efield_aux[lev][1], *Efield_fp[lev][1], 0, 0, Efield_aux[lev][1]->nComp(), Efield_aux[lev][1]->nGrowVect());
-                MultiFab::Copy(*Efield_aux[lev][2], *Efield_fp[lev][2], 0, 0, Efield_aux[lev][2]->nComp(), Efield_aux[lev][2]->nGrowVect());
+                MultiFab::Copy(*Efield_aux[lev][0], *m_fields.get(FieldType::Efield_fp, Direction{0}, lev), 0, 0, Efield_aux[lev][0]->nComp(), Efield_aux[lev][0]->nGrowVect());
+                MultiFab::Copy(*Efield_aux[lev][1], *m_fields.get(FieldType::Efield_fp, Direction{1}, lev), 0, 0, Efield_aux[lev][1]->nComp(), Efield_aux[lev][1]->nGrowVect());
+                MultiFab::Copy(*Efield_aux[lev][2], *m_fields.get(FieldType::Efield_fp, Direction{2}, lev), 0, 0, Efield_aux[lev][2]->nComp(), Efield_aux[lev][2]->nGrowVect());
             }
         }
     }
@@ -668,14 +709,20 @@ WarpX::FillBoundaryE (const int lev, const PatchType patch_type, const amrex::In
     std::array<amrex::MultiFab*,3> mf;
     amrex::Periodicity period;

+    using ablastr::fields::Direction;
+
     if (patch_type == PatchType::fine)
     {
-        mf     = {Efield_fp[lev][0].get(), Efield_fp[lev][1].get(), Efield_fp[lev][2].get()};
+        mf     = {m_fields.get(FieldType::Efield_fp, Direction{0}, lev),
+                  m_fields.get(FieldType::Efield_fp, Direction{1}, lev),
+                  m_fields.get(FieldType::Efield_fp, Direction{2}, lev)};
         period = Geom(lev).periodicity();
     }
     else // coarse patch
     {
-        mf     = {Efield_cp[lev][0].get(), Efield_cp[lev][1].get(), Efield_cp[lev][2].get()};
+        mf     = {m_fields.get(FieldType::Efield_cp, Direction{0}, lev),
+                  m_fields.get(FieldType::Efield_cp, Direction{1}, lev),
+                  m_fields.get(FieldType::Efield_cp, Direction{2}, lev)};
         period = Geom(lev-1).periodicity();
     }

@@ -686,16 +733,18 @@ WarpX::FillBoundaryE (const int lev, const PatchType patch_type, const amrex::In
         if (pml[lev] && pml[lev]->ok())
         {
             const std::array<amrex::MultiFab*,3> mf_pml =
-                (patch_type == PatchType::fine) ? pml[lev]->GetE_fp() : pml[lev]->GetE_cp();
+                (patch_type == PatchType::fine) ?
+                m_fields.get_alldirs(FieldType::pml_E_fp, lev) :
+                m_fields.get_alldirs(FieldType::pml_E_cp, lev);

             pml[lev]->Exchange(mf_pml, mf, patch_type, do_pml_in_domain);
-            pml[lev]->FillBoundaryE(patch_type, nodal_sync);
+            pml[lev]->FillBoundary(mf_pml, patch_type, nodal_sync);
         }

 #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT)
         if (pml_rz[lev])
         {
-            pml_rz[lev]->FillBoundaryE(patch_type, nodal_sync);
+            pml_rz[lev]->FillBoundaryE(m_fields, patch_type, nodal_sync);
         }
 #endif
     }
@@ -725,14 +774,20 @@ WarpX::FillBoundaryB (const int lev, const PatchType patch_type, const amrex::In
     std::array<amrex::MultiFab*,3> mf;
     amrex::Periodicity period;

+    using ablastr::fields::Direction;
+
     if (patch_type == PatchType::fine)
     {
-        mf     = {Bfield_fp[lev][0].get(), Bfield_fp[lev][1].get(), Bfield_fp[lev][2].get()};
+        mf     = {m_fields.get(FieldType::Bfield_fp, Direction{0}, lev),
+                  m_fields.get(FieldType::Bfield_fp, Direction{1}, lev),
+                  m_fields.get(FieldType::Bfield_fp, Direction{2}, lev)};
         period = Geom(lev).periodicity();
     }
     else // coarse patch
     {
-        mf     = {Bfield_cp[lev][0].get(), Bfield_cp[lev][1].get(), Bfield_cp[lev][2].get()};
+        mf     = {m_fields.get(FieldType::Bfield_cp, Direction{0}, lev),
+                  m_fields.get(FieldType::Bfield_cp, Direction{1}, lev),
+                  m_fields.get(FieldType::Bfield_cp, Direction{2}, lev)};
         period = Geom(lev-1).periodicity();
     }

@@ -743,16 +798,18 @@ WarpX::FillBoundaryB (const int lev, const PatchType patch_type, const amrex::In
         if (pml[lev] && pml[lev]->ok())
        {
             const std::array<amrex::MultiFab*,3> mf_pml =
-                (patch_type == PatchType::fine) ? pml[lev]->GetB_fp() : pml[lev]->GetB_cp();
+                (patch_type == PatchType::fine) ?
+                m_fields.get_alldirs(FieldType::pml_B_fp, lev) :
+                m_fields.get_alldirs(FieldType::pml_B_cp, lev);

             pml[lev]->Exchange(mf_pml, mf, patch_type, do_pml_in_domain);
-            pml[lev]->FillBoundaryB(patch_type, nodal_sync);
+            pml[lev]->FillBoundary(mf_pml, patch_type, nodal_sync);
         }

 #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT)
         if (pml_rz[lev])
         {
-            pml_rz[lev]->FillBoundaryB(patch_type, nodal_sync);
+            pml_rz[lev]->FillBoundaryB(m_fields, patch_type, nodal_sync);
         }
 #endif
     }
@@ -786,9 +843,11 @@ WarpX::FillBoundaryE_avg (int lev, PatchType patch_type, IntVect ng)
             WARPX_ABORT_WITH_MESSAGE("Averaged Galilean PSATD with PML is not yet implemented");
         }

+        ablastr::fields::MultiLevelVectorField Efield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level);
+
         const amrex::Periodicity& period = Geom(lev).periodicity();
         if ( safe_guard_cells ){
-            const Vector<amrex::MultiFab*> mf{Efield_avg_fp[lev][0].get(),Efield_avg_fp[lev][1].get(),Efield_avg_fp[lev][2].get()};
+            const Vector<amrex::MultiFab*> mf{Efield_avg_fp[lev][0],Efield_avg_fp[lev][1],Efield_avg_fp[lev][2]};
             ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period);
         } else {
             WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
@@ -806,9 +865,11 @@ WarpX::FillBoundaryE_avg (int lev, PatchType patch_type, IntVect ng)
             WARPX_ABORT_WITH_MESSAGE("Averaged Galilean PSATD with PML is not yet implemented");
         }

+        ablastr::fields::MultiLevelVectorField Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level);
+
         const amrex::Periodicity& cperiod = Geom(lev-1).periodicity();
         if ( safe_guard_cells ) {
-            const Vector<amrex::MultiFab*> mf{Efield_avg_cp[lev][0].get(),Efield_avg_cp[lev][1].get(),Efield_avg_cp[lev][2].get()};
+            const Vector<amrex::MultiFab*> mf{Efield_avg_cp[lev][0],Efield_avg_cp[lev][1],Efield_avg_cp[lev][2]};
             ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, cperiod);
         } else {
@@ -833,19 +894,24 @@ WarpX::FillBoundaryB_avg (int lev, IntVect ng)
 void
 WarpX::FillBoundaryB_avg (int lev, PatchType patch_type, IntVect ng)
 {
+    using ablastr::fields::Direction;
+
     if (patch_type == PatchType::fine)
     {
         if (do_pml && pml[lev]->ok())
         {
             WARPX_ABORT_WITH_MESSAGE("Averaged Galilean PSATD with PML is not yet implemented");
         }
+
+        ablastr::fields::MultiLevelVectorField Bfield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level);
+
         const amrex::Periodicity& period = Geom(lev).periodicity();
         if ( safe_guard_cells ) {
-            const Vector<amrex::MultiFab*> mf{Bfield_avg_fp[lev][0].get(),Bfield_avg_fp[lev][1].get(),Bfield_avg_fp[lev][2].get()};
+            const Vector<amrex::MultiFab*> mf{Bfield_avg_fp[lev][0],Bfield_avg_fp[lev][1],Bfield_avg_fp[lev][2]};
             ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period);
         } else {
             WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
-                ng.allLE(Bfield_fp[lev][0]->nGrowVect()),
+                ng.allLE(m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->nGrowVect()),
                 "Error: in FillBoundaryB, requested more guard cells than allocated");
             ablastr::utils::communication::FillBoundary(*Bfield_avg_fp[lev][0], ng, WarpX::do_single_precision_comms, period);
             ablastr::utils::communication::FillBoundary(*Bfield_avg_fp[lev][1], ng, WarpX::do_single_precision_comms, period);
@@ -859,9 +925,11 @@ WarpX::FillBoundaryB_avg (int lev, PatchType patch_type, IntVect ng)
             WARPX_ABORT_WITH_MESSAGE("Averaged Galilean PSATD with PML is not yet implemented");
         }

+        ablastr::fields::MultiLevelVectorField Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level);
+
         const amrex::Periodicity& cperiod = Geom(lev-1).periodicity();
         if ( safe_guard_cells ){
-            const Vector<amrex::MultiFab*> mf{Bfield_avg_cp[lev][0].get(),Bfield_avg_cp[lev][1].get(),Bfield_avg_cp[lev][2].get()};
+            const Vector<amrex::MultiFab*> mf{Bfield_avg_cp[lev][0],Bfield_avg_cp[lev][1],Bfield_avg_cp[lev][2]};
             ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, cperiod);
         } else {
             WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
@@ -888,30 +956,38 @@ WarpX::FillBoundaryF (int lev, PatchType patch_type, IntVect ng, std::optional<bool> nodal_sync)
 {
     if (patch_type == PatchType::fine)
     {
         if (do_pml && pml[lev] && pml[lev]->ok())
         {
-            if (F_fp[lev]) { pml[lev]->ExchangeF(patch_type, F_fp[lev].get(), do_pml_in_domain); }
-            pml[lev]->FillBoundaryF(patch_type, nodal_sync);
+            if (m_fields.has(FieldType::pml_F_fp, lev) && m_fields.has(FieldType::F_fp, lev)) {
+                pml[lev]->Exchange(m_fields.get(FieldType::pml_F_fp, lev), m_fields.get(FieldType::F_fp, lev), patch_type, do_pml_in_domain);
+            }
+            if (m_fields.has(FieldType::pml_F_fp, lev)) {
+                pml[lev]->FillBoundary(*m_fields.get(FieldType::pml_F_fp, lev), patch_type, nodal_sync);
+            }
         }

-        if (F_fp[lev])
+        if (m_fields.has(FieldType::F_fp, lev))
         {
             const amrex::Periodicity& period = Geom(lev).periodicity();
-            const amrex::IntVect& nghost = (safe_guard_cells) ? F_fp[lev]->nGrowVect() : ng;
-            ablastr::utils::communication::FillBoundary(*F_fp[lev], nghost, WarpX::do_single_precision_comms, period, nodal_sync);
+            const amrex::IntVect& nghost = (safe_guard_cells) ? m_fields.get(FieldType::F_fp, lev)->nGrowVect() : ng;
+            ablastr::utils::communication::FillBoundary(*m_fields.get(FieldType::F_fp, lev), nghost, WarpX::do_single_precision_comms, period, nodal_sync);
         }
     }
     else if (patch_type == PatchType::coarse)
     {
         if (do_pml && pml[lev] && pml[lev]->ok())
         {
-            if (F_cp[lev]) { pml[lev]->ExchangeF(patch_type, F_cp[lev].get(), do_pml_in_domain); }
-            pml[lev]->FillBoundaryF(patch_type, nodal_sync);
+            if (m_fields.has(FieldType::pml_F_cp, lev) && m_fields.has(FieldType::F_cp, lev)) {
+                pml[lev]->Exchange(m_fields.get(FieldType::pml_F_cp, lev), m_fields.get(FieldType::F_cp, lev), patch_type, do_pml_in_domain);
+            }
+            if (m_fields.has(FieldType::pml_F_cp, lev)) {
+                pml[lev]->FillBoundary(*m_fields.get(FieldType::pml_F_cp, lev), patch_type, nodal_sync);
+            }
         }

-        if (F_cp[lev])
+        if (m_fields.has(FieldType::F_cp, lev))
         {
             const amrex::Periodicity& period = Geom(lev-1).periodicity();
-            const amrex::IntVect& nghost = (safe_guard_cells) ? F_cp[lev]->nGrowVect() : ng;
-            ablastr::utils::communication::FillBoundary(*F_cp[lev], nghost, WarpX::do_single_precision_comms, period, nodal_sync);
+            const amrex::IntVect& nghost = (safe_guard_cells) ? m_fields.get(FieldType::F_cp, lev)->nGrowVect() : ng;
+            ablastr::utils::communication::FillBoundary(*m_fields.get(FieldType::F_cp, lev), nghost, WarpX::do_single_precision_comms, period, nodal_sync);
         }
     }
 }
@@ -932,30 +1008,40 @@ void WarpX::FillBoundaryG (int lev, PatchType patch_type, IntVect ng, std::optional<bool> nodal_sync)
 {
     if (patch_type == PatchType::fine)
     {
         if (do_pml && pml[lev] && pml[lev]->ok())
         {
-            if (G_fp[lev]) { pml[lev]->ExchangeG(patch_type, G_fp[lev].get(), do_pml_in_domain); }
-            pml[lev]->FillBoundaryG(patch_type, nodal_sync);
+            if (m_fields.has(FieldType::pml_G_fp,lev) && m_fields.has(FieldType::G_fp,lev)) {
+                pml[lev]->Exchange(m_fields.get(FieldType::pml_G_fp, lev), m_fields.get(FieldType::G_fp, lev), patch_type, do_pml_in_domain);
+            }
+            if (m_fields.has(FieldType::pml_G_fp,lev)) {
+                pml[lev]->FillBoundary(*m_fields.get(FieldType::pml_G_fp, lev), patch_type, nodal_sync);
+            }
         }

-        if (G_fp[lev])
+        if (m_fields.has(FieldType::G_fp,lev))
         {
             const amrex::Periodicity& period = Geom(lev).periodicity();
-            const amrex::IntVect& nghost = (safe_guard_cells) ? G_fp[lev]->nGrowVect() : ng;
-            ablastr::utils::communication::FillBoundary(*G_fp[lev], nghost, WarpX::do_single_precision_comms, period, nodal_sync);
+            MultiFab* G_fp = m_fields.get(FieldType::G_fp,lev);
+            const amrex::IntVect& nghost = (safe_guard_cells) ? G_fp->nGrowVect() : ng;
+            ablastr::utils::communication::FillBoundary(*G_fp, nghost, WarpX::do_single_precision_comms, period, nodal_sync);
         }
     }
     else if (patch_type == PatchType::coarse)
     {
         if (do_pml && pml[lev] && pml[lev]->ok())
         {
-            if (G_cp[lev]) { pml[lev]->ExchangeG(patch_type, G_cp[lev].get(), do_pml_in_domain); }
-            pml[lev]->FillBoundaryG(patch_type, nodal_sync);
+            if (m_fields.has(FieldType::pml_G_cp,lev) && m_fields.has(FieldType::G_cp,lev)) {
+                pml[lev]->Exchange(m_fields.get(FieldType::pml_G_cp, lev), m_fields.get(FieldType::G_cp, lev), patch_type, do_pml_in_domain);
+            }
+            if (m_fields.has(FieldType::pml_G_cp, lev)) {
+                pml[lev]->FillBoundary(*m_fields.get(FieldType::pml_G_cp, lev), patch_type, nodal_sync);
+            }
         }

-        if (G_cp[lev])
+        if (m_fields.has(FieldType::G_cp,lev))
        {
             const amrex::Periodicity& period = Geom(lev-1).periodicity();
-            const amrex::IntVect& nghost = (safe_guard_cells) ?
G_cp[lev]->nGrowVect() : ng;
-            ablastr::utils::communication::FillBoundary(*G_cp[lev], nghost, WarpX::do_single_precision_comms, period, nodal_sync);
+            MultiFab* G_cp = m_fields.get(FieldType::G_cp,lev);
+            const amrex::IntVect& nghost = (safe_guard_cells) ? G_cp->nGrowVect() : ng;
+            ablastr::utils::communication::FillBoundary(*G_cp, nghost, WarpX::do_single_precision_comms, period, nodal_sync);
         }
     }
 }
@@ -972,6 +1058,9 @@ WarpX::FillBoundaryAux (IntVect ng)
 void
 WarpX::FillBoundaryAux (int lev, IntVect ng)
 {
+    ablastr::fields::MultiLevelVectorField Efield_aux = m_fields.get_mr_levels_alldirs(FieldType::Efield_aux, finest_level);
+    ablastr::fields::MultiLevelVectorField Bfield_aux = m_fields.get_mr_levels_alldirs(FieldType::Bfield_aux, finest_level);
+
     const amrex::Periodicity& period = Geom(lev).periodicity();
     ablastr::utils::communication::FillBoundary(*Efield_aux[lev][0], ng, WarpX::do_single_precision_comms, period);
     ablastr::utils::communication::FillBoundary(*Efield_aux[lev][1], ng, WarpX::do_single_precision_comms, period);
@@ -982,23 +1071,26 @@ WarpX::FillBoundaryAux (int lev, IntVect ng)
 }

 void
-WarpX::SyncCurrent (
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_fp,
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_cp,
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_buffer)
+WarpX::SyncCurrent (const std::string& current_fp_string)
 {
+    using ablastr::fields::Direction;
+
     WARPX_PROFILE("WarpX::SyncCurrent()");

+    ablastr::fields::MultiLevelVectorField const& J_fp = m_fields.get_mr_levels_alldirs(current_fp_string, finest_level);
+
     // If warpx.do_current_centering = 1, center currents from nodal grid to staggered grid
     if (do_current_centering)
     {
+        ablastr::fields::MultiLevelVectorField const& J_fp_nodal = m_fields.get_mr_levels_alldirs(FieldType::current_fp_nodal, finest_level+1);
+
         AMREX_ALWAYS_ASSERT_WITH_MESSAGE(finest_level <= 1,
                                          "warpx.do_current_centering=1 not supported with more than one fine levels");
         for (int lev = 0; lev <= finest_level; lev++)
         {
-            WarpX::UpdateCurrentNodalToStag(*J_fp[lev][0], *current_fp_nodal[lev][0]);
-            WarpX::UpdateCurrentNodalToStag(*J_fp[lev][1], *current_fp_nodal[lev][1]);
-            WarpX::UpdateCurrentNodalToStag(*J_fp[lev][2], *current_fp_nodal[lev][2]);
+            WarpX::UpdateCurrentNodalToStag(*J_fp[lev][Direction{0}], *J_fp_nodal[lev][Direction{0}]);
+            WarpX::UpdateCurrentNodalToStag(*J_fp[lev][Direction{1}], *J_fp_nodal[lev][Direction{1}]);
+            WarpX::UpdateCurrentNodalToStag(*J_fp[lev][Direction{2}], *J_fp_nodal[lev][Direction{2}]);
         }
     }

@@ -1073,7 +1165,7 @@ WarpX::SyncCurrent (
     {
         for (int lev = finest_level; lev >= 0; --lev)
         {
-            const int ncomp = J_fp[lev][idim]->nComp();
+            const int ncomp = J_fp[lev][Direction{idim}]->nComp();
             auto const& period = Geom(lev).periodicity();

             if (lev < finest_level)
             {
                 // On a coarse level, the data in mf_comm comes from the
                 // coarse patch of the fine level. They are unfiltered and uncommunicated.
                 // We need to add it to the fine patch of the current level.
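// [Editorial illustration, not part of the patch] SyncCurrent now resolves J
// once per call as a multi-level vector field and indexes it per level and per
// Direction instead of per raw integer. A sketch of that access pattern,
// assuming current_fp is registered in `m_fields`:
//
//     ablastr::fields::MultiLevelVectorField const& J =
//         m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level);
//     for (int l = 0; l <= finest_level; ++l) {
//         for (int d = 0; d < 3; ++d) {
//             amrex::MultiFab& j = *J[l][Direction{d}];
//             j.setVal(0.0); // e.g. reset before summing deposited currents
//         }
//     }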
-                MultiFab fine_lev_cp(J_fp[lev][idim]->boxArray(),
-                                     J_fp[lev][idim]->DistributionMap(),
+                MultiFab fine_lev_cp(J_fp[lev][Direction{idim}]->boxArray(),
+                                     J_fp[lev][Direction{idim}]->DistributionMap(),
                                      ncomp, 0);
                 fine_lev_cp.setVal(0.0);
                 fine_lev_cp.ParallelAdd(*mf_comm, 0, 0, ncomp, mf_comm->nGrowVect(),
@@ -1091,7 +1183,7 @@ WarpX::SyncCurrent (
                 auto owner_mask = amrex::OwnerMask(fine_lev_cp, period);
                 auto const& mma = owner_mask->const_arrays();
                 auto const& sma = fine_lev_cp.const_arrays();
-                auto const& dma = J_fp[lev][idim]->arrays();
+                auto const& dma = J_fp[lev][Direction{idim}]->arrays();
                 amrex::ParallelFor(fine_lev_cp, IntVect(0), ncomp,
                 [=] AMREX_GPU_DEVICE (int bno, int i, int j, int k, int n)
                 {
@@ -1100,6 +1192,7 @@ WarpX::SyncCurrent (
                     }
                 });
                 // Now it's safe to apply filter and sumboundary on J_cp
+                ablastr::fields::MultiLevelVectorField const& J_cp = m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level);
                 if (use_filter)
                 {
                     ApplyFilterJ(J_cp, lev+1, idim);
@@ -1114,23 +1207,26 @@ WarpX::SyncCurrent (
                 // filtering depends on the level. This is also done before any
                 // same-level communication because it's easier this way to
                 // avoid double counting.
-                J_cp[lev][idim]->setVal(0.0);
-                ablastr::coarsen::average::Coarsen(*J_cp[lev][idim],
-                                                   *J_fp[lev][idim],
+                ablastr::fields::MultiLevelVectorField const& J_cp = m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level);
+                J_cp[lev][Direction{idim}]->setVal(0.0);
+                ablastr::coarsen::average::Coarsen(*J_cp[lev][Direction{idim}],
+                                                   *J_fp[lev][Direction{idim}],
                                                    refRatio(lev-1));
-                if (J_buffer[lev][idim])
+                if (m_fields.has(FieldType::current_buf, Direction{idim}, lev))
                 {
-                    IntVect const& ng = J_cp[lev][idim]->nGrowVect();
-                    AMREX_ASSERT(ng.allLE(J_buffer[lev][idim]->nGrowVect()));
-                    MultiFab::Add(*J_buffer[lev][idim], *J_cp[lev][idim],
+                    ablastr::fields::MultiLevelVectorField const& J_buffer = m_fields.get_mr_levels_alldirs(FieldType::current_buf, finest_level);
+
+                    IntVect const& ng = J_cp[lev][Direction{idim}]->nGrowVect();
+                    AMREX_ASSERT(ng.allLE(J_buffer[lev][Direction{idim}]->nGrowVect()));
+                    MultiFab::Add(*J_buffer[lev][Direction{idim}], *J_cp[lev][Direction{idim}],
                                   0, 0, ncomp, ng);
                     mf_comm = std::make_unique<MultiFab>
-                        (*J_buffer[lev][idim], amrex::make_alias, 0, ncomp);
+                        (*J_buffer[lev][Direction{idim}], amrex::make_alias, 0, ncomp);
                 }
                 else
                 {
                     mf_comm = std::make_unique<MultiFab>
-                        (*J_cp[lev][idim], amrex::make_alias, 0, ncomp);
+                        (*J_cp[lev][Direction{idim}], amrex::make_alias, 0, ncomp);
                 }
             }

@@ -1145,14 +1241,24 @@ WarpX::SyncCurrent (
 void
 WarpX::SyncRho ()
 {
-    SyncRho(rho_fp, rho_cp, charge_buf);
+    const ablastr::fields::MultiLevelScalarField rho_fp = m_fields.has(FieldType::rho_fp, 0) ?
+        m_fields.get_mr_levels(FieldType::rho_fp, finest_level) :
+        ablastr::fields::MultiLevelScalarField{static_cast<size_t>(finest_level+1)};
+    const ablastr::fields::MultiLevelScalarField rho_cp = m_fields.has(FieldType::rho_cp, 1) ?
+        m_fields.get_mr_levels(FieldType::rho_cp, finest_level) :
+        ablastr::fields::MultiLevelScalarField{static_cast<size_t>(finest_level+1)};
+    const ablastr::fields::MultiLevelScalarField rho_buf = m_fields.has(FieldType::rho_buf, 1) ?
+        m_fields.get_mr_levels(FieldType::rho_buf, finest_level) :
+        ablastr::fields::MultiLevelScalarField{static_cast<size_t>(finest_level+1)};
+
+    SyncRho(rho_fp, rho_cp, rho_buf);
 }

 void
 WarpX::SyncRho (
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_fp,
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_cp,
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_buffer)
+    const ablastr::fields::MultiLevelScalarField& charge_fp,
+    const ablastr::fields::MultiLevelScalarField& charge_cp,
+    ablastr::fields::MultiLevelScalarField const & charge_buffer)
 {
     WARPX_PROFILE("WarpX::SyncRho()");

@@ -1227,8 +1333,8 @@ WarpX::SyncRho (
 * averaging the values of the current of the fine patch (on the same level).
 */
 void WarpX::RestrictCurrentFromFineToCoarsePatch (
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_fp,
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_cp,
+    const ablastr::fields::MultiLevelVectorField& J_fp,
+    const ablastr::fields::MultiLevelVectorField& J_cp,
     const int lev)
 {
     J_cp[lev][0]->setVal(0.0);
@@ -1237,23 +1343,25 @@ void WarpX::RestrictCurrentFromFineToCoarsePatch (

     const IntVect& refinement_ratio = refRatio(lev-1);

-    std::array<const MultiFab*,3> fine { J_fp[lev][0].get(),
-                                         J_fp[lev][1].get(),
-                                         J_fp[lev][2].get() };
-    std::array< MultiFab*,3> crse { J_cp[lev][0].get(),
-                                    J_cp[lev][1].get(),
-                                    J_cp[lev][2].get() };
+    std::array<const MultiFab*,3> fine { J_fp[lev][0],
+                                         J_fp[lev][1],
+                                         J_fp[lev][2] };
+    std::array< MultiFab*,3> crse { J_cp[lev][0],
+                                    J_cp[lev][1],
+                                    J_cp[lev][2] };
     ablastr::coarsen::average::Coarsen(*crse[0], *fine[0], refinement_ratio );
     ablastr::coarsen::average::Coarsen(*crse[1], *fine[1], refinement_ratio );
     ablastr::coarsen::average::Coarsen(*crse[2], *fine[2], refinement_ratio );
 }

 void WarpX::ApplyFilterJ (
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& current,
+    const ablastr::fields::MultiLevelVectorField& current,
     const int lev,
     const int idim)
 {
-    amrex::MultiFab& J = *current[lev][idim];
+    using ablastr::fields::Direction;
+
+    amrex::MultiFab& J = *current[lev][Direction{idim}];

     const int ncomp = J.nComp();
     const amrex::IntVect ngrow = J.nGrowVect();
@@ -1266,7 +1374,7 @@ void WarpX::ApplyFilterJ (
 }

 void WarpX::ApplyFilterJ (
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& current,
+    const ablastr::fields::MultiLevelVectorField& current,
     const int lev)
 {
     for (int idim=0; idim<3; ++idim)
@@ -1276,12 +1384,14 @@ void WarpX::ApplyFilterJ (
 }

 void WarpX::SumBoundaryJ (
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& current,
+    const ablastr::fields::MultiLevelVectorField& current,
     const int lev,
     const int idim,
     const amrex::Periodicity& period)
 {
-    amrex::MultiFab& J = *current[lev][idim];
+    using ablastr::fields::Direction;
+
+    amrex::MultiFab& J = *current[lev][Direction{idim}];

     const amrex::IntVect ng = J.nGrowVect();
     amrex::IntVect ng_depos_J = get_ng_depos_J();
@@ -1314,7 +1424,7 @@ void WarpX::SumBoundaryJ (
 }

 void WarpX::SumBoundaryJ (
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& current,
+    const ablastr::fields::MultiLevelVectorField& current,
     const int lev,
     const amrex::Periodicity& period)
 {
@@ -1338,9 +1448,9 @@ void WarpX::SumBoundaryJ (
 * patch (and buffer region) of `lev+1`
 */
 void WarpX::AddCurrentFromFineLevelandSumBoundary (
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_fp,
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_cp,
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_buffer,
+    const ablastr::fields::MultiLevelVectorField& J_fp,
+    const ablastr::fields::MultiLevelVectorField& J_cp,
+    const ablastr::fields::MultiLevelVectorField& J_buffer,
     const int lev)
 {
     const amrex::Periodicity& period = Geom(lev).periodicity();
@@ -1415,28 +1525,25 @@ void WarpX::AddCurrentFromFineLevelandSumBoundary (
     }
 }

-void WarpX::RestrictRhoFromFineToCoarsePatch (
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_fp,
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_cp,
-    const int lev)
+void WarpX::RestrictRhoFromFineToCoarsePatch ( const int lev )
 {
-    if (charge_fp[lev]) {
-        charge_cp[lev]->setVal(0.0);
+    if (m_fields.has(FieldType::rho_fp, lev)) {
+        m_fields.get(FieldType::rho_cp, lev)->setVal(0.0);
         const IntVect& refinement_ratio = refRatio(lev-1);
-        ablastr::coarsen::average::Coarsen(*charge_cp[lev], *charge_fp[lev], refinement_ratio );
+        ablastr::coarsen::average::Coarsen(*m_fields.get(FieldType::rho_cp, lev), *m_fields.get(FieldType::rho_fp, lev), refinement_ratio );
     }
 }

 void WarpX::ApplyFilterandSumBoundaryRho (
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_fp,
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_cp,
+    const ablastr::fields::MultiLevelScalarField& charge_fp,
+    const ablastr::fields::MultiLevelScalarField& charge_cp,
     const int lev,
     PatchType patch_type,
     const int icomp,
     const int ncomp)
 {
     const int glev = (patch_type == PatchType::fine) ? lev : lev-1;
-    const std::unique_ptr<amrex::MultiFab>& rho = (patch_type == PatchType::fine) ?
+    amrex::MultiFab* rho = (patch_type == PatchType::fine) ?
         charge_fp[lev] : charge_cp[lev];
     if (rho == nullptr) { return; }
     ApplyFilterandSumBoundaryRho(lev, glev, *rho, icomp, ncomp);
@@ -1474,9 +1581,9 @@ void WarpX::ApplyFilterandSumBoundaryRho (int /*lev*/, int glev, amrex::MultiFab
 * patch (and buffer region) of `lev+1`
 */
 void WarpX::AddRhoFromFineLevelandSumBoundary (
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_fp,
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_cp,
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_buffer,
+    const ablastr::fields::MultiLevelScalarField& charge_fp,
+    const ablastr::fields::MultiLevelScalarField& charge_cp,
+    ablastr::fields::MultiLevelScalarField const & charge_buffer,
     const int lev,
     const int icomp,
     const int ncomp)
@@ -1555,8 +1662,8 @@ void WarpX::AddRhoFromFineLevelandSumBoundary (
 }

 void WarpX::NodalSyncJ (
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_fp,
-    const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_cp,
+    const ablastr::fields::MultiLevelVectorField& J_fp,
+    const ablastr::fields::MultiLevelVectorField& J_cp,
     const int lev,
     PatchType patch_type)
 {
diff --git a/Source/Parallelization/WarpXRegrid.cpp b/Source/Parallelization/WarpXRegrid.cpp
index 0a3ab8d2099..a0a2d4929df 100644
--- a/Source/Parallelization/WarpXRegrid.cpp
+++ b/Source/Parallelization/WarpXRegrid.cpp
@@ -12,6 +12,7 @@
 #include "Diagnostics/ReducedDiags/MultiReducedDiags.H"
 #include "EmbeddedBoundary/Enabled.H"
 #include "EmbeddedBoundary/WarpXFaceInfoBox.H"
+#include "Fields.H"
 #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H"
 #include "Initialization/ExternalField.H"
 #include "Particles/MultiParticleContainer.H"
@@ -21,6 +22,8 @@
 #include "Utils/WarpXAlgorithmSelection.H"
 #include "Utils/WarpXProfilerWrapper.H"

+#include <ablastr/fields/MultiFabRegister.H>
+
 #include
 #include
 #include
@@ -168,85 +171,30 @@ WarpX::LoadBalance ()
 void
 WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const DistributionMapping& dm)
 {
-
-    const auto RemakeMultiFab = [&](auto& mf, const bool redistribute){
-        if (mf == nullptr) { return; }
-        const IntVect& ng = mf->nGrowVect();
-        auto pmf = std::remove_reference_t<decltype(mf)>{};
-        AllocInitMultiFab(pmf, mf->boxArray(), dm, mf->nComp(), ng, lev, mf->tags()[0]);
-        if (redistribute) { pmf->Redistribute(*mf, 0, 0, mf->nComp(), ng); }
-        mf = std::move(pmf);
-    };
+    using ablastr::fields::Direction;
+    using warpx::fields::FieldType;

     bool const eb_enabled = EB::enabled();

     if (ba == boxArray(lev))
     {
         if (ParallelDescriptor::NProcs() == 1) { return; }

+        m_fields.remake_level(lev, dm);
+
         // Fine patch
+        ablastr::fields::MultiLevelVectorField const& Bfield_fp =
            m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level);
         for (int idim=0; idim < 3; ++idim)
         {
-            RemakeMultiFab(Bfield_fp[lev][idim], true);
-            RemakeMultiFab(Efield_fp[lev][idim], true);
-            if (m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::read_from_file) {
-                RemakeMultiFab(Bfield_fp_external[lev][idim], true);
-            }
-            if (m_p_ext_field_params->E_ext_grid_type == ExternalFieldType::read_from_file) {
-                RemakeMultiFab(Efield_fp_external[lev][idim], true);
-            }
-            if (mypc->m_B_ext_particle_s == "read_from_file") {
-                RemakeMultiFab(B_external_particle_field[lev][idim], true);
-            }
-            if (mypc->m_E_ext_particle_s == "read_from_file") {
-                RemakeMultiFab(E_external_particle_field[lev][idim], true);
-            }
-            RemakeMultiFab(current_fp[lev][idim], false);
-            RemakeMultiFab(current_store[lev][idim], false);
-            if (current_deposition_algo == CurrentDepositionAlgo::Vay) {
-                RemakeMultiFab(current_fp_vay[lev][idim], false);
-            }
-            if (do_current_centering) {
-                RemakeMultiFab(current_fp_nodal[lev][idim], false);
-            }
-            if (fft_do_time_averaging) {
-                RemakeMultiFab(Efield_avg_fp[lev][idim], true);
-                RemakeMultiFab(Bfield_avg_fp[lev][idim], true);
-            }
-            if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) {
-                RemakeMultiFab(m_hybrid_pic_model->current_fp_temp[lev][idim], true);
-                RemakeMultiFab(m_hybrid_pic_model->current_fp_ampere[lev][idim], false);
-                RemakeMultiFab(m_hybrid_pic_model->current_fp_external[lev][idim],true);
-            }
             if (eb_enabled) {
                 if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) {
-                    RemakeMultiFab(m_edge_lengths[lev][idim], false);
-                    RemakeMultiFab(m_face_areas[lev][idim], false);
                     if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) {
-                        RemakeMultiFab(Venl[lev][idim], false);
-                        RemakeMultiFab(m_flag_info_face[lev][idim], false);
-                        RemakeMultiFab(m_flag_ext_face[lev][idim], false);
-                        RemakeMultiFab(m_area_mod[lev][idim], false);
-                        RemakeMultiFab(ECTRhofield[lev][idim], false);
                         m_borrowing[lev][idim] = std::make_unique<amrex::LayoutData<FaceInfoBox>>(amrex::convert(ba, Bfield_fp[lev][idim]->ixType().toIntVect()), dm);
                     }
                 }
             }
         }

-        RemakeMultiFab(F_fp[lev], true);
-        RemakeMultiFab(rho_fp[lev], false);
-        // phi_fp should be redistributed since we use the solution from
-        // the last step as the initial guess for the next solve
-        RemakeMultiFab(phi_fp[lev], true);
-
-        if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) {
-            RemakeMultiFab(m_hybrid_pic_model->rho_fp_temp[lev], true);
-            RemakeMultiFab(m_hybrid_pic_model->electron_pressure_fp[lev], false);
-        }
-
         if (eb_enabled) {
-            RemakeMultiFab(m_distance_to_eb[lev], false);
-
 #ifdef AMREX_USE_EB
             int const max_guard = guard_cells.ng_FieldSolver.max();
             m_field_factory[lev] = amrex::makeEBFabFactory(Geom(lev), ba, dm,
@@ -292,35 +240,8 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi
         }
 #endif

-        // Aux patch
-        if (lev == 0 && Bfield_aux[0][0]->ixType() == Bfield_fp[0][0]->ixType())
-        {
-            for (int idim = 0; idim < 3; ++idim) {
-                Bfield_aux[lev][idim] = std::make_unique<MultiFab>(*Bfield_fp[lev][idim], amrex::make_alias, 0, Bfield_aux[lev][idim]->nComp());
-                Efield_aux[lev][idim] = std::make_unique<MultiFab>(*Efield_fp[lev][idim], amrex::make_alias, 0, Efield_aux[lev][idim]->nComp());
-            }
-        } else {
-            for (int idim=0; idim < 3; ++idim)
-            {
-                RemakeMultiFab(Bfield_aux[lev][idim], false);
-                RemakeMultiFab(Efield_aux[lev][idim], false);
-            }
-        }
-
         // Coarse patch
         if (lev > 0) {
-            for (int idim=0; idim < 3; ++idim)
-            {
-                RemakeMultiFab(Bfield_cp[lev][idim], true);
-                RemakeMultiFab(Efield_cp[lev][idim], true);
-                RemakeMultiFab(current_cp[lev][idim], false);
-                if (fft_do_time_averaging) {
-                    RemakeMultiFab(Efield_avg_cp[lev][idim], true);
-                    RemakeMultiFab(Bfield_avg_cp[lev][idim], true);
-                }
-            }
-            RemakeMultiFab(F_cp[lev], true);
-            RemakeMultiFab(rho_cp[lev], false);

 #ifdef WARPX_USE_FFT
             if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD)
             {
@@ -358,17 +279,6 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi
         }

         if (lev > 0 && (n_field_gather_buffer > 0 || n_current_deposition_buffer > 0)) {
-            for (int idim=0; idim < 3; ++idim)
-            {
-                RemakeMultiFab(Bfield_cax[lev][idim], false);
-                RemakeMultiFab(Efield_cax[lev][idim], false);
-                RemakeMultiFab(current_buf[lev][idim], false);
-            }
-            RemakeMultiFab(charge_buf[lev], false);
-            // we can avoid redistributing these since we immediately re-build the values via BuildBufferMasks()
-            RemakeMultiFab(current_buffer_masks[lev], false);
-            RemakeMultiFab(gather_buffer_masks[lev], false);
-
             if (current_buffer_masks[lev] || gather_buffer_masks[lev]) {
                 BuildBufferMasks();
             }
@@ -405,6 +315,9 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi
 void
 WarpX::ComputeCostsHeuristic (amrex::Vector<std::unique_ptr<amrex::LayoutData<amrex::Real> > >& a_costs)
 {
+    using ablastr::fields::Direction;
+    using warpx::fields::FieldType;
+
     for (int lev = 0; lev <= finest_level; ++lev)
     {
         const auto & mypc_ref = GetInstance().GetPartContainer();
@@ -423,7 +336,7 @@
             ->update(t_lab);

-    BL_ASSERT(OnSameGrids(lev,jx));
+    BL_ASSERT(OnSameGrids(lev, *fields.get(FieldType::current_fp, Direction{0}, lev)));

     amrex::LayoutData<Real>* cost = WarpX::getCosts(lev);

-    const bool has_buffer = cjx;
+    const bool has_rho = fields.has(FieldType::rho_fp, lev);
+    const bool has_buffer = fields.has(FieldType::current_buf, lev);

 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
@@ -626,11 +626,13 @@ LaserParticleContainer::Evolve (int lev,
             np_current = 0;
         }

-        if (rho && ! skip_deposition && ! do_not_deposit) {
+        if (has_rho && ! skip_deposition && !
do_not_deposit) {
            int* AMREX_RESTRICT ion_lev = nullptr;
+            amrex::MultiFab* rho = fields.get(FieldType::rho_fp, lev);
             DepositCharge(pti, wp, ion_lev, rho, 0, 0, np_current,
                           thread_num, lev, lev);
             if (has_buffer) {
+                amrex::MultiFab* crho = fields.get(FieldType::rho_buf, lev);
                 DepositCharge(pti, wp, ion_lev, crho, 0, np_current, np-np_current,
                               thread_num, lev, lev-1);
             }
@@ -658,6 +660,7 @@ LaserParticleContainer::Evolve (int lev,
         WARPX_PROFILE_VAR_STOP(blp_pp);

         // Current Deposition
+        using ablastr::fields::Direction;
         if (!skip_deposition)
         {
             // Deposit at t_{n+1/2}
@@ -665,13 +668,19 @@ LaserParticleContainer::Evolve (int lev,
             int* ion_lev = nullptr;
             // Deposit inside domains
-            DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, &jx, &jy, &jz,
+            amrex::MultiFab * jx = fields.get(current_fp_string, Direction{0}, lev);
+            amrex::MultiFab * jy = fields.get(current_fp_string, Direction{1}, lev);
+            amrex::MultiFab * jz = fields.get(current_fp_string, Direction{2}, lev);
+            DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, jx, jy, jz,
                            0, np_current, thread_num,
                            lev, lev, dt, relative_time, push_type);
             if (has_buffer)
             {
                 // Deposit in buffers
+                amrex::MultiFab * cjx = fields.get(FieldType::current_buf, Direction{0}, lev);
+                amrex::MultiFab * cjy = fields.get(FieldType::current_buf, Direction{1}, lev);
+                amrex::MultiFab * cjz = fields.get(FieldType::current_buf, Direction{2}, lev);
                 DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, cjx, cjy, cjz,
                                np_current, np-np_current, thread_num,
                                lev, lev-1, dt, relative_time, push_type);
@@ -679,11 +688,13 @@ LaserParticleContainer::Evolve (int lev,
             }

-        if (rho && ! skip_deposition && ! do_not_deposit) {
+        if (has_rho && ! skip_deposition && ! do_not_deposit) {
             int* AMREX_RESTRICT ion_lev = nullptr;
+            amrex::MultiFab* rho = fields.get(FieldType::rho_fp, lev);
             DepositCharge(pti, wp, ion_lev, rho, 1, 0, np_current,
                           thread_num, lev, lev);
             if (has_buffer) {
+                amrex::MultiFab* crho = fields.get(FieldType::rho_buf, lev);
                 DepositCharge(pti, wp, ion_lev, crho, 1, np_current, np-np_current,
                               thread_num, lev, lev-1);
             }
diff --git a/Source/Particles/MultiParticleContainer.H b/Source/Particles/MultiParticleContainer.H
index 97e4e1bc4da..0e33b6bac3c 100644
--- a/Source/Particles/MultiParticleContainer.H
+++ b/Source/Particles/MultiParticleContainer.H
@@ -26,6 +26,8 @@
 #include "WarpXParticleContainer.H"
 #include "ParticleBoundaries.H"

+#include <ablastr/fields/MultiFabRegister.H>
+
 #include
 #include
 #include
@@ -102,16 +104,16 @@ public:
      * field solve, and pushing the particles, for all the species in the MultiParticleContainer.
      * This is the electromagnetic version.
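     * A sketch of a call site after this change (illustrative only; the
     * current_fp_string argument selects the registered current entry, e.g.
     * "current_fp" or, when Vay deposition is used, "current_fp_vay"):
     * \code
     * mypc->Evolve(m_fields, lev, "current_fp", t, dt);
     * \endcode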
      */
-    void Evolve (int lev,
-                 const amrex::MultiFab& Ex, const amrex::MultiFab& Ey, const amrex::MultiFab& Ez,
-                 const amrex::MultiFab& Bx, const amrex::MultiFab& By, const amrex::MultiFab& Bz,
-                 amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz,
-                 amrex::MultiFab* cjx, amrex::MultiFab* cjy, amrex::MultiFab* cjz,
-                 amrex::MultiFab* rho, amrex::MultiFab* crho,
-                 const amrex::MultiFab* cEx, const amrex::MultiFab* cEy, const amrex::MultiFab* cEz,
-                 const amrex::MultiFab* cBx, const amrex::MultiFab* cBy, const amrex::MultiFab* cBz,
-                 amrex::Real t, amrex::Real dt, DtType a_dt_type=DtType::Full, bool skip_deposition=false,
-                 PushType push_type=PushType::Explicit);
+    void Evolve (
+        ablastr::fields::MultiFabRegister& fields,
+        int lev,
+        std::string const& current_fp_string,
+        amrex::Real t,
+        amrex::Real dt,
+        DtType a_dt_type=DtType::Full,
+        bool skip_deposition=false,
+        PushType push_type=PushType::Explicit
+    );

     /**
      * \brief This pushes the particle positions by one time step for all the species in the
@@ -147,7 +149,7 @@ public:
      * the time of the deposition.
      */
     void
-    DepositCharge (amrex::Vector<std::unique_ptr<amrex::MultiFab> >& rho,
+    DepositCharge (const ablastr::fields::MultiLevelScalarField& rho,
                    amrex::Real relative_time);

     /**
@@ -162,7 +164,7 @@ public:
      * the time of the deposition.
      */
     void
-    DepositCurrent (amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > >& J,
+    DepositCurrent (ablastr::fields::MultiLevelVectorField const& J,
                     amrex::Real dt, amrex::Real relative_time);

     ///
@@ -298,7 +300,7 @@ public:

     PhysicalParticleContainer& GetPCtmp () { return *pc_tmp; }

-    void ScrapeParticlesAtEB (const amrex::Vector<const amrex::MultiFab*>& distance_to_eb);
+    void ScrapeParticlesAtEB (ablastr::fields::MultiLevelScalarField const& distance_to_eb);

     std::string m_B_ext_particle_s = "none";
     std::string m_E_ext_particle_s = "none";
diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp
index 23af4177228..619b54ed7ad 100644
--- a/Source/Particles/MultiParticleContainer.cpp
+++ b/Source/Particles/MultiParticleContainer.cpp
@@ -11,7 +11,7 @@
 */
 #include "MultiParticleContainer.H"

-#include "FieldSolver/Fields.H"
+#include "Fields.H"
 #include "Particles/ElementaryProcess/Ionization.H"
 #ifdef WARPX_QED
 #   include "Particles/ElementaryProcess/QEDInternals/BreitWheelerEngineWrapper.H"
@@ -44,6 +44,7 @@

 #include "WarpX.H"

+#include <ablastr/fields/MultiFabRegister.H>
 #include
 #include

@@ -80,7 +81,7 @@
 #include

 using namespace amrex;
-using namespace warpx::fields;
+using warpx::fields::FieldType;

 namespace
 {
@@ -457,30 +458,26 @@ MultiParticleContainer::InitMultiPhysicsModules ()
 }

 void
-MultiParticleContainer::Evolve (int lev,
-                                const MultiFab& Ex, const MultiFab& Ey, const MultiFab& Ez,
-                                const MultiFab& Bx, const MultiFab& By, const MultiFab& Bz,
-                                MultiFab& jx, MultiFab& jy, MultiFab& jz,
-                                MultiFab* cjx, MultiFab* cjy, MultiFab* cjz,
-                                MultiFab* rho, MultiFab* crho,
-                                const MultiFab* cEx, const MultiFab* cEy, const MultiFab* cEz,
-                                const MultiFab* cBx, const MultiFab* cBy, const MultiFab* cBz,
+MultiParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields,
+                                int lev,
+                                std::string const& current_fp_string,
                                 Real t, Real dt, DtType a_dt_type, bool skip_deposition,
                                 PushType push_type)
 {
     if (!
skip_deposition) {
-        jx.setVal(0.0);
-        jy.setVal(0.0);
-        jz.setVal(0.0);
-        if (cjx) { cjx->setVal(0.0); }
-        if (cjy) { cjy->setVal(0.0); }
-        if (cjz) { cjz->setVal(0.0); }
-        if (rho) { rho->setVal(0.0); }
-        if (crho) { crho->setVal(0.0); }
+        using ablastr::fields::Direction;
+
+        fields.get(current_fp_string, Direction{0}, lev)->setVal(0.0);
+        fields.get(current_fp_string, Direction{1}, lev)->setVal(0.0);
+        fields.get(current_fp_string, Direction{2}, lev)->setVal(0.0);
+        if (fields.has(FieldType::current_buf, Direction{0}, lev)) { fields.get(FieldType::current_buf, Direction{0}, lev)->setVal(0.0); }
+        if (fields.has(FieldType::current_buf, Direction{1}, lev)) { fields.get(FieldType::current_buf, Direction{1}, lev)->setVal(0.0); }
+        if (fields.has(FieldType::current_buf, Direction{2}, lev)) { fields.get(FieldType::current_buf, Direction{2}, lev)->setVal(0.0); }
+        if (fields.has(FieldType::rho_fp, lev)) { fields.get(FieldType::rho_fp, lev)->setVal(0.0); }
+        if (fields.has(FieldType::rho_buf, lev)) { fields.get(FieldType::rho_buf, lev)->setVal(0.0); }
     }
     for (auto& pc : allcontainers) {
-        pc->Evolve(lev, Ex, Ey, Ez, Bx, By, Bz, jx, jy, jz, cjx, cjy, cjz,
-                   rho, crho, cEx, cEy, cEz, cBx, cBy, cBz, t, dt, a_dt_type, skip_deposition, push_type);
+        pc->Evolve(fields, lev, current_fp_string, t, dt, a_dt_type, skip_deposition, push_type);
     }
 }

@@ -529,11 +526,11 @@ MultiParticleContainer::GetZeroChargeDensity (const int lev)

 void
 MultiParticleContainer::DepositCurrent (
-    amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > >& J,
+    ablastr::fields::MultiLevelVectorField const & J,
     const amrex::Real dt, const amrex::Real relative_time)
 {
     // Reset the J arrays
-    for (auto& J_lev : J)
+    for (const auto& J_lev : J)
     {
         J_lev[0]->setVal(0.0_rt);
         J_lev[1]->setVal(0.0_rt);
@@ -550,18 +547,18 @@ MultiParticleContainer::DepositCurrent (
     for (int lev = 0; lev < J.size(); ++lev)
     {
         WarpX::GetInstance().ApplyInverseVolumeScalingToCurrentDensity(
-            J[lev][0].get(), J[lev][1].get(), J[lev][2].get(), lev);
+            J[lev][0], J[lev][1], J[lev][2], lev);
     }
 #endif
 }

 void
 MultiParticleContainer::DepositCharge (
-    amrex::Vector<std::unique_ptr<amrex::MultiFab> >& rho,
+    const ablastr::fields::MultiLevelScalarField& rho,
     const amrex::Real relative_time)
 {
     // Reset the rho array
-    for (auto& rho_lev : rho)
+    for (const auto& rho_lev : rho)
     {
         rho_lev->setVal(0.0_rt);
     }
@@ -587,7 +584,7 @@ MultiParticleContainer::DepositCharge (
 #ifdef WARPX_DIM_RZ
     for (int lev = 0; lev < rho.size(); ++lev)
     {
-        WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho[lev].get(), lev);
+        WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho[lev], lev);
     }
 #endif
 }
@@ -963,7 +960,8 @@ void MultiParticleContainer::CheckIonizationProductSpecies()
     }
 }

-void MultiParticleContainer::ScrapeParticlesAtEB (const amrex::Vector<const amrex::MultiFab*>& distance_to_eb)
+void MultiParticleContainer::ScrapeParticlesAtEB (
+    ablastr::fields::MultiLevelScalarField const& distance_to_eb)
 {
     for (auto& pc : allcontainers) {
         scrapeParticlesAtEB(*pc, distance_to_eb, ParticleBoundaryProcess::Absorb());
@@ -1358,12 +1356,13 @@ MultiParticleContainer::doQEDSchwinger ()
     pc_product_ele->defineAllParticleTiles();
     pc_product_pos->defineAllParticleTiles();

-    const MultiFab & Ex = warpx.getField(FieldType::Efield_aux, level_0,0);
-    const MultiFab & Ey = warpx.getField(FieldType::Efield_aux, level_0,1);
-    const MultiFab & Ez = warpx.getField(FieldType::Efield_aux, level_0,2);
-    const MultiFab & Bx = warpx.getField(FieldType::Bfield_aux, level_0,0);
-    const MultiFab & By = warpx.getField(FieldType::Bfield_aux, level_0,1);
-    const MultiFab & Bz = warpx.getField(FieldType::Bfield_aux, level_0,2);
+    using ablastr::fields::Direction;
+    const MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, level_0);
+    const MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, level_0);
+    const MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, level_0);
+    const MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, level_0);
+    const MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, level_0);
+    const MultiFab & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, level_0);

 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
diff --git a/Source/Particles/ParticleBoundaryBuffer.H b/Source/Particles/ParticleBoundaryBuffer.H
index d33834309ab..24b388be00e 100644
--- a/Source/Particles/ParticleBoundaryBuffer.H
+++ b/Source/Particles/ParticleBoundaryBuffer.H
@@ -12,6 +12,8 @@
 #include "Particles/PinnedMemoryParticleContainer.H"
 #include "Utils/export.H"

+#include <ablastr/fields/MultiFabRegister.H>
+
 #include

@@ -41,7 +43,7 @@ public:
     void gatherParticlesFromDomainBoundaries (MultiParticleContainer& mypc);

     void gatherParticlesFromEmbeddedBoundaries (
-        MultiParticleContainer& mypc, const amrex::Vector<const amrex::MultiFab*>& distance_to_eb
+        MultiParticleContainer& mypc, ablastr::fields::MultiLevelScalarField const& distance_to_eb
     );

     void redistribute ();
diff --git a/Source/Particles/ParticleBoundaryBuffer.cpp b/Source/Particles/ParticleBoundaryBuffer.cpp
index bc113e8e3a3..0391dcc6178 100644
--- a/Source/Particles/ParticleBoundaryBuffer.cpp
+++ b/Source/Particles/ParticleBoundaryBuffer.cpp
@@ -462,7 +462,7 @@ void ParticleBoundaryBuffer::gatherParticlesFromDomainBoundaries (MultiParticleC
 }

 void ParticleBoundaryBuffer::gatherParticlesFromEmbeddedBoundaries (
-    MultiParticleContainer& mypc, const amrex::Vector<const amrex::MultiFab*>& distance_to_eb)
+    MultiParticleContainer& mypc, ablastr::fields::MultiLevelScalarField const& distance_to_eb)
 {
     if (EB::enabled()) {
         WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::EB");
diff --git a/Source/Particles/PhotonParticleContainer.H b/Source/Particles/PhotonParticleContainer.H
index 34afac53482..485f56dba43 100644
--- a/Source/Particles/PhotonParticleContainer.H
+++ b/Source/Particles/PhotonParticleContainer.H
@@ -46,34 +46,16 @@ public:

     void InitData() override;

-    void Evolve (int lev,
-                 const amrex::MultiFab& Ex,
-                 const amrex::MultiFab& Ey,
-                 const amrex::MultiFab& Ez,
-                 const amrex::MultiFab& Bx,
-                 const amrex::MultiFab& By,
-                 const amrex::MultiFab& Bz,
-                 amrex::MultiFab& jx,
-                 amrex::MultiFab& jy,
-                 amrex::MultiFab& jz,
-                 amrex::MultiFab* cjx,
-                 amrex::MultiFab* cjy,
-                 amrex::MultiFab* cjz,
-                 amrex::MultiFab* rho,
-                 amrex::MultiFab* crho,
-                 const amrex::MultiFab* cEx,
-                 const amrex::MultiFab* cEy,
-                 const amrex::MultiFab* cEz,
-                 const amrex::MultiFab* cBx,
-                 const amrex::MultiFab* cBy,
-                 const amrex::MultiFab* cBz,
-                 amrex::Real t,
-                 amrex::Real dt,
-                 DtType a_dt_type=DtType::Full,
-                 bool skip_deposition=false,
-                 PushType push_type=PushType::Explicit) override;
+    void Evolve (ablastr::fields::MultiFabRegister& fields,
+                 int lev,
+                 const std::string& current_fp_string,
+                 amrex::Real t,
+                 amrex::Real dt,
+                 DtType a_dt_type=DtType::Full,
+                 bool skip_deposition=false,
+                 PushType push_type=PushType::Explicit) override;

-    void PushPX(WarpXParIter& pti,
+    void PushPX (WarpXParIter& pti,
                 amrex::FArrayBox const * exfab,
                 amrex::FArrayBox const * eyfab,
                 amrex::FArrayBox const * ezfab,
diff --git a/Source/Particles/PhotonParticleContainer.cpp
b/Source/Particles/PhotonParticleContainer.cpp index 1f15d5210f5..47c426cd6ff 100644 --- a/Source/Particles/PhotonParticleContainer.cpp +++ b/Source/Particles/PhotonParticleContainer.cpp @@ -229,27 +229,17 @@ PhotonParticleContainer::PushPX (WarpXParIter& pti, } void -PhotonParticleContainer::Evolve (int lev, - const MultiFab& Ex, const MultiFab& Ey, const MultiFab& Ez, - const MultiFab& Bx, const MultiFab& By, const MultiFab& Bz, - MultiFab& jx, MultiFab& jy, MultiFab& jz, - MultiFab* cjx, MultiFab* cjy, MultiFab* cjz, - MultiFab* rho, MultiFab* crho, - const MultiFab* cEx, const MultiFab* cEy, const MultiFab* cEz, - const MultiFab* cBx, const MultiFab* cBy, const MultiFab* cBz, +PhotonParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, Real t, Real dt, DtType a_dt_type, bool skip_deposition, PushType push_type) { // This does gather, push and deposit. // Push and deposit have been re-written for photons - PhysicalParticleContainer::Evolve (lev, - Ex, Ey, Ez, - Bx, By, Bz, - jx, jy, jz, - cjx, cjy, cjz, - rho, crho, - cEx, cEy, cEz, - cBx, cBy, cBz, + PhysicalParticleContainer::Evolve (fields, + lev, + current_fp_string, t, dt, a_dt_type, skip_deposition, push_type); } diff --git a/Source/Particles/PhysicalParticleContainer.H b/Source/Particles/PhysicalParticleContainer.H index 8102fc96a91..18880239183 100644 --- a/Source/Particles/PhysicalParticleContainer.H +++ b/Source/Particles/PhysicalParticleContainer.H @@ -81,27 +81,9 @@ public: * \brief Evolve is the central function PhysicalParticleContainer that * advances plasma particles for a time dt (typically one timestep). * + * \param fields the WarpX field register * \param lev level on which particles are living - * \param Ex MultiFab from which field Ex is gathered - * \param Ey MultiFab from which field Ey is gathered - * \param Ez MultiFab from which field Ez is gathered - * \param Bx MultiFab from which field Bx is gathered - * \param By MultiFab from which field By is gathered - * \param Bz MultiFab from which field Bz is gathered - * \param jx MultiFab to which the particles' current jx is deposited - * \param jy MultiFab to which the particles' current jy is deposited - * \param jz MultiFab to which the particles' current jz is deposited - * \param cjx Same as jx (coarser, from lev-1), when using deposition buffers - * \param cjy Same as jy (coarser, from lev-1), when using deposition buffers - * \param cjz Same as jz (coarser, from lev-1), when using deposition buffers - * \param rho MultiFab to which the particles' charge is deposited - * \param crho Same as rho (coarser, from lev-1), when using deposition buffers - * \param cEx Same as Ex (coarser, from lev-1), when using gather buffers - * \param cEy Same as Ey (coarser, from lev-1), when using gather buffers - * \param cEz Same as Ez (coarser, from lev-1), when using gather buffers - * \param cBx Same as Bx (coarser, from lev-1), when using gather buffers - * \param cBy Same as By (coarser, from lev-1), when using gather buffers - * \param cBz Same as Bz (coarser, from lev-1), when using gather buffers + * \param current_fp_string current coarse or fine patch identifier in fields * \param t current physical time * \param dt time step by which particles are advanced * \param a_dt_type type of time step (used for sub-cycling) @@ -112,32 +94,14 @@ public: * field gather, particle push and current deposition for all particles * in the box. 
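     * An illustrative note (names beyond the documented parameters are
     * assumptions): inside Evolve, current_fp_string is forwarded verbatim to
     * the field register, so the same deposition code can write to either the
     * regular or the Vay-deposition current, e.g.
     * \code
     * amrex::MultiFab* jx = fields.get(current_fp_string, Direction{0}, lev);
     * \endcode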
*/ - void Evolve (int lev, - const amrex::MultiFab& Ex, - const amrex::MultiFab& Ey, - const amrex::MultiFab& Ez, - const amrex::MultiFab& Bx, - const amrex::MultiFab& By, - const amrex::MultiFab& Bz, - amrex::MultiFab& jx, - amrex::MultiFab& jy, - amrex::MultiFab& jz, - amrex::MultiFab* cjx, - amrex::MultiFab* cjy, - amrex::MultiFab* cjz, - amrex::MultiFab* rho, - amrex::MultiFab* crho, - const amrex::MultiFab* cEx, - const amrex::MultiFab* cEy, - const amrex::MultiFab* cEz, - const amrex::MultiFab* cBx, - const amrex::MultiFab* cBy, - const amrex::MultiFab* cBz, - amrex::Real t, - amrex::Real dt, - DtType a_dt_type=DtType::Full, - bool skip_deposition=false, - PushType push_type=PushType::Explicit) override; + void Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, + amrex::Real t, + amrex::Real dt, + DtType a_dt_type=DtType::Full, + bool skip_deposition=false, + PushType push_type=PushType::Explicit) override; virtual void PushPX (WarpXParIter& pti, amrex::FArrayBox const * exfab, diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 23af57b9206..07997a61f0c 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -10,6 +10,7 @@ */ #include "PhysicalParticleContainer.H" +#include "Fields.H" #include "Filter/NCIGodfreyFilter.H" #include "Initialization/InjectorDensity.H" #include "Initialization/InjectorMomentum.H" @@ -1342,8 +1343,12 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int #ifdef AMREX_USE_EB if (EB::enabled()) { - auto &distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); - scrapeParticlesAtEB(*this, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + using warpx::fields::FieldType; + auto & warpx = WarpX::GetInstance(); + scrapeParticlesAtEB( + *this, + warpx.m_fields.get_mr_levels(FieldType::distance_to_eb, warpx.finestLevel()), + ParticleBoundaryProcess::Absorb()); } #endif @@ -1709,8 +1714,12 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, #ifdef AMREX_USE_EB if (EB::enabled()) { - auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); - scrapeParticlesAtEB(tmp_pc, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + using warpx::fields::FieldType; + auto & warpx = WarpX::GetInstance(); + scrapeParticlesAtEB( + tmp_pc, + warpx.m_fields.get_mr_levels(FieldType::distance_to_eb, warpx.finestLevel()), + ParticleBoundaryProcess::Absorb()); } #endif @@ -1724,29 +1733,36 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, } void -PhysicalParticleContainer::Evolve (int lev, - const MultiFab& Ex, const MultiFab& Ey, const MultiFab& Ez, - const MultiFab& Bx, const MultiFab& By, const MultiFab& Bz, - MultiFab& jx, MultiFab& jy, MultiFab& jz, - MultiFab* cjx, MultiFab* cjy, MultiFab* cjz, - MultiFab* rho, MultiFab* crho, - const MultiFab* cEx, const MultiFab* cEy, const MultiFab* cEz, - const MultiFab* cBx, const MultiFab* cBy, const MultiFab* cBz, +PhysicalParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, Real /*t*/, Real dt, DtType a_dt_type, bool skip_deposition, PushType push_type) { + using ablastr::fields::Direction; + using warpx::fields::FieldType; WARPX_PROFILE("PhysicalParticleContainer::Evolve()"); 
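// [Editorial illustration, not part of the patch] Null-pointer tests on the
// old cjx/cEx/rho arguments become registry presence checks; this sketch
// mirrors the booleans introduced a few lines below:
//
//     const bool has_rho = fields.has(FieldType::rho_fp, lev);
//     const bool has_cjx = fields.has(FieldType::current_buf, Direction{0}, lev);
//     const bool has_cEx = fields.has(FieldType::Efield_cax, Direction{0}, lev);
//     const bool has_buffer = has_cEx || has_cjx; // gather or deposition buffers in use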
WARPX_PROFILE_VAR_NS("PhysicalParticleContainer::Evolve::GatherAndPush", blp_fg); - BL_ASSERT(OnSameGrids(lev,jx)); + BL_ASSERT(OnSameGrids(lev, *fields.get(FieldType::current_fp, Direction{0}, lev))); amrex::LayoutData* cost = WarpX::getCosts(lev); const iMultiFab* current_masks = WarpX::CurrentBufferMasks(lev); const iMultiFab* gather_masks = WarpX::GatherBufferMasks(lev); - const bool has_buffer = cEx || cjx; + const bool has_rho = fields.has(FieldType::rho_fp, lev); + const bool has_cjx = fields.has(FieldType::current_buf, Direction{0}, lev); + const bool has_cEx = fields.has(FieldType::Efield_cax, Direction{0}, lev); + const bool has_buffer = has_cEx || has_cjx; + + amrex::MultiFab & Ex = *fields.get(FieldType::Efield_aux, Direction{0}, lev); + amrex::MultiFab & Ey = *fields.get(FieldType::Efield_aux, Direction{1}, lev); + amrex::MultiFab & Ez = *fields.get(FieldType::Efield_aux, Direction{2}, lev); + amrex::MultiFab & Bx = *fields.get(FieldType::Bfield_aux, Direction{0}, lev); + amrex::MultiFab & By = *fields.get(FieldType::Bfield_aux, Direction{1}, lev); + amrex::MultiFab & Bz = *fields.get(FieldType::Bfield_aux, Direction{2}, lev); if (m_do_back_transformed_particles) { @@ -1834,17 +1850,19 @@ PhysicalParticleContainer::Evolve (int lev, pti, lev, current_masks, gather_masks ); } - const long np_current = (cjx) ? nfine_current : np; + const long np_current = has_cjx ? nfine_current : np; - if (rho && ! skip_deposition && ! do_not_deposit) { + if (has_rho && ! skip_deposition && ! do_not_deposit) { // Deposit charge before particle push, in component 0 of MultiFab rho. const int* const AMREX_RESTRICT ion_lev = (do_field_ionization)? pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr():nullptr; + amrex::MultiFab* rho = fields.get(FieldType::rho_fp, lev); DepositCharge(pti, wp, ion_lev, rho, 0, 0, np_current, thread_num, lev, lev); if (has_buffer){ + amrex::MultiFab* crho = fields.get(FieldType::rho_buf, lev); DepositCharge(pti, wp, ion_lev, crho, 0, np_current, np-np_current, thread_num, lev, lev-1); } @@ -1852,7 +1870,7 @@ PhysicalParticleContainer::Evolve (int lev, if (! do_not_push) { - const long np_gather = (cEx) ? nfine_gather : np; + const long np_gather = has_cEx ? 
nfine_gather : np; int e_is_nodal = Ex.is_nodal() and Ey.is_nodal() and Ez.is_nodal(); @@ -1879,13 +1897,20 @@ PhysicalParticleContainer::Evolve (int lev, const IntVect& ref_ratio = WarpX::RefRatio(lev-1); const Box& cbox = amrex::coarsen(box,ref_ratio); + amrex::MultiFab & cEx = *fields.get(FieldType::Efield_cax, Direction{0}, lev); + amrex::MultiFab & cEy = *fields.get(FieldType::Efield_cax, Direction{1}, lev); + amrex::MultiFab & cEz = *fields.get(FieldType::Efield_cax, Direction{2}, lev); + amrex::MultiFab & cBx = *fields.get(FieldType::Bfield_cax, Direction{0}, lev); + amrex::MultiFab & cBy = *fields.get(FieldType::Bfield_cax, Direction{1}, lev); + amrex::MultiFab & cBz = *fields.get(FieldType::Bfield_cax, Direction{2}, lev); + // Data on the grid - FArrayBox const* cexfab = &(*cEx)[pti]; - FArrayBox const* ceyfab = &(*cEy)[pti]; - FArrayBox const* cezfab = &(*cEz)[pti]; - FArrayBox const* cbxfab = &(*cBx)[pti]; - FArrayBox const* cbyfab = &(*cBy)[pti]; - FArrayBox const* cbzfab = &(*cBz)[pti]; + FArrayBox const* cexfab = &cEx[pti]; + FArrayBox const* ceyfab = &cEy[pti]; + FArrayBox const* cezfab = &cEz[pti]; + FArrayBox const* cbxfab = &cBx[pti]; + FArrayBox const* cbyfab = &cBy[pti]; + FArrayBox const* cbzfab = &cBz[pti]; if (WarpX::use_fdtd_nci_corr) { @@ -1896,23 +1921,23 @@ PhysicalParticleContainer::Evolve (int lev, applyNCIFilter(lev-1, cbox, exeli, eyeli, ezeli, bxeli, byeli, bzeli, filtered_Ex, filtered_Ey, filtered_Ez, filtered_Bx, filtered_By, filtered_Bz, - (*cEx)[pti], (*cEy)[pti], (*cEz)[pti], - (*cBx)[pti], (*cBy)[pti], (*cBz)[pti], + cEx[pti], cEy[pti], cEz[pti], + cBx[pti], cBy[pti], cBz[pti], cexfab, ceyfab, cezfab, cbxfab, cbyfab, cbzfab); } // Field gather and push for particles in gather buffers - e_is_nodal = cEx->is_nodal() and cEy->is_nodal() and cEz->is_nodal(); + e_is_nodal = cEx.is_nodal() and cEy.is_nodal() and cEz.is_nodal(); if (push_type == PushType::Explicit) { PushPX(pti, cexfab, ceyfab, cezfab, cbxfab, cbyfab, cbzfab, - cEx->nGrowVect(), e_is_nodal, + cEx.nGrowVect(), e_is_nodal, nfine_gather, np-nfine_gather, lev, lev-1, dt, ScaleFields(false), a_dt_type); } else if (push_type == PushType::Implicit) { ImplicitPushXP(pti, cexfab, ceyfab, cezfab, cbxfab, cbyfab, cbzfab, - cEx->nGrowVect(), e_is_nodal, + cEx.nGrowVect(), e_is_nodal, nfine_gather, np-nfine_gather, lev, lev-1, dt, ScaleFields(false), a_dt_type); } @@ -1930,13 +1955,19 @@ PhysicalParticleContainer::Evolve (int lev, pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr():nullptr; // Deposit inside domains - DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, &jx, &jy, &jz, + amrex::MultiFab * jx = fields.get(current_fp_string, Direction{0}, lev); + amrex::MultiFab * jy = fields.get(current_fp_string, Direction{1}, lev); + amrex::MultiFab * jz = fields.get(current_fp_string, Direction{2}, lev); + DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, jx, jy, jz, 0, np_current, thread_num, lev, lev, dt, relative_time, push_type); if (has_buffer) { // Deposit in buffers + amrex::MultiFab * cjx = fields.get(FieldType::current_buf, Direction{0}, lev); + amrex::MultiFab * cjy = fields.get(FieldType::current_buf, Direction{1}, lev); + amrex::MultiFab * cjz = fields.get(FieldType::current_buf, Direction{2}, lev); DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, cjx, cjy, cjz, np_current, np-np_current, thread_num, lev, lev-1, dt, relative_time, push_type); @@ -1944,10 +1975,11 @@ PhysicalParticleContainer::Evolve (int lev, } // end of "if electrostatic_solver_id == ElectrostaticSolverAlgo::None" } // end 
of "if do_not_push" - if (rho && ! skip_deposition && ! do_not_deposit) { + if (has_rho && ! skip_deposition && ! do_not_deposit) { // Deposit charge after particle push, in component 1 of MultiFab rho. // (Skipped for electrostatic solver, as this may lead to out-of-bounds) if (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::None) { + amrex::MultiFab* rho = fields.get(FieldType::rho_fp, lev); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(rho->nComp() >= 2, "Cannot deposit charge in rho component 1: only component 0 is allocated!"); @@ -1957,6 +1989,7 @@ PhysicalParticleContainer::Evolve (int lev, DepositCharge(pti, wp, ion_lev, rho, 1, 0, np_current, thread_num, lev, lev); if (has_buffer){ + amrex::MultiFab* crho = fields.get(FieldType::rho_buf, lev); DepositCharge(pti, wp, ion_lev, crho, 1, np_current, np-np_current, thread_num, lev, lev-1); } diff --git a/Source/Particles/RigidInjectedParticleContainer.H b/Source/Particles/RigidInjectedParticleContainer.H index bc20420ea6e..d3565dd2df6 100644 --- a/Source/Particles/RigidInjectedParticleContainer.H +++ b/Source/Particles/RigidInjectedParticleContainer.H @@ -61,32 +61,14 @@ public: virtual void RemapParticles(); - void Evolve (int lev, - const amrex::MultiFab& Ex, - const amrex::MultiFab& Ey, - const amrex::MultiFab& Ez, - const amrex::MultiFab& Bx, - const amrex::MultiFab& By, - const amrex::MultiFab& Bz, - amrex::MultiFab& jx, - amrex::MultiFab& jy, - amrex::MultiFab& jz, - amrex::MultiFab* cjx, - amrex::MultiFab* cjy, - amrex::MultiFab* cjz, - amrex::MultiFab* rho, - amrex::MultiFab* crho, - const amrex::MultiFab* cEx, - const amrex::MultiFab* cEy, - const amrex::MultiFab* cEz, - const amrex::MultiFab* cBx, - const amrex::MultiFab* cBy, - const amrex::MultiFab* cBz, - amrex::Real t, - amrex::Real dt, - DtType a_dt_type=DtType::Full, - bool skip_deposition=false, - PushType push_type=PushType::Explicit) override; + void Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, + amrex::Real t, + amrex::Real dt, + DtType a_dt_type=DtType::Full, + bool skip_deposition=false, + PushType push_type=PushType::Explicit) override; void PushPX (WarpXParIter& pti, amrex::FArrayBox const * exfab, diff --git a/Source/Particles/RigidInjectedParticleContainer.cpp b/Source/Particles/RigidInjectedParticleContainer.cpp index c3ec4c41131..d1e1f48ab38 100644 --- a/Source/Particles/RigidInjectedParticleContainer.cpp +++ b/Source/Particles/RigidInjectedParticleContainer.cpp @@ -291,14 +291,9 @@ RigidInjectedParticleContainer::PushPX (WarpXParIter& pti, } void -RigidInjectedParticleContainer::Evolve (int lev, - const MultiFab& Ex, const MultiFab& Ey, const MultiFab& Ez, - const MultiFab& Bx, const MultiFab& By, const MultiFab& Bz, - MultiFab& jx, MultiFab& jy, MultiFab& jz, - MultiFab* cjx, MultiFab* cjy, MultiFab* cjz, - MultiFab* rho, MultiFab* crho, - const MultiFab* cEx, const MultiFab* cEy, const MultiFab* cEz, - const MultiFab* cBx, const MultiFab* cBy, const MultiFab* cBz, +RigidInjectedParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, Real t, Real dt, DtType a_dt_type, bool skip_deposition, PushType push_type) { @@ -317,14 +312,9 @@ RigidInjectedParticleContainer::Evolve (int lev, done_injecting_lev = ((zinject_plane_levels[lev] < plo[WARPX_ZINDEX] && WarpX::moving_window_v + WarpX::beta_boost*PhysConst::c >= 0.) 
|| (zinject_plane_levels[lev] > phi[WARPX_ZINDEX] && WarpX::moving_window_v + WarpX::beta_boost*PhysConst::c <= 0.)); - PhysicalParticleContainer::Evolve (lev, - Ex, Ey, Ez, - Bx, By, Bz, - jx, jy, jz, - cjx, cjy, cjz, - rho, crho, - cEx, cEy, cEz, - cBx, cBy, cBz, + PhysicalParticleContainer::Evolve (fields, + lev, + current_fp_string, t, dt, a_dt_type, skip_deposition, push_type); } diff --git a/Source/Particles/WarpXParticleContainer.H b/Source/Particles/WarpXParticleContainer.H index 7e882c151e8..9c316b110ee 100644 --- a/Source/Particles/WarpXParticleContainer.H +++ b/Source/Particles/WarpXParticleContainer.H @@ -25,6 +25,8 @@ #include "MultiParticleContainer_fwd.H" #include "NamedComponentParticleContainer.H" +#include + #include #include #include @@ -145,14 +147,9 @@ public: * particles for a time dt (typically one timestep). It is a pure virtual * function for flexibility. */ - virtual void Evolve (int lev, - const amrex::MultiFab& Ex, const amrex::MultiFab& Ey, const amrex::MultiFab& Ez, - const amrex::MultiFab& Bx, const amrex::MultiFab& By, const amrex::MultiFab& Bz, - amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz, - amrex::MultiFab* cjx, amrex::MultiFab* cjy, amrex::MultiFab* cjz, - amrex::MultiFab* rho, amrex::MultiFab* crho, - const amrex::MultiFab* cEx, const amrex::MultiFab* cEy, const amrex::MultiFab* cEz, - const amrex::MultiFab* cBx, const amrex::MultiFab* cBy, const amrex::MultiFab* cBz, + virtual void Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, amrex::Real t, amrex::Real dt, DtType a_dt_type=DtType::Full, bool skip_deposition=false, PushType push_type=PushType::Explicit) = 0; @@ -199,7 +196,7 @@ public: * the particle position will be temporarily modified to match * the time of the deposition. 
*/ - void DepositCurrent (amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > >& J, + void DepositCurrent (ablastr::fields::MultiLevelVectorField const & J, amrex::Real dt, amrex::Real relative_time); /** @@ -212,12 +209,12 @@ public: * \param[in] interpolate_across_levels whether to average down from the fine patch to the coarse patch * \param[in] icomp component of the MultiFab where rho is deposited (old, new) */ - void DepositCharge (amrex::Vector<std::unique_ptr<amrex::MultiFab> >& rho, + void DepositCharge (const ablastr::fields::MultiLevelScalarField& rho, bool local = false, bool reset = false, bool apply_boundary_and_scale_volume = false, bool interpolate_across_levels = true, int icomp = 0); - void DepositCharge (std::unique_ptr<amrex::MultiFab>& rho, int lev, + void DepositCharge (amrex::MultiFab* rho, int lev, bool local = false, bool reset = false, bool apply_boundary_and_scale_volume = false, int icomp = 0); diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index 591190a7a19..36793c8619b 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -14,6 +14,7 @@ #include "Deposition/CurrentDeposition.H" #include "Deposition/SharedDepositionUtils.H" #include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" #include "Pusher/GetAndSetPosition.H" #include "Pusher/UpdatePosition.H" #include "ParticleBoundaries_K.H" @@ -173,6 +174,7 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, long n, int uniqueparticles, amrex::Long id) { using namespace amrex::literals; + using warpx::fields::FieldType; WARPX_ALWAYS_ASSERT_WITH_MESSAGE((PIdx::nattribs + nattr_real - 1) <= NumRealComps(), "Too many real attributes specified"); @@ -302,8 +304,11 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, long n, // Remove particles that are inside the embedded boundaries #ifdef AMREX_USE_EB if (EB::enabled()) { - auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); - scrapeParticlesAtEB( *this, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + auto & warpx = WarpX::GetInstance(); + scrapeParticlesAtEB( + *this, + warpx.m_fields.get_mr_levels(FieldType::distance_to_eb, warpx.finestLevel()), + ParticleBoundaryProcess::Absorb()); deleteInvalidParticles(); } #endif @@ -823,7 +828,7 @@ WarpXParticleContainer::DepositCurrent (WarpXParIter& pti, void WarpXParticleContainer::DepositCurrent ( - amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > >& J, + ablastr::fields::MultiLevelVectorField const & J, const amrex::Real dt, const amrex::Real relative_time) { // Loop over the refinement levels @@ -853,7 +858,7 @@ WarpXParticleContainer::DepositCurrent ( } DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, - J[lev][0].get(), J[lev][1].get(), J[lev][2].get(), + J[lev][0], J[lev][1], J[lev][2], 0, np, thread_num, lev, lev, dt, relative_time, PushType::Explicit); } #ifdef AMREX_USE_OMP @@ -1170,7 +1175,7 @@ WarpXParticleContainer::DepositCharge (WarpXParIter& pti, RealVector const& wp, } void -WarpXParticleContainer::DepositCharge (amrex::Vector<std::unique_ptr<amrex::MultiFab> >& rho, +WarpXParticleContainer::DepositCharge (const ablastr::fields::MultiLevelScalarField& rho, const bool local, const bool reset, const bool apply_boundary_and_scale_volume, const bool interpolate_across_levels, @@ -1211,7 +1216,7 @@ WarpXParticleContainer::DepositCharge (amrex::Vector<std::unique_ptr<amrex::MultiFab> >& rho, } void -WarpXParticleContainer::DepositCharge (std::unique_ptr<amrex::MultiFab>& rho, +WarpXParticleContainer::DepositCharge (amrex::MultiFab* rho, const int lev, const bool local, const bool reset, const bool apply_boundary_and_scale_volume, const int icomp) @@ -1245,7 +1250,7 @@ WarpXParticleContainer::DepositCharge (std::unique_ptr<amrex::MultiFab>& rho, ion_lev = pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr(); } - DepositCharge(pti, wp, ion_lev, rho.get(), icomp, 0, np, thread_num, lev, lev); + DepositCharge(pti, wp, ion_lev, rho, icomp, 0, np, thread_num, lev, lev); } #ifdef AMREX_USE_OMP } @@ -1254,7 +1259,7 @@ WarpXParticleContainer::DepositCharge (std::unique_ptr<amrex::MultiFab>& rho, #ifdef WARPX_DIM_RZ if (apply_boundary_and_scale_volume) { - WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho.get(), lev); + WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho, lev); } #endif @@ -1273,7 +1278,7 @@ WarpXParticleContainer::DepositCharge (std::unique_ptr<amrex::MultiFab>& rho, if (apply_boundary_and_scale_volume) { // Reflect density over PEC boundaries, if needed. - WarpX::GetInstance().ApplyRhofieldBoundary(lev, rho.get(), PatchType::fine); + WarpX::GetInstance().ApplyRhofieldBoundary(lev, rho, PatchType::fine); } #endif } @@ -1300,7 +1305,7 @@ WarpXParticleContainer::GetChargeDensity (int lev, bool local) const int ng_rho = warpx.get_ng_depos_rho().max(); auto rho = std::make_unique<MultiFab>(nba, dm, WarpX::ncomps,ng_rho); - DepositCharge(rho, lev, local, true, true, 0); + DepositCharge(rho.get(), lev, local, true, true, 0); return rho; } diff --git a/Source/Python/CMakeLists.txt b/Source/Python/CMakeLists.txt index 17a75301306..1b4ab90aade 100644 --- a/Source/Python/CMakeLists.txt +++ b/Source/Python/CMakeLists.txt @@ -13,6 +13,7 @@ foreach(D IN LISTS WarpX_DIMS) target_sources(pyWarpX_${SD} PRIVATE # pybind11 + MultiFabRegister.cpp WarpX.cpp ) endif() diff --git a/Source/Python/MultiFabRegister.cpp b/Source/Python/MultiFabRegister.cpp new file mode 100644 index 00000000000..fcf38a1a6db --- /dev/null +++ b/Source/Python/MultiFabRegister.cpp @@ -0,0 +1,164 @@ +/* Copyright 2024 The WarpX Community + * + * Authors: Axel Huebl + * License: BSD-3-Clause-LBNL + */ +#include "Python/pyWarpX.H" + +#include <ablastr/fields/MultiFabRegister.H> + +#include +#include +#include + +void init_MultiFabRegister (py::module & m) +{ + using namespace ablastr::fields; + + py::class_<Direction>(m, "Direction") + .def(py::init<int>()); + + py::class_<MultiFabRegister>(m, "MultiFabRegister") + + .def("alloc_init", + py::overload_cast< + std::string, + int, + amrex::BoxArray const &, + amrex::DistributionMapping const &, + int, + amrex::IntVect const &, + std::optional<amrex::Real>, + bool, + bool + >(&MultiFabRegister::alloc_init), + py::arg("name"), + py::arg("level"), + py::arg("ba"), + py::arg("dm"), + py::arg("ncomp"), + py::arg("ngrow"), + py::arg("initial_value"), + py::arg("redistribute"), + py::arg("redistribute_on_remake") + ) + + .def("alloc_init", + py::overload_cast< + std::string, + ablastr::fields::Direction, + int, + amrex::BoxArray const &, + amrex::DistributionMapping const &, + int, + amrex::IntVect const &, + std::optional<amrex::Real>, + bool, + bool + >(&MultiFabRegister::alloc_init), + py::arg("name"), + py::arg("dir"), + py::arg("level"), + py::arg("ba"), + py::arg("dm"), + py::arg("ncomp"), + py::arg("ngrow"), + py::arg("initial_value"), + py::arg("redistribute"), + py::arg("redistribute_on_remake") + ) + + .def("alias_init", + py::overload_cast< + std::string, + std::string, + int, + std::optional<amrex::Real> + >(&MultiFabRegister::alias_init), + py::arg("new_name"), + py::arg("alias_name"), + py::arg("level"), + py::arg("initial_value") + ) + + .def("alias_init", + py::overload_cast< + std::string, + std::string, + ablastr::fields::Direction, + int, + std::optional<amrex::Real> + >(&MultiFabRegister::alias_init), + py::arg("new_name"), + py::arg("alias_name"), + py::arg("dir"), + py::arg("level"), + py::arg("initial_value")
+ ) + + .def("has", + py::overload_cast< + std::string, + int + >(&MultiFabRegister::has, py::const_), + py::arg("name"), + py::arg("level") + ) + + .def("has", + py::overload_cast< + std::string, + ablastr::fields::Direction, + int + >(&MultiFabRegister::has, py::const_), + py::arg("name"), + py::arg("dir"), + py::arg("level") + ) + + .def("get", + py::overload_cast< + std::string, + int + >(&MultiFabRegister::get), + py::arg("name"), + py::arg("level") + ) + + .def("get", + py::overload_cast< + std::string, + ablastr::fields::Direction, + int + >(&MultiFabRegister::get), + py::arg("name"), + py::arg("dir"), + py::arg("level") + ) + + //.def("list", + // &MultiFabRegister::list + // // "..." + //) + + .def("erase", + py::overload_cast< + std::string, + int + >(&MultiFabRegister::erase), + py::arg("name"), + py::arg("level") + ) + + .def("erase", + py::overload_cast< + std::string, + ablastr::fields::Direction, + int + >(&MultiFabRegister::erase), + py::arg("name"), + py::arg("dir"), + py::arg("level") + ) + ; +} diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 2689b3115fa..857d23dc588 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -55,6 +55,8 @@ namespace warpx { void init_WarpX (py::module& m) { + using ablastr::fields::Direction; + // Expose the WarpX instance m.def("get_instance", [] () { return &WarpX::GetInstance(); }, @@ -110,17 +112,47 @@ void init_WarpX (py::module& m) //py::overload_cast< int >(&WarpX::boxArray, py::const_), py::arg("lev") ) + .def("field", + [](WarpX const & wx) { + return wx.multifab_map; + }, + py::return_value_policy::reference_internal, + R"doc(Registry to all WarpX MultiFab (fields).)doc" + ) + .def("multifab", + [](WarpX & wx, std::string multifab_name, int level) { + if (wx.m_fields.has(multifab_name, level)) { + return wx.m_fields.get(multifab_name, level); + } else { + throw std::runtime_error("The MultiFab '" + multifab_name + "' is unknown or is not allocated!"); + } + }, + py::arg("multifab_name"), + py::arg("level"), + py::return_value_policy::reference_internal, + R"doc(Return MultiFabs by name and level, e.g., ``\"Efield_aux\"``, ``\"Efield_fp"``, ... + +The physical fields in WarpX have the following naming: + +- ``_fp`` are the "fine" patches, the regular resolution of a current mesh-refinement level +- ``_aux`` are temporary (auxiliar) patches at the same resolution as ``_fp``. + They usually include contributions from other levels and can be interpolated for gather routines of particles. +- ``_cp`` are "coarse" patches, at the same resolution (but not necessary values) as the ``_fp`` of ``level - 1`` + (only for level 1 and higher).)doc" + ) .def("multifab", - [](WarpX const & wx, std::string const multifab_name) { - if (wx.multifab_map.count(multifab_name) > 0) { - return wx.multifab_map.at(multifab_name); + [](WarpX & wx, std::string multifab_name, Direction dir, int level) { + if (wx.m_fields.has(multifab_name, dir, level)) { + return wx.m_fields.get(multifab_name, dir, level); } else { throw std::runtime_error("The MultiFab '" + multifab_name + "' is unknown or is not allocated!"); } }, py::arg("multifab_name"), + py::arg("dir"), + py::arg("level"), py::return_value_policy::reference_internal, - R"doc(Return MultiFabs by name, e.g., ``\"Efield_aux[x][level=0]\"``, ``\"Efield_cp[x][level=0]\"``, ... + R"doc(Return MultiFabs by name, direction, and level, e.g., ``\"Efield_aux\"``, ``\"Efield_fp"``, ... 
The physical fields in WarpX have the following naming: diff --git a/Source/Python/pyWarpX.cpp b/Source/Python/pyWarpX.cpp index 26f4c77502d..e128599abd0 100644 --- a/Source/Python/pyWarpX.cpp +++ b/Source/Python/pyWarpX.cpp @@ -32,6 +32,7 @@ // forward declarations of exposed classes void init_BoundaryBufferParIter (py::module&); void init_MultiParticleContainer (py::module&); +void init_MultiFabRegister (py::module&); void init_ParticleBoundaryBuffer (py::module&); void init_PinnedMemoryParticleContainer (py::module&); void init_WarpXParIter (py::module&); @@ -59,6 +60,7 @@ PYBIND11_MODULE(PYWARPX_MODULE_NAME, m) { )pbdoc"; // note: order from parent to child classes + init_MultiFabRegister(m); init_PinnedMemoryParticleContainer(m); init_WarpXParticleContainer(m); init_WarpXParIter(m); diff --git a/Source/Utils/WarpXMovingWindow.cpp b/Source/Utils/WarpXMovingWindow.cpp index 73696838cd4..d5cebd69254 100644 --- a/Source/Utils/WarpXMovingWindow.cpp +++ b/Source/Utils/WarpXMovingWindow.cpp @@ -14,6 +14,7 @@ #endif #include "Initialization/ExternalField.H" #include "Particles/MultiParticleContainer.H" +#include "Fields.H" #include "Fluids/MultiFluidContainer.H" #include "Fluids/WarpXFluidContainer.H" #include "Utils/TextMsg.H" @@ -139,6 +140,9 @@ WarpX::MoveWindow (const int step, bool move_j) { WARPX_PROFILE("WarpX::MoveWindow"); + using ablastr::fields::Direction; + using warpx::fields::FieldType; + if (step == start_moving_window_step) { amrex::Print() << Utils::TextMsg::Info("Starting moving window"); } @@ -234,69 +238,73 @@ WarpX::MoveWindow (const int step, bool move_j) if (dim == 1) { Efield_parser = m_p_ext_field_params->Eyfield_parser->compile<3>(); } if (dim == 2) { Efield_parser = m_p_ext_field_params->Ezfield_parser->compile<3>(); } } - shiftMF(*Bfield_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, + shiftMF(*m_fields.get(FieldType::Bfield_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*Efield_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, + shiftMF(*m_fields.get(FieldType::Efield_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_p_ext_field_params->E_external_grid[dim], use_Eparser, Efield_parser); if (fft_do_time_averaging) { + ablastr::fields::MultiLevelVectorField Efield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level); + ablastr::fields::MultiLevelVectorField Bfield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level); shiftMF(*Bfield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); shiftMF(*Efield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, m_p_ext_field_params-> E_external_grid[dim], use_Eparser, Efield_parser); } if (move_j) { - shiftMF(*current_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::current_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost); } if (pml[lev] && pml[lev]->ok()) { - const std::array<amrex::MultiFab*, 3>& pml_B = pml[lev]->GetB_fp(); - const std::array<amrex::MultiFab*, 3>& pml_E = pml[lev]->GetE_fp(); - shiftMF(*pml_B[dim], geom[lev], num_shift, dir, lev, dont_update_cost); - shiftMF(*pml_E[dim], geom[lev], num_shift, dir, lev, dont_update_cost); + amrex::MultiFab* pml_B = m_fields.get(FieldType::pml_B_fp, Direction{dim}, lev); + amrex::MultiFab* pml_E = m_fields.get(FieldType::pml_E_fp, Direction{dim}, lev); + shiftMF(*pml_B, geom[lev], num_shift, dir, lev, dont_update_cost); + shiftMF(*pml_E, geom[lev], num_shift, dir, lev, dont_update_cost); } #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) if (pml_rz[lev] && dim < 2) { - const std::array<amrex::MultiFab*, 2>& pml_rz_B = pml_rz[lev]->GetB_fp(); - const std::array<amrex::MultiFab*, 2>& pml_rz_E = pml_rz[lev]->GetE_fp(); - shiftMF(*pml_rz_B[dim], geom[lev], num_shift, dir, lev, dont_update_cost); - shiftMF(*pml_rz_E[dim], geom[lev], num_shift, dir, lev, dont_update_cost); + amrex::MultiFab* pml_rz_B = m_fields.get(FieldType::pml_B_fp, Direction{dim}, lev); + amrex::MultiFab* pml_rz_E = m_fields.get(FieldType::pml_E_fp, Direction{dim}, lev); + shiftMF(*pml_rz_B, geom[lev], num_shift, dir, lev, dont_update_cost); + shiftMF(*pml_rz_E, geom[lev], num_shift, dir, lev, dont_update_cost); } #endif if (lev > 0) { // coarse grid - shiftMF(*Bfield_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, + shiftMF(*m_fields.get(FieldType::Bfield_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*Efield_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, + shiftMF(*m_fields.get(FieldType::Efield_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_p_ext_field_params->E_external_grid[dim], use_Eparser, Efield_parser); - shiftMF(*Bfield_aux[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost); - shiftMF(*Efield_aux[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::Efield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost); if (fft_do_time_averaging) { + ablastr::fields::MultiLevelVectorField Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level); + ablastr::fields::MultiLevelVectorField Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level); shiftMF(*Bfield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); shiftMF(*Efield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_p_ext_field_params->E_external_grid[dim], use_Eparser, Efield_parser); } if (move_j) { - shiftMF(*current_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::current_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost); } if (do_pml && pml[lev]->ok()) { - const std::array<amrex::MultiFab*, 3>& pml_B = pml[lev]->GetB_cp(); - const std::array<amrex::MultiFab*, 3>& pml_E = pml[lev]->GetE_cp(); - shiftMF(*pml_B[dim], geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); - shiftMF(*pml_E[dim], geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); + amrex::MultiFab* pml_B_cp = m_fields.get(FieldType::pml_B_cp, Direction{dim}, lev); + amrex::MultiFab* pml_E_cp = m_fields.get(FieldType::pml_E_cp, Direction{dim}, lev); + shiftMF(*pml_B_cp, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); + shiftMF(*pml_E_cp, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); } } } // Shift scalar field F with div(E) cleaning in valid domain // TODO: shift F from pml_rz for RZ geometry with PSATD, once implemented - if (F_fp[lev]) + if (m_fields.has(FieldType::F_fp, lev)) { // Fine grid - shiftMF(*F_fp[lev], geom[lev], num_shift,
dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::F_fp, lev), geom[lev], num_shift, dir, lev, do_update_cost); if (lev > 0) { // Coarse grid - shiftMF(*F_cp[lev], geom[lev-1], num_shift_crse, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::F_cp, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost); } } @@ -306,7 +314,7 @@ WarpX::MoveWindow (const int step, bool move_j) // Fine grid if (do_pml && pml[lev]->ok()) { - amrex::MultiFab* pml_F = pml[lev]->GetF_fp(); + amrex::MultiFab* pml_F = m_fields.get(FieldType::pml_F_fp, lev); shiftMF(*pml_F, geom[lev], num_shift, dir, lev, dont_update_cost); } if (lev > 0) @@ -314,7 +322,7 @@ WarpX::MoveWindow (const int step, bool move_j) // Coarse grid if (do_pml && pml[lev]->ok()) { - amrex::MultiFab* pml_F = pml[lev]->GetF_cp(); + amrex::MultiFab* pml_F = m_fields.get(FieldType::pml_F_cp, lev); shiftMF(*pml_F, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); } } @@ -322,14 +330,14 @@ WarpX::MoveWindow (const int step, bool move_j) // Shift scalar field G with div(B) cleaning in valid domain // TODO: shift G from pml_rz for RZ geometry with PSATD, once implemented - if (G_fp[lev]) + if (m_fields.has(FieldType::G_fp, lev)) { // Fine grid - shiftMF(*G_fp[lev], geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::G_fp, lev), geom[lev], num_shift, dir, lev, do_update_cost); if (lev > 0) { // Coarse grid - shiftMF(*G_cp[lev], geom[lev-1], num_shift_crse, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::G_cp, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost); } } @@ -339,7 +347,7 @@ WarpX::MoveWindow (const int step, bool move_j) // Fine grid if (do_pml && pml[lev]->ok()) { - amrex::MultiFab* pml_G = pml[lev]->GetG_fp(); + amrex::MultiFab* pml_G = m_fields.get(FieldType::pml_G_fp, lev); shiftMF(*pml_G, geom[lev], num_shift, dir, lev, dont_update_cost); } if (lev > 0) @@ -347,7 +355,7 @@ WarpX::MoveWindow (const int step, bool move_j) // Coarse grid if (do_pml && pml[lev]->ok()) { - amrex::MultiFab* pml_G = pml[lev]->GetG_cp(); + amrex::MultiFab* pml_G = m_fields.get(FieldType::pml_G_cp, lev); shiftMF(*pml_G, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); } } @@ -355,12 +363,12 @@ WarpX::MoveWindow (const int step, bool move_j) // Shift scalar component rho if (move_j) { - if (rho_fp[lev]){ + if (m_fields.has(FieldType::rho_fp, lev)) { // Fine grid - shiftMF(*rho_fp[lev], geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::rho_fp,lev), geom[lev], num_shift, dir, lev, do_update_cost); if (lev > 0){ // Coarse grid - shiftMF(*rho_cp[lev], geom[lev-1], num_shift_crse, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::rho_cp,lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost); } } } @@ -369,11 +377,11 @@ WarpX::MoveWindow (const int step, bool move_j) if (do_fluid_species) { const int n_fluid_species = myfl->nSpecies(); for (int i=0; i<n_fluid_species; i++){ - WarpXFluidContainer& fl = myfl->GetFluidContainer(i); - shiftMF( *fl.N[lev], geom[lev], num_shift, dir, lev, do_update_cost ); - shiftMF( *fl.NU[lev][0], geom[lev], num_shift, dir, lev, do_update_cost ); - shiftMF( *fl.NU[lev][1], geom[lev], num_shift, dir, lev, do_update_cost ); - shiftMF( *fl.NU[lev][2], geom[lev], num_shift, dir, lev, do_update_cost ); + WarpXFluidContainer const& fl = myfl->GetFluidContainer(i); + shiftMF( *m_fields.get(fl.name_mf_N, lev), geom[lev], num_shift, dir, lev, do_update_cost ); + shiftMF( *m_fields.get(fl.name_mf_NU, Direction{0}, lev), geom[lev], num_shift, dir, lev, do_update_cost ); + shiftMF( *m_fields.get(fl.name_mf_NU, Direction{1}, lev), geom[lev], num_shift, dir, lev, do_update_cost ); + shiftMF( *m_fields.get(fl.name_mf_NU, Direction{2}, lev), geom[lev], num_shift, dir, lev, do_update_cost ); } } } @@ -449,7 +457,7 @@ WarpX::MoveWindow (const int step, bool move_j) const amrex::Real cur_time = t_new[0]; for (int i=0; i<n_fluid_species; i++){ WarpXFluidContainer& fl = myfl->GetFluidContainer(i); - fl.InitData( lev, injection_box, cur_time ); + fl.InitData( m_fields, injection_box, cur_time, lev ); } } @@ -458,9 +466,9 @@ WarpX::MoveWindow (const int step, bool move_j) const int lev_zero = 0; m_macroscopic_properties->InitData( Geom(lev_zero), - getField(warpx::fields::FieldType::Efield_fp, lev_zero,0).ixType().toIntVect(), - getField(warpx::fields::FieldType::Efield_fp, lev_zero,1).ixType().toIntVect(), - getField(warpx::fields::FieldType::Efield_fp, lev_zero,2).ixType().toIntVect() + m_fields.get(FieldType::Efield_fp, Direction{0}, lev_zero)->ixType().toIntVect(), + m_fields.get(FieldType::Efield_fp, Direction{1}, lev_zero)->ixType().toIntVect(), + m_fields.get(FieldType::Efield_fp, Direction{2}, lev_zero)->ixType().toIntVect() ); } diff --git a/Source/Utils/WarpXUtil.H b/Source/Utils/WarpXUtil.H index 1de03eb61f0..e35b0cdb313 100644 --- a/Source/Utils/WarpXUtil.H +++ b/Source/Utils/WarpXUtil.H @@ -8,6 +8,8 @@ #ifndef WARPX_UTILS_H_ #define WARPX_UTILS_H_ +#include <ablastr/fields/MultiFabRegister.H> + #include #include #include @@ -53,9 +55,53 @@ void CheckDims (); */ void CheckGriddingForRZSpectral (); -void NullifyMF(amrex::MultiFab& mf, int lev, amrex::Real zmin, - amrex::Real zmax); - +/** Function that sets the value of MultiFab MF to zero for z between zmin and zmax. + * + * \param[in] mf Pointer to the MultiFab + * \param[in] lev The mesh refinement level + * \param[in] zmin The minimum z of the range to be nullified + * \param[in] zmax The maximum z of the range to be nullified + */ +void NullifyMFinstance ( + amrex::MultiFab *mf, + int lev, + amrex::Real zmin, + amrex::Real zmax +); + +/** Function that sets the value of the named MultiFab to zero for z between zmin and zmax. + * + * \param[in] multifab_map MultiFab registry + * \param[in] mf_name Name of the MultiFab to modify + * \param[in] lev The mesh refinement level + * \param[in] zmin The minimum z of the range to be nullified + * \param[in] zmax The maximum z of the range to be nullified + */ +void NullifyMF ( + ablastr::fields::MultiFabRegister& multifab_map, + std::string const& mf_name, + int lev, + amrex::Real zmin, + amrex::Real zmax +); + +/** Function that sets the value of the named MultiFab to zero for z between zmin and zmax. + * + * \param[in] multifab_map MultiFab registry + * \param[in] mf_name Name of the MultiFab to modify + * \param[in] dir Direction, for MultiFabs that are components of vectors + * \param[in] lev The mesh refinement level + * \param[in] zmin The minimum z of the range to be nullified + * \param[in] zmax The maximum z of the range to be nullified + */ +void NullifyMF ( + ablastr::fields::MultiFabRegister& multifab_map, + std::string const& mf_name, + ablastr::fields::Direction dir, + int lev, + amrex::Real zmin, + amrex::Real zmax +); namespace WarpXUtilIO{ /** diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index 4556d64684f..856e021abb3 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -14,6 +14,7 @@ #include "WarpXProfilerWrapper.H" #include "WarpXUtil.H" +#include <ablastr/fields/MultiFabRegister.H> #include #include @@ -221,16 +222,18 @@ void ConvertLabParamsToBoost() } -/* \brief Function that sets the value of MultiFab MF to zero for z between - * zmin and zmax.
- */ -void NullifyMF(amrex::MultiFab& mf, int lev, amrex::Real zmin, amrex::Real zmax){ - WARPX_PROFILE("WarpXUtil::NullifyMF()"); - int const ncomp = mf.nComp(); +void NullifyMFinstance ( + amrex::MultiFab *mf, + int lev, + amrex::Real zmin, + amrex::Real zmax +) +{ + int const ncomp = mf->nComp(); #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif - for(amrex::MFIter mfi(mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi){ + for(amrex::MFIter mfi(*mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi){ const amrex::Box& bx = mfi.tilebox(); // Get box lower and upper physical z bound, and dz const amrex::Real zmin_box = WarpX::LowerCorner(bx, lev, 0._rt).z; @@ -246,7 +249,7 @@ void NullifyMF(amrex::MultiFab& mf, int lev, amrex::Real zmin, amrex::Real zmax) #endif // Check if box intersect with [zmin, zmax] if ( (zmax>zmin_box && zmin<=zmax_box) ){ - const Array4 arr = mf[mfi].array(); + const Array4 arr = (*mf)[mfi].array(); // Set field to 0 between zmin and zmax ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE(int i, int j, int k, int n) noexcept{ @@ -266,6 +269,39 @@ void NullifyMF(amrex::MultiFab& mf, int lev, amrex::Real zmin, amrex::Real zmax) } } +void NullifyMF ( + ablastr::fields::MultiFabRegister& multifab_map, + std::string const& mf_name, + int lev, + amrex::Real zmin, + amrex::Real zmax +) +{ + WARPX_PROFILE("WarpXUtil::NullifyMF()"); + if (!multifab_map.has(mf_name, lev)) { return; } + + auto * mf = multifab_map.get(mf_name, lev); + + NullifyMFinstance ( mf, lev, zmin, zmax); +} + +void NullifyMF ( + ablastr::fields::MultiFabRegister& multifab_map, + std::string const& mf_name, + ablastr::fields::Direction dir, + int lev, + amrex::Real zmin, + amrex::Real zmax +) +{ + WARPX_PROFILE("WarpXUtil::NullifyMF()"); + if (!multifab_map.has(mf_name, dir, lev)) { return; } + + auto * mf = multifab_map.get(mf_name, dir, lev); + + NullifyMFinstance ( mf, lev, zmin, zmax); +} + namespace WarpXUtilIO{ bool WriteBinaryDataOnFile(const std::string& filename, const amrex::Vector& data) { diff --git a/Source/WarpX.H b/Source/WarpX.H index 5065fa73ff9..83b1880f2b1 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -39,7 +39,7 @@ #include "AcceleratorLattice/AcceleratorLattice.H" #include "Evolve/WarpXDtType.H" #include "Evolve/WarpXPushType.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H" #include "FieldSolver/ImplicitSolvers/ImplicitSolver.H" #include "FieldSolver/ImplicitSolvers/WarpXSolverVec.H" @@ -49,6 +49,7 @@ #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/export.H" +#include #include #include @@ -84,7 +85,6 @@ class WARPX_EXPORT WarpX : public amrex::AmrCore { public: - static WarpX& GetInstance (); static void ResetInstance (); @@ -104,9 +104,9 @@ public: WarpX& operator= ( WarpX const & ) = delete; /** Move constructor */ - WarpX ( WarpX && ) = default; + WarpX ( WarpX && ) = delete; /** Move operator */ - WarpX& operator= ( WarpX && ) = default; + WarpX& operator= ( WarpX && ) = delete; static std::string Version (); //!< Version of WarpX executable static std::string PicsarVersion (); //!< Version of PICSAR dependency @@ -132,14 +132,14 @@ public: void SaveParticlesAtImplicitStepStart (); void FinishImplicitParticleUpdate (); void SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E ); - void UpdateMagneticFieldAndApplyBCs ( const amrex::Vector, 3 > >& a_Bn, - amrex::Real a_thetadt ); + void UpdateMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn, 
+ amrex::Real a_thetadt ); void ApplyMagneticFieldBCs (); - void FinishMagneticFieldAndApplyBCs ( const amrex::Vector, 3 > >& a_Bn, - amrex::Real a_theta ); - void FinishImplicitField ( amrex::Vector, 3 > >& Field_fp, - const amrex::Vector, 3 > >& Field_n, - amrex::Real theta ); + void FinishMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn, + amrex::Real a_theta ); + void FinishImplicitField ( const ablastr::fields::MultiLevelVectorField& Field_fp, + const ablastr::fields::MultiLevelVectorField& Field_n, + amrex::Real theta ); void ImplicitComputeRHSE ( amrex::Real dt, WarpXSolverVec& a_Erhs_vec); void ImplicitComputeRHSE (int lev, amrex::Real dt, WarpXSolverVec& a_Erhs_vec); void ImplicitComputeRHSE (int lev, PatchType patch_type, amrex::Real dt, WarpXSolverVec& a_Erhs_vec); @@ -152,7 +152,10 @@ public: [[nodiscard]] HybridPICModel * get_pointer_HybridPICModel () const { return m_hybrid_pic_model.get(); } MultiDiagnostics& GetMultiDiags () {return *multi_diags;} #ifdef AMREX_USE_EB - amrex::Vector >& GetDistanceToEB () {return m_distance_to_eb;} + ablastr::fields::MultiLevelScalarField GetDistanceToEB () { + using warpx::fields::FieldType; + return m_fields.get_mr_levels(FieldType::distance_to_eb, finestLevel()); + } #endif ParticleBoundaryBuffer& GetParticleBoundaryBuffer () { return *m_particle_boundary_buffer; } @@ -470,93 +473,12 @@ public: const std::string& name, std::optional initial_value); - /** - * \brief - * Allocate the MultiFab so that is like the specified MultiFab (same ba and dm) - * and optionally initialize it. This also adds the MultiFab - * to the map of MultiFabs (used to ease the access to MultiFabs from the Python - * interface - * - * \param mf[out] The MultiFab unique pointer to be allocated - * \param mf_model[in] The MultiFab to model - * \param name[in] The name of the MultiFab to use in the map - * \param initial_value[in] The optional initial value - */ - static void AllocInitMultiFabFromModel ( - std::unique_ptr& mf, - amrex::MultiFab& mf_model, - int level, - const std::string& name, - std::optional initial_value = {}); - // Maps of all of the MultiFabs and iMultiFabs used (this can include MFs from other classes) // This is a convenience for the Python interface, allowing all MultiFabs // to be easily referenced from Python. static std::map multifab_map; static std::map imultifab_map; - /** - * \brief - * Check if a field is initialized. - * - * \param field_type[in] the field type - * \param lev[in] the mesh refinement level - * \param direction[in] the field component (0 by default) - * - * \return true if the field is initialized, false otherwise - */ - [[nodiscard]] bool - isFieldInitialized (warpx::fields::FieldType field_type, int lev, int direction = 0) const; - - /** - * \brief - * Get a pointer to the field data. - * - * \param field_type[in] the field type - * \param lev[in] the mesh refinement level - * \param direction[in] the field component (0 by default) - * - * \return the pointer to an amrex::MultiFab containing the field data - */ - [[nodiscard]] amrex::MultiFab* - getFieldPointer (warpx::fields::FieldType field_type, int lev, int direction = 0) const; - - /** - * \brief - * For vector fields, get an array of three pointers to the field data. 
- * - * \param field_type[in] the field type - * \param lev[in] the mesh refinement level - * - * \return an array of three pointers amrex::MultiFab* containing the field data - */ - [[nodiscard]] std::array - getFieldPointerArray (warpx::fields::FieldType field_type, int lev) const; - - /** - * \brief - * Get a constant reference to the field data. - * - * \param field_type[in] the field type - * \param lev[in] the mesh refinement level - * \param direction[in] the field component (0 by default) - * - * \return a constant refernce to an amrex::MultiFab containing the field data - */ - [[nodiscard]] const amrex::MultiFab& - getField(warpx::fields::FieldType field_type, int lev, int direction = 0) const; - - /** - * \brief - * Get a constant reference to the specified vector field on the different MR levels - * - * \param field_type[in] the field type - * - * \return a vector (which one element per MR level) of arrays of three pointers (for 3 vector components) amrex::MultiFab* containing the field data - */ - [[nodiscard]] const amrex::Vector,3>>& - getMultiLevelField(warpx::fields::FieldType field_type) const; - /** * \brief * Get pointer to the amrex::MultiFab containing the dotMask for the specified field @@ -569,7 +491,7 @@ public: * Set the dotMask container */ void SetDotMask( std::unique_ptr& field_dotMask, - warpx::fields::FieldType field_type, int lev, int dir ) const; + std::string const & field_name, int lev, int dir ) const; [[nodiscard]] bool DoPML () const {return do_pml;} [[nodiscard]] bool DoFluidSpecies () const {return do_fluid_species;} @@ -746,8 +668,8 @@ public: * when FieldBoundaryType is set to damped. Vector version. */ void DampFieldsInGuards (int lev, - const std::array,3>& Efield, - const std::array,3>& Bfield); + const ablastr::fields::VectorField& Efield, + const ablastr::fields::VectorField& Bfield); /** * \brief Private function for spectral solver @@ -756,7 +678,7 @@ public: * can appear in parallel simulations. This will be called * when FieldBoundaryType is set to damped. Scalar version. */ - void DampFieldsInGuards (int lev, std::unique_ptr& mf); + void DampFieldsInGuards (int lev, amrex::MultiFab* mf); #ifdef WARPX_DIM_RZ void ApplyInverseVolumeScalingToCurrentDensity(amrex::MultiFab* Jx, @@ -893,21 +815,16 @@ public: * Then, for each MR level, including level 0, apply filter and sum guard * cells across levels. 
* - \param[in,out] J_fp reference to fine-patch current \c MultiFab (all MR levels) - \param[in,out] J_cp reference to coarse-patch current \c MultiFab (all MR levels) - \param[in,out] J_buffer reference to buffer current \c MultiFab (all MR levels) + \param[in] current_fp_string the coarse or fine patch to use for current */ - void SyncCurrent ( - const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_fp, - const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_cp, - const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& J_buffer); + void SyncCurrent (const std::string& current_fp_string); void SyncRho (); void SyncRho ( - const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_fp, - const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_cp, - const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_buffer); + const ablastr::fields::MultiLevelScalarField& charge_fp, + const ablastr::fields::MultiLevelScalarField& charge_cp, + ablastr::fields::MultiLevelScalarField const & charge_buffer); [[nodiscard]] amrex::Vector<int> getnsubsteps () const {return nsubsteps;} [[nodiscard]] int getnsubsteps (int lev) const {return nsubsteps[lev];} @@ -988,11 +905,11 @@ public: // these should be private, but can't due to Cuda limitations static void ComputeDivB (amrex::MultiFab& divB, int dcomp, - const std::array<const amrex::MultiFab* const, 3>& B, + ablastr::fields::VectorField const & B, const std::array<amrex::Real,3>& dx); static void ComputeDivB (amrex::MultiFab& divB, int dcomp, - const std::array<const amrex::MultiFab* const, 3>& B, + ablastr::fields::VectorField const & B, const std::array<amrex::Real,3>& dx, amrex::IntVect ngrow); void ComputeDivE(amrex::MultiFab& divE, int lev); @@ -1022,14 +939,14 @@ public: MagnetostaticSolver::VectorPoissonBoundaryHandler m_vector_poisson_boundary_handler; void ComputeMagnetostaticField (); void AddMagnetostaticFieldLabFrame (); - void computeVectorPotential (const amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3> >& curr, - amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3> >& A, + void computeVectorPotential (ablastr::fields::MultiLevelVectorField const& curr, + ablastr::fields::MultiLevelVectorField const& A, amrex::Real required_precision=amrex::Real(1.e-11), amrex::Real absolute_tolerance=amrex::Real(0.0), int max_iters=200, - int verbosity=2) const; + int verbosity=2); // const; - void setVectorPotentialBC (amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3> >& A) const; + void setVectorPotentialBC (ablastr::fields::MultiLevelVectorField const& A) const; /** * \brief @@ -1057,8 +974,8 @@ public: amrex::ParserExecutor<3> const& xfield_parser, amrex::ParserExecutor<3> const& yfield_parser, amrex::ParserExecutor<3> const& zfield_parser, - std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& edge_lengths, - std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& face_areas, + ablastr::fields::VectorField const& edge_lengths, + ablastr::fields::VectorField const& face_areas, [[maybe_unused]] char field, int lev, PatchType patch_type); @@ -1138,24 +1055,24 @@ public: * \brief Compute the length of the mesh edges. Here the length is a value in [0, 1]. * An edge of length 0 is fully covered. */ - static void ComputeEdgeLengths (std::array< std::unique_ptr<amrex::MultiFab>, 3 >& edge_lengths, + static void ComputeEdgeLengths (ablastr::fields::VectorField& edge_lengths, const amrex::EBFArrayBoxFactory& eb_fact); /** * \brief Compute the area of the mesh faces. Here the area is a value in [0, 1]. * An edge of area 0 is fully covered. */ - static void ComputeFaceAreas (std::array< std::unique_ptr<amrex::MultiFab>, 3 >& face_areas, + static void ComputeFaceAreas (ablastr::fields::VectorField& face_areas, const amrex::EBFArrayBoxFactory& eb_fact); /** * \brief Scale the edges lengths by the mesh width to obtain the real lengths.
*/ - static void ScaleEdges (std::array< std::unique_ptr, 3 >& edge_lengths, + static void ScaleEdges (ablastr::fields::VectorField& edge_lengths, const std::array& cell_size); /** * \brief Scale the edges areas by the mesh width to obtain the real areas. */ - static void ScaleAreas (std::array< std::unique_ptr, 3 >& face_areas, + static void ScaleAreas (ablastr::fields::VectorField& face_areas, const std::array& cell_size); /** * \brief Initialize information for cell extensions. @@ -1225,6 +1142,9 @@ public: FiniteDifferenceSolver * get_pointer_fdtd_solver_fp (int lev) { return m_fdtd_solver_fp[lev].get(); } + // Field container + ablastr::fields::MultiFabRegister m_fields; + protected: /** @@ -1324,53 +1244,50 @@ private: void OneStep_multiJ (amrex::Real cur_time); void RestrictCurrentFromFineToCoarsePatch ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, + const ablastr::fields::MultiLevelVectorField& J_fp, + const ablastr::fields::MultiLevelVectorField& J_cp, int lev); void AddCurrentFromFineLevelandSumBoundary ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, - const amrex::Vector,3>>& J_buffer, + const ablastr::fields::MultiLevelVectorField& J_fp, + const ablastr::fields::MultiLevelVectorField& J_cp, + const ablastr::fields::MultiLevelVectorField& J_buffer, int lev); void StoreCurrent (int lev); void RestoreCurrent (int lev); void ApplyFilterJ ( - const amrex::Vector,3>>& current, + const ablastr::fields::MultiLevelVectorField& current, int lev, int idim); void ApplyFilterJ ( - const amrex::Vector,3>>& current, + const ablastr::fields::MultiLevelVectorField& current, int lev); void SumBoundaryJ ( - const amrex::Vector,3>>& current, + const ablastr::fields::MultiLevelVectorField& current, int lev, int idim, const amrex::Periodicity& period); void SumBoundaryJ ( - const amrex::Vector,3>>& current, + const ablastr::fields::MultiLevelVectorField& current, int lev, const amrex::Periodicity& period); void NodalSyncJ ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, + const ablastr::fields::MultiLevelVectorField& J_fp, + const ablastr::fields::MultiLevelVectorField& J_cp, int lev, PatchType patch_type); - void RestrictRhoFromFineToCoarsePatch ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, - int lev); + void RestrictRhoFromFineToCoarsePatch (int lev ); void ApplyFilterandSumBoundaryRho ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, + const ablastr::fields::MultiLevelScalarField& charge_fp, + const ablastr::fields::MultiLevelScalarField& charge_cp, int lev, PatchType patch_type, int icomp, int ncomp); void AddRhoFromFineLevelandSumBoundary ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, - const amrex::Vector>& charge_buffer, + const ablastr::fields::MultiLevelScalarField& charge_fp, + const ablastr::fields::MultiLevelScalarField& charge_cp, + ablastr::fields::MultiLevelScalarField const & charge_buffer, int lev, int icomp, int ncomp); @@ -1507,22 +1424,6 @@ private: // Fields: First array for level, second for direction // - // Full solution - amrex::Vector, 3 > > Efield_aux; - amrex::Vector, 3 > > Bfield_aux; - - // Fine patch - amrex::Vector< std::unique_ptr > F_fp; - amrex::Vector< std::unique_ptr > G_fp; - amrex::Vector< std::unique_ptr > rho_fp; - amrex::Vector< std::unique_ptr > phi_fp; - amrex::Vector, 3 > > current_fp; - amrex::Vector, 3 > > current_fp_vay; - amrex::Vector, 3 > > Efield_fp; - amrex::Vector, 3 > > Bfield_fp; - amrex::Vector, 3 > > 
Efield_avg_fp; - amrex::Vector, 3 > > Bfield_avg_fp; - // Masks for computing dot product and global moments of fields when using grids that // have shared locations across different ranks (e.g., a Yee grid) mutable amrex::Vector,3 > > Efield_dotMask; @@ -1530,23 +1431,6 @@ private: mutable amrex::Vector,3 > > Afield_dotMask; mutable amrex::Vector< std::unique_ptr > phi_dotMask; - // Memory buffers for computing magnetostatic fields - // Vector Potential A and previous step. Time buffer needed for computing dA/dt to first order - amrex::Vector, 3 > > vector_potential_fp_nodal; - amrex::Vector, 3 > > vector_potential_grad_buf_e_stag; - amrex::Vector, 3 > > vector_potential_grad_buf_b_stag; - - // Same as Bfield_fp/Efield_fp for reading external field data - amrex::Vector, 3 > > Efield_fp_external; - amrex::Vector, 3 > > Bfield_fp_external; - amrex::Vector, 3 > > E_external_particle_field; - amrex::Vector, 3 > > B_external_particle_field; - - //! EB: Lengths of the mesh edges - amrex::Vector, 3 > > m_edge_lengths; - //! EB: Areas of the mesh faces - amrex::Vector, 3 > > m_face_areas; - /** EB: for every mesh face flag_info_face contains a: * * 0 if the face needs to be extended * * 1 if the face is large enough to lend area to other faces @@ -1561,71 +1445,16 @@ private: * and in WarpX::ComputeEightWaysExtensions * This is only used for the ECT solver.*/ amrex::Vector, 3 > > m_flag_ext_face; - /** EB: m_area_mod contains the modified areas of the mesh faces, i.e. if a face is enlarged it - * contains the area of the enlarged face - * This is only used for the ECT solver.*/ - amrex::Vector, 3 > > m_area_mod; + /** EB: m_borrowing contains the info about the enlarged cells, i.e. for every enlarged cell it * contains the info of which neighbors are being intruded (and the amount of borrowed area). * This is only used for the ECT solver.*/ amrex::Vector >, 3 > > m_borrowing; - /** ECTRhofield is needed only by the ect - * solver and it contains the electromotive force density for every mesh face. - * The name ECTRhofield has been used to comply with the notation of the paper - * https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4463918 (page 9, equation 4 - * and below). - * Although it's called rho it has nothing to do with the charge density! - * This is only used for the ECT solver.*/ - amrex::Vector, 3 > > ECTRhofield; - /** Venl contains the electromotive force for every mesh face, i.e. every entry is - * the corresponding entry in ECTRhofield multiplied by the total area (possibly with enlargement) - * This is only used for the ECT solver.*/ - amrex::Vector, 3 > > Venl; - - //EB level set - amrex::Vector > m_distance_to_eb; - - // store fine patch - amrex::Vector, 3 > > current_store; - - // Nodal MultiFab for nodal current deposition if warpx.do_current_centering = 1 - amrex::Vector,3>> current_fp_nodal; - - // Coarse patch - amrex::Vector< std::unique_ptr > F_cp; - amrex::Vector< std::unique_ptr > G_cp; - amrex::Vector< std::unique_ptr > rho_cp; - amrex::Vector, 3 > > current_cp; - amrex::Vector, 3 > > Efield_cp; - amrex::Vector, 3 > > Bfield_cp; - amrex::Vector, 3 > > Efield_avg_cp; - amrex::Vector, 3 > > Bfield_avg_cp; - // Copy of the coarse aux - amrex::Vector, 3 > > Efield_cax; - amrex::Vector, 3 > > Bfield_cax; amrex::Vector > current_buffer_masks; amrex::Vector > gather_buffer_masks; - // If charge/current deposition buffers are used - amrex::Vector, 3 > > current_buf; - amrex::Vector > charge_buf; - - /** - * \brief - * Get a pointer to the field data. 
Does not check if the pointer - * is not nullptr. - * - * \param field_type[in] the field type - * \param lev[in] the mesh refinement level - * \param direction[in] the field component (0 by default) - * - * \return the pointer to an amrex::MultiFab containing the field data - */ - [[nodiscard]] amrex::MultiFab* - getFieldPointerUnchecked (warpx::fields::FieldType field_type, int lev, int direction = 0) const; - // PML int do_pml = 0; int do_silver_mueller = 0; @@ -1744,17 +1573,11 @@ private: guardCellManager guard_cells; - //Slice Parameters + // Slice Parameters int slice_max_grid_size; int slice_plot_int = -1; amrex::RealBox slice_realbox; amrex::IntVect slice_cr_ratio; - amrex::Vector< std::unique_ptr > F_slice; - amrex::Vector< std::unique_ptr > G_slice; - amrex::Vector< std::unique_ptr > rho_slice; - amrex::Vector, 3 > > current_slice; - amrex::Vector, 3 > > Efield_slice; - amrex::Vector, 3 > > Bfield_slice; bool fft_periodic_single_box = false; int nox_fft = 16; @@ -1824,40 +1647,14 @@ private: /** * \brief Forward FFT of E,B on all mesh refinement levels - * - * \param E_fp Vector of three-dimensional arrays (for each level) - * storing the fine patch electric field to be transformed - * \param B_fp Vector of three-dimensional arrays (for each level) - * storing the fine patch magnetic field to be transformed - * \param E_cp Vector of three-dimensional arrays (for each level) - * storing the coarse patch electric field to be transformed - * \param B_cp Vector of three-dimensional arrays (for each level) - * storing the coarse patch magnetic field to be transformed - */ - void PSATDForwardTransformEB ( - const amrex::Vector,3>>& E_fp, - const amrex::Vector,3>>& B_fp, - const amrex::Vector,3>>& E_cp, - const amrex::Vector,3>>& B_cp); + */ + void PSATDForwardTransformEB (); /** * \brief Backward FFT of E,B on all mesh refinement levels, * with field damping in the guard cells (if needed) - * - * \param E_fp Vector of three-dimensional arrays (for each level) - * storing the fine patch electric field to be transformed - * \param B_fp Vector of three-dimensional arrays (for each level) - * storing the fine patch magnetic field to be transformed - * \param E_cp Vector of three-dimensional arrays (for each level) - * storing the coarse patch electric field to be transformed - * \param B_cp Vector of three-dimensional arrays (for each level) - * storing the coarse patch magnetic field to be transformed - */ - void PSATDBackwardTransformEB ( - const amrex::Vector,3>>& E_fp, - const amrex::Vector,3>>& B_fp, - const amrex::Vector,3>>& E_cp, - const amrex::Vector,3>>& B_cp); + */ + void PSATDBackwardTransformEB (); /** * \brief Backward FFT of averaged E,B on all mesh refinement levels @@ -1872,10 +1669,10 @@ private: * storing the coarse patch averaged magnetic field to be transformed */ void PSATDBackwardTransformEBavg ( - const amrex::Vector,3>>& E_avg_fp, - const amrex::Vector,3>>& B_avg_fp, - const amrex::Vector,3>>& E_avg_cp, - const amrex::Vector,3>>& B_avg_cp); + ablastr::fields::MultiLevelVectorField const& E_avg_fp, + ablastr::fields::MultiLevelVectorField const& B_avg_fp, + ablastr::fields::MultiLevelVectorField const& E_avg_cp, + ablastr::fields::MultiLevelVectorField const& B_avg_cp); /** * \brief Forward FFT of J on all mesh refinement levels, @@ -1889,8 +1686,8 @@ private: * (only used in RZ geometry to avoid double filtering) */ void PSATDForwardTransformJ ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, + std::string const & J_fp_string, + std::string const 
     /**
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index e9c518e8f61..60374133a52 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -92,7 +92,7 @@
 #include

 using namespace amrex;
-using namespace warpx::fields;
+using warpx::fields::FieldType;

 int WarpX::do_moving_window = 0;
 int WarpX::start_moving_window_step = 0;
@@ -294,67 +294,17 @@ WarpX::WarpX ()

     // Fluid Container
     if (do_fluid_species) {
-        myfl = std::make_unique<MultiFluidContainer>(nlevs_max);
+        myfl = std::make_unique<MultiFluidContainer>();
     }

-    Efield_aux.resize(nlevs_max);
-    Bfield_aux.resize(nlevs_max);
-
-    F_fp.resize(nlevs_max);
-    G_fp.resize(nlevs_max);
-    rho_fp.resize(nlevs_max);
-    phi_fp.resize(nlevs_max);
-    current_fp.resize(nlevs_max);
-    Efield_fp.resize(nlevs_max);
-    Bfield_fp.resize(nlevs_max);
-
     Efield_dotMask.resize(nlevs_max);
     Bfield_dotMask.resize(nlevs_max);
     Afield_dotMask.resize(nlevs_max);
     phi_dotMask.resize(nlevs_max);

-    // Only allocate vector potential arrays when using the Magnetostatic Solver
-    if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic)
-    {
-        vector_potential_fp_nodal.resize(nlevs_max);
-        vector_potential_grad_buf_e_stag.resize(nlevs_max);
-        vector_potential_grad_buf_b_stag.resize(nlevs_max);
-    }
-
-    if (fft_do_time_averaging)
-    {
-        Efield_avg_fp.resize(nlevs_max);
-        Bfield_avg_fp.resize(nlevs_max);
-    }
-
-    // Same as Bfield_fp/Efield_fp for reading external field data
-    Bfield_fp_external.resize(nlevs_max);
-    Efield_fp_external.resize(nlevs_max);
-    B_external_particle_field.resize(1);
-    E_external_particle_field.resize(1);
-
-    m_edge_lengths.resize(nlevs_max);
-    m_face_areas.resize(nlevs_max);
-    m_distance_to_eb.resize(nlevs_max);
     m_flag_info_face.resize(nlevs_max);
     m_flag_ext_face.resize(nlevs_max);
     m_borrowing.resize(nlevs_max);
-    m_area_mod.resize(nlevs_max);
-
-    ECTRhofield.resize(nlevs_max);
-    Venl.resize(nlevs_max);
-
-    current_store.resize(nlevs_max);
-
-    if (do_current_centering)
-    {
-        current_fp_nodal.resize(nlevs_max);
-    }
-
-    if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay)
-    {
-        current_fp_vay.resize(nlevs_max);
-    }

     // Create Electrostatic Solver object if needed
     if ((WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame)
@@ -370,28 +320,11 @@ WarpX::WarpX ()

     if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC)
     {
         // Create hybrid-PIC model object if needed
-        m_hybrid_pic_model = std::make_unique<HybridPICModel>(nlevs_max);
-    }
-
-    F_cp.resize(nlevs_max);
-    G_cp.resize(nlevs_max);
-    rho_cp.resize(nlevs_max);
-    current_cp.resize(nlevs_max);
-    Efield_cp.resize(nlevs_max);
-    Bfield_cp.resize(nlevs_max);
-
-    if (fft_do_time_averaging)
-    {
-        Efield_avg_cp.resize(nlevs_max);
-        Bfield_avg_cp.resize(nlevs_max);
+        m_hybrid_pic_model = std::make_unique<HybridPICModel>();
     }

-    Efield_cax.resize(nlevs_max);
-    Bfield_cax.resize(nlevs_max);
     current_buffer_masks.resize(nlevs_max);
     gather_buffer_masks.resize(nlevs_max);

-    current_buf.resize(nlevs_max);
-    charge_buf.resize(nlevs_max);

     pml.resize(nlevs_max);

 #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT)
@@ -2055,64 +1988,14 @@ WarpX::MakeNewLevelFromCoarse (int /*lev*/, amrex::Real /*time*/, const amrex::B
 void
 WarpX::ClearLevel (int lev)
 {
-    for (int i = 0; i < 3; ++i) {
-        Efield_aux[lev][i].reset();
-        Bfield_aux[lev][i].reset();
-
-        current_fp[lev][i].reset();
-        Efield_fp [lev][i].reset();
-        Bfield_fp [lev][i].reset();
+    m_fields.clear_level(lev);

+    for (int i = 0; i < 3; ++i) {
         Efield_dotMask [lev][i].reset();
         Bfield_dotMask [lev][i].reset();
         Afield_dotMask [lev][i].reset();
-
-        current_store[lev][i].reset();
-
-        if (do_current_centering)
-        {
-            current_fp_nodal[lev][i].reset();
-        }
-
-        if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay)
-        {
-            current_fp_vay[lev][i].reset();
-        }
-
-        if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic)
-        {
-            vector_potential_fp_nodal[lev][i].reset();
-            vector_potential_grad_buf_e_stag[lev][i].reset();
-            vector_potential_grad_buf_b_stag[lev][i].reset();
-        }
-
-        current_cp[lev][i].reset();
-        Efield_cp [lev][i].reset();
-        Bfield_cp [lev][i].reset();
-
-        Efield_cax[lev][i].reset();
-        Bfield_cax[lev][i].reset();
-        current_buf[lev][i].reset();
     }

-    if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC)
-    {
-        m_hybrid_pic_model->ClearLevel(lev);
-    }
-
-    charge_buf[lev].reset();
-
-    current_buffer_masks[lev].reset();
-    gather_buffer_masks[lev].reset();
-
-    F_fp [lev].reset();
-    G_fp [lev].reset();
-    rho_fp[lev].reset();
-    phi_fp[lev].reset();
-
-    F_cp [lev].reset();
-    G_cp [lev].reset();
-    rho_cp[lev].reset();
-
     phi_dotMask[lev].reset();

 #ifdef WARPX_USE_FFT
@@ -2208,6 +2091,8 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm
                       const IntVect& ngEB, IntVect& ngJ, const IntVect& ngRho,
                       const IntVect& ngF, const IntVect& ngG, const bool aux_is_nodal)
 {
+    using ablastr::fields::Direction;
+
     // Declare nodal flags
     IntVect Ex_nodal_flag, Ey_nodal_flag, Ez_nodal_flag;
     IntVect Bx_nodal_flag, By_nodal_flag, Bz_nodal_flag;
@@ -2309,61 +2194,55 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm
     //
     const std::array<Real,3> dx = CellSize(lev);

-    AllocInitMultiFab(Bfield_fp[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_fp[x]", 0.0_rt);
-    AllocInitMultiFab(Bfield_fp[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_fp[y]", 0.0_rt);
-    AllocInitMultiFab(Bfield_fp[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_fp[z]", 0.0_rt);
+    m_fields.alloc_init(FieldType::Bfield_fp, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt);
+    m_fields.alloc_init(FieldType::Bfield_fp, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt);
+    m_fields.alloc_init(FieldType::Bfield_fp, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt);

-    AllocInitMultiFab(Efield_fp[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, lev, "Efield_fp[x]", 0.0_rt);
-    AllocInitMultiFab(Efield_fp[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, lev, "Efield_fp[y]", 0.0_rt);
-    AllocInitMultiFab(Efield_fp[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_fp[z]", 0.0_rt);
+    m_fields.alloc_init(FieldType::Efield_fp, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt);
+    
m_fields.alloc_init(FieldType::Efield_fp, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_fp, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); - AllocInitMultiFab(current_fp[lev][0], amrex::convert(ba, jx_nodal_flag), dm, ncomps, ngJ, lev, "current_fp[x]", 0.0_rt); - AllocInitMultiFab(current_fp[lev][1], amrex::convert(ba, jy_nodal_flag), dm, ncomps, ngJ, lev, "current_fp[y]", 0.0_rt); - AllocInitMultiFab(current_fp[lev][2], amrex::convert(ba, jz_nodal_flag), dm, ncomps, ngJ, lev, "current_fp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::current_fp, Direction{0}, lev, amrex::convert(ba, jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_fp, Direction{1}, lev, amrex::convert(ba, jy_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_fp, Direction{2}, lev, amrex::convert(ba, jz_nodal_flag), dm, ncomps, ngJ, 0.0_rt); if (do_current_centering) { amrex::BoxArray const& nodal_ba = amrex::convert(ba, amrex::IntVect::TheNodeVector()); - AllocInitMultiFab(current_fp_nodal[lev][0], nodal_ba, dm, ncomps, ngJ, lev, "current_fp_nodal[x]", 0.0_rt); - AllocInitMultiFab(current_fp_nodal[lev][1], nodal_ba, dm, ncomps, ngJ, lev, "current_fp_nodal[y]", 0.0_rt); - AllocInitMultiFab(current_fp_nodal[lev][2], nodal_ba, dm, ncomps, ngJ, lev, "current_fp_nodal[z]", 0.0_rt); + m_fields.alloc_init(FieldType::current_fp_nodal, Direction{0}, lev, nodal_ba, dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_fp_nodal, Direction{1}, lev, nodal_ba, dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_fp_nodal, Direction{2}, lev, nodal_ba, dm, ncomps, ngJ, 0.0_rt); } if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) { - AllocInitMultiFab(current_fp_vay[lev][0], amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, lev, "current_fp_vay[x]", 0.0_rt); - AllocInitMultiFab(current_fp_vay[lev][1], amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, lev, "current_fp_vay[y]", 0.0_rt); - AllocInitMultiFab(current_fp_vay[lev][2], amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, lev, "current_fp_vay[z]", 0.0_rt); + m_fields.alloc_init(FieldType::current_fp_vay, Direction{0}, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_fp_vay, Direction{1}, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_fp_vay, Direction{2}, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, 0.0_rt); } if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) { - AllocInitMultiFab(vector_potential_fp_nodal[lev][0], amrex::convert(ba, rho_nodal_flag), - dm, ncomps, ngRho, lev, "vector_potential_fp_nodal[x]", 0.0_rt); - AllocInitMultiFab(vector_potential_fp_nodal[lev][1], amrex::convert(ba, rho_nodal_flag), - dm, ncomps, ngRho, lev, "vector_potential_fp_nodal[y]", 0.0_rt); - AllocInitMultiFab(vector_potential_fp_nodal[lev][2], amrex::convert(ba, rho_nodal_flag), - dm, ncomps, ngRho, lev, "vector_potential_fp_nodal[z]", 0.0_rt); - - AllocInitMultiFab(vector_potential_grad_buf_e_stag[lev][0], amrex::convert(ba, Ex_nodal_flag), - dm, ncomps, ngEB, lev, "vector_potential_grad_buf_e_stag[x]", 0.0_rt); - AllocInitMultiFab(vector_potential_grad_buf_e_stag[lev][1], amrex::convert(ba, Ey_nodal_flag), - dm, ncomps, ngEB, lev, "vector_potential_grad_buf_e_stag[y]", 0.0_rt); - 
AllocInitMultiFab(vector_potential_grad_buf_e_stag[lev][2], amrex::convert(ba, Ez_nodal_flag), - dm, ncomps, ngEB, lev, "vector_potential_grad_buf_e_stag[z]", 0.0_rt); - - AllocInitMultiFab(vector_potential_grad_buf_b_stag[lev][0], amrex::convert(ba, Bx_nodal_flag), - dm, ncomps, ngEB, lev, "vector_potential_grad_buf_b_stag[x]", 0.0_rt); - AllocInitMultiFab(vector_potential_grad_buf_b_stag[lev][1], amrex::convert(ba, By_nodal_flag), - dm, ncomps, ngEB, lev, "vector_potential_grad_buf_b_stag[y]", 0.0_rt); - AllocInitMultiFab(vector_potential_grad_buf_b_stag[lev][2], amrex::convert(ba, Bz_nodal_flag), - dm, ncomps, ngEB, lev, "vector_potential_grad_buf_b_stag[z]", 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_fp_nodal, Direction{0}, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngRho, 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_fp_nodal, Direction{1}, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngRho, 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_fp_nodal, Direction{2}, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngRho, 0.0_rt); + + // Memory buffers for computing magnetostatic fields + // Vector Potential A and previous step. Time buffer needed for computing dA/dt to first order + m_fields.alloc_init(FieldType::vector_potential_grad_buf_e_stag, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_grad_buf_e_stag, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_grad_buf_e_stag, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + + m_fields.alloc_init(FieldType::vector_potential_grad_buf_b_stag, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_grad_buf_b_stag, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_grad_buf_b_stag, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } // Allocate extra multifabs needed by the kinetic-fluid hybrid algorithm. 
if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { m_hybrid_pic_model->AllocateLevelMFs( + m_fields, lev, ba, dm, ncomps, ngJ, ngRho, jx_nodal_flag, jy_nodal_flag, jz_nodal_flag, rho_nodal_flag ); @@ -2371,10 +2250,10 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm // Allocate extra multifabs needed for fluids if (do_fluid_species) { - myfl->AllocateLevelMFs(lev, ba, dm); + myfl->AllocateLevelMFs(m_fields, ba, dm, lev); auto & warpx = GetInstance(); const amrex::Real cur_time = warpx.gett_new(lev); - myfl->InitData(lev, geom[lev].Domain(),cur_time); + myfl->InitData(m_fields, geom[lev].Domain(), cur_time, lev); } // Allocate extra multifabs for macroscopic properties of the medium @@ -2386,50 +2265,41 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm if (fft_do_time_averaging) { - AllocInitMultiFab(Bfield_avg_fp[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_avg_fp[x]", 0.0_rt); - AllocInitMultiFab(Bfield_avg_fp[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_avg_fp[y]", 0.0_rt); - AllocInitMultiFab(Bfield_avg_fp[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_avg_fp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_avg_fp, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_avg_fp, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_avg_fp, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); - AllocInitMultiFab(Efield_avg_fp[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_fp[x]", 0.0_rt); - AllocInitMultiFab(Efield_avg_fp[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_fp[y]", 0.0_rt); - AllocInitMultiFab(Efield_avg_fp[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_fp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_avg_fp, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_avg_fp, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_avg_fp, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } if (EB::enabled()) { constexpr int nc_ls = 1; amrex::IntVect const ng_ls(2); - AllocInitMultiFab(m_distance_to_eb[lev], amrex::convert(ba, IntVect::TheNodeVector()), dm, nc_ls, ng_ls, lev, - "m_distance_to_eb"); + //EB level set + m_fields.alloc_init(FieldType::distance_to_eb, lev, amrex::convert(ba, IntVect::TheNodeVector()), dm, nc_ls, ng_ls, 0.0_rt); // EB info are needed only at the finest level if (lev == maxLevel()) { if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { - AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_edge_lengths[x]"); - AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_edge_lengths[y]"); - AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_edge_lengths[z]"); - AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_face_areas[x]"); - 
AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_face_areas[y]"); - AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_face_areas[z]"); + //! EB: Lengths of the mesh edges + m_fields.alloc_init(FieldType::edge_lengths, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::edge_lengths, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::edge_lengths, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + + //! EB: Areas of the mesh faces + m_fields.alloc_init(FieldType::face_areas, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::face_areas, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::face_areas, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); } if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_edge_lengths[x]"); - AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_edge_lengths[y]"); - AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_edge_lengths[z]"); - AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_face_areas[x]"); - AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_face_areas[y]"); - AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_face_areas[z]"); AllocInitMultiFab(m_flag_info_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_info_face[x]"); AllocInitMultiFab(m_flag_info_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, @@ -2442,31 +2312,47 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[y]"); AllocInitMultiFab(m_flag_ext_face[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[z]"); - AllocInitMultiFab(m_area_mod[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_area_mod[x]"); - AllocInitMultiFab(m_area_mod[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_area_mod[y]"); - AllocInitMultiFab(m_area_mod[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_area_mod[z]"); + + /** EB: area_mod contains the modified areas of the mesh faces, i.e. 
if a face is enlarged it
+     * contains the area of the enlarged face.
+     * This is only used for the ECT solver.*/
+    m_fields.alloc_init(FieldType::area_mod, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag),
+        dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt);
+    m_fields.alloc_init(FieldType::area_mod, Direction{1}, lev, amrex::convert(ba, By_nodal_flag),
+        dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt);
+    m_fields.alloc_init(FieldType::area_mod, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag),
+        dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt);
+
     m_borrowing[lev][0] = std::make_unique<amrex::LayoutData<FaceInfoBox>>(
         amrex::convert(ba, Bx_nodal_flag), dm);
     m_borrowing[lev][1] = std::make_unique<amrex::LayoutData<FaceInfoBox>>(
         amrex::convert(ba, By_nodal_flag), dm);
     m_borrowing[lev][2] = std::make_unique<amrex::LayoutData<FaceInfoBox>>(
         amrex::convert(ba, Bz_nodal_flag), dm);
-    AllocInitMultiFab(Venl[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps,
-        guard_cells.ng_FieldSolver, lev, "Venl[x]");
-    AllocInitMultiFab(Venl[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps,
-        guard_cells.ng_FieldSolver, lev, "Venl[y]");
-    AllocInitMultiFab(Venl[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps,
-        guard_cells.ng_FieldSolver, lev, "Venl[z]");
-
-    AllocInitMultiFab(ECTRhofield[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps,
-        guard_cells.ng_FieldSolver, lev, "ECTRhofield[x]", 0.0_rt);
-    AllocInitMultiFab(ECTRhofield[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps,
-        guard_cells.ng_FieldSolver, lev, "ECTRhofield[y]", 0.0_rt);
-    AllocInitMultiFab(ECTRhofield[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps,
-        guard_cells.ng_FieldSolver, lev, "ECTRhofield[z]", 0.0_rt);
+
+    /** Venl contains the electromotive force for every mesh face, i.e. every entry is
+     * the corresponding entry in ECTRhofield multiplied by the total area (possibly with enlargement).
+     * This is only used for the ECT solver.*/
+    m_fields.alloc_init(FieldType::Venl, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag),
+        dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt);
+    m_fields.alloc_init(FieldType::Venl, Direction{1}, lev, amrex::convert(ba, By_nodal_flag),
+        dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt);
+    m_fields.alloc_init(FieldType::Venl, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag),
+        dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt);
+
+    /** ECTRhofield is needed only by the ECT
+     * solver and it contains the electromotive force density for every mesh face.
+     * The name ECTRhofield has been used to comply with the notation of the paper
+     * https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4463918 (page 9, equation 4
+     * and below).
+     * Although it is called rho, it has nothing to do with the charge density!
+ * This is only used for the ECT solver.*/ + m_fields.alloc_init(FieldType::ECTRhofield, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::ECTRhofield, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::ECTRhofield, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); } } } @@ -2488,31 +2374,38 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm } if (rho_ncomps > 0) { - AllocInitMultiFab(rho_fp[lev], amrex::convert(ba, rho_nodal_flag), dm, rho_ncomps, ngRho, lev, "rho_fp", 0.0_rt); + m_fields.alloc_init(FieldType::rho_fp, + lev, amrex::convert(ba, rho_nodal_flag), dm, + rho_ncomps, ngRho, 0.0_rt); } if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) { const IntVect ngPhi = IntVect( AMREX_D_DECL(1,1,1) ); - AllocInitMultiFab(phi_fp[lev], amrex::convert(ba, phi_nodal_flag), dm, ncomps, ngPhi, lev, "phi_fp", 0.0_rt); + m_fields.alloc_init(FieldType::phi_fp, lev, amrex::convert(ba, phi_nodal_flag), dm, + ncomps, ngPhi, 0.0_rt ); } if (do_subcycling && lev == 0) { - AllocInitMultiFab(current_store[lev][0], amrex::convert(ba,jx_nodal_flag),dm,ncomps,ngJ,lev, "current_store[x]"); - AllocInitMultiFab(current_store[lev][1], amrex::convert(ba,jy_nodal_flag),dm,ncomps,ngJ,lev, "current_store[y]"); - AllocInitMultiFab(current_store[lev][2], amrex::convert(ba,jz_nodal_flag),dm,ncomps,ngJ,lev, "current_store[z]"); + m_fields.alloc_init(FieldType::current_store, Direction{0}, lev, amrex::convert(ba,jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_store, Direction{1}, lev, amrex::convert(ba,jy_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_store, Direction{2}, lev, amrex::convert(ba,jz_nodal_flag), dm, ncomps, ngJ, 0.0_rt); } if (do_dive_cleaning) { - AllocInitMultiFab(F_fp[lev], amrex::convert(ba, F_nodal_flag), dm, ncomps, ngF, lev, "F_fp", 0.0_rt); + m_fields.alloc_init(FieldType::F_fp, + lev, amrex::convert(ba, F_nodal_flag), dm, + ncomps, ngF, 0.0_rt); } if (do_divb_cleaning) { - AllocInitMultiFab(G_fp[lev], amrex::convert(ba, G_nodal_flag), dm, ncomps, ngG, lev, "G_fp", 0.0_rt); + m_fields.alloc_init(FieldType::G_fp, + lev, amrex::convert(ba, G_nodal_flag), dm, + ncomps, ngG, 0.0_rt); } if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) @@ -2581,90 +2474,103 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm // Create aux multifabs on Nodal Box Array BoxArray const nba = amrex::convert(ba,IntVect::TheNodeVector()); - AllocInitMultiFab(Bfield_aux[lev][0], nba, dm, ncomps, ngEB, lev, "Bfield_aux[x]", 0.0_rt); - AllocInitMultiFab(Bfield_aux[lev][1], nba, dm, ncomps, ngEB, lev, "Bfield_aux[y]", 0.0_rt); - AllocInitMultiFab(Bfield_aux[lev][2], nba, dm, ncomps, ngEB, lev, "Bfield_aux[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{0}, lev, nba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{1}, lev, nba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{2}, lev, nba, dm, ncomps, ngEB, 0.0_rt); - AllocInitMultiFab(Efield_aux[lev][0], nba, dm, ncomps, ngEB, lev, "Efield_aux[x]", 0.0_rt); - AllocInitMultiFab(Efield_aux[lev][1], nba, dm, ncomps, 
ngEB, lev, "Efield_aux[y]", 0.0_rt); - AllocInitMultiFab(Efield_aux[lev][2], nba, dm, ncomps, ngEB, lev, "Efield_aux[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{0}, lev, nba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{1}, lev, nba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{2}, lev, nba, dm, ncomps, ngEB, 0.0_rt); } else if (lev == 0) { if (WarpX::fft_do_time_averaging) { - AliasInitMultiFab(Bfield_aux[lev][0], *Bfield_avg_fp[lev][0], 0, ncomps, lev, "Bfield_aux[x]", 0.0_rt); - AliasInitMultiFab(Bfield_aux[lev][1], *Bfield_avg_fp[lev][1], 0, ncomps, lev, "Bfield_aux[y]", 0.0_rt); - AliasInitMultiFab(Bfield_aux[lev][2], *Bfield_avg_fp[lev][2], 0, ncomps, lev, "Bfield_aux[z]", 0.0_rt); + m_fields.alias_init(FieldType::Bfield_aux, FieldType::Bfield_avg_fp, Direction{0}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Bfield_aux, FieldType::Bfield_avg_fp, Direction{1}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Bfield_aux, FieldType::Bfield_avg_fp, Direction{2}, lev, 0.0_rt); - AliasInitMultiFab(Efield_aux[lev][0], *Efield_avg_fp[lev][0], 0, ncomps, lev, "Efield_aux[x]", 0.0_rt); - AliasInitMultiFab(Efield_aux[lev][1], *Efield_avg_fp[lev][1], 0, ncomps, lev, "Efield_aux[y]", 0.0_rt); - AliasInitMultiFab(Efield_aux[lev][2], *Efield_avg_fp[lev][2], 0, ncomps, lev, "Efield_aux[z]", 0.0_rt); + m_fields.alias_init(FieldType::Efield_aux, FieldType::Efield_avg_fp, Direction{0}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Efield_aux, FieldType::Efield_avg_fp, Direction{1}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Efield_aux, FieldType::Efield_avg_fp, Direction{2}, lev, 0.0_rt); } else { if (mypc->m_B_ext_particle_s == "read_from_file") { - AllocInitMultiFab(Bfield_aux[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_aux[x]"); - AllocInitMultiFab(Bfield_aux[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_aux[y]"); - AllocInitMultiFab(Bfield_aux[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_aux[z]"); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } else { // In this case, the aux grid is simply an alias of the fp grid (most common case in WarpX) - AliasInitMultiFab(Bfield_aux[lev][0], *Bfield_fp[lev][0], 0, ncomps, lev, "Bfield_aux[x]", 0.0_rt); - AliasInitMultiFab(Bfield_aux[lev][1], *Bfield_fp[lev][1], 0, ncomps, lev, "Bfield_aux[y]", 0.0_rt); - AliasInitMultiFab(Bfield_aux[lev][2], *Bfield_fp[lev][2], 0, ncomps, lev, "Bfield_aux[z]", 0.0_rt); + m_fields.alias_init(FieldType::Bfield_aux, FieldType::Bfield_fp, Direction{0}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Bfield_aux, FieldType::Bfield_fp, Direction{1}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Bfield_aux, FieldType::Bfield_fp, Direction{2}, lev, 0.0_rt); } if (mypc->m_E_ext_particle_s == "read_from_file") { - AllocInitMultiFab(Efield_aux[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, lev, "Efield_aux[x]"); - AllocInitMultiFab(Efield_aux[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, lev, "Efield_aux[y]"); - AllocInitMultiFab(Efield_aux[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, 
ncomps, ngEB, lev, "Efield_aux[z]"); + m_fields.alloc_init(FieldType::Efield_aux, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } else { // In this case, the aux grid is simply an alias of the fp grid (most common case in WarpX) - AliasInitMultiFab(Efield_aux[lev][0], *Efield_fp[lev][0], 0, ncomps, lev, "Efield_aux[x]", 0.0_rt); - AliasInitMultiFab(Efield_aux[lev][1], *Efield_fp[lev][1], 0, ncomps, lev, "Efield_aux[y]", 0.0_rt); - AliasInitMultiFab(Efield_aux[lev][2], *Efield_fp[lev][2], 0, ncomps, lev, "Efield_aux[z]", 0.0_rt); + m_fields.alias_init(FieldType::Efield_aux, FieldType::Efield_fp, Direction{0}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Efield_aux, FieldType::Efield_fp, Direction{1}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Efield_aux, FieldType::Efield_fp, Direction{2}, lev, 0.0_rt); } } } else { - AllocInitMultiFab(Bfield_aux[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_aux[x]", 0.0_rt); - AllocInitMultiFab(Bfield_aux[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_aux[y]", 0.0_rt); - AllocInitMultiFab(Bfield_aux[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_aux[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); - AllocInitMultiFab(Efield_aux[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, lev, "Efield_aux[x]", 0.0_rt); - AllocInitMultiFab(Efield_aux[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, lev, "Efield_aux[y]", 0.0_rt); - AllocInitMultiFab(Efield_aux[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_aux[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } // The external fields that are read from file if (m_p_ext_field_params->B_ext_grid_type != ExternalFieldType::default_zero && m_p_ext_field_params->B_ext_grid_type != ExternalFieldType::constant) { // These fields will be added directly to the grid, i.e. 
to fp, and need to match the index type - AllocInitMultiFab(Bfield_fp_external[lev][0], amrex::convert(ba, Bfield_fp[lev][0]->ixType()), - dm, ncomps, ngEB, lev, "Bfield_fp_external[x]", 0.0_rt); - AllocInitMultiFab(Bfield_fp_external[lev][1], amrex::convert(ba, Bfield_fp[lev][1]->ixType()), - dm, ncomps, ngEB, lev, "Bfield_fp_external[y]", 0.0_rt); - AllocInitMultiFab(Bfield_fp_external[lev][2], amrex::convert(ba, Bfield_fp[lev][2]->ixType()), - dm, ncomps, ngEB, lev, "Bfield_fp_external[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_fp_external, Direction{0}, lev, + amrex::convert(ba, m_fields.get(FieldType::Bfield_fp,Direction{0},lev)->ixType()), + dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_fp_external, Direction{1}, lev, + amrex::convert(ba, m_fields.get(FieldType::Bfield_fp,Direction{1},lev)->ixType()), + dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_fp_external, Direction{2}, lev, + amrex::convert(ba, m_fields.get(FieldType::Bfield_fp,Direction{2},lev)->ixType()), + dm, ncomps, ngEB, 0.0_rt); } if (mypc->m_B_ext_particle_s == "read_from_file") { // These fields will be added to the fields that the particles see, and need to match the index type - AllocInitMultiFab(B_external_particle_field[lev][0], amrex::convert(ba, Bfield_aux[lev][0]->ixType()), - dm, ncomps, ngEB, lev, "B_external_particle_field[x]", 0.0_rt); - AllocInitMultiFab(B_external_particle_field[lev][1], amrex::convert(ba, Bfield_aux[lev][1]->ixType()), - dm, ncomps, ngEB, lev, "B_external_particle_field[y]", 0.0_rt); - AllocInitMultiFab(B_external_particle_field[lev][2], amrex::convert(ba, Bfield_aux[lev][2]->ixType()), - dm, ncomps, ngEB, lev, "B_external_particle_field[z]", 0.0_rt); + auto *Bfield_aux_levl_0 = m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + auto *Bfield_aux_levl_1 = m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + auto *Bfield_aux_levl_2 = m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); + + // Same as Bfield_fp for reading external field data + m_fields.alloc_init(FieldType::B_external_particle_field, Direction{0}, lev, amrex::convert(ba, Bfield_aux_levl_0->ixType()), + dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::B_external_particle_field, Direction{1}, lev, amrex::convert(ba, Bfield_aux_levl_1->ixType()), + dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::B_external_particle_field, Direction{2}, lev, amrex::convert(ba, Bfield_aux_levl_2->ixType()), + dm, ncomps, ngEB, 0.0_rt); } if (m_p_ext_field_params->E_ext_grid_type != ExternalFieldType::default_zero && m_p_ext_field_params->E_ext_grid_type != ExternalFieldType::constant) { // These fields will be added directly to the grid, i.e. 
to fp, and need to match the index type
-        AllocInitMultiFab(Efield_fp_external[lev][0], amrex::convert(ba, Efield_fp[lev][0]->ixType()),
-                          dm, ncomps, ngEB, lev, "Efield_fp_external[x]", 0.0_rt);
-        AllocInitMultiFab(Efield_fp_external[lev][1], amrex::convert(ba, Efield_fp[lev][1]->ixType()),
-                          dm, ncomps, ngEB, lev, "Efield_fp_external[y]", 0.0_rt);
-        AllocInitMultiFab(Efield_fp_external[lev][2], amrex::convert(ba, Efield_fp[lev][2]->ixType()),
-                          dm, ncomps, ngEB, lev, "Efield_fp_external[z]", 0.0_rt);
+        m_fields.alloc_init(FieldType::Efield_fp_external, Direction{0}, lev, amrex::convert(ba, m_fields.get(FieldType::Efield_fp, Direction{0}, lev)->ixType()),
+                            dm, ncomps, ngEB, 0.0_rt);
+        m_fields.alloc_init(FieldType::Efield_fp_external, Direction{1}, lev, amrex::convert(ba, m_fields.get(FieldType::Efield_fp, Direction{1}, lev)->ixType()),
+                            dm, ncomps, ngEB, 0.0_rt);
+        m_fields.alloc_init(FieldType::Efield_fp_external, Direction{2}, lev, amrex::convert(ba, m_fields.get(FieldType::Efield_fp, Direction{2}, lev)->ixType()),
+                            dm, ncomps, ngEB, 0.0_rt);
     }
     if (mypc->m_E_ext_particle_s == "read_from_file") {
         // These fields will be added to the fields that the particles see, and need to match the index type
-        AllocInitMultiFab(E_external_particle_field[lev][0], amrex::convert(ba, Efield_aux[lev][0]->ixType()),
-                          dm, ncomps, ngEB, lev, "E_external_particle_field[x]", 0.0_rt);
-        AllocInitMultiFab(E_external_particle_field[lev][1], amrex::convert(ba, Efield_aux[lev][1]->ixType()),
-                          dm, ncomps, ngEB, lev, "E_external_particle_field[y]", 0.0_rt);
-        AllocInitMultiFab(E_external_particle_field[lev][2], amrex::convert(ba, Efield_aux[lev][2]->ixType()),
-                          dm, ncomps, ngEB, lev, "E_external_particle_field[z]", 0.0_rt);
+        auto *Efield_aux_levl_0 = m_fields.get(FieldType::Efield_aux, Direction{0}, lev);
+        auto *Efield_aux_levl_1 = m_fields.get(FieldType::Efield_aux, Direction{1}, lev);
+        auto *Efield_aux_levl_2 = m_fields.get(FieldType::Efield_aux, Direction{2}, lev);
+
+        // Same as Efield_fp for reading external field data
+        m_fields.alloc_init(FieldType::E_external_particle_field, Direction{0}, lev, amrex::convert(ba, Efield_aux_levl_0->ixType()),
+                            dm, ncomps, ngEB, 0.0_rt);
+        m_fields.alloc_init(FieldType::E_external_particle_field, Direction{1}, lev, amrex::convert(ba, Efield_aux_levl_1->ixType()),
+                            dm, ncomps, ngEB, 0.0_rt);
+        m_fields.alloc_init(FieldType::E_external_particle_field, Direction{2}, lev, amrex::convert(ba, Efield_aux_levl_2->ixType()),
+                            dm, ncomps, ngEB, 0.0_rt);
     }

     //
@@ -2677,49 +2583,57 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm
         const std::array<Real,3> cdx = CellSize(lev-1);

         // Create the MultiFabs for B
-        AllocInitMultiFab(Bfield_cp[lev][0], amrex::convert(cba, Bx_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_cp[x]", 0.0_rt);
-        AllocInitMultiFab(Bfield_cp[lev][1], amrex::convert(cba, By_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_cp[y]", 0.0_rt);
-        AllocInitMultiFab(Bfield_cp[lev][2], amrex::convert(cba, Bz_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_cp[z]", 0.0_rt);
+        m_fields.alloc_init(FieldType::Bfield_cp, Direction{0}, lev, amrex::convert(cba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt);
+        m_fields.alloc_init(FieldType::Bfield_cp, Direction{1}, lev, amrex::convert(cba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt);
+        m_fields.alloc_init(FieldType::Bfield_cp, Direction{2}, lev, amrex::convert(cba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt);

         // Create the MultiFabs for E
-        AllocInitMultiFab(Efield_cp[lev][0], amrex::convert(cba, Ex_nodal_flag), dm, 
ncomps, ngEB, lev, "Efield_cp[x]", 0.0_rt); - AllocInitMultiFab(Efield_cp[lev][1], amrex::convert(cba, Ey_nodal_flag), dm, ncomps, ngEB, lev, "Efield_cp[y]", 0.0_rt); - AllocInitMultiFab(Efield_cp[lev][2], amrex::convert(cba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_cp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cp, Direction{0}, lev, amrex::convert(cba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cp, Direction{1}, lev, amrex::convert(cba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cp, Direction{2}, lev, amrex::convert(cba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); if (fft_do_time_averaging) { - AllocInitMultiFab(Bfield_avg_cp[lev][0], amrex::convert(cba, Bx_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_avg_cp[x]", 0.0_rt); - AllocInitMultiFab(Bfield_avg_cp[lev][1], amrex::convert(cba, By_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_avg_cp[y]", 0.0_rt); - AllocInitMultiFab(Bfield_avg_cp[lev][2], amrex::convert(cba, Bz_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_avg_cp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_avg_cp, Direction{0}, lev, amrex::convert(cba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_avg_cp, Direction{1}, lev, amrex::convert(cba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_avg_cp, Direction{2}, lev, amrex::convert(cba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); - AllocInitMultiFab(Efield_avg_cp[lev][0], amrex::convert(cba, Ex_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_cp[x]", 0.0_rt); - AllocInitMultiFab(Efield_avg_cp[lev][1], amrex::convert(cba, Ey_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_cp[y]", 0.0_rt); - AllocInitMultiFab(Efield_avg_cp[lev][2], amrex::convert(cba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_cp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_avg_cp, Direction{0}, lev, amrex::convert(cba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_avg_cp, Direction{1}, lev, amrex::convert(cba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_avg_cp, Direction{2}, lev, amrex::convert(cba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } // Create the MultiFabs for the current - AllocInitMultiFab(current_cp[lev][0], amrex::convert(cba, jx_nodal_flag), dm, ncomps, ngJ, lev, "current_cp[x]", 0.0_rt); - AllocInitMultiFab(current_cp[lev][1], amrex::convert(cba, jy_nodal_flag), dm, ncomps, ngJ, lev, "current_cp[y]", 0.0_rt); - AllocInitMultiFab(current_cp[lev][2], amrex::convert(cba, jz_nodal_flag), dm, ncomps, ngJ, lev, "current_cp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::current_cp, Direction{0}, lev, amrex::convert(cba, jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_cp, Direction{1}, lev, amrex::convert(cba, jy_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_cp, Direction{2}, lev, amrex::convert(cba, jz_nodal_flag), dm, ncomps, ngJ, 0.0_rt); if (rho_ncomps > 0) { - AllocInitMultiFab(rho_cp[lev], amrex::convert(cba, rho_nodal_flag), dm, rho_ncomps, ngRho, lev, "rho_cp", 0.0_rt); + m_fields.alloc_init(FieldType::rho_cp, + lev, amrex::convert(cba, rho_nodal_flag), dm, + rho_ncomps, ngRho, 0.0_rt); } if (do_dive_cleaning) { - AllocInitMultiFab(F_cp[lev], amrex::convert(cba, IntVect::TheUnitVector()), dm, ncomps, ngF, lev, "F_cp", 0.0_rt); + m_fields.alloc_init(FieldType::F_cp, + lev, amrex::convert(cba, IntVect::TheUnitVector()), dm, + 
ncomps, ngF, 0.0_rt); } if (do_divb_cleaning) { if (grid_type == GridType::Collocated) { - AllocInitMultiFab(G_cp[lev], amrex::convert(cba, IntVect::TheUnitVector()), dm, ncomps, ngG, lev, "G_cp", 0.0_rt); + m_fields.alloc_init(FieldType::G_cp, + lev, amrex::convert(cba, IntVect::TheUnitVector()), dm, + ncomps, ngG, 0.0_rt); } else // grid_type=staggered or grid_type=hybrid { - AllocInitMultiFab(G_cp[lev], amrex::convert(cba, IntVect::TheZeroVector()), dm, ncomps, ngG, lev, "G_cp", 0.0_rt); + m_fields.alloc_init(FieldType::G_cp, + lev, amrex::convert(cba, IntVect::TheZeroVector()), dm, + ncomps, ngG, 0.0_rt); } } @@ -2777,22 +2691,25 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm if (n_field_gather_buffer > 0 || mypc->nSpeciesGatherFromMainGrid() > 0) { if (aux_is_nodal) { BoxArray const& cnba = amrex::convert(cba,IntVect::TheNodeVector()); - AllocInitMultiFab(Bfield_cax[lev][0], cnba,dm,ncomps,ngEB,lev, "Bfield_cax[x]", 0.0_rt); - AllocInitMultiFab(Bfield_cax[lev][1], cnba,dm,ncomps,ngEB,lev, "Bfield_cax[y]", 0.0_rt); - AllocInitMultiFab(Bfield_cax[lev][2], cnba,dm,ncomps,ngEB,lev, "Bfield_cax[z]", 0.0_rt); - AllocInitMultiFab(Efield_cax[lev][0], cnba,dm,ncomps,ngEB,lev, "Efield_cax[x]", 0.0_rt); - AllocInitMultiFab(Efield_cax[lev][1], cnba,dm,ncomps,ngEB,lev, "Efield_cax[y]", 0.0_rt); - AllocInitMultiFab(Efield_cax[lev][2], cnba,dm,ncomps,ngEB,lev, "Efield_cax[z]", 0.0_rt); + // Create the MultiFabs for B + m_fields.alloc_init(FieldType::Bfield_cax, Direction{0}, lev, cnba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_cax, Direction{1}, lev, cnba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_cax, Direction{2}, lev, cnba, dm, ncomps, ngEB, 0.0_rt); + + // Create the MultiFabs for E + m_fields.alloc_init(FieldType::Efield_cax, Direction{0}, lev, cnba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cax, Direction{1}, lev, cnba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cax, Direction{2}, lev, cnba, dm, ncomps, ngEB, 0.0_rt); } else { // Create the MultiFabs for B - AllocInitMultiFab(Bfield_cax[lev][0], amrex::convert(cba,Bx_nodal_flag),dm,ncomps,ngEB,lev, "Bfield_cax[x]", 0.0_rt); - AllocInitMultiFab(Bfield_cax[lev][1], amrex::convert(cba,By_nodal_flag),dm,ncomps,ngEB,lev, "Bfield_cax[y]", 0.0_rt); - AllocInitMultiFab(Bfield_cax[lev][2], amrex::convert(cba,Bz_nodal_flag),dm,ncomps,ngEB,lev, "Bfield_cax[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_cax, Direction{0}, lev, amrex::convert(cba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_cax, Direction{1}, lev, amrex::convert(cba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_cax, Direction{2}, lev, amrex::convert(cba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); // Create the MultiFabs for E - AllocInitMultiFab(Efield_cax[lev][0], amrex::convert(cba,Ex_nodal_flag),dm,ncomps,ngEB,lev, "Efield_cax[x]", 0.0_rt); - AllocInitMultiFab(Efield_cax[lev][1], amrex::convert(cba,Ey_nodal_flag),dm,ncomps,ngEB,lev, "Efield_cax[y]", 0.0_rt); - AllocInitMultiFab(Efield_cax[lev][2], amrex::convert(cba,Ez_nodal_flag),dm,ncomps,ngEB,lev, "Efield_cax[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cax, Direction{0}, lev, amrex::convert(cba,Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cax, Direction{1}, lev, amrex::convert(cba,Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cax, 
Direction{2}, lev, amrex::convert(cba,Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt);
         }

         AllocInitMultiFab(gather_buffer_masks[lev], ba, dm, ncomps, amrex::IntVect(1), lev, "gather_buffer_masks");
@@ -2801,11 +2718,11 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm
         }

         if (n_current_deposition_buffer > 0) {
-            AllocInitMultiFab(current_buf[lev][0], amrex::convert(cba,jx_nodal_flag),dm,ncomps,ngJ,lev, "current_buf[x]");
-            AllocInitMultiFab(current_buf[lev][1], amrex::convert(cba,jy_nodal_flag),dm,ncomps,ngJ,lev, "current_buf[y]");
-            AllocInitMultiFab(current_buf[lev][2], amrex::convert(cba,jz_nodal_flag),dm,ncomps,ngJ,lev, "current_buf[z]");
-            if (rho_cp[lev]) {
-                AllocInitMultiFab(charge_buf[lev], amrex::convert(cba,rho_nodal_flag),dm,2*ncomps,ngRho,lev, "charge_buf");
+            m_fields.alloc_init(FieldType::current_buf, Direction{0}, lev, amrex::convert(cba,jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt);
+            m_fields.alloc_init(FieldType::current_buf, Direction{1}, lev, amrex::convert(cba,jy_nodal_flag), dm, ncomps, ngJ, 0.0_rt);
+            m_fields.alloc_init(FieldType::current_buf, Direction{2}, lev, amrex::convert(cba,jz_nodal_flag), dm, ncomps, ngJ, 0.0_rt);
+            if (m_fields.has(FieldType::rho_cp, lev)) {
+                m_fields.alloc_init(FieldType::rho_buf, lev, amrex::convert(cba,rho_nodal_flag), dm, 2*ncomps, ngRho, 0.0_rt);
             }
             AllocInitMultiFab(current_buffer_masks[lev], ba, dm, ncomps, amrex::IntVect(1), lev, "current_buffer_masks");
             // Current buffer masks have 1 ghost cell, because of the fact
@@ -3008,7 +2925,7 @@ WarpX::RefRatio (int lev)

 void
 WarpX::ComputeDivB (amrex::MultiFab& divB, int const dcomp,
-                    const std::array<const amrex::MultiFab* const, 3>& B,
+                    ablastr::fields::VectorField const& B,
                     const std::array<amrex::Real,3>& dx)
 {
     ComputeDivB(divB, dcomp, B, dx, IntVect::TheZeroVector());
@@ -3016,7 +2933,7 @@ WarpX::ComputeDivB (amrex::MultiFab& divB, int const dcomp,

 void
 WarpX::ComputeDivB (amrex::MultiFab& divB, int const dcomp,
-                    const std::array<const amrex::MultiFab* const, 3>& B,
+                    ablastr::fields::VectorField const& B,
                     const std::array<amrex::Real,3>& dx, IntVect const ngrow)
 {
     WARPX_ALWAYS_ASSERT_WITH_MESSAGE(grid_type != GridType::Collocated,
@@ -3056,13 +2973,15 @@ WarpX::ComputeDivE(amrex::MultiFab& divE, const int lev)
 {
     if ( WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD ) {
 #ifdef WARPX_USE_FFT
-        spectral_solver_fp[lev]->ComputeSpectralDivE( lev, Efield_aux[lev], divE );
+        const ablastr::fields::VectorField Efield_aux_lev = m_fields.get_alldirs(FieldType::Efield_aux, lev);
+        spectral_solver_fp[lev]->ComputeSpectralDivE(lev, Efield_aux_lev, divE);
 #else
         WARPX_ABORT_WITH_MESSAGE(
             "ComputeDivE: PSATD requested but not compiled");
 #endif
     } else {
-        m_fdtd_solver_fp[lev]->ComputeDivE( Efield_aux[lev], divE );
+        const ablastr::fields::VectorField Efield_aux_lev = m_fields.get_alldirs(FieldType::Efield_aux, lev);
+        m_fdtd_solver_fp[lev]->ComputeDivE(Efield_aux_lev, divE);
     }
 }

@@ -3337,10 +3256,12 @@ WarpX::GatherBufferMasks (int lev)
 void
 WarpX::StoreCurrent (int lev)
 {
+    using ablastr::fields::Direction;
     for (int idim = 0; idim < 3; ++idim) {
-        if (current_store[lev][idim]) {
-            MultiFab::Copy(*current_store[lev][idim], *current_fp[lev][idim],
-                           0, 0, 1, current_store[lev][idim]->nGrowVect());
+        if (m_fields.has(FieldType::current_store, Direction{idim},lev)) {
+            MultiFab::Copy(*m_fields.get(FieldType::current_store, Direction{idim}, lev),
+                           *m_fields.get(FieldType::current_fp, Direction{idim}, lev),
+                           0, 0, 1, m_fields.get(FieldType::current_store, Direction{idim}, lev)->nGrowVect());
         }
     }
 }
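
The hunks above establish the two registry access patterns that replace direct member access throughout this patch: per-component lookups guarded by has(), and whole-vector lookups via get_alldirs() for solver calls. A compact sketch combining both, using only accessors that appear in this patch (the FieldType value, lev, and divE are placeholders):

    using ablastr::fields::Direction;

    // Per-component access, guarded by has():
    if (m_fields.has(FieldType::current_store, Direction{0}, lev)) {
        amrex::MultiFab* jx_store = m_fields.get(FieldType::current_store, Direction{0}, lev);
        jx_store->setVal(0.0_rt);  // e.g., zero the stored x-component of the current
    }

    // Whole-vector access (std::array of three MultiFab pointers) for a solver call:
    const ablastr::fields::VectorField E = m_fields.get_alldirs(FieldType::Efield_aux, lev);
    m_fdtd_solver_fp[lev]->ComputeDivE(E, divE);
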
@@ -3348,9 +3269,15 @@ WarpX::StoreCurrent (int lev)
 void
 WarpX::RestoreCurrent (int lev)
 {
+    using ablastr::fields::Direction;
+    using warpx::fields::FieldType;
+
     for (int idim = 0; idim < 3; ++idim) {
-        if (current_store[lev][idim]) {
-            std::swap(current_fp[lev][idim], current_store[lev][idim]);
+        if (m_fields.has(FieldType::current_store, Direction{idim}, lev)) {
+            std::swap(
+                *m_fields.get(FieldType::current_fp, Direction{idim}, lev),
+                *m_fields.get(FieldType::current_store, Direction{idim}, lev)
+            );
         }
     }
 }
@@ -3441,155 +3368,6 @@ WarpX::AliasInitMultiFab (
     multifab_map[name_with_suffix] = mf.get();
 }

-void
-WarpX::AllocInitMultiFabFromModel (
-    std::unique_ptr<amrex::MultiFab>& mf,
-    amrex::MultiFab& mf_model,
-    const int level,
-    const std::string& name,
-    std::optional<amrex::Real> initial_value)
-{
-    const auto name_with_suffix = TagWithLevelSuffix(name, level);
-    const auto tag = amrex::MFInfo().SetTag(name_with_suffix);
-    mf = std::make_unique<amrex::MultiFab>(mf_model.boxArray(), mf_model.DistributionMap(),
-                                           mf_model.nComp(), mf_model.nGrowVect(), tag);
-    if (initial_value) {
-        mf->setVal(*initial_value);
-    }
-    multifab_map[name_with_suffix] = mf.get();
-}
-
-amrex::MultiFab*
-WarpX::getFieldPointerUnchecked (const FieldType field_type, const int lev, const int direction) const
-{
-    // This function does *not* check if the returned field pointer is != nullptr
-
-    amrex::MultiFab* field_pointer = nullptr;
-
-    switch(field_type)
-    {
-        case FieldType::Efield_aux :
-            field_pointer = Efield_aux[lev][direction].get();
-            break;
-        case FieldType::Bfield_aux :
-            field_pointer = Bfield_aux[lev][direction].get();
-            break;
-        case FieldType::Efield_fp :
-            field_pointer = Efield_fp[lev][direction].get();
-            break;
-        case FieldType::Bfield_fp :
-            field_pointer = Bfield_fp[lev][direction].get();
-            break;
-        case FieldType::Efield_fp_external :
-            field_pointer = Efield_fp_external[lev][direction].get();
-            break;
-        case FieldType::Bfield_fp_external :
-            field_pointer = Bfield_fp_external[lev][direction].get();
-            break;
-        case FieldType::current_fp :
-            field_pointer = current_fp[lev][direction].get();
-            break;
-        case FieldType::current_fp_nodal :
-            field_pointer = current_fp_nodal[lev][direction].get();
-            break;
-        case FieldType::rho_fp :
-            field_pointer = rho_fp[lev].get();
-            break;
-        case FieldType::F_fp :
-            field_pointer = F_fp[lev].get();
-            break;
-        case FieldType::G_fp :
-            field_pointer = G_fp[lev].get();
-            break;
-        case FieldType::phi_fp :
-            field_pointer = phi_fp[lev].get();
-            break;
-        case FieldType::vector_potential_fp :
-            field_pointer = vector_potential_fp_nodal[lev][direction].get();
-            break;
-        case FieldType::Efield_cp :
-            field_pointer = Efield_cp[lev][direction].get();
-            break;
-        case FieldType::Bfield_cp :
-            field_pointer = Bfield_cp[lev][direction].get();
-            break;
-        case FieldType::current_cp :
-            field_pointer = current_cp[lev][direction].get();
-            break;
-        case FieldType::rho_cp :
-            field_pointer = rho_cp[lev].get();
-            break;
-        case FieldType::F_cp :
-            field_pointer = F_cp[lev].get();
-            break;
-        case FieldType::G_cp :
-            field_pointer = G_cp[lev].get();
-            break;
-        case FieldType::edge_lengths :
-            field_pointer = m_edge_lengths[lev][direction].get();
-            break;
-        case FieldType::face_areas :
-            field_pointer = m_face_areas[lev][direction].get();
-            break;
-        case FieldType::Efield_avg_fp :
-            field_pointer = Efield_avg_fp[lev][direction].get();
-            break;
-        case FieldType::Bfield_avg_fp :
-            field_pointer = Bfield_avg_fp[lev][direction].get();
-            break;
-        case FieldType::Efield_avg_cp :
-            field_pointer = Efield_avg_cp[lev][direction].get();
-            break;
-        case FieldType::Bfield_avg_cp :
-            field_pointer = Bfield_avg_cp[lev][direction].get();
-            break;
-        default:
-            WARPX_ABORT_WITH_MESSAGE("Invalid field type");
-            break;
-    }
-
-    return field_pointer;
-}
-
-bool
-WarpX::isFieldInitialized (const FieldType field_type, const int lev, const int direction) const
-{
-    const bool is_field_init = (getFieldPointerUnchecked(field_type, lev, direction) != nullptr);
-    return is_field_init;
-}
-
-amrex::MultiFab*
-WarpX::getFieldPointer (const FieldType field_type, const int lev, const int direction) const
-{
-    auto* const field_pointer = getFieldPointerUnchecked(field_type, lev, direction);
-    WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
-        field_pointer != nullptr, "Requested field is not initialized!");
-    return field_pointer;
-}
-
-std::array<amrex::MultiFab*,3>
-WarpX::getFieldPointerArray (const FieldType field_type, const int lev) const
-{
-    WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
-        (field_type == FieldType::Efield_aux) || (field_type == FieldType::Bfield_aux) ||
-        (field_type == FieldType::Efield_fp) || (field_type == FieldType::Bfield_fp) ||
-        (field_type == FieldType::Efield_fp_external) || (field_type == FieldType::Bfield_fp_external) ||
-        (field_type == FieldType::current_fp) || (field_type == FieldType::current_fp_nodal) ||
-        (field_type == FieldType::Efield_cp) || (field_type == FieldType::Bfield_cp) ||
-        (field_type == FieldType::current_cp), "Requested field type is not a vector.");
-
-    return std::array<amrex::MultiFab*,3>{
-        getFieldPointer(field_type, lev, 0),
-        getFieldPointer(field_type, lev, 1),
-        getFieldPointer(field_type, lev, 2)};
-}
-
-const amrex::MultiFab&
-WarpX::getField(FieldType field_type, const int lev, const int direction) const
-{
-    return *getFieldPointer(field_type, lev, direction);
-}
-
 amrex::DistributionMapping
 WarpX::MakeDistributionMap (int lev, amrex::BoxArray const& ba)
 {
@@ -3616,55 +3394,22 @@ WarpX::MakeDistributionMap (int lev, amrex::BoxArray const& ba)
     }
 }

-const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>&
-WarpX::getMultiLevelField(warpx::fields::FieldType field_type) const
-{
-    switch(field_type)
-    {
-        case FieldType::Efield_aux :
-            return Efield_aux;
-        case FieldType::Bfield_aux :
-            return Bfield_aux;
-        case FieldType::Efield_fp :
-            return Efield_fp;
-        case FieldType::Efield_fp_external :
-            return Efield_fp_external;
-        case FieldType::Bfield_fp :
-            return Bfield_fp;
-        case FieldType::Bfield_fp_external :
-            return Bfield_fp_external;
-        case FieldType::current_fp :
-            return current_fp;
-        case FieldType::current_fp_nodal :
-            return current_fp_nodal;
-        case FieldType::Efield_cp :
-            return Efield_cp;
-        case FieldType::Bfield_cp :
-            return Bfield_cp;
-        case FieldType::current_cp :
-            return current_cp;
-        default:
-            WARPX_ABORT_WITH_MESSAGE("Invalid field type");
-            return Efield_fp;
-    }
-}
-
 const amrex::iMultiFab*
 WarpX::getFieldDotMaskPointer ( FieldType field_type, int lev, int dir ) const
 {
     switch(field_type)
     {
         case FieldType::Efield_fp :
-            SetDotMask( Efield_dotMask[lev][dir], field_type, lev, dir );
+            SetDotMask( Efield_dotMask[lev][dir], "Efield_fp", lev, dir );
             return Efield_dotMask[lev][dir].get();
         case FieldType::Bfield_fp :
-            SetDotMask( Bfield_dotMask[lev][dir], field_type, lev, dir );
+            SetDotMask( Bfield_dotMask[lev][dir], "Bfield_fp", lev, dir );
             return Bfield_dotMask[lev][dir].get();
         case FieldType::vector_potential_fp :
-            SetDotMask( Afield_dotMask[lev][dir], field_type, lev, dir );
+            SetDotMask( Afield_dotMask[lev][dir], "vector_potential_fp", lev, dir );
             return Afield_dotMask[lev][dir].get();
         case FieldType::phi_fp :
-            SetDotMask( phi_dotMask[lev], field_type, lev, 0 );
+            SetDotMask( phi_dotMask[lev], "phi_fp", lev, 0 );
             return phi_dotMask[lev].get();
         default:
             WARPX_ABORT_WITH_MESSAGE("Invalid field type for dotMask");
@@ -3673,15 +3418,15 @@ WarpX::getFieldDotMaskPointer ( FieldType field_type, int lev, int dir ) const
 }

 void WarpX::SetDotMask( std::unique_ptr<amrex::iMultiFab>& field_dotMask,
-                        FieldType field_type, int lev, int dir ) const
+                        std::string const & field_name, int lev, int dir ) const
 {
     // Define the dot mask for this field_type needed to properly compute dotProduct()
     // for field values that have shared locations on different MPI ranks
     if (field_dotMask != nullptr) { return; }

-    const amrex::MultiFab* this_field = getFieldPointer(field_type,lev,dir);
-    const amrex::BoxArray& this_ba = this_field->boxArray();
-    const amrex::MultiFab tmp( this_ba, this_field->DistributionMap(),
+    ablastr::fields::ConstVectorField const& this_field = m_fields.get_alldirs(field_name,lev);
+    const amrex::BoxArray& this_ba = this_field[dir]->boxArray();
+    const amrex::MultiFab tmp( this_ba, this_field[dir]->DistributionMap(),
                                1, 0, amrex::MFInfo().SetAlloc(false) );
     const amrex::Periodicity& period = Geom(lev).periodicity();
     field_dotMask = tmp.OwnerMask(period);
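
The remaining files introduce the ablastr::fields::MultiFabRegister that the WarpX.cpp changes above rely on. As a reading aid, a minimal end-to-end sketch of that API; the keys "rho_fp"/"Efield_fp"/"Efield_aux" and the `ba`, `dm`, `ng` objects are assumed for illustration (string keys work because the templated methods accept anything castable to std::string), and the scalar get() overload is assumed to mirror the has() overload shown above:

    using namespace amrex::literals;
    using ablastr::fields::Direction;

    ablastr::fields::MultiFabRegister fields;

    // Scalar field on level 0: one component, ghost cells ng, initialized to zero.
    fields.alloc_init("rho_fp", 0, ba, dm, /*ncomp*/1, ng, 0.0_rt);

    // Vector field: one MultiFab per direction (staggering may differ per component).
    for (int d = 0; d < 3; ++d) {
        fields.alloc_init("Efield_fp", Direction{d}, 0, ba, dm, 1, ng, 0.0_rt);
    }

    // Non-owning alias, as used for Efield_aux on the fine patch in the hunks above.
    fields.alias_init("Efield_aux", "Efield_fp", Direction{0}, 0, 0.0_rt);

    // Guarded lookup and use.
    if (fields.has("rho_fp", 0)) {
        amrex::MultiFab* rho = fields.get("rho_fp", 0);
        rho->setVal(1.0_rt);
    }
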
"phi_fp", lev, 0 ); return phi_dotMask[lev].get(); default: WARPX_ABORT_WITH_MESSAGE("Invalid field type for dotMask"); @@ -3673,15 +3418,15 @@ WarpX::getFieldDotMaskPointer ( FieldType field_type, int lev, int dir ) const } void WarpX::SetDotMask( std::unique_ptr& field_dotMask, - FieldType field_type, int lev, int dir ) const + std::string const & field_name, int lev, int dir ) const { // Define the dot mask for this field_type needed to properly compute dotProduct() // for field values that have shared locations on different MPI ranks if (field_dotMask != nullptr) { return; } - const amrex::MultiFab* this_field = getFieldPointer(field_type,lev,dir); - const amrex::BoxArray& this_ba = this_field->boxArray(); - const amrex::MultiFab tmp( this_ba, this_field->DistributionMap(), + ablastr::fields::ConstVectorField const& this_field = m_fields.get_alldirs(field_name,lev); + const amrex::BoxArray& this_ba = this_field[dir]->boxArray(); + const amrex::MultiFab tmp( this_ba, this_field[dir]->DistributionMap(), 1, 0, amrex::MFInfo().SetAlloc(false) ); const amrex::Periodicity& period = Geom(lev).periodicity(); field_dotMask = tmp.OwnerMask(period); diff --git a/Source/ablastr/fields/CMakeLists.txt b/Source/ablastr/fields/CMakeLists.txt index 56acc678217..011d765a6bb 100644 --- a/Source/ablastr/fields/CMakeLists.txt +++ b/Source/ablastr/fields/CMakeLists.txt @@ -1,5 +1,11 @@ foreach(D IN LISTS WarpX_DIMS) warpx_set_suffix_dims(SD ${D}) + + target_sources(ablastr_${SD} + PRIVATE + MultiFabRegister.cpp + ) + if(ABLASTR_FFT AND D EQUAL 3) target_sources(ablastr_${SD} PRIVATE diff --git a/Source/ablastr/fields/Make.package b/Source/ablastr/fields/Make.package index 01392991559..727a17b6de8 100644 --- a/Source/ablastr/fields/Make.package +++ b/Source/ablastr/fields/Make.package @@ -1,4 +1,5 @@ ifeq ($(USE_FFT),TRUE) + CEXE_sources += MultiFabRegister.cpp ifeq ($(DIM),3) CEXE_sources += IntegratedGreenFunctionSolver.cpp endif diff --git a/Source/ablastr/fields/MultiFabRegister.H b/Source/ablastr/fields/MultiFabRegister.H new file mode 100644 index 00000000000..b67784aa94c --- /dev/null +++ b/Source/ablastr/fields/MultiFabRegister.H @@ -0,0 +1,819 @@ +/* Copyright 2024 The ABLASTR Community + * + * This file is part of ABLASTR. + * + * License: BSD-3-Clause-LBNL + * Authors: Axel Huebl + */ +#ifndef ABLASTR_FIELDS_MF_REGISTER_H +#define ABLASTR_FIELDS_MF_REGISTER_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + + +namespace +{ + // type trait helpers in lieu of an amrex::is_amrex_enum + template > + struct is_castable_to_string : std::false_type {}; + + template + struct is_castable_to_string(std::declval()))>> : std::true_type {}; + + /** helper to either cast a string/char array to string to query an AMREX_ENUM */ + template + std::string getExtractedName (T name) + { + if constexpr(is_castable_to_string()) + { + // already a unique string key + return std::string(name); + } else + { + // user-defined AMREX_ENUM or compile error + return amrex::getEnumNameString(name); + } + } +} + +namespace ablastr::fields +{ + /** Components (base vector directions) of vector/tensor fields. + * + * Because of different staggering, the components of vector/tensor fields are stored + * in separate (i)MultiFab. 
+ * + * @todo: synchronize with AMReX "enum class Direction" + */ + struct Direction + { + int dir = 0; + + bool operator<(const Direction& other) const + { + return other.dir < this->dir; + } + + /* TODO: just temporary int compatibility */ + operator int() const { return dir; } + }; + + /** A scalar field (a MultiFab) + * + * Note: might still have components, e.g., for copies at different times. + */ + using ScalarField = amrex::MultiFab*; + + /** A read-only scalar field (a MultiFab) + * + * Note: might still have components, e.g., for copies at different times. + */ + using ConstScalarField = amrex::MultiFab const *; + + /** A vector field of three MultiFab + */ + //using VectorField = ablastr::utils::ConstMap; + using VectorField = std::array; + + /** A read-only vector field of three MultiFab + */ + //using VectorField = ablastr::utils::ConstMap; + using ConstVectorField = std::array; + + /** A multi-level scalar field + */ + using MultiLevelScalarField = amrex::Vector; + + /** A read-only multi-level scalar field + */ + using ConstMultiLevelScalarField = amrex::Vector; + + /** A multi-level vector field + */ + using MultiLevelVectorField = amrex::Vector; + + /** A read-only multi-level vector field + */ + using ConstMultiLevelVectorField = amrex::Vector; + + /** A class to control the lifetime and properties of a MultiFab (field). + * + * This class is used to own the lifetime of an amrex::MultiFab and to store + * associated information around it regarding unique naming, scalar/vector/tensor + * properties, aliasing, load balancing, etc. + */ + struct MultiFabOwner + { + // TODO: also add iMultiFab via std::variant + + /** owned (i)MultiFab */ + amrex::MultiFab m_mf; + + /** Components (base vector directions) of this MultiFab */ + std::optional m_dir = std::nullopt; + + /** the MR level of this (i)MultiFab */ + int m_level = 0; + + /** remake distribution map on load balance, @see amrex::AmrCore::RemakeLevel */ + bool m_remake = true; + + /** redistribute on @see amrex::AmrCore::RemakeLevel */ + bool m_redistribute_on_remake = true; + + /** if m_mf is a non-owning alias, this string tracks the name of the owner */ + std::string m_owner; + + /** Is this part of a vector/tensor? */ + AMREX_INLINE + bool + is_vector () const { return m_dir.has_value(); } + + /** Is this an alias MultiFab? + * + * If yes, that means we do not own the memory. + */ + AMREX_INLINE + bool + is_alias () const { return !m_owner.empty(); } + }; + + /** This is a register of fields aka amrex::MultiFabs. + * + * This is owned by a simulation instance. All used fields should be registered here. + * Internally, this contains @see MultiFabOwner values. + */ + struct MultiFabRegister + { + // Avoid accidental copies when passing to member functions + MultiFabRegister() = default; + MultiFabRegister(MultiFabRegister const &) = delete; + MultiFabRegister(MultiFabRegister&&) = delete; + MultiFabRegister& operator=(MultiFabRegister const &) = delete; + MultiFabRegister& operator=(MultiFabRegister&&) = delete; + ~MultiFabRegister() = default; + + /** Allocate and optionally initialize a MultiFab (field) + * + * This registers a new MultiFab under a unique name, allocates it and + * optionally assigns it an initial value. 
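+         *
+         * A minimal sketch, assuming a BoxArray `ba` and a DistributionMapping
+         * `dm` already exist (the field name is a placeholder):
+         * @code
+         * // x-component, MR level 0, 1 component, 1 guard cell, zero-initialized
+         * m_fields.alloc_init("Efield_fp", Direction{0}, 0, ba, dm,
+         *                     1, amrex::IntVect::TheUnitVector(), 0.0);
+         * @endcode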
+ * + * @param name a unique name for this field + * @param level the MR level to represent + * @param ba the list of boxes to cover the field + * @param dm the distribution mapping for load balancing with MPI + * @param ncomp the number of components of the field (all with the same staggering) + * @param ngrow the number of guard (ghost, halo) cells + * @param initial_value the optional initial value + * @param remake follow the default domain decomposition of the simulation + * @param redistribute_on_remake redistribute on @see amrex::AmrCore::RemakeLevel + * @return pointer to newly allocated MultiFab + */ + template + amrex::MultiFab* + alloc_init ( + T name, + int level, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm, + int ncomp, + amrex::IntVect const & ngrow, + std::optional initial_value = std::nullopt, + bool remake = true, + bool redistribute_on_remake = true + ) + { + return internal_alloc_init( + getExtractedName(name), + level, + ba, + dm, + ncomp, + ngrow, + initial_value, + remake, + redistribute_on_remake + ); + } + + /** Allocate and optionally initialize a MultiFab (field) + * + * This registers a new MultiFab under a unique name, allocates it and + * optionally assigns it an initial value. + * + * @param name a unique name for this field + * @param dir the field component for vector fields ("direction" of the unit vector) + * @param level the MR level to represent + * @param ba the list of boxes to cover the field + * @param dm the distribution mapping for load balancing with MPI + * @param ncomp the number of components of the field (all with the same staggering) + * @param ngrow the number of guard (ghost, halo) cells + * @param initial_value the optional initial value + * @param remake follow the default domain decomposition of the simulation + * @param redistribute_on_remake redistribute on @see amrex::AmrCore::RemakeLevel + * @return pointer to newly allocated MultiFab + */ + template + amrex::MultiFab* + alloc_init ( + T name, + Direction dir, + int level, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm, + int ncomp, + amrex::IntVect const & ngrow, + std::optional initial_value = std::nullopt, + bool remake = true, + bool redistribute_on_remake = true + ) + { + return internal_alloc_init( + getExtractedName(name), + dir, + level, + ba, + dm, + ncomp, + ngrow, + initial_value, + remake, + redistribute_on_remake + ); + } + + /** Create an alias of a MultiFab (field) + * + * Registers a new name for an existing MultiFab (field) and optionally assigning + * a value. + * + * @param new_name new name + * @param alias_name owner name to alias + * @param level the MR level to represent + * @param initial_value the optional value to assign + * @return the newly aliased MultiFab + */ + template + amrex::MultiFab* + alias_init ( + N new_name, + A alias_name, + int level, + std::optional initial_value = std::nullopt + ) + { + return internal_alias_init( + getExtractedName(new_name), + getExtractedName(alias_name), + level, + initial_value + ); + } + + /** Create an alias of a MultiFab (field) + * + * Registers a new name for an existing MultiFab (field) and optionally assigning + * a value. 
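+         *
+         * A hedged sketch (both names are placeholders for any registered field):
+         * @code
+         * // register "current_fp_nodal[x]" as a non-owning alias of
+         * // "current_fp[x]" on MR level 0, assigned the value 0
+         * m_fields.alias_init("current_fp_nodal", "current_fp", Direction{0}, 0, 0.0);
+         * @endcode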
+ * + * @param new_name new name + * @param alias_name owner name to alias + * @param dir the field component for vector fields ("direction" of the unit vector) both in the alias and aliased + * @param level the MR level to represent + * @param initial_value the optional value to assign + * @return the newly aliased MultiFab + */ + template + amrex::MultiFab* + alias_init ( + N new_name, + A alias_name, + Direction dir, + int level, + std::optional initial_value = std::nullopt + ) + { + return internal_alias_init( + getExtractedName(new_name), + getExtractedName(alias_name), + dir, + level, + initial_value + ); + } + + /** Check if a scalar MultiFab (field) is registered. + * + * @param name the name to check if registered + * @param level the MR level to check + * @return true if contained, otherwise false + */ + template + [[nodiscard]] bool + has ( + T name, + int level + ) const + { + return internal_has( + getExtractedName(name), + level + ); + } + + /** Check if a MultiFab that is part of a vector/tensor field is registered. + * + * @param name the name to check if registered + * @param dir the field component for vector fields ("direction" of the unit vector) + * @param level the MR level to check + * @return true if contained, otherwise false + */ + template + [[nodiscard]] bool + has ( + T name, + Direction dir, + int level + ) const + { + return internal_has( + getExtractedName(name), + dir, + level + ); + } + + /** Check if a MultiFab vector field is registered. + * + * @param name the name to check if registered + * @param level the MR level to check + * @return true if contained, otherwise false + */ + template + [[nodiscard]] bool + has_vector ( + T name, + int level + ) const + { + return internal_has_vector( + getExtractedName(name), + level + ); + } + + /** Return a scalar MultiFab (field). + * + * This throws a runtime error if the requested field is not present. + * + * @param name the name of the field + * @param level the MR level + * @return a non-owning pointer to the MultiFab (field) + */ + template + [[nodiscard]] amrex::MultiFab* + get ( + T name, + int level + ) + { + return internal_get( + getExtractedName(name), + level + ); + } + + /** Return a MultiFab that is part of a vector/tensor field. + * + * This throws a runtime error if the requested field is not present. + * + * @param name the name of the field + * @param dir the field component for vector fields ("direction" of the unit vector) + * @param level the MR level + * @return a non-owning pointer to the MultiFab (field) + */ + template + [[nodiscard]] amrex::MultiFab* + get ( + T name, + Direction dir, + int level + ) + { + return internal_get( + getExtractedName(name), + dir, + level + ); + } + + /** Return a scalar MultiFab (field). + * + * This throws a runtime error if the requested field is not present. + * + * @param name the name of the field + * @param level the MR level + * @return a non-owning pointer to the MultiFab (field) + */ + template + [[nodiscard]] amrex::MultiFab const * + get ( + T name, + int level + ) const + { + return internal_get( + getExtractedName(name), + level + ); + } + + /** Return a MultiFab that is part of a vector/tensor field. + * + * This throws a runtime error if the requested field is not present. 
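+         *
+         * Since a missing field throws, a typical guarded access looks like the
+         * following sketch (field name and level are placeholders):
+         * @code
+         * if (m_fields.has("Bfield_fp", Direction{2}, lev)) {
+         *     amrex::MultiFab const* Bz = m_fields.get("Bfield_fp", Direction{2}, lev);
+         * }
+         * @endcode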
+         *
+         * @param name the name of the field
+         * @param dir the field component for vector fields ("direction" of the unit vector)
+         * @param level the MR level
+         * @return a non-owning pointer to the MultiFab (field)
+         */
+        template <typename T>
+        [[nodiscard]] amrex::MultiFab const *
+        get (
+            T name,
+            Direction dir,
+            int level
+        ) const
+        {
+            return internal_get(
+                getExtractedName(name),
+                dir,
+                level
+            );
+        }
+
+        /** Return the MultiFab of a scalar field on all MR levels.
+         *
+         * This throws a runtime error if the requested field is not present.
+         *
+         * @param name the name of the field
+         * @param finest_level the highest MR level to return
+         * @return non-owning pointers to the MultiFab (field) on all levels
+         */
+        //@{
+        template <typename T>
+        [[nodiscard]] MultiLevelScalarField
+        get_mr_levels (
+            T name,
+            int finest_level
+        )
+        {
+            return internal_get_mr_levels(
+                getExtractedName(name),
+                finest_level
+            );
+        }
+        template <typename T>
+        [[nodiscard]] ConstMultiLevelScalarField
+        get_mr_levels (
+            T name,
+            int finest_level
+        ) const
+        {
+            return internal_get_mr_levels(
+                getExtractedName(name),
+                finest_level
+            );
+        }
+        //@}
+
+        /** Return all components (directions) of a vector field on one MR level.
+         *
+         * Same as get above, but returns all three components of a vector field for a name.
+         *
+         * @param name the name of the field
+         * @param level the MR level
+         * @return non-owning pointers to all components of a vector field
+         */
+        //@{
+        template <typename T>
+        [[nodiscard]] VectorField
+        get_alldirs (
+            T name,
+            int level
+        )
+        {
+            return internal_get_alldirs(
+                getExtractedName(name),
+                level
+            );
+        }
+        template <typename T>
+        [[nodiscard]] ConstVectorField
+        get_alldirs (
+            T name,
+            int level
+        ) const
+        {
+            return internal_get_alldirs(
+                getExtractedName(name),
+                level
+            );
+        }
+        //@}
+
+        /** Return a vector field on all MR levels.
+         *
+         * Outer loop: MR levels.
+         * Inner loop: directions (components).
+         *
+         * @param name the name of the field
+         * @param finest_level the highest MR level to return
+         * @return non-owning pointers to all components of a vector field on all MR levels
+         */
+        //@{
+        template <typename T>
+        [[nodiscard]] MultiLevelVectorField
+        get_mr_levels_alldirs (
+            T name,
+            int finest_level
+        )
+        {
+            return internal_get_mr_levels_alldirs(
+                getExtractedName(name),
+                finest_level
+            );
+        }
+        template <typename T>
+        [[nodiscard]] ConstMultiLevelVectorField
+        get_mr_levels_alldirs (
+            T name,
+            int finest_level
+        ) const
+        {
+            return internal_get_mr_levels_alldirs(
+                getExtractedName(name),
+                finest_level
+            );
+        }
+        //@}
+
+        /** List the internal names of all registered fields.
+         *
+         * @return all currently allocated and registered fields
+         */
+        [[nodiscard]] std::vector<std::string>
+        list () const;
+
+        /** Deallocate and remove a scalar field.
+         *
+         * @param name the name of the field
+         * @param level the MR level
+         */
+        template <typename T>
+        void
+        erase (
+            T name,
+            int level
+        )
+        {
+            internal_erase(getExtractedName(name), level);
+        }
+
+        /** Deallocate and remove a vector field component.
+         *
+         * @param name the name of the field
+         * @param dir the field component for vector fields ("direction" of the unit vector)
+         * @param level the MR level
+         */
+        template <typename T>
+        void
+        erase (
+            T name,
+            Direction dir,
+            int level
+        )
+        {
+            internal_erase(getExtractedName(name), dir, level);
+        }
+
+        /** Erase all MultiFabs on a specific MR level.
+         *
+         * Calls @see erase for all MultiFabs on a specific level.
+         *
+         * @param level the MR level to erase all MultiFabs from
+         */
+        void
+        clear_level (
+            int level
+        );
+
+        /** Remake all (i)MultiFab with a new distribution mapping.
+         *
+         * If redistribute is true, we also copy from the old data into the new.
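+         *
+         * A minimal call-site sketch (assuming the load balancer produced a new
+         * DistributionMapping `new_dm` for MR level `lev`):
+         * @code
+         * m_fields.remake_level(lev, new_dm);
+         * @endcode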
+ * + * @param level the MR level to erase all MultiFabs from + * @param new_dm new distribution mapping + */ + void + remake_level ( + int other_level, + amrex::DistributionMapping const & new_dm + ); + + /** Create the register name of scalar field and MR level + * + * @param name the name of the field + * @param level the MR level + * @return internal name of the field in the register + */ + [[nodiscard]] std::string + mf_name ( + std::string name, + int level + ) const; + + /** Create the register name of vector field, component direction and MR level + * + * @param name the name of the field + * @param dir the field component for vector fields ("direction" of the unit vector) + * @param level the MR level + * @return internal name of the field in the register + */ + [[nodiscard]] std::string + mf_name ( + std::string name, + Direction dir, + int level + ) const; + + private: + amrex::MultiFab * + internal_get ( + std::string const & key + ); + [[nodiscard]] amrex::MultiFab const * + internal_get ( + std::string const & key + ) const; + + amrex::MultiFab* + internal_alloc_init ( + std::string const & name, + int level, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm, + int ncomp, + amrex::IntVect const & ngrow, + std::optional initial_value = std::nullopt, + bool remake = true, + bool redistribute_on_remake = true + ); + amrex::MultiFab* + internal_alloc_init ( + std::string const & name, + Direction dir, + int level, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm, + int ncomp, + amrex::IntVect const & ngrow, + std::optional initial_value = std::nullopt, + bool remake = true, + bool redistribute_on_remake = true + ); + + amrex::MultiFab* + internal_alias_init ( + std::string const & new_name, + std::string const & alias_name, + int level, + std::optional initial_value = std::nullopt + ); + amrex::MultiFab* + internal_alias_init ( + std::string const & new_name, + std::string const & alias_name, + Direction dir, + int level, + std::optional initial_value = std::nullopt + ); + + [[nodiscard]] bool + internal_has ( + std::string const & name, + int level + ) const; + [[nodiscard]] bool + internal_has ( + std::string const & name, + Direction dir, + int level + ) const; + [[nodiscard]] bool + internal_has_vector ( + std::string const & name, + int level + ) const; + + amrex::MultiFab * + internal_get ( + std::string const & name, + int level + ); + amrex::MultiFab const * + internal_get ( + std::string const & name, + int level + ) const; + amrex::MultiFab * + internal_get ( + std::string const & name, + Direction dir, + int level + ); + amrex::MultiFab const * + internal_get ( + std::string const & name, + Direction dir, + int level + ) const; + MultiLevelScalarField + internal_get_mr_levels ( + std::string const & name, + int finest_level + ); + ConstMultiLevelScalarField + internal_get_mr_levels ( + std::string const & name, + int finest_level + ) const; + VectorField + internal_get_alldirs ( + std::string const & name, + int level + ); + ConstVectorField + internal_get_alldirs ( + std::string const & name, + int level + ) const; + MultiLevelVectorField + internal_get_mr_levels_alldirs ( + std::string const & name, + int finest_level + ); + ConstMultiLevelVectorField + internal_get_mr_levels_alldirs ( + std::string const & name, + int finest_level + ) const; + + void + internal_erase ( + std::string const & name, + int level + ); + void + internal_erase ( + std::string const & name, + Direction dir, + int level + ); + + /** data storage: 
ownership and lifetime control */ + std::map< + std::string, + MultiFabOwner + > m_mf_register; + + /** the three directions of a vector field */ + std::vector m_all_dirs = {Direction{0}, Direction{1}, Direction{2}}; + }; + + /** Little temporary helper function to pass temporary MultiFabs as VectorField. + * + * @return pointers to externally managed vector field components (3 MultiFab) + */ + VectorField + a2m ( + std::array< std::unique_ptr, 3 > const & old_vectorfield + ); + +} // namespace ablastr::fields + +#endif // ABLASTR_FIELDS_MF_REGISTER_H diff --git a/Source/ablastr/fields/MultiFabRegister.cpp b/Source/ablastr/fields/MultiFabRegister.cpp new file mode 100644 index 00000000000..106a3aede79 --- /dev/null +++ b/Source/ablastr/fields/MultiFabRegister.cpp @@ -0,0 +1,626 @@ +/* Copyright 2024 The ABLASTR Community + * + * This file is part of ABLASTR. + * + * License: BSD-3-Clause-LBNL + * Authors: Axel Huebl + */ +#include "MultiFabRegister.H" + +#include + +#include +#include +#include +#include +#include +#include +#include + + +namespace ablastr::fields +{ + amrex::MultiFab* + MultiFabRegister::internal_alloc_init ( + std::string const & name, + int level, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm, + int ncomp, + amrex::IntVect const & ngrow, + std::optional initial_value, + bool remake, + bool redistribute_on_remake + ) + { + // checks + if (has(name, level)) { + throw std::runtime_error("MultiFabRegister::alloc_init failed because " + name + " already exists."); + } + + // fully qualified name + std::string const internal_name = mf_name(name, level); + + // allocate + const auto tag = amrex::MFInfo().SetTag(internal_name); + auto [it, success] = m_mf_register.emplace( + internal_name, + MultiFabOwner{ + {ba, dm, ncomp, ngrow, tag}, + std::nullopt, // scalar: no direction + level, + remake, + redistribute_on_remake, + "" // we own the memory + } + ); + if (!success) { + throw std::runtime_error("MultiFabRegister::alloc_init failed for " + internal_name); + } + + // a shorthand alias for the code below + amrex::MultiFab & mf = it->second.m_mf; + + // initialize with value + if (initial_value) { + mf.setVal(*initial_value); + } + + return &mf; + } + + amrex::MultiFab* + MultiFabRegister::internal_alloc_init ( + std::string const & name, + Direction dir, + int level, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm, + int ncomp, + amrex::IntVect const & ngrow, + std::optional initial_value, + bool remake, + bool redistribute_on_remake + ) + { + // checks + if (has(name, dir, level)) { + throw std::runtime_error( + "MultiFabRegister::alloc_init failed because " + + mf_name(name, dir, level) + + " already exists." 
+ ); + } + + // fully qualified name + std::string const internal_name = mf_name(name, dir, level); + + // allocate + const auto tag = amrex::MFInfo().SetTag(internal_name); + auto [it, success] = m_mf_register.emplace( + internal_name, + MultiFabOwner{ + {ba, dm, ncomp, ngrow, tag}, + dir, + level, + remake, + redistribute_on_remake, + "" // we own the memory + } + ); + if (!success) { + throw std::runtime_error("MultiFabRegister::alloc_init failed for " + internal_name); + } + + // a shorthand alias for the code below + amrex::MultiFab & mf = it->second.m_mf; + + // initialize with value + if (initial_value) { + mf.setVal(*initial_value); + } + + return &mf; + } + + amrex::MultiFab* + MultiFabRegister::internal_alias_init ( + std::string const & new_name, + std::string const & alias_name, + int level, + std::optional initial_value + ) + { + // checks + if (has(new_name, level)) { + throw std::runtime_error( + "MultiFabRegister::alias_init failed because " + + mf_name(new_name, level) + + " already exists." + ); + } + if (!has(alias_name, level)) { + throw std::runtime_error( + "MultiFabRegister::alias_init failed because " + + mf_name(alias_name, level) + + " does not exist." + ); + } + + // fully qualified name + std::string const internal_new_name = mf_name(new_name, level); + std::string const internal_alias_name = mf_name(alias_name, level); + + MultiFabOwner & alias = m_mf_register[internal_alias_name]; + amrex::MultiFab & mf_alias = alias.m_mf; + + // allocate + auto [it, success] = m_mf_register.emplace( + internal_new_name, + MultiFabOwner{ + {mf_alias, amrex::make_alias, 0, mf_alias.nComp()}, + std::nullopt, // scalar: no direction + level, + alias.m_remake, + alias.m_redistribute_on_remake, + internal_alias_name + } + + ); + if (!success) { + throw std::runtime_error("MultiFabRegister::alias_init failed for " + internal_new_name); + } + + // a shorthand alias for the code below + amrex::MultiFab & mf = it->second.m_mf; + + // initialize with value + if (initial_value) { + mf.setVal(*initial_value); + } + + return &mf; + } + + amrex::MultiFab* + MultiFabRegister::internal_alias_init ( + std::string const & new_name, + std::string const & alias_name, + Direction dir, + int level, + std::optional initial_value + ) + { + // checks + if (has(new_name, dir, level)) { + throw std::runtime_error( + "MultiFabRegister::alias_init failed because " + + mf_name(new_name, dir, level) + + " already exists." + ); + } + if (!has(alias_name, dir, level)) { + throw std::runtime_error( + "MultiFabRegister::alias_init failed because " + + mf_name(alias_name, dir, level) + + " does not exist." 
+ ); + } + + // fully qualified name + std::string const internal_new_name = mf_name(new_name, dir, level); + std::string const internal_alias_name = mf_name(alias_name, dir, level); + + MultiFabOwner & alias = m_mf_register[internal_alias_name]; + amrex::MultiFab & mf_alias = alias.m_mf; + + // allocate + auto [it, success] = m_mf_register.emplace( + internal_new_name, + MultiFabOwner{ + {mf_alias, amrex::make_alias, 0, mf_alias.nComp()}, + dir, + level, + alias.m_remake, + alias.m_redistribute_on_remake, + internal_alias_name + } + ); + if (!success) { + throw std::runtime_error("MultiFabRegister::alias_init failed for " + internal_new_name); + } + + // a short-hand alias for the code below + amrex::MultiFab & mf = it->second.m_mf; + + // initialize with value + if (initial_value) { + mf.setVal(*initial_value); + } + + return &mf; + } + + void + MultiFabRegister::remake_level ( + int level, + amrex::DistributionMapping const & new_dm + ) + { + // Owning MultiFabs + for (auto & element : m_mf_register ) + { + MultiFabOwner & mf_owner = element.second; + + // keep distribution map as it is? + if (!mf_owner.m_remake) { + continue; + } + + // remake MultiFab with new distribution map + if (mf_owner.m_level == level && !mf_owner.is_alias()) { + const amrex::MultiFab & mf = mf_owner.m_mf; + amrex::IntVect const & ng = mf.nGrowVect(); + const auto tag = amrex::MFInfo().SetTag(mf.tags()[0]); + amrex::MultiFab new_mf(mf.boxArray(), new_dm, mf.nComp(), ng, tag); + + // copy data to new MultiFab: Only done for persistent data like E and B field, not for + // temporary things like currents, etc. + if (mf_owner.m_redistribute_on_remake) { + new_mf.Redistribute(mf, 0, 0, mf.nComp(), ng); + } + + // replace old MultiFab with new one, deallocate old one + mf_owner.m_mf = std::move(new_mf); + } + } + + // Aliases + for (auto & element : m_mf_register ) + { + MultiFabOwner & mf_owner = element.second; + + // keep distribution map as it is? 
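+            // an alias inherits m_remake from its owner (set in alias_init),
+            // so owner and alias are consistently kept or remade together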
+ if (!mf_owner.m_remake) { + continue; + } + + if (mf_owner.m_level == level && mf_owner.is_alias()) { + const amrex::MultiFab & mf = m_mf_register[mf_owner.m_owner].m_mf; + amrex::MultiFab new_mf(mf, amrex::make_alias, 0, mf.nComp()); + + // no copy via Redistribute: the owner was already redistributed + + // replace old MultiFab with new one, deallocate old one + mf_owner.m_mf = std::move(new_mf); + } + } + } + + bool + MultiFabRegister::internal_has ( + std::string const & name, + int level + ) const + { + std::string const internal_name = mf_name(name, level); + + return m_mf_register.count(internal_name) > 0; + } + + bool + MultiFabRegister::internal_has ( + std::string const & name, + Direction dir, + int level + ) const + { + std::string const internal_name = mf_name(name, dir, level); + + return m_mf_register.count(internal_name) > 0; + } + + bool + MultiFabRegister::internal_has_vector ( + std::string const & name, + int level + ) const + { + unsigned long count = 0; + for (Direction const & dir : m_all_dirs) + { + std::string const internal_name = mf_name(name, dir, level); + count += m_mf_register.count(internal_name); + } + + return count == 3; + } + + amrex::MultiFab* + MultiFabRegister::internal_get ( + std::string const & key + ) + { + if (m_mf_register.count(key) == 0) { + // FIXME: temporary, throw a std::runtime_error + // throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + key); + return nullptr; + } + amrex::MultiFab & mf = m_mf_register.at(key).m_mf; + + return &mf; + } + + amrex::MultiFab const * + MultiFabRegister::internal_get ( + std::string const & key + ) const + { + if (m_mf_register.count(key) == 0) { + // FIXME: temporary, throw a std::runtime_error + // throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + key); + return nullptr; + } + amrex::MultiFab const & mf = m_mf_register.at(key).m_mf; + + return &mf; + } + + amrex::MultiFab* + MultiFabRegister::internal_get ( + std::string const & name, + int level + ) + { + std::string const internal_name = mf_name(name, level); + return internal_get(internal_name); + } + + amrex::MultiFab* + MultiFabRegister::internal_get ( + std::string const & name, + Direction dir, + int level + ) + { + std::string const internal_name = mf_name(name, dir, level); + return internal_get(internal_name); + } + + amrex::MultiFab const * + MultiFabRegister::internal_get ( + std::string const & name, + int level + ) const + { + std::string const internal_name = mf_name(name, level); + return internal_get(internal_name); + } + + amrex::MultiFab const * + MultiFabRegister::internal_get ( + std::string const & name, + Direction dir, + int level + ) const + { + std::string const internal_name = mf_name(name, dir, level); + return internal_get(internal_name); + } + + MultiLevelScalarField + MultiFabRegister::internal_get_mr_levels ( + std::string const & name, + int finest_level + ) + { + MultiLevelScalarField field_on_level; + field_on_level.reserve(finest_level+1); + for (int lvl = 0; lvl <= finest_level; lvl++) + { + field_on_level.push_back(internal_get(name, lvl)); + } + return field_on_level; + } + + ConstMultiLevelScalarField + MultiFabRegister::internal_get_mr_levels ( + std::string const & name, + int finest_level + ) const + { + ConstMultiLevelScalarField field_on_level; + field_on_level.reserve(finest_level+1); + for (int lvl = 0; lvl <= finest_level; lvl++) + { + field_on_level.push_back(internal_get(name, lvl)); + } + return field_on_level; + } + + VectorField + 
MultiFabRegister::internal_get_alldirs ( + std::string const & name, + int level + ) + { + // insert a new level + VectorField vectorField; + + // insert components + for (Direction const & dir : m_all_dirs) + { + vectorField[dir] = internal_get(name, dir, level); + } + return vectorField; + } + + ConstVectorField + MultiFabRegister::internal_get_alldirs ( + std::string const & name, + int level + ) const + { + // insert a new level + ConstVectorField vectorField; + + // insert components + for (Direction const & dir : m_all_dirs) + { + vectorField[dir] = internal_get(name, dir, level); + } + return vectorField; + } + + MultiLevelVectorField + MultiFabRegister::internal_get_mr_levels_alldirs ( + std::string const & name, + int finest_level + ) + { + MultiLevelVectorField field_on_level; + field_on_level.reserve(finest_level+1); + + for (int lvl = 0; lvl <= finest_level; lvl++) + { + // insert a new level + field_on_level.push_back(VectorField{}); + + // insert components + for (Direction const & dir : m_all_dirs) + { + field_on_level[lvl][dir] = internal_get(name, dir, lvl); + } + } + return field_on_level; + } + + ConstMultiLevelVectorField + MultiFabRegister::internal_get_mr_levels_alldirs ( + std::string const & name, + int finest_level + ) const + { + ConstMultiLevelVectorField field_on_level; + field_on_level.reserve(finest_level+1); + + for (int lvl = 0; lvl <= finest_level; lvl++) + { + // insert a new level + field_on_level.push_back(ConstVectorField{}); + + // insert components + for (Direction const & dir : m_all_dirs) + { + field_on_level[lvl][dir] = internal_get(name, dir, lvl); + } + } + return field_on_level; + } + + std::vector + MultiFabRegister::list () const + { + std::vector names; + names.reserve(m_mf_register.size()); + for (auto const & str : m_mf_register) { names.push_back(str.first); } + + return names; + } + + void + MultiFabRegister::internal_erase ( + std::string const & name, + int level + ) + { + std::string const internal_name = mf_name(name, level); + + if (m_mf_register.count(internal_name) != 1) { + throw std::runtime_error("MultiFabRegister::erase name does not exist in register: " + internal_name); + } + m_mf_register.erase(internal_name); + } + + void + MultiFabRegister::internal_erase ( + std::string const & name, + Direction dir, + int level + ) + { + std::string const internal_name = mf_name(name, dir, level); + + if (m_mf_register.count(internal_name) != 1) { + throw std::runtime_error("MultiFabRegister::erase name does not exist in register: " + internal_name); + } + m_mf_register.erase(internal_name); + } + + void + MultiFabRegister::clear_level ( + int level + ) + { + // C++20: Replace with std::erase_if + for (auto first = m_mf_register.begin(), last = m_mf_register.end(); first != last;) + { + if (first->second.m_level == level) { + first = m_mf_register.erase(first); + } else { + ++first; + } + } + } + + std::string + MultiFabRegister::mf_name ( + std::string name, + int level + ) const + { + // Add the suffix "[level=level]" + return name.append("[level=") + .append(std::to_string(level)) + .append("]"); + } + + std::string + MultiFabRegister::mf_name ( + std::string name, + Direction dir, + int level + ) const + { + // Add the suffix for the direction [x] or [y] or [z] + // note: since Cartesian is not correct for all our supported geometries, + // in the future we might want to break this to "[dir=0/1/2]". + // This will be a breaking change for (Python) users that rely on that string. 
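+        // e.g., name "Efield_fp" with dir 1 and level 0 yields "Efield_fp[y][level=0]"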
+ constexpr int x_in_ascii = 120; + std::string const component_name{char(x_in_ascii + dir.dir)}; + return mf_name( + name + .append("[") + .append(component_name) + .append("]"), + level + ); + } + + VectorField + a2m ( + std::array< std::unique_ptr, 3 > const & old_vectorfield + ) + { + std::vector const all_dirs = {Direction{0}, Direction{1}, Direction{2}}; + + VectorField field_on_level; + + // insert components + for (auto const dir : {0, 1, 2}) + { + field_on_level[Direction{dir}] = old_vectorfield[dir].get(); + } + return field_on_level; + } +} // namespace ablastr::fields diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index c36c83bc336..727280d630b 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -66,7 +66,7 @@ namespace ablastr::fields { * \param[out] max_norm_rho The maximum L-infinity norm of `rho` across all levels */ inline amrex::Real getMaxNormRho ( - amrex::Vector const & rho, + ablastr::fields::ConstMultiLevelScalarField const& rho, int finest_level, amrex::Real & absolute_tolerance) { @@ -191,25 +191,26 @@ template< typename T_FArrayBoxFactory = void > void -computePhi (amrex::Vector const & rho, - amrex::Vector & phi, - std::array const beta, - amrex::Real relative_tolerance, - amrex::Real absolute_tolerance, - int max_iters, - int verbosity, - amrex::Vector const& geom, - amrex::Vector const& dmap, - amrex::Vector const& grids, - utils::enums::GridType grid_type, - T_BoundaryHandler const boundary_handler, - bool is_solver_igf_on_lev0, - bool eb_enabled = false, - bool do_single_precision_comms = false, - std::optional > rel_ref_ratio = std::nullopt, - [[maybe_unused]] T_PostPhiCalculationFunctor post_phi_calculation = std::nullopt, - [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB - [[maybe_unused]] std::optional > eb_farray_box_factory = std::nullopt // only used for EB +computePhi ( + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi, + std::array const beta, + amrex::Real relative_tolerance, + amrex::Real absolute_tolerance, + int max_iters, + int verbosity, + amrex::Vector const& geom, + amrex::Vector const& dmap, + amrex::Vector const& grids, + utils::enums::GridType grid_type, + T_BoundaryHandler const boundary_handler, + bool is_solver_igf_on_lev0, + bool eb_enabled = false, + bool do_single_precision_comms = false, + std::optional > rel_ref_ratio = std::nullopt, + [[maybe_unused]] T_PostPhiCalculationFunctor post_phi_calculation = std::nullopt, + [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB + [[maybe_unused]] std::optional > eb_farray_box_factory = std::nullopt // only used for EB ) { using namespace amrex::literals; @@ -250,7 +251,8 @@ computePhi (amrex::Vector const & rho, #endif // determine if rho is zero everywhere - const amrex::Real max_norm_b = getMaxNormRho(rho, finest_level, absolute_tolerance); + const amrex::Real max_norm_b = getMaxNormRho( + amrex::GetVecOfConstPtrs(rho), finest_level, absolute_tolerance); amrex::LPInfo info; From e375bbecf84bbe3382444c793966ced16e278f68 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 23 Sep 2024 20:56:50 -0700 Subject: [PATCH 71/91] `FieldType`: Fix Inline Doxygen --- Source/Fields.H | 65 +++++++++++++++++++++++++------------------------ 1 file changed, 33 insertions(+), 32 deletions(-) diff --git a/Source/Fields.H b/Source/Fields.H index 00d1872a049..7b7cf58ccc3 100644 --- a/Source/Fields.H +++ 
b/Source/Fields.H @@ -1,8 +1,9 @@ -/* Copyright 2024 Luca Fedeli +/* Copyright 2024 The WarpX Community * * This file is part of WarpX. * * License: BSD-3-Clause-LBNL + * Authors: Luca Fedeli, Justin Angus, Remi Lehe, Axel Huebl */ #ifndef WARPX_FIELDS_H_ #define WARPX_FIELDS_H_ @@ -19,23 +20,23 @@ namespace warpx::fields { AMREX_ENUM(FieldType, None, - Efield_aux, // Field that the particles gather from. Obtained from Efield_fp (and Efield_cp when using MR); see UpdateAuxilaryData - Bfield_aux, // Field that the particles gather from. Obtained from Bfield_fp (and Bfield_cp when using MR); see UpdateAuxilaryData - Efield_fp, // The field that is updated by the field solver at each timestep - Bfield_fp, // The field that is updated by the field solver at each timestep - Efield_fp_external, // Stores grid particle fields provided by the user as through an openPMD file - Bfield_fp_external, // Stores grid particle fields provided by the user as through an openPMD file - current_fp, // The current that is used as a source for the field solver - current_fp_nodal, // Only used when using nodal current deposition - current_fp_vay, // Only used when using Vay current deposition - current_buf, // Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. - current_store, // Only used when doing subcycling with mesh refinement, for book-keeping of currents - rho_buf, // Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. - rho_fp, // The charge density that is used as a source for the field solver (mostly for labframe electrostatic and PSATD) - F_fp, // Used for divE cleaning - G_fp, // Used for divB cleaning - phi_fp, // Obtained by the Poisson solver, for labframe electrostatic - vector_potential_fp, // Obtained by the magnetostatic solver + Efield_aux, //!< Field that the particles gather from. Obtained from Efield_fp (and Efield_cp when using MR); see UpdateAuxilaryData + Bfield_aux, //!< Field that the particles gather from. Obtained from Bfield_fp (and Bfield_cp when using MR); see UpdateAuxilaryData + Efield_fp, //!< The field that is updated by the field solver at each timestep + Bfield_fp, //!< The field that is updated by the field solver at each timestep + Efield_fp_external, //!< Stores grid particle fields provided by the user as through an openPMD file + Bfield_fp_external, //!< Stores grid particle fields provided by the user as through an openPMD file + current_fp, //!< The current that is used as a source for the field solver + current_fp_nodal, //!< Only used when using nodal current deposition + current_fp_vay, //!< Only used when using Vay current deposition + current_buf, //!< Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. + current_store, //!< Only used when doing subcycling with mesh refinement, for book-keeping of currents + rho_buf, //!< Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. 
+ rho_fp, //!< The charge density that is used as a source for the field solver (mostly for labframe electrostatic and PSATD) + F_fp, //!< Used for divE cleaning + G_fp, //!< Used for divB cleaning + phi_fp, //!< Obtained by the Poisson solver, for labframe electrostatic + vector_potential_fp, //!< Obtained by the magnetostatic solver vector_potential_fp_nodal, vector_potential_grad_buf_e_stag, vector_potential_grad_buf_b_stag, @@ -44,19 +45,19 @@ namespace warpx::fields hybrid_current_fp_temp, hybrid_current_fp_ampere, hybrid_current_fp_external, - Efield_cp, // Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level - Bfield_cp, // Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level - current_cp, // Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level - rho_cp, // Only used with MR. The charge density that is used as a source for the field solver, on the coarse patch of each level - F_cp, // Only used with MR. Used for divE cleaning, on the coarse patch of each level - G_cp, // Only used with MR. Used for divB cleaning, on the coarse patch of each level - Efield_cax, // Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field - Bfield_cax, // Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field - E_external_particle_field, // Stores external particle fields provided by the user as through an openPMD file - B_external_particle_field, // Stores external particle fields provided by the user as through an openPMD file - distance_to_eb, // Only used with embedded boundaries (EB). Stores the distance to the nearest EB - edge_lengths, // Only used with embedded boundaries (EB). Indicates the length of the cell edge that is covered by the EB, in SI units - face_areas, // Only used with embedded boundaries (EB). Indicates the area of the cell face that is covered by the EB, in SI units + Efield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level + Bfield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level + current_cp, //!< Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level + rho_cp, //!< Only used with MR. The charge density that is used as a source for the field solver, on the coarse patch of each level + F_cp, //!< Only used with MR. Used for divE cleaning, on the coarse patch of each level + G_cp, //!< Only used with MR. Used for divB cleaning, on the coarse patch of each level + Efield_cax, //!< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field + Bfield_cax, //!< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field + E_external_particle_field, //!< Stores external particle fields provided by the user as through an openPMD file + B_external_particle_field, //!< Stores external particle fields provided by the user as through an openPMD file + distance_to_eb, //!< Only used with embedded boundaries (EB). Stores the distance to the nearest EB + edge_lengths, //!< Only used with embedded boundaries (EB). 
Indicates the length of the cell edge that is covered by the EB, in SI units + face_areas, //!< Only used with embedded boundaries (EB). Indicates the area of the cell face that is covered by the EB, in SI units area_mod, pml_E_fp, pml_B_fp, @@ -73,7 +74,7 @@ namespace warpx::fields Bfield_avg_fp, Efield_avg_cp, Bfield_avg_cp, - Bold, // Stores the value of B at the beginning of the timestep, for the implicit solver + Bold, //!< Stores the value of B at the beginning of the timestep, for the implicit solver ECTRhofield, Venl ); From 31734d36c176fbaaf716212f4dc2159dfeb55486 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 23 Sep 2024 22:18:00 -0700 Subject: [PATCH 72/91] `MultiFabRegister` Leftover Clang-Tidy (#5309) Fix leftover clang-tidy recommendations. --- Source/ablastr/fields/MultiFabRegister.H | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/Source/ablastr/fields/MultiFabRegister.H b/Source/ablastr/fields/MultiFabRegister.H index b67784aa94c..f33eed1c5a6 100644 --- a/Source/ablastr/fields/MultiFabRegister.H +++ b/Source/ablastr/fields/MultiFabRegister.H @@ -662,7 +662,7 @@ namespace ablastr::fields ) const; private: - amrex::MultiFab * + [[nodiscard]] amrex::MultiFab * internal_get ( std::string const & key ); @@ -730,54 +730,54 @@ namespace ablastr::fields int level ) const; - amrex::MultiFab * + [[nodiscard]] amrex::MultiFab * internal_get ( std::string const & name, int level ); - amrex::MultiFab const * + [[nodiscard]] amrex::MultiFab const * internal_get ( std::string const & name, int level ) const; - amrex::MultiFab * + [[nodiscard]] amrex::MultiFab * internal_get ( std::string const & name, Direction dir, int level ); - amrex::MultiFab const * + [[nodiscard]] amrex::MultiFab const * internal_get ( std::string const & name, Direction dir, int level ) const; - MultiLevelScalarField + [[nodiscard]] MultiLevelScalarField internal_get_mr_levels ( std::string const & name, int finest_level ); - ConstMultiLevelScalarField + [[nodiscard]] ConstMultiLevelScalarField internal_get_mr_levels ( std::string const & name, int finest_level ) const; - VectorField + [[nodiscard]] VectorField internal_get_alldirs ( std::string const & name, int level ); - ConstVectorField + [[nodiscard]] ConstVectorField internal_get_alldirs ( std::string const & name, int level ) const; - MultiLevelVectorField + [[nodiscard]] MultiLevelVectorField internal_get_mr_levels_alldirs ( std::string const & name, int finest_level ); - ConstMultiLevelVectorField + [[nodiscard]] ConstMultiLevelVectorField internal_get_mr_levels_alldirs ( std::string const & name, int finest_level From 0337ee120eb938f990a401fffa4c1ea16d84af50 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 24 Sep 2024 06:23:40 -0700 Subject: [PATCH 73/91] Rename `Bold` to `B_old` (#5312) For consistency of naming with other fields, this might be a better spelling of the bold name :) --- .../ImplicitSolvers/ThetaImplicitEM.cpp | 22 +++++++++---------- Source/Fields.H | 4 ++-- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp index e62ced29f6d..4cd5de4f24f 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp @@ -24,7 +24,7 @@ void ThetaImplicitEM::Define ( WarpX* const a_WarpX ) m_E.Define( m_WarpX, "Efield_fp" ); m_Eold.Define( m_E ); - // Define Bold MultiFabs + // Define B_old MultiFabs using 
ablastr::fields::Direction; const int num_levels = 1; for (int lev = 0; lev < num_levels; ++lev) { @@ -33,9 +33,9 @@ void ThetaImplicitEM::Define ( WarpX* const a_WarpX ) const auto& ba_Bz = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{2}, lev)->boxArray(); const auto& dm = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->DistributionMap(); const amrex::IntVect ngb = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->nGrowVect(); - m_WarpX->m_fields.alloc_init(FieldType::Bold, Direction{0}, lev, ba_Bx, dm, 1, ngb, 0.0_rt); - m_WarpX->m_fields.alloc_init(FieldType::Bold, Direction{1}, lev, ba_By, dm, 1, ngb, 0.0_rt); - m_WarpX->m_fields.alloc_init(FieldType::Bold, Direction{2}, lev, ba_Bz, dm, 1, ngb, 0.0_rt); + m_WarpX->m_fields.alloc_init(FieldType::B_old, Direction{0}, lev, ba_Bx, dm, 1, ngb, 0.0_rt); + m_WarpX->m_fields.alloc_init(FieldType::B_old, Direction{1}, lev, ba_By, dm, 1, ngb, 0.0_rt); + m_WarpX->m_fields.alloc_init(FieldType::B_old, Direction{2}, lev, ba_Bz, dm, 1, ngb, 0.0_rt); } // Parse theta-implicit solver specific parameters @@ -92,10 +92,10 @@ void ThetaImplicitEM::OneStep ( const amrex::Real a_time, const int num_levels = 1; for (int lev = 0; lev < num_levels; ++lev) { const ablastr::fields::VectorField Bfp = m_WarpX->m_fields.get_alldirs(FieldType::Bfield_fp, lev); - ablastr::fields::VectorField Bold = m_WarpX->m_fields.get_alldirs(FieldType::Bold, lev); + ablastr::fields::VectorField B_old = m_WarpX->m_fields.get_alldirs(FieldType::B_old, lev); for (int n = 0; n < 3; ++n) { - amrex::MultiFab::Copy( *Bold[n], *Bfp[n], 0, 0, Bold[n]->nComp(), - Bold[n]->nGrowVect() ); + amrex::MultiFab::Copy(*B_old[n], *Bfp[n], 0, 0, B_old[n]->nComp(), + B_old[n]->nGrowVect() ); } } @@ -147,8 +147,8 @@ void ThetaImplicitEM::UpdateWarpXFields ( const WarpXSolverVec& a_E, m_WarpX->SetElectricFieldAndApplyBCs( a_E ); // Update Bfield_fp owned by WarpX - ablastr::fields::MultiLevelVectorField const& Bold = m_WarpX->m_fields.get_mr_levels_alldirs(FieldType::Bold, 0); - m_WarpX->UpdateMagneticFieldAndApplyBCs( Bold, m_theta*a_dt ); + ablastr::fields::MultiLevelVectorField const& B_old = m_WarpX->m_fields.get_mr_levels_alldirs(FieldType::B_old, 0); + m_WarpX->UpdateMagneticFieldAndApplyBCs(B_old, m_theta * a_dt ); } @@ -163,7 +163,7 @@ void ThetaImplicitEM::FinishFieldUpdate ( amrex::Real a_new_time ) const amrex::Real c1 = 1._rt - c0; m_E.linComb( c0, m_E, c1, m_Eold ); m_WarpX->SetElectricFieldAndApplyBCs( m_E ); - ablastr::fields::MultiLevelVectorField const & Bold = m_WarpX->m_fields.get_mr_levels_alldirs(FieldType::Bold, 0); - m_WarpX->FinishMagneticFieldAndApplyBCs( Bold, m_theta ); + ablastr::fields::MultiLevelVectorField const & B_old = m_WarpX->m_fields.get_mr_levels_alldirs(FieldType::B_old, 0); + m_WarpX->FinishMagneticFieldAndApplyBCs(B_old, m_theta ); } diff --git a/Source/Fields.H b/Source/Fields.H index 7b7cf58ccc3..0aa3cbdd0c0 100644 --- a/Source/Fields.H +++ b/Source/Fields.H @@ -74,7 +74,7 @@ namespace warpx::fields Bfield_avg_fp, Efield_avg_cp, Bfield_avg_cp, - Bold, //!< Stores the value of B at the beginning of the timestep, for the implicit solver + B_old, //!< Stores the value of B at the beginning of the timestep, for the implicit solver ECTRhofield, Venl ); @@ -114,7 +114,7 @@ namespace warpx::fields FieldType::Bfield_avg_fp, FieldType::Efield_avg_cp, FieldType::Bfield_avg_cp, - FieldType::Bold, + FieldType::B_old, FieldType::ECTRhofield, FieldType::Venl }; From 5329e3ba9f25606187fbe1bc2d6b2f55ef3cae01 Mon Sep 17 00:00:00 2001 
From: Axel Huebl Date: Tue, 24 Sep 2024 18:52:13 -0700 Subject: [PATCH 74/91] Python: `warpx.multifab` legacy signature (#5321) * Python: `warpx.multifab` legacy signature Keep the legacy signature of the fully qualified name a bit longer to avoid breakage. Throw warnings for users to migrate. * Update Docs * Simplify Example String No f-string needed here. * Old API: Do not warn yet * Clang-Tidy --- Docs/source/usage/workflows/python_extend.rst | 7 ++-- Source/Python/WarpX.cpp | 34 ++++++++++++------- Source/ablastr/fields/MultiFabRegister.H | 13 +++++-- Source/ablastr/fields/MultiFabRegister.cpp | 22 ++++++++---- 4 files changed, 52 insertions(+), 24 deletions(-) diff --git a/Docs/source/usage/workflows/python_extend.rst b/Docs/source/usage/workflows/python_extend.rst index 47610e0d7ba..275a4dd134d 100644 --- a/Docs/source/usage/workflows/python_extend.rst +++ b/Docs/source/usage/workflows/python_extend.rst @@ -134,9 +134,12 @@ This example accesses the :math:`E_x(x,y,z)` field at level 0 after every time s warpx = sim.extension.warpx # data access - E_x_mf = warpx.multifab(f"Efield_fp[x][level=0]") + # vector field E, component x, on the fine patch of MR level 0 + E_x_mf = warpx.multifab("Efield_fp", dir=0, level=0) + # scalar field rho, on the fine patch of MR level 0 + rho_mf = warpx.multifab("rho_fp", level=0) - # compute + # compute on E_x_mf # iterate over mesh-refinement levels for lev in range(warpx.finest_level + 1): # grow (aka guard/ghost/halo) regions diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 857d23dc588..70d91445d8b 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -120,17 +120,27 @@ void init_WarpX (py::module& m) R"doc(Registry to all WarpX MultiFab (fields).)doc" ) .def("multifab", - [](WarpX & wx, std::string multifab_name, int level) { - if (wx.m_fields.has(multifab_name, level)) { - return wx.m_fields.get(multifab_name, level); + [](WarpX & wx, std::string internal_name) { + if (wx.m_fields.internal_has(internal_name)) { + return wx.m_fields.internal_get(internal_name); + } else { + throw std::runtime_error("MultiFab '" + internal_name + "' is unknown or is not allocated!"); + } + }, + py::arg("internal_name") + ) + .def("multifab", + [](WarpX & wx, std::string scalar_name, int level) { + if (wx.m_fields.has(scalar_name, level)) { + return wx.m_fields.get(scalar_name, level); } else { - throw std::runtime_error("The MultiFab '" + multifab_name + "' is unknown or is not allocated!"); + throw std::runtime_error("The scalar field '" + scalar_name + "' is unknown or is not allocated!"); } }, - py::arg("multifab_name"), + py::arg("scalar_name"), py::arg("level"), py::return_value_policy::reference_internal, - R"doc(Return MultiFabs by name and level, e.g., ``\"Efield_aux\"``, ``\"Efield_fp"``, ... + R"doc(Return scalar fields (MultiFabs) by name and level, e.g., ``\"rho_fp\"``, ``\"phi_fp"``, ... 
The physical fields in WarpX have the following naming: @@ -141,18 +151,18 @@ The physical fields in WarpX have the following naming: (only for level 1 and higher).)doc" ) .def("multifab", - [](WarpX & wx, std::string multifab_name, Direction dir, int level) { - if (wx.m_fields.has(multifab_name, dir, level)) { - return wx.m_fields.get(multifab_name, dir, level); + [](WarpX & wx, std::string vector_name, Direction dir, int level) { + if (wx.m_fields.has(vector_name, dir, level)) { + return wx.m_fields.get(vector_name, dir, level); } else { - throw std::runtime_error("The MultiFab '" + multifab_name + "' is unknown or is not allocated!"); + throw std::runtime_error("The vector field '" + vector_name + "' is unknown or is not allocated!"); } }, - py::arg("multifab_name"), + py::arg("vector_name"), py::arg("dir"), py::arg("level"), py::return_value_policy::reference_internal, - R"doc(Return MultiFabs by name, direction, and level, e.g., ``\"Efield_aux\"``, ``\"Efield_fp"``, ... + R"doc(Return the component of a vector field (MultiFab) by name, direction, and level, e.g., ``\"Efield_aux\"``, ``\"Efield_fp"``, ... The physical fields in WarpX have the following naming: diff --git a/Source/ablastr/fields/MultiFabRegister.H b/Source/ablastr/fields/MultiFabRegister.H index f33eed1c5a6..21df20c1678 100644 --- a/Source/ablastr/fields/MultiFabRegister.H +++ b/Source/ablastr/fields/MultiFabRegister.H @@ -661,14 +661,21 @@ namespace ablastr::fields int level ) const; - private: + /** Temporary test function for legacy Python bindings */ + [[nodiscard]] bool + internal_has ( + std::string const & internal_name + ); [[nodiscard]] amrex::MultiFab * internal_get ( - std::string const & key + std::string const & internal_name ); + + private: + [[nodiscard]] amrex::MultiFab const * internal_get ( - std::string const & key + std::string const & internal_name ) const; amrex::MultiFab* diff --git a/Source/ablastr/fields/MultiFabRegister.cpp b/Source/ablastr/fields/MultiFabRegister.cpp index 106a3aede79..2c384a90089 100644 --- a/Source/ablastr/fields/MultiFabRegister.cpp +++ b/Source/ablastr/fields/MultiFabRegister.cpp @@ -336,32 +336,40 @@ namespace ablastr::fields return count == 3; } + bool + MultiFabRegister::internal_has ( + std::string const & internal_name + ) + { + return m_mf_register.count(internal_name) > 0; + } + amrex::MultiFab* MultiFabRegister::internal_get ( - std::string const & key + std::string const & internal_name ) { - if (m_mf_register.count(key) == 0) { + if (m_mf_register.count(internal_name) == 0) { // FIXME: temporary, throw a std::runtime_error // throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + key); return nullptr; } - amrex::MultiFab & mf = m_mf_register.at(key).m_mf; + amrex::MultiFab & mf = m_mf_register.at(internal_name).m_mf; return &mf; } amrex::MultiFab const * MultiFabRegister::internal_get ( - std::string const & key + std::string const & internal_name ) const { - if (m_mf_register.count(key) == 0) { + if (m_mf_register.count(internal_name) == 0) { // FIXME: temporary, throw a std::runtime_error - // throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + key); + // throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + internal_name); return nullptr; } - amrex::MultiFab const & mf = m_mf_register.at(key).m_mf; + amrex::MultiFab const & mf = m_mf_register.at(internal_name).m_mf; return &mf; } From ae7aa62274f2b59a83eb02cb8034e0c58f7f45cb Mon Sep 17 00:00:00 2001 From: Revathi 
Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Tue, 24 Sep 2024 21:16:46 -0700 Subject: [PATCH 75/91] Divide rho by epsilon to undo the multiplication before computing phi (#5324) * un-multiply done * Update Source/ablastr/fields/PoissonSolver.H --------- Co-authored-by: Remi Lehe --- .../inputs_test_rz_spacecraft_charging_picmi.py | 10 +++++----- Source/ablastr/fields/PoissonSolver.H | 4 +++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py b/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py index e3bc888f600..9ce8bb8433c 100644 --- a/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py +++ b/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py @@ -121,13 +121,13 @@ def compute_virtual_charge_on_spacecraft(): # Compute integral of rho over volume of the domain # (i.e. total charge of the plasma particles) rho_integral = ( - (rho[1 : nr - 1, 1 : nz - 1] * r[1 : nr - 1, np.newaxis]).sum() * dr * dz + (rho[1 : nr - 1, 1 : nz - 1] * r[1 : nr - 1, np.newaxis]).sum() + * 2 + * np.pi + * dr + * dz ) - # Due to an oddity in WarpX (which will probably be solved later) - # we need to multiply `rho` by `-epsilon_0` to get the correct charge - rho_integral *= 2 * np.pi * -scc.epsilon_0 # does this oddity still exist? - # Compute charge of the spacecraft, based on Gauss theorem q_spacecraft = -rho_integral - scc.epsilon_0 * grad_phi_integral print("Virtual charge on the spacecraft: %e" % q_spacecraft) diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index 727280d630b..ad825fdbe6b 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -275,7 +275,7 @@ computePhi ( #endif // Use the Multigrid (MLMG) solver if selected or on refined patches // but first scale rho appropriately - rho[lev]->mult(-1._rt / ablastr::constant::SI::ep0); // TODO: when do we "un-multiply" this? We need to document this side-effect! + rho[lev]->mult(-1._rt / ablastr::constant::SI::ep0); #ifdef WARPX_DIM_RZ constexpr bool is_rz = true; @@ -409,6 +409,8 @@ computePhi ( post_phi_calculation.value()(mlmg, lev); } } + rho[lev]->mult(-ablastr::constant::SI::ep0); // Multiply rho by epsilon again + } // loop over lev(els) } // computePhi } // namespace ablastr::fields From d889ac8da301e40d2d4444211d0e40fa12b4f776 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 24 Sep 2024 21:59:13 -0700 Subject: [PATCH 76/91] Fix Legacy Python MF API (#5325) Keep ownership on the C++ side. --- Source/Python/WarpX.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 70d91445d8b..0aab95f78f8 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -127,7 +127,15 @@ void init_WarpX (py::module& m) throw std::runtime_error("MultiFab '" + internal_name + "' is unknown or is not allocated!"); } }, - py::arg("internal_name") + py::arg("internal_name"), + py::return_value_policy::reference_internal, + R"doc(Return a MultiFab by its internal name (deprecated). + +The multifab('internal_name') signature is deprecated. +Please use: +- multifab('prefix', level=...) for scalar fields +- multifab('prefix', dir=..., level=...) 
for vector field components +where 'prefix' is the part of 'internal_name' before the [])doc" ) .def("multifab", [](WarpX & wx, std::string scalar_name, int level) { From 30ac54887d2fc01a8b4f853fbc1f1dfda9d98fd6 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 25 Sep 2024 01:04:04 -0700 Subject: [PATCH 77/91] PoissonSolver: Missing Include (#5327) Fix the missing include for `MultiLevelRegister`, which includes the MultiLevel MF types. --- Source/ablastr/fields/PoissonSolver.H | 1 + 1 file changed, 1 insertion(+) diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index ad825fdbe6b..e6eaec4f4ad 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -14,6 +14,7 @@ #include #include #include +#include #include #if defined(ABLASTR_USE_FFT) && defined(WARPX_DIM_3D) From 3e76edc05ffcb86092831c1cc81c2e0e015a9971 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 25 Sep 2024 06:02:06 -0700 Subject: [PATCH 78/91] PoissonSolver: Missing Include (#5327) (#5328) Fix the missing include for `MultiLevelRegister`, which includes the MultiLevel MF types. --- Source/ablastr/fields/PoissonSolver.H | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index e6eaec4f4ad..8b4f9cea9a1 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -54,6 +54,7 @@ #include #include +#include namespace ablastr::fields { @@ -311,13 +312,17 @@ computePhi ( auto linop_nodelap = std::make_unique(); if (eb_enabled) { #if defined(AMREX_USE_EB) - linop_nodelap->define( - amrex::Vector{geom[lev]}, - amrex::Vector{grids[lev]}, - amrex::Vector{dmap[lev]}, - info, - amrex::Vector{eb_farray_box_factory.value()[lev]} - ); + if constexpr(std::is_same_v) { + throw std::runtime_error("EB requested by eb_farray_box_factory not provided!"); + } else { + linop_nodelap->define( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info, + amrex::Vector{eb_farray_box_factory.value()[lev]} + ); + } #endif } else { From e3e6ab8690e9222f0ad84ebb21a92c85874c4775 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Wed, 25 Sep 2024 13:49:54 -0700 Subject: [PATCH 79/91] =?UTF-8?q?Fixed=20a=20bug=20where=20centering=20coe?= =?UTF-8?q?fficients=20used=20in=20Magnetostatic=20solver=E2=80=A6=20(#528?= =?UTF-8?q?9)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fixed a bug where centering coefficients used in Magnetostatic solver were not being initialized when doing an energy-conserving field gather. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Pointer type passed into the magnetostatic solver for current is no longer smart, so removed the call to get() to return the raw pointer. Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --------- Signed-off-by: S.
Eric Clark <25495882+clarkse@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../MagnetostaticSolver/MagnetostaticSolver.cpp | 15 +++++++-------- Source/WarpX.cpp | 5 +++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp index 84efe8bf45a..5c28ff1f3c7 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp @@ -170,15 +170,14 @@ WarpX::computeVectorPotential (ablastr::fields::MultiLevelVectorField const& cur amrex::Vector> sorted_curr; amrex::Vector> sorted_A; for (int lev = 0; lev <= finest_level; ++lev) { - sorted_curr.emplace_back(amrex::Array ({curr[lev][Direction{0}], - curr[lev][Direction{1}], - curr[lev][Direction{2}]})); - sorted_A.emplace_back(amrex::Array ({A[lev][Direction{0}], - A[lev][Direction{1}], - A[lev][Direction{2}]})); + sorted_curr.emplace_back(amrex::Array ({curr[lev][0], + curr[lev][1], + curr[lev][2]})); + sorted_A.emplace_back(amrex::Array ({A[lev][0], + A[lev][1], + A[lev][2]})); } -#if defined(AMREX_USE_EB) const ablastr::fields::MultiLevelVectorField Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level); const std::optional post_A_calculation( { @@ -187,13 +186,13 @@ WarpX::computeVectorPotential (ablastr::fields::MultiLevelVectorField const& cur m_fields.get_mr_levels_alldirs(FieldType::vector_potential_grad_buf_b_stag, finest_level) }); +#if defined(AMREX_USE_EB) amrex::Vector factories; for (int lev = 0; lev <= finest_level; ++lev) { factories.push_back(&WarpX::fieldEBFactory(lev)); } const std::optional > eb_farray_box_factory({factories}); #else - const std::optional post_A_calculation; const std::optional > eb_farray_box_factory; #endif diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 60374133a52..7f9288debb7 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -1388,8 +1388,9 @@ WarpX::ReadParameters () // Instead, if warpx.grid_type=collocated, the momentum-conserving and // energy conserving field gathering algorithms are equivalent (forces // gathered from the collocated grid) and no fields centering occurs. 
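// Sketch of the reasoning behind the amended condition below (paraphrased
// from this fix, #5289; not verbatim source): the centering coefficients
// must now be read from the input deck in two situations, namely
//   1. a momentum-conserving field gather on a staggered (non-collocated)
//      grid, as before, and
//   2. any run using the lab-frame electro-magnetostatic solver, which also
//      consumes these coefficients even with the energy-conserving gather.
// Schematically:
//   read_centering_coefficients =
//       (field_gathering_algo == GatheringAlgo::MomentumConserving
//        && grid_type != GridType::Collocated)
//       || electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic;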
- if (WarpX::field_gathering_algo == GatheringAlgo::MomentumConserving && - WarpX::grid_type != GridType::Collocated) + if ((WarpX::field_gathering_algo == GatheringAlgo::MomentumConserving + && WarpX::grid_type != GridType::Collocated) + || WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) { utils::parser::queryWithParser( pp_warpx, "field_centering_nox", field_centering_nox); From d9935f48ee5773693d1e41f11ba0dc992fec0220 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 25 Sep 2024 22:45:56 -0700 Subject: [PATCH 80/91] `MultiFabRegister`: use `has_vector` when possible (#5334) --- Source/BoundaryConditions/PML.cpp | 8 ++++---- Source/Evolve/WarpXEvolve.cpp | 2 +- Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp | 8 ++++---- Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp | 8 ++++---- .../FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp | 2 +- Source/Parallelization/WarpXComm.cpp | 8 ++++---- Source/Particles/LaserParticleContainer.cpp | 2 +- Source/Particles/PhysicalParticleContainer.cpp | 10 +++++----- 8 files changed, 24 insertions(+), 24 deletions(-) diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index 91d821d6646..f45ca222e69 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -1234,7 +1234,7 @@ PML::CheckPoint ( { using ablastr::fields::Direction; - if (fields.has(FieldType::pml_E_fp, Direction{0}, 0)) + if (fields.has_vector(FieldType::pml_E_fp, 0)) { ablastr::fields::VectorField pml_E_fp = fields.get_alldirs(FieldType::pml_E_fp, 0); ablastr::fields::VectorField pml_B_fp = fields.get_alldirs(FieldType::pml_B_fp, 0); @@ -1246,7 +1246,7 @@ PML::CheckPoint ( VisMF::AsyncWrite(*pml_B_fp[2], dir+"_Bz_fp"); } - if (fields.has(FieldType::pml_E_cp, Direction{0}, 0)) + if (fields.has_vector(FieldType::pml_E_cp, 0)) { ablastr::fields::VectorField pml_E_cp = fields.get_alldirs(FieldType::pml_E_cp, 0); ablastr::fields::VectorField pml_B_cp = fields.get_alldirs(FieldType::pml_B_cp, 0); @@ -1267,7 +1267,7 @@ PML::Restart ( { using ablastr::fields::Direction; - if (fields.has(FieldType::pml_E_fp, Direction{0}, 0)) + if (fields.has_vector(FieldType::pml_E_fp, 0)) { ablastr::fields::VectorField pml_E_fp = fields.get_alldirs(FieldType::pml_E_fp, 0); ablastr::fields::VectorField pml_B_fp = fields.get_alldirs(FieldType::pml_B_fp, 0); @@ -1279,7 +1279,7 @@ PML::Restart ( VisMF::Read(*pml_B_fp[2], dir+"_Bz_fp"); } - if (fields.has(FieldType::pml_E_cp, Direction{0}, 0)) + if (fields.has_vector(FieldType::pml_E_cp, 0)) { ablastr::fields::VectorField pml_E_cp = fields.get_alldirs(FieldType::pml_E_cp, 0); ablastr::fields::VectorField pml_B_cp = fields.get_alldirs(FieldType::pml_B_cp, 0); diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 93d265d598f..a685afd28e7 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -1147,7 +1147,7 @@ WarpX::PushParticlesandDeposit (int lev, amrex::Real cur_time, DtType a_dt_type, m_fields.get(FieldType::current_fp, Direction{1}, lev), m_fields.get(FieldType::current_fp, Direction{2}, lev), lev); - if (m_fields.has(FieldType::current_buf, Direction{0}, lev)) { + if (m_fields.has_vector(FieldType::current_buf, lev)) { ApplyInverseVolumeScalingToCurrentDensity( m_fields.get(FieldType::current_buf, Direction{0}, lev), m_fields.get(FieldType::current_buf, Direction{1}, lev), diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp 
b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp index 63b51cb8416..c6a1e206200 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp @@ -79,19 +79,19 @@ void FiniteDifferenceSolver::EvolveB ( fields.get(FieldType::G_fp, lev) : fields.get(FieldType::G_cp, lev); } ablastr::fields::VectorField face_areas; - if (fields.has(FieldType::face_areas, Direction{0}, lev)) { + if (fields.has_vector(FieldType::face_areas, lev)) { face_areas = fields.get_alldirs(FieldType::face_areas, lev); } ablastr::fields::VectorField area_mod; - if (fields.has(FieldType::area_mod, Direction{0}, lev)) { + if (fields.has_vector(FieldType::area_mod, lev)) { area_mod = fields.get_alldirs(FieldType::area_mod, lev); } ablastr::fields::VectorField ECTRhofield; - if (fields.has(FieldType::ECTRhofield, Direction{0}, lev)) { + if (fields.has_vector(FieldType::ECTRhofield, lev)) { ECTRhofield = fields.get_alldirs(FieldType::ECTRhofield, lev); } ablastr::fields::VectorField Venl; - if (fields.has(FieldType::Venl, Direction{0}, lev)) { + if (fields.has_vector(FieldType::Venl, lev)) { Venl = fields.get_alldirs(FieldType::Venl, lev); } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp index db8e80cc972..03a9866fb98 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp @@ -73,19 +73,19 @@ void FiniteDifferenceSolver::EvolveE ( } ablastr::fields::VectorField edge_lengths; - if (fields.has(FieldType::edge_lengths, Direction{0}, lev)) { + if (fields.has_vector(FieldType::edge_lengths, lev)) { edge_lengths = fields.get_alldirs(FieldType::edge_lengths, lev); } ablastr::fields::VectorField face_areas; - if (fields.has(FieldType::face_areas, Direction{0}, lev)) { + if (fields.has_vector(FieldType::face_areas, lev)) { face_areas = fields.get_alldirs(FieldType::face_areas, lev); } ablastr::fields::VectorField area_mod; - if (fields.has(FieldType::area_mod, Direction{0}, lev)) { + if (fields.has_vector(FieldType::area_mod, lev)) { area_mod = fields.get_alldirs(FieldType::area_mod, lev); } ablastr::fields::VectorField ECTRhofield; - if (fields.has(FieldType::ECTRhofield, Direction{0}, lev)) { + if (fields.has_vector(FieldType::ECTRhofield, lev)) { ECTRhofield = fields.get_alldirs(FieldType::ECTRhofield, lev); } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp index 9ecae05516d..7a1a05d560d 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp @@ -69,7 +69,7 @@ void FiniteDifferenceSolver::EvolveEPML ( const ablastr::fields::VectorField Jfield = (patch_type == PatchType::fine) ? 
fields.get_alldirs(FieldType::pml_j_fp, level) : fields.get_alldirs(FieldType::pml_j_cp, level); ablastr::fields::VectorField edge_lengths; - if (fields.has(FieldType::pml_edge_lengths, Direction{0}, level)) { + if (fields.has_vector(FieldType::pml_edge_lengths, level)) { edge_lengths = fields.get_alldirs(FieldType::pml_edge_lengths, level); } amrex::MultiFab * Ffield = nullptr; diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index ac797d1e706..d64632d964a 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ b/Source/Parallelization/WarpXComm.cpp @@ -196,7 +196,7 @@ WarpX::UpdateAuxilaryDataStagToNodal () { if (electromagnetic_solver_id != ElectromagneticSolverAlgo::None) { Array,3> Btmp; - if (m_fields.has(FieldType::Bfield_cax, Direction{0}, lev)) { + if (m_fields.has_vector(FieldType::Bfield_cax, lev)) { for (int i = 0; i < 3; ++i) { Btmp[i] = std::make_unique( *m_fields.get(FieldType::Bfield_cax, Direction{i}, lev), amrex::make_alias, 0, 1); @@ -290,7 +290,7 @@ WarpX::UpdateAuxilaryDataStagToNodal () { if (electromagnetic_solver_id != ElectromagneticSolverAlgo::None) { Array,3> Etmp; - if (m_fields.has(FieldType::Efield_cax, Direction{0}, lev)) { + if (m_fields.has_vector(FieldType::Efield_cax, lev)) { for (int i = 0; i < 3; ++i) { Etmp[i] = std::make_unique( *m_fields.get(FieldType::Efield_cax, Direction{i}, lev), amrex::make_alias, 0, 1); @@ -450,7 +450,7 @@ WarpX::UpdateAuxilaryDataSameType () Bfield_aux[lev - 1][2]->nComp(), ng_src, ng, WarpX::do_single_precision_comms, crse_period); - if (m_fields.has(FieldType::Bfield_cax, Direction{0}, lev)) + if (m_fields.has_vector(FieldType::Bfield_cax, lev)) { MultiFab::Copy(*m_fields.get(FieldType::Bfield_cax, Direction{0}, lev), dBx, 0, 0, m_fields.get(FieldType::Bfield_cax, Direction{0}, lev)->nComp(), ng); MultiFab::Copy(*m_fields.get(FieldType::Bfield_cax, Direction{1}, lev), dBy, 0, 0, m_fields.get(FieldType::Bfield_cax, Direction{1}, lev)->nComp(), ng); @@ -535,7 +535,7 @@ WarpX::UpdateAuxilaryDataSameType () WarpX::do_single_precision_comms, crse_period); - if (m_fields.has(FieldType::Efield_cax, Direction{0}, lev)) + if (m_fields.has_vector(FieldType::Efield_cax, lev)) { MultiFab::Copy(*m_fields.get(FieldType::Efield_cax, Direction{0}, lev), dEx, 0, 0, m_fields.get(FieldType::Efield_cax, Direction{0}, lev)->nComp(), ng); MultiFab::Copy(*m_fields.get(FieldType::Efield_cax, Direction{1}, lev), dEy, 0, 0, m_fields.get(FieldType::Efield_cax, Direction{1}, lev)->nComp(), ng); diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp index 10849a0c0d5..c804bb12797 100644 --- a/Source/Particles/LaserParticleContainer.cpp +++ b/Source/Particles/LaserParticleContainer.cpp @@ -586,7 +586,7 @@ LaserParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, amrex::LayoutData* cost = WarpX::getCosts(lev); const bool has_rho = fields.has(FieldType::rho_fp, lev); - const bool has_buffer = fields.has(FieldType::current_buf, lev); + const bool has_buffer = fields.has_vector(FieldType::current_buf, lev); #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 07997a61f0c..26f9fee38d3 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -1753,9 +1753,9 @@ PhysicalParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, const iMultiFab* 
gather_masks = WarpX::GatherBufferMasks(lev); const bool has_rho = fields.has(FieldType::rho_fp, lev); - const bool has_cjx = fields.has(FieldType::current_buf, Direction{0}, lev); - const bool has_cEx = fields.has(FieldType::Efield_cax, Direction{0}, lev); - const bool has_buffer = has_cEx || has_cjx; + const bool has_J_buf = fields.has_vector(FieldType::current_buf, lev); + const bool has_E_cax = fields.has_vector(FieldType::Efield_cax, lev); + const bool has_buffer = has_E_cax || has_J_buf; amrex::MultiFab & Ex = *fields.get(FieldType::Efield_aux, Direction{0}, lev); amrex::MultiFab & Ey = *fields.get(FieldType::Efield_aux, Direction{1}, lev); @@ -1850,7 +1850,7 @@ PhysicalParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, pti, lev, current_masks, gather_masks ); } - const long np_current = has_cjx ? nfine_current : np; + const long np_current = has_J_buf ? nfine_current : np; if (has_rho && ! skip_deposition && ! do_not_deposit) { // Deposit charge before particle push, in component 0 of MultiFab rho. @@ -1870,7 +1870,7 @@ PhysicalParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, if (! do_not_push) { - const long np_gather = has_cEx ? nfine_gather : np; + const long np_gather = has_E_cax ? nfine_gather : np; int e_is_nodal = Ex.is_nodal() and Ey.is_nodal() and Ez.is_nodal(); From 284287d29e9865070a9de7e966f32ccd06e86e75 Mon Sep 17 00:00:00 2001 From: David Grote Date: Thu, 26 Sep 2024 02:35:17 -0700 Subject: [PATCH 81/91] Remove m_cell_centered_data from multifab map (#5322) * Remove m_cell_centered_data from multifab map * Register cell_centered_data MultiFab * Cleaning & Member Variable Convention * Shorten: fields as variable Prepares to remove `WarpX` class altogether from here. --------- Co-authored-by: Axel Huebl --- Source/Diagnostics/BTDiagnostics.H | 4 +- Source/Diagnostics/BTDiagnostics.cpp | 75 ++++++++++++++++------------ 2 files changed, 45 insertions(+), 34 deletions(-) diff --git a/Source/Diagnostics/BTDiagnostics.H b/Source/Diagnostics/BTDiagnostics.H index d5dd67226b7..d11db98276b 100644 --- a/Source/Diagnostics/BTDiagnostics.H +++ b/Source/Diagnostics/BTDiagnostics.H @@ -241,7 +241,7 @@ private: * will be used by all snapshots to obtain lab-frame data at the respective * z slice location. */ - amrex::Vector > m_cell_centered_data; + std::string const m_cell_centered_data_name; /** Vector of pointers to compute cell-centered data, per level, per component * using the coarsening-ratio provided by the user. */ @@ -346,7 +346,7 @@ private: * \param[in] i_buffer snapshot index */ void SetSnapshotFullStatus (int i_buffer); - /** Vector of field-data stored in the cell-centered multifab, m_cell_centered_data. + /** Vector of field-data stored in the cell-centered MultiFab. * All the fields are stored regardless of the specific fields to plot selected * by the user. 
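* (These names index the components of the single per-level cell-centered
* MultiFab which, as of this change, is no longer a member of BTDiagnostics:
* it is allocated in the MultiFabRegister under m_cell_centered_data_name
* and fetched on demand; see DefineCellCenteredMultiFab in BTDiagnostics.cpp.)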
*/ diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index e00c30aa78e..631de298861 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -56,7 +56,8 @@ namespace } BTDiagnostics::BTDiagnostics (int i, const std::string& name) - : Diagnostics{i, name} + : Diagnostics{i, name}, + m_cell_centered_data_name("BTD_cell_centered_data_" + name) { ReadParameters(); } @@ -83,7 +84,6 @@ void BTDiagnostics::DerivedInitData () m_old_z_boost.resize(m_num_buffers); m_buffer_counter.resize(m_num_buffers); m_snapshot_ncells_lab.resize(m_num_buffers); - m_cell_centered_data.resize(nmax_lev); m_cell_center_functors.resize(nmax_lev); m_max_buffer_multifabs.resize(m_num_buffers); m_buffer_flush_counter.resize(m_num_buffers); @@ -519,7 +519,10 @@ BTDiagnostics::DefineCellCenteredMultiFab(int lev) #else const int ncomps = static_cast(m_cellcenter_varnames.size()); #endif - WarpX::AllocInitMultiFab(m_cell_centered_data[lev], ba, dmap, ncomps, amrex::IntVect(ngrow), lev, "cellcentered_BTD", 0._rt); + bool const remake = false; + bool const redistribute_on_remake = false; + warpx.m_fields.alloc_init(m_cell_centered_data_name, lev, ba, dmap, ncomps, amrex::IntVect(ngrow), 0.0_rt, + remake, redistribute_on_remake); } @@ -540,12 +543,14 @@ BTDiagnostics::InitializeFieldFunctors (int lev) #else auto & warpx = WarpX::GetInstance(); + auto & fields = warpx.m_fields; + // Clear any pre-existing vector to release stored data // This ensures that when domain is load-balanced, the functors point // to the correct field-data pointers m_all_field_functors[lev].clear(); // For back-transformed data, all the components are cell-centered and stored - // in a single multifab, m_cell_centered_data. + // in a single multifab. // Therefore, size of functors at all levels is 1. 
const int num_BT_functors = 1; m_all_field_functors[lev].resize(num_BT_functors); @@ -554,11 +559,11 @@ BTDiagnostics::InitializeFieldFunctors (int lev) // Create an object of class BackTransformFunctor for (int i = 0; i < num_BT_functors; ++i) { - // coarsening ratio is not provided since the source MultiFab, m_cell_centered_data + // coarsening ratio is not provided since the source MultiFab // is coarsened based on the user-defined m_crse_ratio const int nvars = static_cast(m_varnames.size()); m_all_field_functors[lev][i] = std::make_unique( - m_cell_centered_data[lev].get(), lev, + fields.get(m_cell_centered_data_name, lev), lev, nvars, m_num_buffers, m_varnames, m_varnames_fields); } @@ -570,23 +575,23 @@ BTDiagnostics::InitializeFieldFunctors (int lev) m_cell_center_functors.at(lev).size()); for (int comp=0; comp(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "Ey" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "Ez" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "Bx" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "By" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "Bz" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "jx" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::current_fp,Direction{0}, lev), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::current_fp,Direction{0}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "jy" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::current_fp,Direction{1}, lev), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::current_fp,Direction{1}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "jz" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::current_fp,Direction{2}, lev), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::current_fp,Direction{2}, lev), 
lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "rho" ){ m_cell_center_functors[lev][comp] = std::make_unique(lev, m_crse_ratio); } @@ -601,8 +606,9 @@ BTDiagnostics::UpdateVarnamesForRZopenPMD () { #ifdef WARPX_DIM_RZ auto & warpx = WarpX::GetInstance(); + auto & fields = warpx.m_fields; using ablastr::fields::Direction; - const int ncomp_multimodefab = warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp(); + const int ncomp_multimodefab = fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp(); const int ncomp = ncomp_multimodefab; @@ -663,21 +669,22 @@ BTDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) using ablastr::fields::Direction; auto & warpx = WarpX::GetInstance(); - const int ncomp_multimodefab = warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp(); + auto & fields = warpx.m_fields; + const int ncomp_multimodefab = fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp(); const int ncomp = ncomp_multimodefab; // Clear any pre-existing vector to release stored data // This ensures that when domain is load-balanced, the functors point // to the correct field-data pointers m_all_field_functors[lev].clear(); // For back-transformed data, all the components are cell-centered and stored - // in a single multifab, m_cell_centered_data. + // in a single MultiFab. // Therefore, size of functors at all levels is 1 const int num_BT_functors = 1; m_all_field_functors[lev].resize(num_BT_functors); for (int i = 0; i < num_BT_functors; ++i) { const int nvars = static_cast(m_varnames.size()); m_all_field_functors[lev][i] = std::make_unique( - m_cell_centered_data[lev].get(), lev, + fields.get(m_cell_centered_data_name, lev), lev, nvars, m_num_buffers, m_varnames, m_varnames_fields); } @@ -689,23 +696,23 @@ BTDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) const auto m_cell_center_functors_at_lev_size = static_cast(m_cell_center_functors.at(lev).size()); for (int comp=0; comp(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Et" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Ez" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Br" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Bt" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = 
std::make_unique(fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Bz" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "jr" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::current_fp, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::current_fp, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "jt" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::current_fp, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::current_fp, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "jz" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::current_fp, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::current_fp, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "rho" ){ m_cell_center_functors[lev][comp] = std::make_unique(lev, m_crse_ratio, false, -1, false, ncomp); } @@ -795,6 +802,8 @@ BTDiagnostics::PrepareFieldDataForOutput () if (!m_do_back_transformed_fields) { return; } auto & warpx = WarpX::GetInstance(); + auto & fields = warpx.m_fields; + // In this function, we will get cell-centered data for every level, lev, // using the cell-center functors and their respective operators() // Call m_cell_center_functors->operator @@ -804,21 +813,23 @@ BTDiagnostics::PrepareFieldDataForOutput () for (int icomp = 0; icompoperator()(*m_cell_centered_data[lev], icomp_dst); + // stores it in cell-centered MultiFab. + m_cell_center_functors[lev][icomp]->operator()(*fields.get(m_cell_centered_data_name, lev), icomp_dst); icomp_dst += m_cell_center_functors[lev][icomp]->nComp(); } // Check that the proper number of user-requested components are cell-centered AMREX_ALWAYS_ASSERT( icomp_dst == m_cellcenter_varnames.size() ); // fill boundary call is required to average_down (flatten) data to // the coarsest level. - ablastr::utils::communication::FillBoundary(*m_cell_centered_data[lev], WarpX::do_single_precision_comms, + ablastr::utils::communication::FillBoundary(*fields.get(m_cell_centered_data_name, lev), + WarpX::do_single_precision_comms, warpx.Geom(lev).periodicity()); } // Flattening out MF over levels for (int lev = warpx.finestLevel(); lev > 0; --lev) { - ablastr::coarsen::sample::Coarsen(*m_cell_centered_data[lev - 1], *m_cell_centered_data[lev], 0, 0, + ablastr::coarsen::sample::Coarsen(*fields.get(m_cell_centered_data_name, lev - 1), + *fields.get(m_cell_centered_data_name, lev), 0, 0, static_cast(m_cellcenter_varnames.size()), 0, WarpX::RefRatio(lev-1) ); } From 192e1675d1843d0a01a1c5447cdd64aac1542be3 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 26 Sep 2024 11:29:21 -0700 Subject: [PATCH 82/91] `FieldType`: Ensure Doxygen Works (#5330) * `FieldType`: Top Doxygen Comment Add a general top level comment. 
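As context for the next bullet: a simplified sketch of the expansion that the `PREDEFINED` entry added to the Doxyfile (see the hunk below) declares for Doxygen, so that the per-enumerator doc comments survive preprocessing. The real amrex macro also generates string-conversion helpers, which this sketch omits (an assumption worth checking against amrex itself).

    // Expansion Doxygen is told to use for AMREX_ENUM (simplified):
    #define AMREX_ENUM(CLASS, ...) enum class CLASS : int { __VA_ARGS__ };

    // An excerpt of the FieldType register as Doxygen then effectively sees it:
    enum class FieldType : int
    {
        None,
        Efield_aux, /**< Field that the particles gather from */
        Efield_fp   /**< Field updated by the field solver at each timestep */
    };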
* Doxygen: Expand `AMREX_ENUM` * `FieldType`: Doxygen Oneline Use a comment form that will still work after it gets pasted and squashed into a single line after macro expansion. * Doc: Include in Sphinx * RTD: Doxygen 1.9.1 to latest --- .readthedocs.yml | 13 +++-- Docs/Doxyfile | 8 +-- Docs/README.md | 5 +- Docs/conda.yml | 12 +++++ Docs/requirements.txt | 2 +- Docs/source/developers/documentation.rst | 7 ++- Docs/source/developers/fields.rst | 7 +++ Source/Fields.H | 67 +++++++++++++----------- 8 files changed, 75 insertions(+), 46 deletions(-) create mode 100644 Docs/conda.yml diff --git a/.readthedocs.yml b/.readthedocs.yml index 3da9bc77140..95f86fe4ff2 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -9,14 +9,17 @@ version: 2 build: os: ubuntu-22.04 tools: - python: "3.11" + python: "mambaforge-latest" + # python: "3.11" sphinx: - configuration: Docs/source/conf.py + configuration: Docs/source/conf.py -python: - install: - - requirements: Docs/requirements.txt +conda: + environment: Docs/conda.yml +# python: +# install: +# - requirements: Docs/requirements.txt formats: - htmlzip diff --git a/Docs/Doxyfile b/Docs/Doxyfile index 5fbb7651b18..f7740bc0328 100644 --- a/Docs/Doxyfile +++ b/Docs/Doxyfile @@ -2245,7 +2245,7 @@ ENABLE_PREPROCESSING = YES # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -MACRO_EXPANSION = NO +MACRO_EXPANSION = YES # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then # the macro expansion is limited to the macros specified with the PREDEFINED and @@ -2253,7 +2253,7 @@ MACRO_EXPANSION = NO # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -EXPAND_ONLY_PREDEF = NO +EXPAND_ONLY_PREDEF = YES # If the SEARCH_INCLUDES tag is set to YES, the include files in the # INCLUDE_PATH will be searched if a #include is found. @@ -2305,6 +2305,8 @@ PREDEFINED = AMREX_Linux=1 \ WARPX_QED=1 \ WARPX_QED_TABLE_GEN=1 +PREDEFINED += "AMREX_ENUM(CLASS,...)=\"enum class CLASS : int { __VA_ARGS__ };\"" + # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. The # macro definition that is found in the sources will be used. Use the PREDEFINED @@ -2312,7 +2314,7 @@ PREDEFINED = AMREX_Linux=1 \ # definition found in the source code. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -EXPAND_AS_DEFINED = +EXPAND_AS_DEFINED = AMREX_ENUM # If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will # remove all references to function-like macros that are alone on a line, have diff --git a/Docs/README.md b/Docs/README.md index e6fac921b04..6d3903ab327 100644 --- a/Docs/README.md +++ b/Docs/README.md @@ -9,12 +9,13 @@ More information can be found in Docs/source/developers/documentation.rst. 
Install the Python requirements for compiling the documentation: ``` -python3 -m pip install -r Docs/requirements.txt +cd Docs/ +python3 -m pip install -r requirements.txt ``` ### Compiling the documentation -`cd` into the `Docs/` directory and type +Still in the `Docs/` directory, type ``` make html ``` diff --git a/Docs/conda.yml b/Docs/conda.yml new file mode 100644 index 00000000000..1e23c203b2b --- /dev/null +++ b/Docs/conda.yml @@ -0,0 +1,12 @@ +name: readthedocs + +channels: + - conda-forge + - nodefaults + +dependencies: + - python + - doxygen + - pip + - pip: + - -r requirements.txt diff --git a/Docs/requirements.txt b/Docs/requirements.txt index a8c2af0e474..bc34e69cd65 100644 --- a/Docs/requirements.txt +++ b/Docs/requirements.txt @@ -5,7 +5,7 @@ # License: BSD-3-Clause-LBNL # WarpX PICMI bindings w/o C++ component (used for autoclass docs) --e Python +-e ../Python breathe docutils>=0.17.1 diff --git a/Docs/source/developers/documentation.rst b/Docs/source/developers/documentation.rst index a5013299336..5d604bcf9b3 100644 --- a/Docs/source/developers/documentation.rst +++ b/Docs/source/developers/documentation.rst @@ -56,16 +56,15 @@ First, make sure you are in the root directory of WarpX's source and install the .. code-block:: sh - python3 -m pip install -r Docs/requirements.txt + cd Docs/ + python3 -m pip install -r requirements.txt You will also need Doxygen (macOS: ``brew install doxygen``; Ubuntu: ``sudo apt install doxygen``). -Then, to compile the documentation, use +Still in the ``Docs/`` directory, compile the documentation via .. code-block:: sh - cd Docs/ - make html # This will first compile the Doxygen documentation (execute doxygen) # and then build html pages from rst files using sphinx and breathe. diff --git a/Docs/source/developers/fields.rst b/Docs/source/developers/fields.rst index d0af160afef..9d980119814 100644 --- a/Docs/source/developers/fields.rst +++ b/Docs/source/developers/fields.rst @@ -37,6 +37,13 @@ The ``MultiFab`` constructor (for, e.g., ``Ex`` on level ``lev``) is called in ` By default, the ``MultiFab`` are set to ``0`` at initialization. They can be assigned a different value in ``WarpX::InitLevelData``. +Field Names +----------- + +The commonly used WarpX field names are defined in: + +.. doxygenenum:: warpx::fields::FieldType + Field solver ------------ diff --git a/Source/Fields.H b/Source/Fields.H index 0aa3cbdd0c0..f85b6c4584c 100644 --- a/Source/Fields.H +++ b/Source/Fields.H @@ -18,25 +18,30 @@ namespace warpx::fields { + /** Unique identifiers for WarpX scalar and vector fields. + * + * These are implemented as amrex::MultiFab (one or one per component "direction", + * respectively) and stored in the ablastr::fields::MultiFabRegister . + */ AMREX_ENUM(FieldType, None, - Efield_aux, //!< Field that the particles gather from. Obtained from Efield_fp (and Efield_cp when using MR); see UpdateAuxilaryData - Bfield_aux, //!< Field that the particles gather from. 
Obtained from Bfield_fp (and Bfield_cp when using MR); see UpdateAuxilaryData - Efield_fp, //!< The field that is updated by the field solver at each timestep - Bfield_fp, //!< The field that is updated by the field solver at each timestep - Efield_fp_external, //!< Stores grid particle fields provided by the user as through an openPMD file - Bfield_fp_external, //!< Stores grid particle fields provided by the user as through an openPMD file - current_fp, //!< The current that is used as a source for the field solver - current_fp_nodal, //!< Only used when using nodal current deposition - current_fp_vay, //!< Only used when using Vay current deposition - current_buf, //!< Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. - current_store, //!< Only used when doing subcycling with mesh refinement, for book-keeping of currents - rho_buf, //!< Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. - rho_fp, //!< The charge density that is used as a source for the field solver (mostly for labframe electrostatic and PSATD) - F_fp, //!< Used for divE cleaning - G_fp, //!< Used for divB cleaning - phi_fp, //!< Obtained by the Poisson solver, for labframe electrostatic - vector_potential_fp, //!< Obtained by the magnetostatic solver + Efield_aux, /**< Field that the particles gather from. Obtained from Efield_fp (and Efield_cp when using MR); see UpdateAuxilaryData */ + Bfield_aux, /**< Field that the particles gather from. Obtained from Bfield_fp (and Bfield_cp when using MR); see UpdateAuxilaryData */ + Efield_fp, /**< The field that is updated by the field solver at each timestep */ + Bfield_fp, /**< The field that is updated by the field solver at each timestep */ + Efield_fp_external, /**< Stores grid particle fields provided by the user as through an openPMD file */ + Bfield_fp_external, /**< Stores grid particle fields provided by the user as through an openPMD file */ + current_fp, /**< The current that is used as a source for the field solver */ + current_fp_nodal, /**< Only used when using nodal current deposition */ + current_fp_vay, /**< Only used when using Vay current deposition */ + current_buf, /**< Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. */ + current_store, /**< Only used when doing subcycling with mesh refinement, for book-keeping of currents */ + rho_buf, /**< Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. */ + rho_fp, /**< The charge density that is used as a source for the field solver (mostly for labframe electrostatic and PSATD) */ + F_fp, /**< Used for divE cleaning */ + G_fp, /**< Used for divB cleaning */ + phi_fp, /**< Obtained by the Poisson solver, for labframe electrostatic */ + vector_potential_fp, /**< Obtained by the magnetostatic solver */ vector_potential_fp_nodal, vector_potential_grad_buf_e_stag, vector_potential_grad_buf_b_stag, @@ -45,19 +50,19 @@ namespace warpx::fields hybrid_current_fp_temp, hybrid_current_fp_ampere, hybrid_current_fp_external, - Efield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level - Bfield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level - current_cp, //!< Only used with MR. 
The current that is used as a source for the field solver, on the coarse patch of each level - rho_cp, //!< Only used with MR. The charge density that is used as a source for the field solver, on the coarse patch of each level - F_cp, //!< Only used with MR. Used for divE cleaning, on the coarse patch of each level - G_cp, //!< Only used with MR. Used for divB cleaning, on the coarse patch of each level - Efield_cax, //!< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field - Bfield_cax, //!< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field - E_external_particle_field, //!< Stores external particle fields provided by the user as through an openPMD file - B_external_particle_field, //!< Stores external particle fields provided by the user as through an openPMD file - distance_to_eb, //!< Only used with embedded boundaries (EB). Stores the distance to the nearest EB - edge_lengths, //!< Only used with embedded boundaries (EB). Indicates the length of the cell edge that is covered by the EB, in SI units - face_areas, //!< Only used with embedded boundaries (EB). Indicates the area of the cell face that is covered by the EB, in SI units + Efield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ + Bfield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ + current_cp, /**< Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level */ + rho_cp, /**< Only used with MR. The charge density that is used as a source for the field solver, on the coarse patch of each level */ + F_cp, /**< Only used with MR. Used for divE cleaning, on the coarse patch of each level */ + G_cp, /**< Only used with MR. Used for divB cleaning, on the coarse patch of each level */ + Efield_cax, /**< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field */ + Bfield_cax, /**< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field */ + E_external_particle_field, /**< Stores external particle fields provided by the user as through an openPMD file */ + B_external_particle_field, /**< Stores external particle fields provided by the user as through an openPMD file */ + distance_to_eb, /**< Only used with embedded boundaries (EB). Stores the distance to the nearest EB */ + edge_lengths, /**< Only used with embedded boundaries (EB). Indicates the length of the cell edge that is covered by the EB, in SI units */ + face_areas, /**< Only used with embedded boundaries (EB). 
Indicates the area of the cell face that is covered by the EB, in SI units */ area_mod, pml_E_fp, pml_B_fp, @@ -74,7 +79,7 @@ namespace warpx::fields Bfield_avg_fp, Efield_avg_cp, Bfield_avg_cp, - B_old, //!< Stores the value of B at the beginning of the timestep, for the implicit solver + B_old, /**< Stores the value of B at the beginning of the timestep, for the implicit solver */ ECTRhofield, Venl ); From 36b55449dccb0a8bf60a431341c730c5b74cc5a1 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 26 Sep 2024 15:27:45 -0700 Subject: [PATCH 83/91] Skip Guard in IGF Solver (Revert #5284) (#5335) * Skip Guard in IGF Solver (Revert #5284) This reverts the inclusion of the guard cells when copying rho. We could not yet determine why, but with this change the FFT 3D solver in ImpactX does not converge to the analytical solutions anymore. * Reset Checksum --- .../test_3d_open_bc_poisson_solver.json | 24 +++++++++---------- .../fields/IntegratedGreenFunctionSolver.cpp | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json index 0ca6bde570a..af9ab3a0bdd 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json +++ b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json @@ -1,20 +1,20 @@ { "lev=0": { - "Bx": 100915975.15403552, - "By": 157610677.3147734, - "Bz": 1.2276713711194638e-13, - "Ex": 4.725066923359797e+16, - "Ey": 3.025396149317578e+16, - "Ez": 3276584.4383433824, - "rho": 10994013582437.197 + "Bx": 100915933.446046, + "By": 157610622.18548763, + "Bz": 2.76973993530483e-13, + "Ex": 4.725065270619211e+16, + "Ey": 3.0253948989388292e+16, + "Ez": 3276573.9514776673, + "rho": 10994013582437.193 }, "electron": { - "particle_momentum_x": 5.701279599509506e-19, - "particle_momentum_y": 3.650453172383178e-19, + "particle_momentum_x": 5.701277606055763e-19, + "particle_momentum_y": 3.6504516636842883e-19, "particle_momentum_z": 1.145432768297242e-10, - "particle_position_x": 17.31408691249785, - "particle_position_y": 0.25836912671878015, + "particle_position_x": 17.314086912497864, + "particle_position_y": 0.25836912671877965, "particle_position_z": 10066.329600000008, "particle_weight": 19969036501.910976 } -} \ No newline at end of file +} diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp index ae11ad5087d..40b36740ae5 100644 --- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp @@ -121,7 +121,7 @@ computePhiIGF ( amrex::MultiFab const & rho, BL_PROFILE_VAR_START(timer_pcopies); // Copy from rho including its ghost cells to tmp_rho - tmp_rho.ParallelCopy( rho, 0, 0, 1, rho.nGrowVect(), amrex::IntVect::TheZeroVector() ); + tmp_rho.ParallelCopy( rho, 0, 0, 1, amrex::IntVect::TheZeroVector(), amrex::IntVect::TheZeroVector() ); BL_PROFILE_VAR_STOP(timer_pcopies); #if !defined(ABLASTR_USE_HEFFTE) From eef12e96149b80f92600654c52994a230658b199 Mon Sep 17 00:00:00 2001 From: Brian Naranjo Date: Sat, 28 Sep 2024 07:37:38 -0700 Subject: [PATCH 84/91] General moving-window transformations in boosted-frame simulations (#5226) * General moving-window transformations in boosted-frame simulations * Default speed of moving window to speed of boosted frame * Extend simulation volume enough so that particles don't exit * Include moving window speed in diag_lo and 
diag_hi transformations * Modify bounds so as to produce the same results as in previous versions --------- Co-authored-by: Remi Lehe --- ...uts_test_3d_hard_edged_quadrupoles_boosted | 4 +- .../inputs_test_3d_plasma_lens_boosted | 4 +- ...est_3d_hard_edged_quadrupoles_boosted.json | 34 +++++++------- .../test_3d_plasma_lens_boosted.json | 32 +++++++------- Source/Diagnostics/Diagnostics.cpp | 5 ++- Source/Utils/WarpXUtil.H | 4 ++ Source/Utils/WarpXUtil.cpp | 44 ++++++++++++++++++- Source/WarpX.cpp | 43 ++++-------------- 8 files changed, 94 insertions(+), 76 deletions(-) diff --git a/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted b/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted index 668ec73d2dd..c056ff1fc66 100644 --- a/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted +++ b/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted @@ -2,8 +2,8 @@ max_step = 50 amr.n_cell = 16 16 8 amr.max_level = 0 geometry.dims = 3 -geometry.prob_lo = -0.2 -0.2 -0.1 -geometry.prob_hi = +0.2 +0.2 +0.1 +geometry.prob_lo = -0.2 -0.2 -0.1866 +geometry.prob_hi = +0.2 +0.2 +0.1866 # Boundary condition boundary.field_lo = pec pec pec diff --git a/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted b/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted index fa18ac439c4..b00779bae65 100644 --- a/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted +++ b/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted @@ -8,8 +8,8 @@ amr.max_level = 0 # Geometry geometry.dims = 3 -geometry.prob_lo = -1.0 -1.0 -1.0 # physical domain -geometry.prob_hi = 1.0 1.0 2.0 +geometry.prob_lo = -1.0 -1.0 -1.866 # physical domain +geometry.prob_hi = 1.0 1.0 3.732 boundary.field_lo = pec pec pec boundary.field_hi = pec pec pec diff --git a/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json b/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json index acec34286f7..0a601b7b437 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json +++ b/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json @@ -1,22 +1,22 @@ { + "lev=0": { + "Bx": 3.254604354043409e-14, + "By": 3.2768679907552955e-14, + "Bz": 1.0615351421410278e-16, + "Ex": 2.3084916770539354e-05, + "Ey": 2.2657235922655432e-05, + "Ez": 1.9978004351148e-05, + "jx": 1.781971994166362e-10, + "jy": 4.2163624424546344e-20, + "jz": 1.0378980680353126e-07 + }, "electron": { - "particle_momentum_x": 5.955475926588059e-26, - "particle_momentum_y": 1.4612764777454504e-35, - "particle_momentum_z": 3.4687284535374423e-23, - "particle_position_x": 0.049960237123814574, - "particle_position_y": 8.397636119991403e-15, - "particle_position_z": 0.10931687737912647, + "particle_momentum_x": 5.955475927655105e-26, + "particle_momentum_y": 1.4613271542201658e-35, + "particle_momentum_z": 3.468728453537439e-23, + "particle_position_x": 0.04996023704063194, + "particle_position_y": 8.398113230295983e-15, + "particle_position_z": 0.10931682580470406, "particle_weight": 1.0 - }, - "lev=0": { - "Bx": 3.254531465641299e-14, - "By": 3.2768092409497234e-14, - "Bz": 1.0615286316115558e-16, - "Ex": 2.30845657253269e-05, - "Ey": 2.2656898931877975e-05, - "Ez": 1.997747654112569e-05, - "jx": 1.7819477343635878e-10, - "jy": 4.2163030523377745e-20, - "jz": 1.0378839382497739e-07 } } diff --git 
a/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json b/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json index 6d5eabb492e..e1fa54618ee 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json +++ b/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json @@ -1,21 +1,21 @@ { "lev=0": { - "Bx": 1.3073041371012706e-14, - "By": 1.3033038210840872e-14, - "Bz": 5.595105968291083e-17, - "Ex": 2.801134785671445e-06, - "Ey": 2.8088613469887243e-06, - "Ez": 3.343430731047825e-06, - "jx": 2.5155716299904363e-11, - "jy": 2.013718424043256e-11, - "jz": 6.00631499206418e-09 + "Bx": 1.307357220398482e-14, + "By": 1.3033571630685163e-14, + "Bz": 5.594998319468307e-17, + "Ex": 2.8010832905044288e-06, + "Ey": 2.8088096742407935e-06, + "Ez": 3.3433681277560495e-06, + "jx": 2.5151718871714067e-11, + "jy": 2.013398608921663e-11, + "jz": 6.0063967622563335e-09 }, "electrons": { - "particle_momentum_x": 7.437088723328491e-24, - "particle_momentum_y": 5.9495056615288754e-24, - "particle_momentum_z": 5.117548636687908e-22, - "particle_position_x": 0.036489969262013186, - "particle_position_y": 0.029201200231260247, - "particle_position_z": 6.9681085285694095 + "particle_momentum_x": 7.43708887164806e-24, + "particle_momentum_y": 5.949505779760011e-24, + "particle_momentum_z": 5.117548636790359e-22, + "particle_position_x": 0.03648994812700447, + "particle_position_y": 0.029201183320618985, + "particle_position_z": 6.968107021318396 } -} +} \ No newline at end of file diff --git a/Source/Diagnostics/Diagnostics.cpp b/Source/Diagnostics/Diagnostics.cpp index dc28aeda095..fd079479285 100644 --- a/Source/Diagnostics/Diagnostics.cpp +++ b/Source/Diagnostics/Diagnostics.cpp @@ -229,8 +229,9 @@ Diagnostics::BaseReadParameters () if (WarpX::boost_direction[ dim_map[WarpX::moving_window_dir] ] == 1) { // Convert user-defined lo and hi for diagnostics to account for boosted-frame // simulations with moving window - const amrex::Real convert_factor = 1._rt/(WarpX::gamma_boost * (1._rt - WarpX::beta_boost) ); - // Assuming that the window travels with speed c + const amrex::Real beta_window = WarpX::moving_window_v / PhysConst::c; + const amrex::Real convert_factor = 1._rt/( + WarpX::gamma_boost * (1._rt - WarpX::beta_boost * beta_window) ); m_lo[WarpX::moving_window_dir] *= convert_factor; m_hi[WarpX::moving_window_dir] *= convert_factor; } diff --git a/Source/Utils/WarpXUtil.H b/Source/Utils/WarpXUtil.H index e35b0cdb313..46399b439d6 100644 --- a/Source/Utils/WarpXUtil.H +++ b/Source/Utils/WarpXUtil.H @@ -33,6 +33,10 @@ void ParseGeometryInput(); void ReadBoostedFrameParameters(amrex::Real& gamma_boost, amrex::Real& beta_boost, amrex::Vector& boost_direction); +void ReadMovingWindowParameters( + int& do_moving_window, int& start_moving_window_step, int& end_moving_window_step, + int& moving_window_dir, amrex::Real& moving_window_v); + void ConvertLabParamsToBoost(); /** diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index 856e021abb3..d6f465fa901 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -140,6 +140,43 @@ void ReadBoostedFrameParameters(Real& gamma_boost, Real& beta_boost, } } +void ReadMovingWindowParameters( + int& do_moving_window, int& start_moving_window_step, int& end_moving_window_step, + int& moving_window_dir, amrex::Real& moving_window_v) +{ + const ParmParse pp_warpx("warpx"); + pp_warpx.query("do_moving_window", do_moving_window); + if (do_moving_window) { + 
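// (Orientation sketch, paraphrasing this patch; not verbatim source.)
// The moving_window_v read below defines beta_window = moving_window_v / c,
// which generalizes the boosted-frame domain conversion used in
// Diagnostics.cpp above and in ConvertLabParamsToBoost below:
//   L_boost = L_lab / ( gamma_boost * (1 - beta_boost * beta_window) ).
// Derivation sketch: two points co-moving with the window at beta_window*c
// and separated by L_lab in the lab frame move at
//   u'/c = (beta_window - beta_boost) / (1 - beta_boost * beta_window)
// in the boosted frame; transforming their worldlines and taking the
// separation at equal boosted-frame time yields
//   gamma_boost * L_lab * (1 - beta_boost^2) / (1 - beta_boost * beta_window),
// i.e. L_lab / ( gamma_boost * (1 - beta_boost * beta_window) ).
// For beta_window = 1 this reduces to the previously hard-coded factor
// 1 / ( gamma_boost * (1 - beta_boost) ).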
utils::parser::queryWithParser( + pp_warpx, "start_moving_window_step", start_moving_window_step); + utils::parser::queryWithParser( + pp_warpx, "end_moving_window_step", end_moving_window_step); + std::string s; + pp_warpx.get("moving_window_dir", s); + + if (s == "z" || s == "Z") { + moving_window_dir = WARPX_ZINDEX; + } +#if defined(WARPX_DIM_3D) + else if (s == "y" || s == "Y") { + moving_window_dir = 1; + } +#endif +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_3D) + else if (s == "x" || s == "X") { + moving_window_dir = 0; + } +#endif + else { + WARPX_ABORT_WITH_MESSAGE("Unknown moving_window_dir: "+s); + } + + utils::parser::getWithParser( + pp_warpx, "moving_window_v", moving_window_v); + moving_window_v *= PhysConst::c; + } +} + void ConvertLabParamsToBoost() { Real gamma_boost = 1., beta_boost = 0.; @@ -196,8 +233,11 @@ void ConvertLabParamsToBoost() { if (boost_direction[dim_map[idim]]) { amrex::Real convert_factor; - // Assume that the window travels with speed +c - convert_factor = 1._rt/( gamma_boost * ( 1 - beta_boost ) ); + amrex::Real beta_window = beta_boost; + if (WarpX::do_moving_window && idim == WarpX::moving_window_dir) { + beta_window = WarpX::moving_window_v / PhysConst::c; + } + convert_factor = 1._rt/( gamma_boost * ( 1 - beta_boost * beta_window ) ); prob_lo[idim] *= convert_factor; prob_hi[idim] *= convert_factor; if (max_level > 0){ diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 7f9288debb7..89254e05c98 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -200,6 +200,10 @@ void WarpX::MakeWarpX () { ParseGeometryInput(); + ReadMovingWindowParameters( + do_moving_window, start_moving_window_step, end_moving_window_step, + moving_window_dir, moving_window_v); + ConvertLabParamsToBoost(); ReadBCParams(); @@ -623,42 +627,11 @@ WarpX::ReadParameters () pp_warpx.query("compute_max_step_from_btd", compute_max_step_from_btd); - pp_warpx.query("do_moving_window", do_moving_window); - if (do_moving_window) - { - utils::parser::queryWithParser( - pp_warpx, "start_moving_window_step", start_moving_window_step); - utils::parser::queryWithParser( - pp_warpx, "end_moving_window_step", end_moving_window_step); - std::string s; - pp_warpx.get("moving_window_dir", s); - - if (s == "z" || s == "Z") { - moving_window_dir = WARPX_ZINDEX; - } -#if defined(WARPX_DIM_3D) - else if (s == "y" || s == "Y") { - moving_window_dir = 1; - } -#endif -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_3D) - else if (s == "x" || s == "X") { - moving_window_dir = 0; - } -#endif - - else { - WARPX_ABORT_WITH_MESSAGE("Unknown moving_window_dir: "+s); - } - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(Geom(0).isPeriodic(moving_window_dir) == 0, - "The problem must be non-periodic in the moving window direction"); - + if (do_moving_window) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + Geom(0).isPeriodic(moving_window_dir) == 0, + "The problem must be non-periodic in the moving window direction"); moving_window_x = geom[0].ProbLo(moving_window_dir); - - utils::parser::getWithParser( - pp_warpx, "moving_window_v", moving_window_v); - moving_window_v *= PhysConst::c; } m_p_ext_field_params = std::make_unique(pp_warpx); From b1aa846a7968bb30c1f49d2129a46efb4891609c Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 30 Sep 2024 08:45:12 -0700 Subject: [PATCH 85/91] AMReX/pyAMReX/PICSAR: Weekly Update (#5310) * AMReX: Weekly Update * pyAMReX: Weekly Update --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- cmake/dependencies/pyAMReX.cmake | 2 +- 3 files changed, 3 insertions(+), 3 
deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 28bfaaf57a7..ce1f4454345 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -131,7 +131,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 3734079379bb6b2a3850d197241f6b2c3b3bfa7d && cd - + cd ../amrex && git checkout --detach 467dd681af11043304757f11d761cf0661c97e56 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 72642b575e8..498d56f5f81 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -279,7 +279,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "3734079379bb6b2a3850d197241f6b2c3b3bfa7d" +set(WarpX_amrex_branch "467dd681af11043304757f11d761cf0661c97e56" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index e93851443c0..69711866f74 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "41c856b8a588c3c8b04bb35d2d05b56f6ce0dd7f" +set(WarpX_pyamrex_branch "1c66690f83244196c5655293f1381303a7d1589d" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From 84d85ee07144379cce999a37cdef3346d1da40bc Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 30 Sep 2024 13:52:11 -0700 Subject: [PATCH 86/91] AMReX: Weekly Update (#5343) --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index ce1f4454345..2bc5d35bb4a 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -131,7 +131,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
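          # (the detached checkout below pins AMReX to the same commit that
          # cmake/dependencies/AMReX.cmake records, keeping CI and CMake builds in sync)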
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 467dd681af11043304757f11d761cf0661c97e56 && cd - + cd ../amrex && git checkout --detach 103d0e9f7c7e7fbedbeb61512d3f37f3e2b03770 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 498d56f5f81..7524d919c61 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -279,7 +279,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "467dd681af11043304757f11d761cf0661c97e56" +set(WarpX_amrex_branch "103d0e9f7c7e7fbedbeb61512d3f37f3e2b03770" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") From d1a338e90ed1ad7ac2f010f47409aa48a2265c88 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 30 Sep 2024 14:16:50 -0700 Subject: [PATCH 87/91] Poisson `computePhi`: Simplify Boundary Handler (#5346) Move the boundary handler to become an optional argument, which otherwise defaults to Dirichlet conditions, e.g., in non-EB cases. This simplifies the ImpactX implementation and fixes a linker issue with CUDA for ImpactX. --- .../ElectrostaticSolver.cpp | 2 +- Source/ablastr/fields/PoissonSolver.H | 39 +++++++++++++------ 2 files changed, 29 insertions(+), 12 deletions(-) diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp index 1ced0a07152..0b1dca675be 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp @@ -201,12 +201,12 @@ ElectrostaticSolver::computePhi ( warpx.DistributionMap(), warpx.boxArray(), WarpX::grid_type, - *m_poisson_boundary_handler, is_solver_igf_on_lev0, EB::enabled(), WarpX::do_single_precision_comms, warpx.refRatio(), post_phi_calculation, + *m_poisson_boundary_handler, warpx.gett_new(0), eb_farray_box_factory ); diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index 8b4f9cea9a1..d7eeecead1b 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -164,8 +164,8 @@ inline void interpolatePhiBetweenLevels ( * \vec{\nabla}^2 r \phi - (\vec{\beta}\cdot\vec{\nabla})^2 r \phi = -\frac{r \rho}{\epsilon_0} * \f] * - * \tparam T_BoundaryHandler handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler * \tparam T_PostPhiCalculationFunctor a calculation per level directly after phi was calculated + * \tparam T_BoundaryHandler handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler (EB ONLY) * \tparam T_FArrayBoxFactory usually nothing or an amrex::EBFArrayBoxFactory (EB ONLY) * \param[in] rho The charge density a given species * \param[out] phi The potential to be computed by this function @@ -188,8 +188,8 @@ inline void interpolatePhiBetweenLevels ( * \param[in] eb_farray_box_factory a factory for field data, @see amrex::EBFArrayBoxFactory; required for embedded boundaries (default: none) */ template< - typename T_BoundaryHandler, typename T_PostPhiCalculationFunctor = std::nullopt_t, + typename T_BoundaryHandler = std::nullopt_t, typename T_FArrayBoxFactory = void > void @@ -205,12 +205,12 @@ computePhi ( amrex::Vector 
const& dmap, amrex::Vector const& grids, utils::enums::GridType grid_type, - T_BoundaryHandler const boundary_handler, bool is_solver_igf_on_lev0, bool eb_enabled = false, bool do_single_precision_comms = false, std::optional > rel_ref_ratio = std::nullopt, [[maybe_unused]] T_PostPhiCalculationFunctor post_phi_calculation = std::nullopt, + [[maybe_unused]] T_BoundaryHandler const boundary_handler = std::nullopt, // only used for EB [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB [[maybe_unused]] std::optional > eb_farray_box_factory = std::nullopt // only used for EB ) @@ -349,12 +349,18 @@ computePhi ( #endif #if defined(AMREX_USE_EB) if (eb_enabled) { - // if the EB potential only depends on time, the potential can be passed - // as a float instead of a callable - if (boundary_handler.phi_EB_only_t) { - linop_nodelap->setEBDirichlet(boundary_handler.potential_eb_t(current_time.value())); - } else { - linop_nodelap->setEBDirichlet(boundary_handler.getPhiEB(current_time.value())); + if constexpr (!std::is_same_v) { + // if the EB potential only depends on time, the potential can be passed + // as a float instead of a callable + if (boundary_handler.phi_EB_only_t) { + linop_nodelap->setEBDirichlet(boundary_handler.potential_eb_t(current_time.value())); + } else { + linop_nodelap->setEBDirichlet(boundary_handler.getPhiEB(current_time.value())); + } + } else + { + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, + "EB Poisson solver enabled but no 'boundary_handler' passed!"); } } #endif @@ -372,9 +378,20 @@ computePhi ( linop = std::move(linop_tenslap); } - // Solve the Poisson equation - linop->setDomainBC(boundary_handler.lobc, boundary_handler.hibc); + // Level 0 domain boundary + if constexpr (std::is_same_v) { + amrex::Array const lobc = {AMREX_D_DECL( + amrex::LinOpBCType::Dirichlet, + amrex::LinOpBCType::Dirichlet, + amrex::LinOpBCType::Dirichlet + )}; + amrex::Array const hibc = lobc; + linop->setDomainBC(lobc, hibc); + } else { + linop->setDomainBC(boundary_handler.lobc, boundary_handler.hibc); + } + // Solve the Poisson equation amrex::MLMG mlmg(*linop); // actual solver defined here mlmg.setVerbose(verbosity); mlmg.setMaxIter(max_iters); From 2f2b66787fe1405f9cf6db43853832cdcbb673a4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 16:09:28 -0700 Subject: [PATCH 88/91] [pre-commit.ci] pre-commit autoupdate (#5347) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.7 → v0.6.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.7...v0.6.8) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ae8881150c9..d2b15b8af95 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.7 + rev: v0.6.8 hooks: # Run the linter - id: ruff From 3b447589c7eb167bc71fe06b1478bb39306750f4 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 30 Sep 2024 18:44:32 -0700 Subject: [PATCH 89/91] Doc: HPC no heFFTe yet (#5348) Do not default-advertise to enable heFFTe on Perlmutter and Lonestar yet. 
Introduce user-facing default for all HPC systems at a later point when it makes more sense. --- Docs/source/install/hpc/lonestar6.rst | 4 ++-- Docs/source/install/hpc/perlmutter.rst | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Docs/source/install/hpc/lonestar6.rst b/Docs/source/install/hpc/lonestar6.rst index 81795545da3..f1512e4a508 100644 --- a/Docs/source/install/hpc/lonestar6.rst +++ b/Docs/source/install/hpc/lonestar6.rst @@ -90,7 +90,7 @@ Use the following :ref:`cmake commands ` to compile the applicat cd $HOME/src/warpx rm -rf build_pm_gpu - cmake -S . -B build_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_gpu -j 16 The WarpX application executables are now in ``$HOME/src/warpx/build_gpu/bin/``. @@ -101,7 +101,7 @@ Additionally, the following commands will install WarpX as a Python module: cd $HOME/src/warpx rm -rf build_pm_gpu_py - cmake -S . -B build_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_gpu_py -j 16 --target pip_install Now, you can :ref:`submit Lonestar6 compute jobs ` for WarpX :ref:`Python (PICMI) scripts ` (:ref:`example scripts `). diff --git a/Docs/source/install/hpc/perlmutter.rst b/Docs/source/install/hpc/perlmutter.rst index dc5a985e99f..9612b64476d 100644 --- a/Docs/source/install/hpc/perlmutter.rst +++ b/Docs/source/install/hpc/perlmutter.rst @@ -153,7 +153,7 @@ Use the following :ref:`cmake commands ` to compile the applicat cd $HOME/src/warpx rm -rf build_pm_gpu - cmake -S . -B build_pm_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_pm_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_pm_gpu -j 16 The WarpX application executables are now in ``$HOME/src/warpx/build_pm_gpu/bin/``. @@ -164,7 +164,7 @@ Use the following :ref:`cmake commands ` to compile the applicat cd $HOME/src/warpx rm -rf build_pm_gpu_py - cmake -S . -B build_pm_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_pm_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_pm_gpu_py -j 16 --target pip_install .. tab-item:: CPU Nodes @@ -174,7 +174,7 @@ Use the following :ref:`cmake commands ` to compile the applicat cd $HOME/src/warpx rm -rf build_pm_cpu - cmake -S . -B build_pm_cpu -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_pm_cpu -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_pm_cpu -j 16 The WarpX application executables are now in ``$HOME/src/warpx/build_pm_cpu/bin/``. @@ -184,7 +184,7 @@ Use the following :ref:`cmake commands ` to compile the applicat rm -rf build_pm_cpu_py - cmake -S . 
-B build_pm_cpu_py -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_pm_cpu_py -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_pm_cpu_py -j 16 --target pip_install Now, you can :ref:`submit Perlmutter compute jobs ` for WarpX :ref:`Python (PICMI) scripts ` (:ref:`example scripts `). From 617d7bafd1ed1740494cecba47c536c5fb0fae4a Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 30 Sep 2024 23:48:41 -0700 Subject: [PATCH 90/91] Doc: NVTX in Nvidia Conda (#5345) The package `cuda-nvtx-dev` does not seem to be part of the `cuda` package. https://anaconda.org/nvidia/repo This fixes: ``` CMake Error at build/_deps/fetchedamrex-src/Tools/CMake/AMReXParallelBackends.cmake:71 (target_link_libraries): Target "amrex_3d" links to: CUDA::nvToolsExt but the target was not found. Possible reasons include: * There is a typo in the target name. * A find_package call is missing for an IMPORTED target. * An ALIAS target is missing. Call Stack (most recent call first): build/_deps/fetchedamrex-src/Src/CMakeLists.txt:40 (include) ``` --- Docs/source/install/dependencies.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index 72c599ae2bd..71a607eae6a 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -117,7 +117,7 @@ For Nvidia CUDA GPU support, you will need to have `a recent CUDA driver install .. code-block:: bash - conda install -c nvidia -c conda-forge cuda cupy + conda install -c nvidia -c conda-forge cuda cuda-nvtx-dev cupy More info for `CUDA-enabled ML packages `__. From 2d61720395eaf520e51784eb49bec0e994477e6c Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Tue, 1 Oct 2024 00:29:12 -0700 Subject: [PATCH 91/91] Use only plasma current in `HybridPICSolveE` (#5273) * use plasma current rather than total current in `HybridPICSolveE` * remove logic to subtract J_ext from plasma current in `JdispFunctor` * add one ghost cell to the hybrid-pic external current since we interpolate to a nodal grid * Fix Doxygen Signed-off-by: roelof-groenewald --- ..._ohm_solver_magnetic_reconnection_picmi.py | 2 +- Python/pywarpx/fields.py | 14 ++-- .../ComputeDiagFunctors/JdispFunctor.cpp | 56 ++------------ .../FiniteDifferenceSolver.H | 22 +++--- .../HybridPICModel/HybridPICModel.H | 10 +-- .../HybridPICModel/HybridPICModel.cpp | 76 ++++++++++--------- .../HybridPICSolveE.cpp | 39 ++++------ .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 2 +- Source/Fields.H | 12 +-- 9 files changed, 92 insertions(+), 141 deletions(-) diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py b/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py index 4f13c76e208..f074c81cbb3 100644 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py @@ -303,7 +303,7 @@ def check_fields(self): rho = fields.RhoFPWrapper(include_ghosts=False)[:, :] Jiy = fields.JyFPWrapper(include_ghosts=False)[...] / self.J0 - Jy = fields.JyFPAmpereWrapper(include_ghosts=False)[...] 
/ self.J0 + Jy = fields.JyFPPlasmaWrapper(include_ghosts=False)[...] / self.J0 Bx = fields.BxFPWrapper(include_ghosts=False)[...] / self.B0 By = fields.ByFPWrapper(include_ghosts=False)[...] / self.B0 Bz = fields.BzFPWrapper(include_ghosts=False)[...] / self.B0 diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index 0100f64f261..5d3b892b543 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -33,7 +33,7 @@ ExFPPMLWrapper, EyFPPMLWrapper, EzFPPMLWrapper BxFPPMLWrapper, ByFPPMLWrapper, BzFPPMLWrapper JxFPPMLWrapper, JyFPPMLWrapper, JzFPPMLWrapper -JxFPAmpereWrapper, JyFPAmpereWrapper, JzFPAmpereWrapper +JxFPPlasmaWrapper, JyFPPlasmaWrapper, JzFPPlasmaWrapper FFPPMLWrapper, GFPPMLWrapper ExCPPMLWrapper, EyCPPMLWrapper, EzCPPMLWrapper @@ -873,27 +873,27 @@ def FaceAreaszWrapper(level=0, include_ghosts=False): ) -def JxFPAmpereWrapper(level=0, include_ghosts=False): +def JxFPPlasmaWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="hybrid_current_fp_ampere", + mf_name="hybrid_current_fp_plasma", idir=0, level=level, include_ghosts=include_ghosts, ) -def JyFPAmpereWrapper(level=0, include_ghosts=False): +def JyFPPlasmaWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="hybrid_current_fp_ampere", + mf_name="hybrid_current_fp_plasma", idir=1, level=level, include_ghosts=include_ghosts, ) -def JzFPAmpereWrapper(level=0, include_ghosts=False): +def JzFPPlasmaWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="hybrid_current_fp_ampere", + mf_name="hybrid_current_fp_plasma", idir=2, level=level, include_ghosts=include_ghosts, diff --git a/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp index b4f286506a8..e06f90b5f0c 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp @@ -1,8 +1,11 @@ -/* This file is part of Warpx. +/* Copyright 2023-2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Avigdor Veksler (TAE Technologies) * - * Authors: Avigdor Veksler * License: BSD-3-Clause-LBNL -*/ + */ #include "JdispFunctor.H" #include "WarpX.H" @@ -40,7 +43,7 @@ JdispFunctor::operator() (amrex::MultiFab& mf_dst, int dcomp, const int /*i_buff AMREX_ASSUME(hybrid_pic_model != nullptr); /** pointer to current calculated from Ampere's Law (Jamp) multifab */ - amrex::MultiFab* mf_curlB = warpx.m_fields.get(FieldType::hybrid_current_fp_ampere, Direction{m_dir}, m_lev); + amrex::MultiFab* mf_curlB = warpx.m_fields.get(FieldType::hybrid_current_fp_plasma, Direction{m_dir}, m_lev); //if (!hybrid_pic_model) { // To finish this implementation, we need to implement a method to @@ -63,51 +66,6 @@ JdispFunctor::operator() (amrex::MultiFab& mf_dst, int dcomp, const int /*i_buff -1, *mf_j, 0, 0, 1, Jdisp.nGrowVect() ); - if (hybrid_pic_model) { - // Subtract the interpolated j_external value from j_displacement. - /** pointer to external currents (Jext) multifab */ - amrex::MultiFab* mf_j_external = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{m_dir}, m_lev); - - // Index type required for interpolating Jext from their respective - // staggering (nodal) to the Jx_displacement, Jy_displacement, Jz_displacement - // locations. The staggering of J_displacement is the same as the - // staggering for J, so we use J_stag as the interpolation map. - // For interp to work below, the indices of the undefined dimensions - // must match. 
We set them as (1,1,1). - amrex::GpuArray Jext_IndexType = {1, 1, 1}; - amrex::GpuArray J_IndexType = {1, 1, 1}; - amrex::IntVect Jext_stag = mf_j_external->ixType().toIntVect(); - amrex::IntVect J_stag = mf_j->ixType().toIntVect(); - - // Index types for the dimensions simulated are overwritten. - for ( int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - Jext_IndexType[idim] = Jext_stag[idim]; - J_IndexType[idim] = J_stag[idim]; - } - - // Parameters for `interp` that maps from Jext to J. - // The "coarsening is just 1 i.e. no coarsening" - amrex::GpuArray const& coarsen = {1, 1, 1}; - - // Loop through the grids, and over the tiles within each grid to - // subtract the interpolated Jext from J_displacement. -#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif - for ( MFIter mfi(Jdisp, TilingIfNotGPU()); mfi.isValid(); ++mfi ) { - - Array4 const& Jdisp_arr = Jdisp.array(mfi); - Array4 const& Jext = mf_j_external->const_array(mfi); - - // Loop over cells and update the Jdisp MultiFab - amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Interpolate Jext to the staggering of J - auto const jext_interp = ablastr::coarsen::sample::Interp(Jext, Jext_IndexType, J_IndexType, coarsen, i, j, k, 0); - Jdisp_arr(i, j, k, 0) -= jext_interp; - }); - } - } - InterpolateMFForDiag(mf_dst, Jdisp, dcomp, warpx.DistributionMap(m_lev), m_convertRZmodes2cartesian); } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 03f51f7ba62..45c06584fda 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -141,9 +141,8 @@ class FiniteDifferenceSolver * https://link.springer.com/chapter/10.1007/3-540-36530-3_8 * * \param[out] Efield vector of electric field MultiFabs updated at a given level - * \param[in] Jfield vector of total current MultiFabs at a given level + * \param[in] Jfield vector of total plasma current MultiFabs at a given level * \param[in] Jifield vector of ion current density MultiFabs at a given level - * \param[in] Jextfield vector of external current density MultiFabs at a given level * \param[in] Bfield vector of magnetic field MultiFabs at a given level * \param[in] rhofield scalar ion charge density Multifab at a given level * \param[in] Pefield scalar electron pressure MultiFab at a given level @@ -153,15 +152,14 @@ class FiniteDifferenceSolver * \param[in] solve_for_Faraday boolean flag for whether the E-field is solved to be used in Faraday's equation */ void HybridPICSolveE ( ablastr::fields::VectorField const& Efield, - ablastr::fields::VectorField & Jfield, - ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Jextfield, - ablastr::fields::VectorField const& Bfield, - amrex::MultiFab const& rhofield, - amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, - int lev, HybridPICModel const* hybrid_model, - bool solve_for_Faraday ); + ablastr::fields::VectorField & Jfield, + ablastr::fields::VectorField const& Jifield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + amrex::MultiFab const& Pefield, + ablastr::fields::VectorField const& edge_lengths, + int lev, HybridPICModel const* hybrid_model, + bool solve_for_Faraday ); /** * \brief Calculation of total current using Ampere's law (without @@ -241,7 +239,6 @@ class FiniteDifferenceSolver 
ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Jextfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, @@ -346,7 +343,6 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Jextfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index b0f63dd8018..ec4a53b2edd 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -63,18 +63,18 @@ public: /** * \brief - * Function to calculate the total current based on Ampere's law while - * neglecting displacement current (J = curl x B). Used in the Ohm's law - * solver (kinetic-fluid hybrid model). + * Function to calculate the total plasma current based on Ampere's law while + * neglecting displacement current (J = curl x B). Any external current is + * subtracted as well. Used in the Ohm's law solver (kinetic-fluid hybrid model). * * \param[in] Bfield Magnetic field from which the current is calculated. * \param[in] edge_lengths Length of cell edges taking embedded boundaries into account */ - void CalculateCurrentAmpere ( + void CalculatePlasmaCurrent ( ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelVectorField const& edge_lengths ); - void CalculateCurrentAmpere ( + void CalculatePlasmaCurrent ( ablastr::fields::VectorField const& Bfield, ablastr::fields::VectorField const& edge_lengths, int lev diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index dbf56a0e899..d7d6a43b4d5 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -67,18 +67,18 @@ void HybridPICModel::AllocateLevelMFs (ablastr::fields::MultiFabRegister & field // The "hybrid_electron_pressure_fp" multifab stores the electron pressure calculated // from the specified equation of state. - // The "hybrid_rho_fp_temp" multifab is used to store the ion charge density - // interpolated or extrapolated to appropriate timesteps. - // The "hybrid_current_fp_temp" multifab is used to store the ion current density - // interpolated or extrapolated to appropriate timesteps. - // The "hybrid_current_fp_ampere" multifab stores the total current calculated as - // the curl of B. fields.alloc_init(FieldType::hybrid_electron_pressure_fp, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngRho, 0.0_rt); + + // The "hybrid_rho_fp_temp" multifab is used to store the ion charge density + // interpolated or extrapolated to appropriate timesteps. fields.alloc_init(FieldType::hybrid_rho_fp_temp, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngRho, 0.0_rt); + + // The "hybrid_current_fp_temp" multifab is used to store the ion current density + // interpolated or extrapolated to appropriate timesteps. 
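    // (For example, the Ohm's law field push uses the extrapolated value
    // J_i^{n+1} when updating E to t = n+1; see WarpXPushFieldsHybridPIC.cpp.)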
fields.alloc_init(FieldType::hybrid_current_fp_temp, Direction{0}, lev, amrex::convert(ba, jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt); @@ -89,28 +89,29 @@ void HybridPICModel::AllocateLevelMFs (ablastr::fields::MultiFabRegister & field lev, amrex::convert(ba, jz_nodal_flag), dm, ncomps, ngJ, 0.0_rt); - fields.alloc_init(FieldType::hybrid_current_fp_ampere, Direction{0}, + // The "hybrid_current_fp_plasma" multifab stores the total plasma current calculated + // as the curl of B minus any external current. + fields.alloc_init(FieldType::hybrid_current_fp_plasma, Direction{0}, lev, amrex::convert(ba, jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt); - fields.alloc_init(FieldType::hybrid_current_fp_ampere, Direction{1}, + fields.alloc_init(FieldType::hybrid_current_fp_plasma, Direction{1}, lev, amrex::convert(ba, jy_nodal_flag), dm, ncomps, ngJ, 0.0_rt); - fields.alloc_init(FieldType::hybrid_current_fp_ampere, Direction{2}, + fields.alloc_init(FieldType::hybrid_current_fp_plasma, Direction{2}, lev, amrex::convert(ba, jz_nodal_flag), dm, ncomps, ngJ, 0.0_rt); - // the external current density multifab is made nodal to avoid needing to interpolate - // to a nodal grid as has to be done for the ion and total current density multifabs - // this also allows the external current multifab to not have any ghost cells + // the external current density multifab matches the current staggering and + // one ghost cell is used since we interpolate the current to a nodal grid fields.alloc_init(FieldType::hybrid_current_fp_external, Direction{0}, - lev, amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))), - dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), 0.0_rt); + lev, amrex::convert(ba, jx_nodal_flag), + dm, ncomps, IntVect(1), 0.0_rt); fields.alloc_init(FieldType::hybrid_current_fp_external, Direction{1}, - lev, amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))), - dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), 0.0_rt); + lev, amrex::convert(ba, jy_nodal_flag), + dm, ncomps, IntVect(1), 0.0_rt); fields.alloc_init(FieldType::hybrid_current_fp_external, Direction{2}, - lev, amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))), - dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), 0.0_rt); + lev, amrex::convert(ba, jz_nodal_flag), + dm, ncomps, IntVect(1), 0.0_rt); #ifdef WARPX_DIM_RZ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -352,7 +353,7 @@ void HybridPICModel::GetCurrentExternal ( const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; #endif // Initialize the y-component of the field. 
- mfyfab(i,j,k) = Jy_external(x,y,z,t); + mfyfab(i,j,k) = Jy_external(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary @@ -384,35 +385,44 @@ void HybridPICModel::GetCurrentExternal ( } } -void HybridPICModel::CalculateCurrentAmpere ( +void HybridPICModel::CalculatePlasmaCurrent ( ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelVectorField const& edge_lengths) { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - CalculateCurrentAmpere(Bfield[lev], edge_lengths[lev], lev); + CalculatePlasmaCurrent(Bfield[lev], edge_lengths[lev], lev); } } -void HybridPICModel::CalculateCurrentAmpere ( +void HybridPICModel::CalculatePlasmaCurrent ( ablastr::fields::VectorField const& Bfield, ablastr::fields::VectorField const& edge_lengths, const int lev) { - WARPX_PROFILE("WarpX::CalculateCurrentAmpere()"); + WARPX_PROFILE("HybridPICModel::CalculatePlasmaCurrent()"); auto& warpx = WarpX::GetInstance(); - ablastr::fields::VectorField current_fp_ampere = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_ampere, lev); + ablastr::fields::VectorField current_fp_plasma = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_plasma, lev); warpx.get_pointer_fdtd_solver_fp(lev)->CalculateCurrentAmpere( - current_fp_ampere, Bfield, edge_lengths, lev + current_fp_plasma, Bfield, edge_lengths, lev ); // we shouldn't apply the boundary condition to J since J = J_i - J_e but // the boundary correction was already applied to J_i and the B-field // boundary ensures that J itself complies with the boundary conditions, right? // ApplyJfieldBoundary(lev, Jfield[0].get(), Jfield[1].get(), Jfield[2].get()); - for (int i=0; i<3; i++) { current_fp_ampere[i]->FillBoundary(warpx.Geom(lev).periodicity()); } + for (int i=0; i<3; i++) { current_fp_plasma[i]->FillBoundary(warpx.Geom(lev).periodicity()); } + + // Subtract external current from "Ampere" current calculated above. Note + // we need to include 1 ghost cell since later we will interpolate the + // plasma current to a nodal grid. 
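+    // (amrex::MultiFab::minus(src, strt_comp, num_comp, nghost) below
+    // subtracts starting at component 0, for one component, including
+    // one ghost cell.)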
+ ablastr::fields::VectorField current_fp_external = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_external, lev); + for (int i=0; i<3; i++) { + current_fp_plasma[i]->minus(*current_fp_external[i], 0, 1, 1); + } + } void HybridPICModel::HybridPICSolveE ( @@ -463,19 +473,15 @@ void HybridPICModel::HybridPICSolveE ( const int lev, PatchType patch_type, const bool solve_for_Faraday) const { - auto& warpx = WarpX::GetInstance(); - ablastr::fields::VectorField current_fp_ampere = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_ampere, lev); - const ablastr::fields::VectorField current_fp_external = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_external, lev); + ablastr::fields::VectorField current_fp_plasma = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_plasma, lev); const ablastr::fields::ScalarField electron_pressure_fp = warpx.m_fields.get(FieldType::hybrid_electron_pressure_fp, lev); // Solve E field in regular cells warpx.get_pointer_fdtd_solver_fp(lev)->HybridPICSolveE( - Efield, current_fp_ampere, Jfield, current_fp_external, - Bfield, rhofield, - *electron_pressure_fp, - edge_lengths, lev, this, solve_for_Faraday + Efield, current_fp_plasma, Jfield, Bfield, rhofield, + *electron_pressure_fp, edge_lengths, lev, this, solve_for_Faraday ); warpx.ApplyEfieldBoundary(lev, patch_type); } @@ -679,8 +685,8 @@ void HybridPICModel::FieldPush ( { auto& warpx = WarpX::GetInstance(); - // Calculate J = curl x B / mu0 - CalculateCurrentAmpere(Bfield, edge_lengths); + // Calculate J = curl x B / mu0 - J_ext + CalculatePlasmaCurrent(Bfield, edge_lengths); // Calculate the E-field from Ohm's law HybridPICSolveE(Efield, Jfield, Bfield, rhofield, edge_lengths, true); warpx.FillBoundaryE(ng, nodal_sync); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 34a84756203..76fedbf4dea 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -354,7 +354,6 @@ void FiniteDifferenceSolver::HybridPICSolveE ( ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField& Jfield, ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Jextfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, @@ -368,14 +367,14 @@ void FiniteDifferenceSolver::HybridPICSolveE ( #ifdef WARPX_DIM_RZ HybridPICSolveECylindrical ( - Efield, Jfield, Jifield, Jextfield, Bfield, rhofield, Pefield, + Efield, Jfield, Jifield, Bfield, rhofield, Pefield, edge_lengths, lev, hybrid_model, solve_for_Faraday ); #else HybridPICSolveECartesian ( - Efield, Jfield, Jifield, Jextfield, Bfield, rhofield, Pefield, + Efield, Jfield, Jifield, Bfield, rhofield, Pefield, edge_lengths, lev, hybrid_model, solve_for_Faraday ); @@ -392,7 +391,6 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Jextfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, @@ -471,9 +469,6 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& Jir = Jifield[0]->const_array(mfi); Array4 const& Jit = Jifield[1]->const_array(mfi); Array4 const& Jiz = Jifield[2]->const_array(mfi); - Array4 const& Jextr = 
Jextfield[0]->const_array(mfi); - Array4 const& Jextt = Jextfield[1]->const_array(mfi); - Array4 const& Jextz = Jextfield[2]->const_array(mfi); Array4 const& Br = Bfield[0]->const_array(mfi); Array4 const& Bt = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); @@ -498,16 +493,16 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // calculate enE = (J - Ji) x B enE_nodal(i, j, 0, 0) = ( - (jt_interp - jit_interp - Jextt(i, j, 0)) * Bz_interp - - (jz_interp - jiz_interp - Jextz(i, j, 0)) * Bt_interp + (jt_interp - jit_interp) * Bz_interp + - (jz_interp - jiz_interp) * Bt_interp ); enE_nodal(i, j, 0, 1) = ( - (jz_interp - jiz_interp - Jextz(i, j, 0)) * Br_interp - - (jr_interp - jir_interp - Jextr(i, j, 0)) * Bz_interp + (jz_interp - jiz_interp) * Br_interp + - (jr_interp - jir_interp) * Bz_interp ); enE_nodal(i, j, 0, 2) = ( - (jr_interp - jir_interp - Jextr(i, j, 0)) * Bt_interp - - (jt_interp - jit_interp - Jextt(i, j, 0)) * Br_interp + (jr_interp - jir_interp) * Bt_interp + - (jt_interp - jit_interp) * Br_interp ); }); @@ -707,7 +702,6 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Jextfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, @@ -780,9 +774,6 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& Jix = Jifield[0]->const_array(mfi); Array4 const& Jiy = Jifield[1]->const_array(mfi); Array4 const& Jiz = Jifield[2]->const_array(mfi); - Array4 const& Jextx = Jextfield[0]->const_array(mfi); - Array4 const& Jexty = Jextfield[1]->const_array(mfi); - Array4 const& Jextz = Jextfield[2]->const_array(mfi); Array4 const& Bx = Bfield[0]->const_array(mfi); Array4 const& By = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); @@ -790,7 +781,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Loop over the cells and update the nodal E field amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // interpolate the total current to a nodal grid + // interpolate the total plasma current to a nodal grid auto const jx_interp = Interp(Jx, Jx_stag, nodal, coarsen, i, j, k, 0); auto const jy_interp = Interp(Jy, Jy_stag, nodal, coarsen, i, j, k, 0); auto const jz_interp = Interp(Jz, Jz_stag, nodal, coarsen, i, j, k, 0); @@ -807,16 +798,16 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // calculate enE = (J - Ji) x B enE_nodal(i, j, k, 0) = ( - (jy_interp - jiy_interp - Jexty(i, j, k)) * Bz_interp - - (jz_interp - jiz_interp - Jextz(i, j, k)) * By_interp + (jy_interp - jiy_interp) * Bz_interp + - (jz_interp - jiz_interp) * By_interp ); enE_nodal(i, j, k, 1) = ( - (jz_interp - jiz_interp - Jextz(i, j, k)) * Bx_interp - - (jx_interp - jix_interp - Jextx(i, j, k)) * Bz_interp + (jz_interp - jiz_interp) * Bx_interp + - (jx_interp - jix_interp) * Bz_interp ); enE_nodal(i, j, k, 2) = ( - (jx_interp - jix_interp - Jextx(i, j, k)) * By_interp - - (jy_interp - jiy_interp - Jexty(i, j, k)) * Bx_interp + (jx_interp - jix_interp) * By_interp + - (jy_interp - jiy_interp) * Bx_interp ); }); diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 556b8f8fca4..be2d40459ac 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ 
-165,7 +165,7 @@ void WarpX::HybridPICEvolveFields () m_hybrid_pic_model->CalculateElectronPressure(); // Update the E field to t=n+1 using the extrapolated J_i^n+1 value - m_hybrid_pic_model->CalculateCurrentAmpere( + m_hybrid_pic_model->CalculatePlasmaCurrent( m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level)); m_hybrid_pic_model->HybridPICSolveE( diff --git a/Source/Fields.H b/Source/Fields.H index f85b6c4584c..b07661254c4 100644 --- a/Source/Fields.H +++ b/Source/Fields.H @@ -45,11 +45,11 @@ namespace warpx::fields vector_potential_fp_nodal, vector_potential_grad_buf_e_stag, vector_potential_grad_buf_b_stag, - hybrid_electron_pressure_fp, - hybrid_rho_fp_temp, - hybrid_current_fp_temp, - hybrid_current_fp_ampere, - hybrid_current_fp_external, + hybrid_electron_pressure_fp, /**< Used with Ohm's law solver. Stores the electron pressure */ + hybrid_rho_fp_temp, /**< Used with Ohm's law solver. Stores the time interpolated/extrapolated charge density */ + hybrid_current_fp_temp, /**< Used with Ohm's law solver. Stores the time interpolated/extrapolated current density */ + hybrid_current_fp_plasma, /**< Used with Ohm's law solver. Stores plasma current calculated as J_plasma = curl x B / mu0 - J_ext */ + hybrid_current_fp_external, /**< Used with Ohm's law solver. Stores external current */ Efield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ Bfield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ current_cp, /**< Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level */ @@ -100,7 +100,7 @@ namespace warpx::fields FieldType::vector_potential_grad_buf_e_stag, FieldType::vector_potential_grad_buf_b_stag, FieldType::hybrid_current_fp_temp, - FieldType::hybrid_current_fp_ampere, + FieldType::hybrid_current_fp_plasma, FieldType::hybrid_current_fp_external, FieldType::Efield_cp, FieldType::Bfield_cp,
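As a minimal usage sketch of the renamed Python wrappers, assuming a hybrid-PIC (Ohm's law solver) run where `current_fp` holds the ion current and `pywarpx` is initialized in the same process (as in the reconnection test above), the plasma current can be read back like any other field wrapper:

```python
from pywarpx import fields

# Plasma current J_plasma = curl B / mu0 - J_ext, on the J staggering
J_plasma_y = fields.JyFPPlasmaWrapper(include_ghosts=False)[...]

# Ion current deposited by the particles (named Jiy in the test above)
J_ion_y = fields.JyFPWrapper(include_ghosts=False)[...]

# Electron contribution inferred by the Ohm's law solver, from J = J_i - J_e
J_electron_y = J_ion_y - J_plasma_y
```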