From 6ce7adf07f7b33982241e41aea9b41a7a98b4b9c Mon Sep 17 00:00:00 2001
From: Lucas Esclapez <13371051+esclapez@users.noreply.github.com>
Date: Fri, 27 Aug 2021 15:29:28 -0700
Subject: [PATCH] Enabling SRD in PeleLM (#193)

* Implement flexible redistribution of the diffusion fluxes, controlled by
  'ns.diffusion_redistribution_type' and defaulted to 'FluxRedist' for the
  explicit fluxes.

* Fix up mac_sync for SRD: mass was lost because the SRD of the Ucorr rhoY
  fluxes didn't add up to that of rho. Also fix a forgotten subtraction of
  the DdWbar term, which was included twice (this did not introduce an error
  when syncIter == 1).

* Set the body state after the initial redistribution and setVal the
  temporary divergence holder to zero in flux_divergenceRD.

* Remove the constraint on the divergence nGrow since a temporary is used
  when EB is enabled.
---
 Source/PeleLM.H   |  35 ++++--
 Source/PeleLM.cpp | 310 ++++++++++++++++++++++++++++++++++------------
 2 files changed, 254 insertions(+), 91 deletions(-)

diff --git a/Source/PeleLM.H b/Source/PeleLM.H
index 8bc4c1d8..1ae7f4f9 100644
--- a/Source/PeleLM.H
+++ b/Source/PeleLM.H
@@ -286,7 +286,6 @@ public:
                             int             scalScomp,
                             const amrex::MFIter& mfi) override;
-
    virtual void mac_sync () override;
    //
    // Crse/fine fixup functions.
    //
@@ -411,20 +410,32 @@ public:
                     FluxBoxes&  fb_betan,
                     FluxBoxes&  fb_betanp1);

-   void flux_divergence (amrex::MultiFab&        fdiv,
-                         int                     fdivComp,
-                         const amrex::MultiFab* const* extensive_fluxes,
-                         int                     fluxComp,
-                         int                     nComp,
-                         amrex::Real             scale) const;
-
-   void getDiffusivity (amrex::MultiFab* diffusivity[BL_SPACEDIM],
+   void flux_divergenceRD (const amrex::MultiFab &a_state,
+                           int                    stateComp,
+                           amrex::MultiFab       &a_divergence,
+                           int                    divComp,
+                           const amrex::MultiFab* const* extensive_fluxes,
+                           int                    fluxComp,
+                           int                    nComp,
+                           amrex::BCRec const*    d_bc,
+                           amrex::Real            scale,
+                           amrex::Real            a_dt,
+                           int                    areSpeciesFluxes = 0);
+
+   void flux_divergence (amrex::MultiFab&        a_divergence,
+                         int                     divComp,
+                         const amrex::MultiFab* const* a_ext_fluxes,
+                         int                     fluxComp,
+                         int                     nComp,
+                         amrex::Real             scale);
+
+   void getDiffusivity (amrex::MultiFab* diffusivity[AMREX_SPACEDIM],
                         const amrex::Real time,
                         const int state_comp,
                         const int dst_comp,
                         const int num_comp);

-   void getDiffusivity_Wbar (amrex::MultiFab* diffusivity[BL_SPACEDIM],
+   void getDiffusivity_Wbar (amrex::MultiFab* diffusivity[AMREX_SPACEDIM],
                              const amrex::Real time);

    amrex::DistributionMapping getFuncCountDM (const amrex::BoxArray& bxba, int ngrow);
@@ -705,6 +716,10 @@ public:
    static int                        iter_debug;
    static int                        mHtoTiterMAX;
    static amrex::Vector<amrex::Real> mTmpData;
+
+#ifdef AMREX_USE_EB
+   static std::string diffusion_redistribution_type;
+#endif

    static int ncells_chem;
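Note: the static member declared above is a runtime knob, read via ParmParse in
PeleLM::Initialize() (see the .cpp diff below). A minimal inputs-file sketch,
assuming the "ns" prefix named in the commit message; only this line is new with
the patch, and the value shown is one of the names the code tests for:

    # SRD scheme applied to the explicit diffusion fluxes
    # (patch default: FluxRedist; StateRedist/NewStateRedist also recognized)
    ns.diffusion_redistribution_type = StateRedist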
diff --git a/Source/PeleLM.cpp b/Source/PeleLM.cpp
index 777a00f3..a486e8ce 100644
--- a/Source/PeleLM.cpp
+++ b/Source/PeleLM.cpp
@@ -65,6 +65,7 @@
 #include
 #include
 #include
+#include
 #endif

 #include
@@ -167,8 +168,9 @@ Real PeleLM::relative_tol_chem = 1.0e-10;
 Real PeleLM::absolute_tol_chem = 1.0e-10;
 static bool plot_rhoydot;
 int PeleLM::nGrowAdvForcing=1;
-#ifdef ARMEX_USE_EB
+#ifdef AMREX_USE_EB
 int PeleLM::nGrowDivU=2;
+std::string PeleLM::diffusion_redistribution_type = "FluxRedist";
 #else
 int PeleLM::nGrowDivU=1;
 #endif
@@ -445,6 +447,9 @@ PeleLM::Initialize ()
   pp.query("num_divu_iters",num_divu_iters);
   pp.query("do_not_use_funccount",do_not_use_funccount);

+#ifdef AMREX_USE_EB
+  pp.query("diffusion_redistribution_type",diffusion_redistribution_type);
+#endif
   pp.query("schmidt",schmidt);
   pp.query("prandtl",prandtl);
@@ -1015,7 +1020,9 @@ PeleLM::define_data ()
 {
   const int nGrow = 0;
 #ifdef AMREX_USE_EB
-  const int nGrowEdges = (redistribution_type == "StateRedist") ? 3 : 2;
+  // Only the advection piece uses this, so set it based on the hydro redistribution_type
+  const int nGrowEdges = (redistribution_type == "StateRedist" ||
+                          redistribution_type == "NewStateRedist") ? 3 : 2;
 #else
   const int nGrowEdges = 0;
 #endif
@@ -2040,6 +2047,7 @@ PeleLM::initData ()
   make_rho_prev_time();
   make_rho_curr_time();

+
   //
   // Initialize other types (RhoRT, divu, ...)
   //
@@ -2065,6 +2073,15 @@ PeleLM::initData ()
   //
   initActiveControl();

+#ifdef AMREX_USE_EB
+  //
+  // Perform redistribution on the initial fields.
+  // This changes the input velocity fields.
+  //
+  InitialRedistribution();
+  set_body_state(S_new);
+#endif
+
   //
   // Initialize divU
   //
@@ -2086,14 +2103,6 @@ PeleLM::initData ()

   old_intersect_new = grids;

-#ifdef AMREX_USE_EB
-  //
-  // Perform redistribution on initial fields
-  // This changes the input velocity fields
-  //
-  InitialRedistribution();
-#endif
-
 #ifdef AMREX_PARTICLES
   NavierStokesBase::initParticleData();
 #endif
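Note: the pp.query() above accepts any string and defers failure to the first
redistribution call. A hypothetical guard (not part of this patch; names taken
from the code above) could fail fast at startup instead:

    #ifdef AMREX_USE_EB
       pp.query("diffusion_redistribution_type", diffusion_redistribution_type);
       // Sketch only: reject scheme names the code does not test for.
       if (diffusion_redistribution_type != "FluxRedist" &&
           diffusion_redistribution_type != "StateRedist" &&
           diffusion_redistribution_type != "NewStateRedist") {
          amrex::Abort("ns.diffusion_redistribution_type must be FluxRedist, StateRedist or NewStateRedist");
       }
    #endif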
@@ -3285,7 +3294,7 @@ PeleLM::differential_diffusion_update (MultiFab& Force,

   MultiFab Rh; // allocated memory not needed for this, since rho_flag=2 for Y solves

-  int nGrowDiff = 1;
+  int nGrowDiff = 1;   // Implicit fluxes are not redistributed, so no need for more ghost cells
   const Real prev_time = state[State_Type].prevTime();
   const Real new_time  = state[State_Type].curTime();
   const Real dt = new_time - prev_time;
@@ -3410,9 +3419,8 @@ PeleLM::differential_diffusion_update (MultiFab& Force,
   //
   // Modify/update new-time fluxes to ensure sum of species fluxes = 0, then compute Dnew[m] = -Div(flux[m])
-  // Use an FPI to get proper ghost cells post diffusion solve. TODO: More than one is needed for StateRedistribution.
+  // Use an FPI to get proper ghost cells post diffusion solve.
   //
   const BCRec& Tbc = AmrLevel::desc_lst[State_Type].getBCs()[Temp];
-  //int nGrowDiff = 1;
   FillPatchIterator fpi(*this, Force, nGrowDiff, new_time, State_Type, Density, NUM_SPECIES+1);
   MultiFab& scalars = fpi.get_mf();
@@ -4371,57 +4379,142 @@ PeleLM::scalar_advection_update (Real dt,
 }

 void
-PeleLM::flux_divergence (MultiFab&        fdiv,
-                         int              fdivComp,
-                         const MultiFab* const* f,
+PeleLM::flux_divergenceRD(const MultiFab        &a_state,
+                          int                    stateComp,
+                          MultiFab              &a_divergence,
+                          int                    divComp,
+                          const MultiFab* const* a_fluxes,
+                          int                    fluxComp,
+                          int                    nComp,
+                          BCRec const*           d_bc,
+                          Real                   scale,
+                          Real                   a_dt,
+                          int                    areSpeciesFluxes)
+{
+#ifdef AMREX_USE_EB
+   // Temporary divTemp to allow a FillBoundary
+   MultiFab divTemp(grids,dmap,nComp,a_state.nGrow(),MFInfo(),Factory());
+   divTemp.setVal(0.0);
+
+   // Call the face-centroid extensive-fluxes divergence with scaling
+   flux_divergence(divTemp, 0, a_fluxes, fluxComp, nComp, scale);
+
+   // FillBoundary
+   divTemp.FillBoundary(geom.periodicity());
+
+   // Redistribution
+   auto const& ebfact = dynamic_cast<EBFArrayBoxFactory const&>(a_state.Factory());
+
+#ifdef AMREX_USE_OMP
+#pragma omp parallel if (Gpu::notInLaunchRegion())
+#endif
+   for (MFIter mfi(a_divergence, TilingIfNotGPU()); mfi.isValid(); ++mfi)
+   {
+      auto const& bx = mfi.tilebox();
+      auto const& flagfab = ebfact.getMultiEBCellFlagFab()[mfi];
+      auto const& flag = flagfab.const_array();
+      auto const& div_arr  = a_divergence.array(mfi, divComp);
+      auto const& divT_arr = divTemp.array(mfi);
+      if (flagfab.getType(bx) != FabType::covered ) {
+         if (flagfab.getType(grow(bx,4)) != FabType::regular) {
+            //
+            // Redistribute
+            //
+            AMREX_D_TERM( auto apx = ebfact.getAreaFrac()[0]->const_array(mfi);,
+                          auto apy = ebfact.getAreaFrac()[1]->const_array(mfi);,
+                          auto apz = ebfact.getAreaFrac()[2]->const_array(mfi); );
+
+            AMREX_D_TERM( Array4<Real const> fcx = ebfact.getFaceCent()[0]->const_array(mfi);,
+                          Array4<Real const> fcy = ebfact.getFaceCent()[1]->const_array(mfi);,
+                          Array4<Real const> fcz = ebfact.getFaceCent()[2]->const_array(mfi););
+
+            auto const &ccc       = ebfact.getCentroid().const_array(mfi);
+            auto const &vfrac_arr = ebfact.getVolFrac().const_array(mfi);
+
+            // This is scratch space if calling StateRedistribute,
+            // but is used as the weights (here set to 1) if calling
+            // FluxRedistribute
+            Box gbx = bx;
+
+            if (diffusion_redistribution_type == "StateRedist" ||
+                diffusion_redistribution_type == "NewStateRedist")
+               gbx.grow(3);
+            else if (diffusion_redistribution_type == "FluxRedist")
+               gbx.grow(2);
+
+            FArrayBox tmpfab(gbx, nComp);
+            Elixir eli = tmpfab.elixir();
+            Array4<Real> scratch = tmpfab.array(0);
+            if (diffusion_redistribution_type == "FluxRedist")
+            {
+               amrex::ParallelFor(Box(scratch),
+               [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
+               { scratch(i,j,k) = 1.;});
+            }
+
+            if (areSpeciesFluxes) {
+               // Placeholder for an eventual species-specific treatment of the redistribution
+               Redistribution::Apply( bx, nComp, div_arr, divT_arr,
+                                      a_state.const_array(mfi, stateComp), scratch, flag,
+                                      AMREX_D_DECL(apx,apy,apz), vfrac_arr,
+                                      AMREX_D_DECL(fcx,fcy,fcz), ccc, d_bc,
+                                      geom, a_dt, diffusion_redistribution_type );
+            } else {
+               Redistribution::Apply( bx, nComp, div_arr, divT_arr,
+                                      a_state.const_array(mfi, stateComp), scratch, flag,
+                                      AMREX_D_DECL(apx,apy,apz), vfrac_arr,
+                                      AMREX_D_DECL(fcx,fcy,fcz), ccc, d_bc,
+                                      geom, a_dt, diffusion_redistribution_type );
+            }
+         } else {
+            // Move from divTemp to the outgoing MF
+            ParallelFor(bx, nComp, [div_arr, divT_arr]
+            AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
+            { div_arr( i, j, k, n ) = divT_arr(i,j,k,n); });
+         }
+      }
+   }
+
+   EB_set_covered(a_divergence,0.);
+#else
+
+   // Call the face-centroid extensive-fluxes divergence with scaling
+   flux_divergence(a_divergence, divComp, a_fluxes, fluxComp, nComp, scale);
+
+#endif
+}
+
+void
+PeleLM::flux_divergence (MultiFab               &a_divergence,
+                         int                    divComp,
+                         const MultiFab* const* a_fluxes,
                          int              fluxComp,
                          int              nComp,
-                         Real             scale) const
+                         Real             scale)
 {
    BL_PROFILE("PLM::flux_divergence()");
-   AMREX_ASSERT(fdiv.nComp() >= fdivComp+nComp);
-
-//////////////////////////////////////////////////////
-// Version using AMReX computeDivergence function
-// Will be updated later once more options are available in AMReX
-//////////////////////////////////////////////////////
-//   // Need aliases for the fluxes and divergence
-//   Array<MultiFab*,AMREX_SPACEDIM> flux_alias;
-//   AMREX_D_TERM(flux_alias[0] = new MultiFab(*f[0], amrex::make_alias, fluxComp, nComp);,
-//                flux_alias[1] = new MultiFab(*f[1], amrex::make_alias, fluxComp, nComp);,
-//                flux_alias[2] = new MultiFab(*f[2], amrex::make_alias, fluxComp, nComp););
-//
-//   MultiFab div_alias(fdiv, amrex::make_alias, fdivComp, nComp);
-//
-//#ifdef AMREX_USE_EB
-//   EB_computeDivergence(div_alias, flux_alias, geom, true);
-//#else
-//   computeDivergence(div_alias, flux_alias, geom);
-//#endif
-//   div_alias.mult(scale,0,nComp,0);
-
+   AMREX_ASSERT(a_divergence.nComp() >= divComp+nComp);

 //////////////////////////////////////////////////////
 // PeleLM divergence function
+// We assume that our fluxes are already face-centroid and extensive

 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (Gpu::notInLaunchRegion())
 #endif
-   for (MFIter mfi(fdiv,TilingIfNotGPU()); mfi.isValid(); ++mfi)
+   for (MFIter mfi(a_divergence,TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
       const Box& bx = mfi.tilebox();
-      AMREX_D_TERM(auto const& fluxX = f[0]->array(mfi,fluxComp);,
-                   auto const& fluxY = f[1]->array(mfi,fluxComp);,
-                   auto const& fluxZ = f[2]->array(mfi,fluxComp););
-      auto const& divergence = fdiv.array(mfi,fdivComp);
+      AMREX_D_TERM(auto const& fluxX = a_fluxes[0]->array(mfi,fluxComp);,
+                   auto const& fluxY = a_fluxes[1]->array(mfi,fluxComp);,
+                   auto const& fluxZ = a_fluxes[2]->array(mfi,fluxComp););
+      auto const& divergence = a_divergence.array(mfi,divComp);
       auto const& vol = volume.const_array(mfi);

 #ifdef AMREX_USE_EB
       auto const& ebfactory = dynamic_cast<EBFArrayBoxFactory const&>(Factory());
       auto const& flagfab = ebfactory.getMultiEBCellFlagFab()[mfi];
       auto const& flag    = flagfab.const_array();
-#endif
-#ifdef AMREX_USE_EB
       if (flagfab.getType(bx) == FabType::covered) {              // Covered boxes
          amrex::ParallelFor(bx, nComp, [divergence]
          AMREX_GPU_DEVICE( int i, int j, int k, int n) noexcept
@@ -4464,16 +4557,6 @@ PeleLM::flux_divergence (MultiFab& fdiv,
       }
 #endif
    }
-
-#ifdef AMREX_USE_EB
-   {
-      MultiFab fdiv_SrcGhostCell(grids,dmap,nComp,fdiv.nGrow()+2,MFInfo(),Factory());
-      fdiv_SrcGhostCell.setVal(0.);
-      MultiFab::Copy(fdiv_SrcGhostCell, fdiv, fdivComp, 0, nComp, fdiv.nGrow());
-      amrex::single_level_weighted_redistribute( {fdiv_SrcGhostCell}, {fdiv}, *volfrac, fdivComp, nComp, {geom} );
-   }
-   EB_set_covered(fdiv,0.);
-#endif
 }
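Note: a minimal caller sketch for the new entry point, mirroring the call sites
added in compute_differential_diffusion_terms() below; it assumes 'flux' holds
face-centroid, extensive fluxes and that m_bcrec_scalars_d is device-resident:

    // Sketch only: D[0:NUM_SPECIES-1] = -Div(Fk), redistributed, flagged as species fluxes.
    BCRec const* d_bcrec_spec = &(m_bcrec_scalars_d.dataPtr())[first_spec-Density];
    flux_divergenceRD(scalars, first_spec-Density,  // state handed to Redistribution::Apply
                      D, 0,                         // output divergence and first component
                      flux, 0, NUM_SPECIES,         // extensive face-centroid fluxes
                      d_bcrec_spec,                 // device-side BCRecs for these components
                      -1.0, dt, 1);                 // scale, dt, areSpeciesFluxes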

 void
@@ -4503,8 +4586,15 @@ PeleLM::compute_differential_diffusion_terms (MultiFab& D,

   // Get scalars with proper ghost cells. Might need more than what's available in the class
   // data, so use an FPI.
-  // TODO adjust for state redistribution
   int nGrowDiff = 1;
+#ifdef AMREX_USE_EB
+  if ( diffusion_redistribution_type == "StateRedist" ||
+       diffusion_redistribution_type == "NewStateRedist" ) {
+     nGrowDiff = 3;
+  } else {
+     nGrowDiff = 2;
+  }
+#endif
   int sComp = std::min((int)Density, std::min((int)first_spec,(int)Temp) );
   int eComp = std::max((int)Density, std::max((int)last_spec,(int)Temp) );
@@ -4515,8 +4605,8 @@ PeleLM::compute_differential_diffusion_terms (MultiFab& D,
   std::unique_ptr<MultiFab> scalarsCrse;
   if (level > 0) {
     auto& crselev = getLevel(level-1);
-    scalarsCrse.reset(new MultiFab(crselev.boxArray(), crselev.DistributionMap(), nComp, nGrowDiff));
-    FillPatch(crselev, *scalarsCrse, nGrowDiff, time, State_Type, Density, nComp, 0);
+    scalarsCrse.reset(new MultiFab(crselev.boxArray(), crselev.DistributionMap(), nComp, 1));
+    FillPatch(crselev, *scalarsCrse, 1, time, State_Type, Density, nComp, 0);
   }

@@ -4534,12 +4624,15 @@ PeleLM::compute_differential_diffusion_terms (MultiFab& D,
   // Compute "D":
   // D[0:NUM_SPECIES-1] = -Div( Fk )
   // D[ NUM_SPECIES+1 ] = Div( lambda Grad(T) )
-  flux_divergence(D,0,flux,0,NUM_SPECIES,-1.0);
-  flux_divergence(D,NUM_SPECIES+1,flux,NUM_SPECIES+2,1,-1.0);
+  BCRec const* d_bcrec_spec = &(m_bcrec_scalars_d.dataPtr())[first_spec-Density];
+  flux_divergenceRD(scalars,first_spec-Density,D,0,flux,0,NUM_SPECIES,d_bcrec_spec,-1.0,dt,1);
+  BCRec const* d_bcrec_temp = &(m_bcrec_scalars_d.dataPtr())[Temp-Density];
+  flux_divergenceRD(scalars,Temp-Density,D,NUM_SPECIES+1,flux,NUM_SPECIES+2,1,d_bcrec_temp,-1.0,dt);

   // Compute "DD":
   // DD = -Sum{ Div( hk . Fk ) } a.k.a. the "diffdiff" terms
-  flux_divergence(DD,0,flux,NUM_SPECIES+1,1,-1.0);
+  BCRec const* d_bcrec_rhoh = &(m_bcrec_scalars_d.dataPtr())[RhoH-Density];
+  flux_divergenceRD(scalars,RhoH-Density,DD,0,flux,NUM_SPECIES+1,1,d_bcrec_rhoh,-1.0,dt);

   if (D.nGrow() > 0 && DD.nGrow() > 0)
   {
@@ -4983,16 +5076,6 @@ PeleLM::advance (Real time,
       chi_increment.setVal(0.0, chi_increment.nGrow());
       calc_dpdt(new_time, dt, chi_increment);

-#ifdef AMREX_USE_EB
-      {
-        MultiFab chi_tmp(grids,dmap,1,chi.nGrow()+2,MFInfo(),Factory());
-        chi_tmp.setVal(0.);
-        MultiFab::Copy(chi_tmp,chi_increment,0,0,1,chi.nGrow());
-        amrex::single_level_weighted_redistribute( {chi_tmp}, {chi_increment}, *volfrac, 0, 1, {geom} );
-        EB_set_covered(chi_increment,0.0);
-      }
-#endif
-
       // Add chi_increment to chi and add chi to time-centered mac_divu
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (Gpu::notInLaunchRegion())
 #endif
@@ -5106,9 +5189,18 @@ PeleLM::advance (Real time,
          // Compute Wbar fluxes from state np1k (lagged)
          // Compute DWbar term: - \nabla \cdot \Gamma_{\overline{W}_m}
          // They will be added to the Fnp1kp1 directly in diff_diff_update()
-         MultiFab scalars_alias(S_new, amrex::make_alias, Density, NUM_SPECIES+1);
-         compute_Wbar_fluxes(scalars_alias, new_time, 0, 1.0);
-         flux_divergence(DWbar,0,SpecDiffusionFluxWbar,0,NUM_SPECIES,-1);
+         DWbar.setVal(0.0);
+#ifdef AMREX_USE_EB
+         int nGrowDiff = (diffusion_redistribution_type == "StateRedist" ||
+                          diffusion_redistribution_type == "NewStateRedist") ? 3 : 2;
+         FillPatchIterator fpi(*this, DWbar, nGrowDiff, new_time, State_Type, Density, NUM_SPECIES+1);
+         MultiFab& scalars = fpi.get_mf();
+#else
+         MultiFab scalars(S_new, amrex::make_alias, Density, NUM_SPECIES+1);
+#endif
+         compute_Wbar_fluxes(scalars, new_time, 0, 1.0);
+         BCRec const* d_bcrec_spec = &(m_bcrec_scalars_d.dataPtr())[first_spec-Density];
+         flux_divergenceRD(scalars, 1, DWbar,0,SpecDiffusionFluxWbar,0,NUM_SPECIES,d_bcrec_spec,-1.0,dt);
       }

 #ifdef AMREX_USE_OMP
@@ -6530,7 +6622,7 @@ PeleLM::mac_sync ()
   //    compute U^{ADV,corr} in mac_sync_compute
   //
   //TODO: offset/subtract_avg is not used ... ?
-//   bool subtract_avg = (closed_chamber && level == 0);
+  // bool subtract_avg = (closed_chamber && level == 0);
   Real offset = 0.0;

   BL_PROFILE_VAR("PeleLM::mac_sync::ucorr", PLM_UCORR);
@@ -6652,11 +6744,37 @@ PeleLM::mac_sync ()
      //
      // Delete Ucorr; we're done with it.
      //
-     for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
+     for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        delete Ucorr[idim];
+     }

      Ssync.mult(dt); // Turn this into an increment over dt

+#ifdef AMREX_USE_EB
+     // If we use StateRedistribution, the Ssync of the rhoYs doesn't sum up to
+     // that of rho at this point, so make sure that it does.
+     if (redistribution_type == "StateRedist" ||
+         redistribution_type == "NewStateRedist" ) {
+#ifdef AMREX_USE_OMP
+#pragma omp parallel if (Gpu::notInLaunchRegion())
+#endif
+        for (MFIter mfi(Ssync, TilingIfNotGPU()); mfi.isValid(); ++mfi)
+        {
+           const Box& bx = mfi.tilebox();
+           auto const& rhosync  = Ssync.array(mfi,Density-AMREX_SPACEDIM);
+           auto const& rhoYsync = Ssync.array(mfi,first_spec-AMREX_SPACEDIM);
+           amrex::ParallelFor(bx, [rhosync, rhoYsync]
+           AMREX_GPU_DEVICE (int i, int j, int k) noexcept
+           {
+              rhosync(i,j,k) = 0.0;
+              for (int n = 0; n < NUM_SPECIES; n++) {
+                 rhosync(i,j,k) += rhoYsync(i,j,k,n);
+              }
+           });
+        }
+     }
+#endif
@@ ... @@ PeleLM::mac_sync ()
         // ... -> no redistribution on those
         DdWbar.define(grids,dmap,NUM_SPECIES,nGrowAdvForcing,MFInfo(),Factory());
         MultiFab* const * fluxWbar = SpecDiffusionFluxWbar;
+        showMF("DBGSync",*fluxWbar[0],"fluxXWbarInSync_",level,mac_sync_iter,parent->levelSteps(level));
+        showMF("DBGSync",*fluxWbar[1],"fluxYWbarInSync_",level,mac_sync_iter,parent->levelSteps(level));
         flux_divergence(DdWbar,0,fluxWbar,0,NUM_SPECIES,-1);
         DdWbar.mult(dt);
      }
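Note: the rho/rhoY consistency enforced in mac_sync() above can be spot-checked
with debug code along these lines (a sketch only, not part of the patch):

    // Sketch only: max |rho_sync - sum_k rhoY_sync| should be ~0 after the fix.
    MultiFab err(grids, dmap, 1, 0, MFInfo(), Factory());
    for (MFIter mfi(err, TilingIfNotGPU()); mfi.isValid(); ++mfi) {
       const Box& bx = mfi.tilebox();
       auto const& e        = err.array(mfi);
       auto const& rhosync  = Ssync.const_array(mfi, Density-AMREX_SPACEDIM);
       auto const& rhoYsync = Ssync.const_array(mfi, first_spec-AMREX_SPACEDIM);
       amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
       {
          Real sum = 0.0;
          for (int n = 0; n < NUM_SPECIES; ++n) { sum += rhoYsync(i,j,k,n); }
          e(i,j,k) = amrex::Math::abs(rhosync(i,j,k) - sum);
       });
    }
    amrex::Print() << "max |rho_sync - sum(rhoY_sync)| = " << err.norm0() << "\n";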
@@ -6772,6 +6892,7 @@ PeleLM::mac_sync ()

      // Note: DT (comp 1) and DD (comp 0) are in the same multifab
      MultiFab DiffTerms_pre(grids,dmap,2,0,MFInfo(),Factory());
+     // No redistribution for those.
      flux_divergence(DiffTerms_pre,0,GammaKp1,NUM_SPECIES+1,2,-1);

      // Diffuse species sync to get dGamma^{sync}, then form Gamma^{postsync} = Gamma^{presync} + dGamma^{sync}
@@ -6788,9 +6909,26 @@ PeleLM::mac_sync ()
      // For all species increment sync by (sync_for_rho)*Y_presync.
      // Before this, Ssync holds rho^{n+1} (delta Y)^sync
      // DeltaYsync holds Y^{n+1,p} * (delta rho)^sync
-     //
-     MultiFab::Add(Ssync,DeltaYsync,0,first_spec-AMREX_SPACEDIM,NUM_SPECIES,0);
-     MultiFab::Add(S_new,Ssync,first_spec-AMREX_SPACEDIM,first_spec,NUM_SPECIES,0);
+     // Remove DdWbar if required, as it has been added both to the RHS of the
+     // diffusion sync solve and to the fluxes afterward.
+#ifdef AMREX_USE_OMP
+#pragma omp parallel if (Gpu::notInLaunchRegion())
+#endif
+     for (MFIter mfi(S_new, TilingIfNotGPU()); mfi.isValid(); ++mfi)
+     {
+        const Box& bx = mfi.tilebox();
+        auto const& rhoYnew   = S_new.array(mfi,first_spec);
+        auto const& rhoYssync = Ssync.array(mfi,first_spec-AMREX_SPACEDIM);
+        auto const& drhoYsync = DeltaYsync.const_array(mfi,0);
+        auto const& dWbarsync = (use_wbar) ? DdWbar.const_array(mfi) : DeltaYsync.const_array(mfi);
+        amrex::ParallelFor(bx, NUM_SPECIES, [rhoYnew,rhoYssync,drhoYsync,
+                                             dWbarsync,use_wbar=use_wbar]
+        AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
+        {
+           rhoYssync(i,j,k,n) += drhoYsync(i,j,k,n);
+           if (use_wbar) rhoYssync(i,j,k,n) -= dWbarsync(i,j,k,n);
+           rhoYnew(i,j,k,n) += rhoYssync(i,j,k,n);
+        });
+     }

      if (use_wbar) {
         // compute (overwrite) beta grad Wbar terms using the post-sync state
@@ -6920,6 +7058,7 @@ PeleLM::mac_sync ()
         MultiFab::Add(get_new_data(State_Type),Told,0,Temp,1,0);
         compute_enthalpy_fluxes(SpecDiffusionFluxnp1,betanp1,new_time);  // Compute F[N+1], F[N+2]

+        // No redistribution for implicit fluxes
         flux_divergence(DiffTerms_post,0,SpecDiffusionFluxnp1,NUM_SPECIES+1,2,-1);

 #ifdef AMREX_USE_OMP
@@ -6986,6 +7125,7 @@ PeleLM::mac_sync ()
         Abort("FIXME: Properly deal with do_diffuse_sync=0");
      }

+     BL_PROFILE_VAR_START(PLM_SSYNC);
      // Update coarse post-sync temp from rhoH and rhoY
      RhoH_to_Temp(S_new);
@@ -7080,7 +7220,7 @@ PeleLM::mac_sync ()
         MultiFab& S_crse_loc = crse_level.get_new_data(State_Type);
         MultiFab& S_fine_loc = fine_level.get_new_data(State_Type);
-        crse_level.average_down(S_fine_loc, S_crse_loc, RhoRT, 1);
+        crse_level.average_down(S_fine_loc, S_crse_loc, RhoRT, 1);
      }
      PeleLM& fine_level = getLevel(level+1);
      showMF("DBGSync",fine_level.get_new_data(State_Type),"sdc_SnewFine_EndSyncIter",level+1,mac_sync_iter,parent->levelSteps(level));
@@ -7571,6 +7711,14 @@ PeleLM::differential_spec_diffuse_sync (Real dt,
   const Vector<BCRec>& theBCs = AmrLevel::desc_lst[State_Type].getBCs();
   const BCRec& bc = theBCs[first_spec];
   int nGrowDiff = 1;
+#ifdef AMREX_USE_EB
+  if ( diffusion_redistribution_type == "StateRedist" ||
+       diffusion_redistribution_type == "NewStateRedist" ) {
+     nGrowDiff = 3;
+  } else {
+     nGrowDiff = 2;
+  }
+#endif
   FillPatchIterator fpi(*this, Rhs, nGrowDiff, tnp1, State_Type, Density, NUM_SPECIES+1);
   MultiFab& scalars = fpi.get_mf();
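Note: the nGrowDiff selection above now appears in three places
(compute_differential_diffusion_terms, the DWbar block in advance, and
differential_spec_diffuse_sync). A possible follow-up, sketched here with an
assumed helper name not present in this patch, would centralize it:

    // Sketch only: ghost cells needed by the explicit diffusion fluxes
    // before redistribution (3 for state redistribution, 2 otherwise
    // with EB, 1 when EB is off).
    static int growForDiffusionRedist ()
    {
    #ifdef AMREX_USE_EB
       return (PeleLM::diffusion_redistribution_type == "StateRedist" ||
               PeleLM::diffusion_redistribution_type == "NewStateRedist") ? 3 : 2;
    #else
       return 1;   // no redistribution without EB
    #endif
    }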