use span to allocate mesh data by default #6123
First changed file:
@@ -152,6 +152,13 @@ public:
    /** Return OpenPMD File type ("bp5", "bp4", "h5" or "json") */
    std::string OpenPMDFileType () { return m_OpenPMDFileType; }

    /** Flush a few BTD buffers in a snapshot
     *
     * @param[in] isBTD whether the current diagnostic is BTD
     *
     * This function is controlled by the parameter
     * FlushFormatOpenPMD::m_NumAggBTDBufferToFlush (defaults to 5),
     * which can be adjusted in the input file via diag_name.buffer_flush_limit_btd.
     */
    void ForceFlush (bool isBTD);

private:
    void Init (openPMD::Access access, bool isBTD);
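The comment above says the flush limit defaults to 5 and can be overridden per diagnostic via diag_name.buffer_flush_limit_btd. Below is a minimal sketch of how such a parameter could be read with amrex::ParmParse; the actual parsing site is not part of this diff, so the free function and its placement are purely illustrative:

#include <AMReX_ParmParse.H>
#include <string>

// Illustrative only: the member name and the default of 5 are taken from the doc
// comment above; where WarpX actually parses this input is not shown in this PR.
int m_NumAggBTDBufferToFlush = 5;

void QueryBufferFlushLimit (std::string const& diag_name)
{
    // e.g. in the input file:  diag1.buffer_flush_limit_btd = 10
    amrex::ParmParse pp(diag_name);
    pp.query("buffer_flush_limit_btd", m_NumAggBTDBufferToFlush);
}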
@@ -181,7 +188,11 @@ private:
     * @param[in] isBTD if the current diagnostic is BTD
     *
     * if isBTD=false, apply the default flush behaviour
     * if isBTD=true, advise to use ADIOS Put() instead of PDW for better performance;
     * in ADIOS, the action will be PerformDataWrite
     * if isBTD=true, in ADIOS, the action will be PerformPut,
     * because no action is taken for the span tasks.
     * This way we can aggregate buffers before
     * calling ForceFlush(isBTD) to write out.
     *
     * iteration.seriesFlush() is used instead of series.flush()
     * because the latter flushes only if data is dirty.

Review thread on the PerformPut comment:
  Reviewer: Is this generally true? You only add spans for fields right now; what if particles are involved?
  Author: You are right, BTD particles will be calling PerformPut. Right now particles are not the I/O performance bottleneck for BTD; it will matter once there is an enormous number of particles. I will definitely revisit this issue with another pull request.
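The comment above motivates flushing through the Iteration handle rather than through the Series. A small standalone sketch of that call in openPMD-api, assuming an ADIOS2-enabled build (file name and dataset are illustrative):

#include <openPMD/openPMD.hpp>

int main ()
{
    openPMD::Series series("diags/example_%T.bp", openPMD::Access::CREATE);
    openPMD::Iteration it = series.iterations[100];

    auto rho = it.meshes["rho"][openPMD::RecordComponent::SCALAR];
    rho.resetDataset({openPMD::Datatype::DOUBLE, {8}});
    rho.makeConstant(0.0);

    // Per the comment above, series.flush() only acts on dirty data, so the
    // iteration-level call is used; it also accepts a per-call backend hint.
    it.seriesFlush();

    it.close();
    return 0;
}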
Second changed file:
@@ -409,11 +409,14 @@ WarpXOpenPMDPlot::~WarpXOpenPMDPlot ()

void WarpXOpenPMDPlot::flushCurrent (bool isBTD) const
{
    WARPX_PROFILE("WarpXOpenPMDPlot::flushCurrent");

    openPMD::Iteration currIteration = GetIteration(m_CurrentStep, isBTD);

    currIteration.seriesFlush();
    openPMD::Iteration currIteration = GetIteration(m_CurrentStep, isBTD);
    if (isBTD) {
        WARPX_PROFILE("WarpXOpenPMDPlot::flushCurrent()::BTD");
        currIteration.seriesFlush("adios2.engine.preferred_flush_target = \"buffer\"");
    } else {
        WARPX_PROFILE("WarpXOpenPMDPlot::flushCurrent()");
        currIteration.seriesFlush();
    }
}

std::string
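flushCurrent() above sends BTD flushes to the ADIOS2 engine's internal buffer via the preferred_flush_target hint. Here is a sketch of the resulting aggregate-then-force pattern, assuming openPMD-api with an ADIOS2 BP engine (the helper and its arguments are illustrative, not WarpX API):

#include <openPMD/openPMD.hpp>

#include <cstdint>

// Illustrative only: buffer several BTD flushes in the ADIOS2 engine, then push the
// aggregated data out, mirroring flushCurrent() plus the "disk" branch of ForceFlush().
void aggregateBTDBuffers (openPMD::Series& series, std::uint64_t step, int numBuffers)
{
    openPMD::Iteration it = series.iterations[step];

    for (int i = 0; i < numBuffers; ++i) {
        // ... storeChunk() calls for the i-th back-transformed buffer go here ...

        // keep the data in the engine's buffer instead of writing to disk
        it.seriesFlush(R"(adios2.engine.preferred_flush_target = "buffer")");
    }

    // force the aggregated buffers out
    it.seriesFlush(R"(adios2.engine.preferred_flush_target = "disk")");
}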
@@ -463,6 +466,7 @@ void WarpXOpenPMDPlot::SetStep (int ts, const std::string& dirPrefix, int file_m

void WarpXOpenPMDPlot::CloseStep (bool isBTD, bool isLastBTDFlush)
{
    WARPX_PROFILE("WarpXOpenPMDPlot::CloseStep()");
    // default close is true
    bool callClose = true;
    // close BTD file only when isLastBTDFlush is true
@@ -666,19 +670,32 @@ for (const auto & particle_diag : particle_diags) {
                      pc->getCharge(), pc->getMass(),
                      isBTD, isLastBTDFlush);
    }
}

void
WarpXOpenPMDPlot::ForceFlush (bool isBTD)
{
    if (!isBTD)
        return;

    auto hasOption = m_OpenPMDoptions.find("FlattenSteps");
    const bool flattenSteps = isBTD && (m_Series->backend() == "ADIOS2") && (hasOption != std::string::npos);
    const bool result = (m_Series->backend() == "ADIOS2") && (hasOption != std::string::npos);

    if (flattenSteps)
    if (result)
    {
        // force a new step so that data from each BTD batch held in
        // preferred_flush_target="buffer" can be flushed out
        openPMD::Iteration currIteration = GetIteration(m_CurrentStep, isBTD);
        currIteration.seriesFlush(R"(adios2.engine.preferred_flush_target = "new_step")");
        WARPX_PROFILE("WarpXOpenPMDPlot::FlattenSteps()");

        openPMD::Iteration currIteration = GetIteration(m_CurrentStep, isBTD);
        currIteration.seriesFlush(R"(adios2.engine.preferred_flush_target = "new_step")");
    }
    else
    {
        WARPX_PROFILE("WarpXOpenPMDPlot::ForceFlush()::Disk()");
        openPMD::Iteration currIteration = GetIteration(m_CurrentStep, isBTD);
        currIteration.seriesFlush(R"(adios2.engine.preferred_flush_target = "disk")");
    }
}


void
WarpXOpenPMDPlot::DumpToFile (ParticleContainer* pc,
                              const std::string& name,
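ForceFlush() above picks its flush target by checking the ADIOS2 backend and searching the openPMD options string for the FlattenSteps keyword. The small helper below only restates that branch logic for clarity; it is not code from the PR:

#include <string>

std::string pickFlushTarget (std::string const& backend, std::string const& openPMDoptions)
{
    // mirrors: m_Series->backend() == "ADIOS2" && m_OpenPMDoptions.find("FlattenSteps") != npos
    const bool flattenSteps =
        (backend == "ADIOS2") &&
        (openPMDoptions.find("FlattenSteps") != std::string::npos);

    // FlattenSteps: start a new ADIOS2 step so the buffered BTD batches can drain;
    // otherwise push the aggregated buffers straight to disk.
    return flattenSteps
        ? R"(adios2.engine.preferred_flush_target = "new_step")"
        : R"(adios2.engine.preferred_flush_target = "disk")";
}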
@@ -1509,12 +1526,22 @@ WarpXOpenPMDPlot::WriteOpenPMDFieldsAll ( //const std::string& filename,
            // GPU pointers to the I/O library
#ifdef AMREX_USE_GPU
            if (fab.arena()->isManaged() || fab.arena()->isDevice()) {
                amrex::BaseFab<amrex::Real> foo(local_box, 1, amrex::The_Pinned_Arena());
                std::shared_ptr<amrex::Real> data_pinned(foo.release());
                amrex::Gpu::dtoh_memcpy_async(data_pinned.get(), fab.dataPtr(icomp), local_box.numPts()*sizeof(amrex::Real));
                // intentionally delayed until before we .flush(): amrex::Gpu::streamSynchronize();
                mesh_comp.storeChunk(data_pinned, chunk_offset, chunk_size);
            } else
                {
                    WARPX_PROFILE("WarpXOpenPMDPlot::WriteOpenPMDFields::D2H_Span()");
                    auto dynamicMemoryView = mesh_comp.storeChunk<amrex::Real>(
                        chunk_offset, chunk_size,
                        [&local_box](size_t size) {
                            (void) size;
                            amrex::Print() << " span failed \n";
                            amrex::BaseFab<amrex::Real> foo(local_box, 1, amrex::The_Pinned_Arena());
                            std::shared_ptr<amrex::Real> data_pinned(foo.release());
                            return data_pinned;
                        });

                    auto span = dynamicMemoryView.currentBuffer();
                    amrex::Gpu::dtoh_memcpy_async(span.data(), fab.dataPtr(icomp), local_box.numPts()*sizeof(amrex::Real));
                }
            } else
#endif
            {
                amrex::Real const *local_data = fab.dataPtr(icomp);

Review thread on the storeChunk<amrex::Real>( line:
  Reviewer: Oh no, when we have an unequal number of blocks over all MPI ranks, this will not work because [...]
  Reviewer: To double check: is [...]
  Author: Only one rank is writing when using BTD. Other ranks have no data.
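The span path above asks openPMD-api for a backend-provided buffer and fills it in place, with the pinned-memory allocation kept only as a fallback. A self-contained sketch of that API, assuming an ADIOS2-enabled openPMD-api build (file name, dataset shape, and values are illustrative):

#include <openPMD/openPMD.hpp>

#include <cstddef>
#include <memory>
#include <numeric>

int main ()
{
    openPMD::Series series("span_example_%T.bp", openPMD::Access::CREATE);
    openPMD::Iteration it = series.iterations[0];

    auto rho = it.meshes["rho"][openPMD::RecordComponent::SCALAR];
    constexpr std::size_t N = 8;
    rho.resetDataset({openPMD::Datatype::DOUBLE, {N}});

    // storeChunk<T>(offset, extent, createBuffer): if the backend (e.g. ADIOS2 BP5)
    // can expose an internal buffer, createBuffer is never invoked; otherwise the
    // lambda supplies a fallback allocation, like the pinned BaseFab in the code above.
    auto view = rho.storeChunk<double>(
        {0}, {N},
        [](std::size_t size) {
            return std::shared_ptr<double>(new double[size], std::default_delete<double[]>());
        });

    // Fill the backend-provided (or fallback) buffer in place; no extra host copy.
    auto span = view.currentBuffer();
    std::iota(span.data(), span.data() + span.size(), 0.0);

    it.close();
    return 0;
}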