Commit d783b19

Revert "Merge pull request #161 from JaredCrean2/jc_generalize_accumulate2"
This reverts commit 70d9f1c, reversing changes made to 06f2b77.
1 parent 938d7f9 · commit d783b19

8 files changed: +16, -284 lines


Diff for: apf/apf.cc (+2, -7)

@@ -424,16 +424,11 @@ void synchronize(Field* f, Sharing* shr)
   synchronizeFieldData<double>(f->getData(), shr);
 }
 
-void accumulate(Field* f, Sharing* shr, bool delete_shr)
+void accumulate(Field* f, Sharing* shr)
 {
-  sharedReduction(f, shr, delete_shr, ReductionSum<double>() );
+  accumulateFieldData(f->getData(), shr);
 }
 
-void sharedReduction(Field* f, Sharing* shr, bool delete_shr,
-    const ReductionOp<double>& reduce_op )
-{
-  reduceFieldData(f->getData(), shr, delete_shr, reduce_op);
-}
 void fail(const char* why)
 {
   fprintf(stderr,"APF FAILED: %s\n",why);
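For orientation, the restored call chain is apf::accumulate -> accumulateFieldData on the field's data. A minimal usage sketch, not taken from this commit; the mesh pointer, field name, and field creation are illustrative assumptions:

    #include <apf.h>
    #include <apfMesh.h>

    /* hypothetical helper: sum per-part contributions of a nodal field */
    void sumSharedContributions(apf::Mesh* m)
    {
      /* illustrative field; any double-valued apf::Field works */
      apf::Field* p = apf::createLagrangeField(m, "p", apf::SCALAR, 1);
      /* ... each part writes its local contributions into p ... */
      /* sum the values of all copies of each shared entity and leave
         every copy holding the sum (the default apf::Sharing is used) */
      apf::accumulate(p);
    }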

Diff for: apf/apf.h (+1, -54)

@@ -37,50 +37,6 @@ class VectorElement;
 typedef VectorElement MeshElement;
 class FieldShape;
 struct Sharing;
-template <class T> class ReductionOp;
-template <class T> class ReductionSum;
-
-/** \brief Base class for applying operations to make a Field consistent
- * in parallel
- * \details This function gets applied pairwise to the Field values
- * from every partition, resulting in a single unique value. No guarantees
- * are made about the order in which this function is applied to the
- * values.
- */
-template <class T>
-class ReductionOp
-{
-  public:
-    /* \brief apply operation, returning a single value */
-    virtual T apply(T val1, T val2) const = 0;
-};
-
-template <class T>
-class ReductionSum : public ReductionOp<T>
-{
-  T apply(T val1, T val2) const { return val1 + val2; };
-};
-
-template <class T>
-class ReductionMin : public ReductionOp<T>
-{
-  T apply(T val1, T val2) const { return ( (val1 < val2) ? val1 : val2 ); };
-};
-
-template <class T>
-class ReductionMax : public ReductionOp<T>
-{
-  T apply(T val1, T val2) const { return ( (val1 < val2) ? val2 : val1 ); };
-};
-
-
-/* instantiate (is this necessary with the global consts below?) */
-template class ReductionSum<double>;
-template class ReductionMin<double>;
-template class ReductionMax<double>;
-
-
-
 
 /** \brief Destroys an apf::Mesh.
  *
@@ -669,16 +625,7 @@ void synchronize(Field* f, Sharing* shr = 0);
     all copies of an entity and assign the sum as the
     value for all copies.
   */
-void accumulate(Field* f, Sharing* shr = 0, bool delete_shr = false);
-
-/** \brief Apply a reduction operator along a partition boundary
-  \details Using the copies described by an apf::Sharing object, applies
-  the specified operation pairwise to the values of the field on each
-  partition. No guarantee is made about the order of the pairwise
-  application.
-  */
-void sharedReduction(Field* f, Sharing* shr = 0, bool delete_shr=false,
-    const ReductionOp<double>& reduce_op = ReductionSum<double>());
+void accumulate(Field* f, Sharing* shr = 0);
 
 /** \brief Declare failure of code inside APF.
   \details This function prints the string as an APF
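For reference, the interface removed by this header change let callers pick the pairwise reduction operation. A sketch contrasting the two forms, with the pre-revert call left commented out since it no longer compiles after this commit; the field argument is assumed to exist:

    #include <apf.h>

    /* sketch only: f is an existing double-valued apf::Field */
    void sumField(apf::Field* f)
    {
      /* pre-revert form (removed by this commit): apply a chosen ReductionOp,
         e.g. the minimum, pairwise across all copies of each shared entity */
      /* apf::sharedReduction(f, 0, false, apf::ReductionMin<double>()); */

      /* post-revert form: pairwise sum only */
      apf::accumulate(f);
    }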

Diff for: apf/apfFieldData.cc (+11, -14)

@@ -81,7 +81,7 @@ template void synchronizeFieldData<int>(FieldDataOf<int>*, Sharing*, bool);
 template void synchronizeFieldData<double>(FieldDataOf<double>*, Sharing*, bool);
 template void synchronizeFieldData<long>(FieldDataOf<long>*, Sharing*, bool);
 
-void reduceFieldData(FieldDataOf<double>* data, Sharing* shr, bool delete_shr, const ReductionOp<double>& reduce_op /* =ReductionSum<double>() */)
+void accumulateFieldData(FieldDataOf<double>* data, Sharing* shr, bool delete_shr)
 {
   FieldBase* f = data->getField();
   Mesh* m = f->getMesh();
@@ -91,26 +91,26 @@ void reduceFieldData(FieldDataOf<double>* data, Sharing* shr, bool delete_shr, const ReductionOp<double>& reduce_op /* =ReductionSum<double>() */)
     shr = getSharing(m);
     delete_shr=true;
   }
-
   for (int d=0; d < 4; ++d)
   {
     if ( ! s->hasNodesIn(d))
       continue;
-
     MeshEntity* e;
     MeshIterator* it = m->begin(d);
     PCU_Comm_Begin();
     while ((e = m->iterate(it)))
     {
-      if (( ! data->hasEntity(e)) || m->isGhost(e) )
-        continue; /* send to all parts that can see this entity */
+      if (( ! data->hasEntity(e)) || m->isGhost(e) ||
+          (shr->isOwned(e)))
+        continue; /* non-owners send to owners */
 
       CopyArray copies;
       shr->getCopies(e, copies);
       int n = f->countValuesOn(e);
       NewArray<double> values(n);
       data->get(e,&(values[0]));
-
+      /* actually, non-owners send to all others,
+         since apf::Sharing doesn't identify the owner */
      for (size_t i = 0; i < copies.getSize(); ++i)
      {
        PCU_COMM_PACK(copies[i].peer, copies[i].entity);
@@ -121,7 +121,8 @@ void reduceFieldData(FieldDataOf<double>* data, Sharing* shr, bool delete_shr, const ReductionOp<double>& reduce_op /* =ReductionSum<double>() */)
     PCU_Comm_Send();
     while (PCU_Comm_Listen())
       while ( ! PCU_Comm_Unpacked())
-      { /* receive and apply reduction */
+      { /* receive and add. we only care about correctness
+           on the owners */
        MeshEntity* e;
        PCU_COMM_UNPACK(e);
        int n = f->countValuesOn(e);
@@ -130,15 +131,11 @@ void reduceFieldData(FieldDataOf<double>* data, Sharing* shr, bool delete_shr, const ReductionOp<double>& reduce_op /* =ReductionSum<double>() */)
        PCU_Comm_Unpack(&(inValues[0]),n*sizeof(double));
        data->get(e,&(values[0]));
        for (int i = 0; i < n; ++i)
-        {
-          values[i] = reduce_op.apply(values[i], inValues[i]);
-        }
+          values[i] += inValues[i];
        data->set(e,&(values[0]));
      }
-  }
-
-  // every partition did the reduction, so no need to broadcast result
-  if (delete_shr) delete shr;
+  } /* broadcast back out to non-owners */
+  synchronizeFieldData(data, shr, delete_shr);
 }
 
 template <class T>
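The net effect of the restored accumulateFieldData is: copies of a shared entity exchange their values, receivers add them component-wise (values[i] += inValues[i]), and the closing synchronizeFieldData call pushes the resulting sums back to every copy. A standalone arithmetic sketch of that behavior, with made-up per-part values and no PCU or mesh dependencies:

    #include <cstdio>
    #include <vector>

    int main()
    {
      /* made-up values of one shared node held by three parts */
      std::vector<double> partValue = {1.5, 2.5, 4.0};

      /* receive-and-add step, mirroring values[i] += inValues[i] */
      double sum = 0;
      for (double v : partValue)
        sum += v;

      /* the closing synchronizeFieldData broadcasts the sum to every copy */
      for (double& v : partValue)
        v = sum;

      std::printf("every copy now holds %g\n", partValue[0]); /* prints 8 */
      return 0;
    }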

Diff for: apf/apfFieldData.h (+1, -5)

@@ -11,10 +11,7 @@
 #include <string>
 #include "apfField.h"
 #include "apfShape.h"
-#include "apf.h" // needed for ReductionOp
 
-#include <fstream> // DEBUGGING
-#include <iostream>
 namespace apf {
 
 class FieldData
@@ -38,7 +35,7 @@ class FieldDataOf;
 template <class T>
 void synchronizeFieldData(FieldDataOf<T>* data, Sharing* shr, bool delete_shr=false);
 
-void reduceFieldData(FieldDataOf<double>* data, Sharing* shr, bool delete_shr=false, const ReductionOp<double>& reduce_op=ReductionSum<double>());
+void accumulateFieldData(FieldDataOf<double>* data, Sharing* shr, bool delete_shr=false);
 
 template <class T>
 void copyFieldData(FieldDataOf<T>* from, FieldDataOf<T>* to);
@@ -60,7 +57,6 @@ class FieldDataOf : public FieldData
   int getElementData(MeshEntity* entity, NewArray<T>& data);
 };
 
-
 } //namespace apf
 
 #endif

Diff for: pumi/pumi_field.cc (+1, -1)

@@ -135,7 +135,7 @@ void pumi_field_synchronize(pField f, pOwnership o)
 
 void pumi_field_accumulate(pField f, pOwnership o)
 {
-  apf::reduceFieldData(f->getData(), o, false);
+  apf::accumulateFieldData(f->getData(), o, false);
 }
 
 void pumi_field_freeze(pField f)
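A minimal sketch of calling the PUMI-level wrapper after the revert, assuming pOwnership is the apf::Sharing pointer type used by pumi and that passing NULL selects the mesh's default sharing (as accumulateFieldData does above); the surrounding function is hypothetical:

    #include <pumi.h>
    #include <cstddef>

    /* hypothetical wrapper: f is an existing double-valued pField */
    void sumFieldAcrossParts(pField f)
    {
      /* NULL ownership falls back to the mesh's default sharing,
         matching the accumulateFieldData behavior shown above */
      pumi_field_accumulate(f, NULL);
    }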

Diff for: test/CMakeLists.txt (-1)

@@ -165,7 +165,6 @@ test_exe_func(poisson poisson.cc)
 test_exe_func(ph_adapt ph_adapt.cc)
 test_exe_func(assert_timing assert_timing.cc)
 test_exe_func(create_mis create_mis.cc)
-test_exe_func(fieldReduce fieldReduce.cc)
 if(ENABLE_DSP)
 test_exe_func(graphdist graphdist.cc)
 test_exe_func(moving moving.cc)
