 58       #ifdef IS_MPI
 59
 60       enum direction {
 61  <       I = 0,
 62  <       J = 1
 61  >       Row = 0,
 62  >       Column = 1
 63       };
 64
 65       template<typename T>
 79       template<> const int MPITraits<Mat3x3d>::dim = 9;
 80
 81       template<direction D, typename T>
 82  <    class Comm {
 82  >    class Communicator {
 83       public:
 84
 85  <      Comm<D, T>(int nObjects) {
 85  >      Communicator<D, T>(int nObjects) {
 86
 87           int nProc = MPI::COMM_WORLD.Get_size();
 88           int myRank = MPI::COMM_WORLD.Get_rank();
 98           rowIndex_ = myRank / nColumns;
 99           columnIndex_ = myRank % nColumns;
100
101  <        if (D == Row) {
101  >        if (D == Row) {
102             myComm = MPI::COMM_WORLD.Split(rowIndex_, 0);
103           } else {
104             myComm = MPI::COMM_WORLD.Split(columnIndex_, 0);
113
114           myComm.Allgather(&planSize_, 1, MPI::INT, &counts[0], 1, MPI::INT);
115
116  +
117           displacements[0] = 0;
118           for (int i = 1; i < nCommProcs; i++) {
119             displacements[i] = displacements[i-1] + counts[i-1];
120  <        }
120  >          size_ += counts[i-1];
121  >        }
122  >
123  >        size_ = 0;
124  >        for (int i = 0; i < nCommProcs; i++) {
125  >          size_ += counts[i];
126  >        }
127         }
128
129
145           myComm.Reduce_scatter(&v1[0], &v2[0], &counts[0],
146                                 MPITraits<T>::datatype, MPI::SUM);
147         }
148  +
149  +     int getSize() {
150  +       return size_;
151  +     }
152
153      private:
154        int planSize_;     ///< how many are on local proc
155        int rowIndex_;
156        int columnIndex_;
157  +    int size_;
158        std::vector<int> counts;
159        std::vector<int> displacements;
160        MPI::Intracomm myComm;
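
For reference, here is a minimal standalone sketch of the row/column decomposition the rewritten constructor performs. It reuses only the MPI C++ binding calls that appear in the diff (Get_size, Get_rank, Split); the hard-wired nColumns value and the local variable names are illustrative assumptions, not part of the original file, which presumably derives nColumns from nProc in the elided lines 89-97.

    // Hedged sketch: builds row and column sub-communicators the same way the
    // Communicator<D, T> constructor above does, using the MPI C++ bindings
    // (deprecated in MPI-2.2 and removed in MPI-3).
    #include <mpi.h>
    #include <iostream>

    int main(int argc, char** argv) {
      MPI::Init(argc, argv);

      int nProc  = MPI::COMM_WORLD.Get_size();
      int myRank = MPI::COMM_WORLD.Get_rank();

      int nColumns    = 2;                  // illustrative grid width (assumption)
      int rowIndex    = myRank / nColumns;  // this process's row in the grid
      int columnIndex = myRank % nColumns;  // this process's column in the grid

      // Processes passing the same color end up in the same sub-communicator,
      // exactly as in the D == Row / D == Column branches of the constructor.
      MPI::Intracomm rowComm    = MPI::COMM_WORLD.Split(rowIndex, 0);
      MPI::Intracomm columnComm = MPI::COMM_WORLD.Split(columnIndex, 0);

      std::cout << "rank " << myRank << ": row " << rowIndex
                << ", column " << columnIndex << std::endl;

      MPI::Finalize();
      return 0;
    }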