 46     * [4] Vardeman & Gezelter, in progress (2009).
 47     */
 48
 49 <  #ifndef FORCEDECOMPOSITION_COMMUNICATOR_HPP
 50 <  #define FORCEDECOMPOSITION_COMMUNICATOR_HPP
 49 >  #ifndef PARALLEL_COMMUNICATOR_HPP
 50 >  #define PARALLEL_COMMUNICATOR_HPP
 51
 52    #include <config.h>
 53    #include <mpi.h>
 58    #ifdef IS_MPI
 59
 60    enum direction {
 61 <    I = 0,
 62 <    J = 1
 61 >    Row = 0,
 62 >    Column = 1
 63    };
 64
 65    template<typename T>
 79    template<> const int MPITraits<Mat3x3d>::dim = 9;
 80
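The members of the primary MPITraits template fall in the elided lines 66-78, so the following is only a sketch of the traits idiom that line 79 implies: each transferable type is mapped to an element datatype plus a flattened length (dim). The Mat3x3d stand-in and the use of the plain C MPI types (rather than the MPI-2 C++ bindings used in the file) are assumptions:

    #include <mpi.h>

    // Hypothetical reconstruction of the MPITraits primary template;
    // only the dim specialization on line 79 appears in the diff.
    template<typename T>
    struct MPITraits {
      static const MPI_Datatype datatype;  // element type sent over MPI
      static const int dim;                // elements per object
    };

    // Stand-in for the surrounding code's 3x3 matrix-of-doubles type.
    struct Mat3x3d { double data[9]; };

    // A 3x3 double matrix flattens to nine MPI_DOUBLEs.
    template<> const MPI_Datatype MPITraits<Mat3x3d>::datatype = MPI_DOUBLE;
    template<> const int MPITraits<Mat3x3d>::dim = 9;

With traits like these, one templated transfer routine can move nObjects * MPITraits<T>::dim elements of MPITraits<T>::datatype for any registered type.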
 81    template<direction D, typename T>
 82 <  class Comm {
 82 >  class Communicator {
 83    public:
 84
 85 <    Comm<D, T>(int nObjects) {
 85 >    Communicator<D, T>(int nObjects) {
 86
 87        int nProc = MPI::COMM_WORLD.Get_size();
 88        int myRank = MPI::COMM_WORLD.Get_rank();
 98        rowIndex_ = myRank / nColumns;
 99        columnIndex_ = myRank % nColumns;
100
101 <      if (D == I) {
101 >      if (D == Row) {
102          myComm = MPI::COMM_WORLD.Split(rowIndex_, 0);
103        } else {
104          myComm = MPI::COMM_WORLD.Split(columnIndex_, 0);
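The grid-setup code that determines nColumns sits in the elided lines 89-97; assuming a near-square processor grid, the split works as in this self-contained sketch, written against the C MPI API rather than the MPI-2 C++ bindings used above. Every rank passes a color (its row or column index), all ranks sharing a color land in the same sub-communicator, and a key of 0, as in the constructor, orders them by their COMM_WORLD rank:

    #include <mpi.h>
    #include <cmath>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);

      int nProc, myRank;
      MPI_Comm_size(MPI_COMM_WORLD, &nProc);
      MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

      // Assumed grid setup: as close to square as possible.
      int nColumns    = (int) std::sqrt((double) nProc);
      int rowIndex    = myRank / nColumns;
      int columnIndex = myRank % nColumns;

      // One communicator per row and one per column of the grid.
      MPI_Comm rowComm, colComm;
      MPI_Comm_split(MPI_COMM_WORLD, rowIndex,    0, &rowComm);
      MPI_Comm_split(MPI_COMM_WORLD, columnIndex, 0, &colComm);

      int rowRank;
      MPI_Comm_rank(rowComm, &rowRank);
      std::printf("world rank %d -> row %d, slot %d\n", myRank, rowIndex, rowRank);

      MPI_Comm_free(&rowComm);
      MPI_Comm_free(&colComm);
      MPI_Finalize();
      return 0;
    }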
116        displacements[0] = 0;
117        for (int i = 1; i < nCommProcs; i++) {
118          displacements[i] = displacements[i-1] + counts[i-1];
119        }
120 +
121 +      size_ = 0;
122 +      for (int i = 0; i < nCommProcs; i++) {
123 +        size_ += counts[i];
124 +      }
125 +
126      }
127
128
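How counts gets filled falls in the elided lines 105-115; an Allgather of each rank's planSize_ is the natural guess, but it is a guess. The pattern the constructor completes, an exclusive prefix sum for displacements and a running total for size_, is exactly what MPI's variable-length collectives consume, as in this sketch:

    #include <mpi.h>
    #include <vector>

    // Sketch: gather every rank's variable-length block into one vector.
    // nLocal plays the role of planSize_; counts, displacements, and the
    // total size are computed exactly as in the constructor above.
    std::vector<double> gatherAll(MPI_Comm comm, const std::vector<double>& local) {
      int nProcs;
      MPI_Comm_size(comm, &nProcs);

      int nLocal = (int) local.size();   // assumed nonzero on every rank
      std::vector<int> counts(nProcs), displacements(nProcs);
      MPI_Allgather(&nLocal, 1, MPI_INT, &counts[0], 1, MPI_INT, comm);

      displacements[0] = 0;
      for (int i = 1; i < nProcs; i++)
        displacements[i] = displacements[i-1] + counts[i-1];

      int size = 0;
      for (int i = 0; i < nProcs; i++) size += counts[i];

      std::vector<double> global(size);
      MPI_Allgatherv(&local[0], nLocal, MPI_DOUBLE,
                     &global[0], &counts[0], &displacements[0],
                     MPI_DOUBLE, comm);
      return global;
    }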
144        myComm.Reduce_scatter(&v1[0], &v2[0], &counts[0],
145                              MPITraits<T>::datatype, MPI::SUM);
146      }
147 +
148 +    int getSize() {
149 +      return size_;
150 +    }
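The member wrapping this call (its declaration sits in the elided lines 129-143) performs the reverse of a gather: every rank supplies a full-length v1, the vectors are summed element-wise across the communicator, and rank i receives only the counts[i]-long block of the sum in v2. A minimal C-API equivalent, assuming v2 is pre-sized to this rank's count:

    #include <mpi.h>
    #include <vector>

    // Sum v1 element-wise over all ranks in comm, then deliver to each
    // rank just its own block of the result (block i has counts[i] entries).
    void scatterSum(MPI_Comm comm, std::vector<int>& counts,
                    std::vector<double>& v1, std::vector<double>& v2) {
      // v2.size() must equal counts[rank of this process in comm].
      MPI_Reduce_scatter(&v1[0], &v2[0], &counts[0],
                         MPI_DOUBLE, MPI_SUM, comm);
    }

In a force-decomposition scheme this is the step that returns row- or column-accumulated contributions to the processors that own them.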
151
152    private:
153      int planSize_;      ///< number of objects on the local processor
154      int rowIndex_;
155      int columnIndex_;
156 +    int size_;          ///< total number of objects across the communicator
157      std::vector<int> counts;
158      std::vector<int> displacements;
159      MPI::Intracomm myComm;