/**
 * @file Communicator.hpp
 * @author Charles Vardeman <cvardema.at.nd.edu>
 * @date 08/18/2010
 * @version 1.0
 *
 * @section LICENSE
 * Copyright (c) 2010 The University of Notre Dame. All Rights Reserved.
 *
 * The University of Notre Dame grants you ("Licensee") a
 * non-exclusive, royalty free, license to use, modify and
 * redistribute this software in source and binary code form, provided
 * that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the
 *    distribution.
 *
 * This software is provided "AS IS," without a warranty of any
 * kind. All express or implied conditions, representations and
 * warranties, including any implied warranty of merchantability,
 * fitness for a particular purpose or non-infringement, are hereby
 * excluded. The University of Notre Dame and its licensors shall not
 * be liable for any damages suffered by licensee as a result of
 * using, modifying or distributing the software or its
 * derivatives. In no event will the University of Notre Dame or its
 * licensors be liable for any lost revenue, profit or data, or for
 * direct, indirect, special, consequential, incidental or punitive
 * damages, however caused and regardless of the theory of liability,
 * arising out of the use of or inability to use software, even if the
 * University of Notre Dame has been advised of the possibility of
 * such damages.
 *
 * SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
 * research, please cite the appropriate papers when you publish your
 * work. Good starting points are:
 *
 * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
 * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
 * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008).
 * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010).
 * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
 */

#ifndef PARALLEL_COMMUNICATOR_HPP
#define PARALLEL_COMMUNICATOR_HPP

#include <config.h>
#include <mpi.h>
#include "math/SquareMatrix3.hpp"

using namespace std;
namespace OpenMD{

#ifdef IS_MPI

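  /**
   * Labels the three ways Communicator splits MPI_COMM_WORLD: by
   * individual rank (Global), by row of the processor grid (Row), or
   * by column of the processor grid (Column).
   */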
  enum communicatorType {
    Global = 0,
    Row = 1,
    Column = 2
  };

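  /**
   * MPITraits maps an element type onto the MPI datatype used to
   * transmit it and the number of primitive values it occupies.  The
   * primary template only declares Type(); the explicit
   * specializations for int and RealType supply the datatype, and the
   * partial specializations for Vector, Vector3, RectMatrix, and
   * SquareMatrix3 forward to their element type and report the
   * element count.
   */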
  template<class T>
  class MPITraits {
  public:
    static MPI_Datatype Type();
    static int Length() { return 1; }
  };

  template<> inline MPI_Datatype MPITraits<int>::Type() { return MPI_INT; }
  template<> inline MPI_Datatype MPITraits<RealType>::Type() { return MPI_REALTYPE; }

  template<class T, unsigned int Dim>
  class MPITraits< Vector<T, Dim> > {
  public:
    static MPI_Datatype Type() { return MPITraits<T>::Type(); }
    static int Length() { return Dim; }
  };

  template<class T>
  class MPITraits< Vector3<T> > {
  public:
    static MPI_Datatype Type() { return MPITraits<T>::Type(); }
    static int Length() { return 3; }
  };

  template<class T, unsigned int R, unsigned int C>
  class MPITraits< RectMatrix<T, R, C> > {
  public:
    static MPI_Datatype Type() { return MPITraits<T>::Type(); }
    static int Length() { return R * C; }
  };

  template<class T>
  class MPITraits< SquareMatrix3<T> > {
  public:
    static MPI_Datatype Type() { return MPITraits<T>::Type(); }
    static int Length() { return 9; }
  };

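  /**
   * Communicator<D> splits MPI_COMM_WORLD over a roughly square
   * processor grid: nColumns is the largest divisor of the total
   * process count that does not exceed its square root, and each rank
   * gets a (row, column) position in the grid.  Row and Column
   * communicators group the ranks that share that index, while Global
   * splits by individual rank.
   */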
  template<communicatorType D>
  class Communicator {
  public:

    Communicator() {

      int nProc;
      int myRank;

      MPI_Comm_size( MPI_COMM_WORLD, &nProc );
      MPI_Comm_rank( MPI_COMM_WORLD, &myRank );

      // Choose nColumns as the largest divisor of nProc that does not
      // exceed sqrt(nProc), giving a processor grid that is as close
      // to square as possible.
      int nColumnsMax = (int) sqrt(RealType(nProc));

      int nColumns(0);
      for (int i = 1; i < nColumnsMax + 1; i++) {
        if (nProc % i == 0) nColumns = i;
      }

      // int nRows = nProc / nColumns;
      rowIndex_ = myRank / nColumns;
      columnIndex_ = myRank % nColumns;

      switch(D) {
      case Row :
        MPI_Comm_split(MPI_COMM_WORLD, rowIndex_, 0, &myComm);
        break;
      case Column:
        MPI_Comm_split(MPI_COMM_WORLD, columnIndex_, 0, &myComm);
        break;
      case Global:
        MPI_Comm_split(MPI_COMM_WORLD, myRank, 0, &myComm);
      }

    }

    MPI_Comm getComm() { return myComm; }

  private:
    int rowIndex_;
    int columnIndex_;
    MPI_Comm myComm;
  };

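  /**
   * A Plan records, for every rank in a communicator, how many
   * primitive elements (MPITraits<T>::Length() * nObjects) that rank
   * contributes, along with the displacements needed to concatenate
   * them.  gather() allgathers each rank's block into one long
   * vector; scatter() reduce-scatters (summing) a long vector back
   * into per-rank blocks.
   *
   * A minimal usage sketch; the object count and variable names are
   * illustrative, not part of this header:
   * @code
   *   Communicator<Row> rowComm;
   *   int nLocal = 10;                          // this rank's object count
   *   Plan<RealType> rowPlan(rowComm.getComm(), nLocal);
   *
   *   vector<RealType> mine(nLocal);            // local contribution
   *   vector<RealType> row(rowPlan.getSize());  // concatenated row data
   *   rowPlan.gather(mine, row);                // every rank gets the full row
   *   rowPlan.scatter(row, mine);               // summed pieces come back
   * @endcode
   */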
  template<typename T>
  class Plan {
  public:

    Plan(MPI_Comm comm, int nObjects) : myComm(comm) {

      int nCommProcs;
      MPI_Comm_size( myComm, &nCommProcs );

      counts.resize(nCommProcs, 0);
      displacements.resize(nCommProcs, 0);

      planSize_ = MPITraits<T>::Length() * nObjects;

      MPI_Allgather(&planSize_, 1, MPI_INT, &counts[0], 1, MPI_INT, myComm);

      displacements[0] = 0;
      for (int i = 1; i < nCommProcs; i++) {
        displacements[i] = displacements[i-1] + counts[i-1];
      }

      size_ = 0;
      for (int i = 0; i < nCommProcs; i++) {
        size_ += counts[i];
      }
    }


    void gather(vector<T>& v1, vector<T>& v2) {

      // an assert would be helpful here to make sure the vectors are the
      // correct geometry

      MPI_Allgatherv(&v1[0],
                     planSize_,
                     MPITraits<T>::Type(),
                     &v2[0],
                     &counts[0],
                     &displacements[0],
                     MPITraits<T>::Type(),
                     myComm);
    }

    void scatter(vector<T>& v1, vector<T>& v2) {
      // an assert would be helpful here to make sure the vectors are the
      // correct geometry

      MPI_Reduce_scatter(&v1[0], &v2[0], &counts[0],
                         MPITraits<T>::Type(), MPI_SUM, myComm);
    }

    int getSize() {
      return size_;
    }

  private:
    int planSize_;  ///< number of primitive elements on the local process
    int size_;      ///< total number of primitive elements across the communicator
    vector<int> counts;
    vector<int> displacements;
    MPI_Comm myComm;
  };

#endif // IS_MPI
}      // namespace OpenMD
#endif // PARALLEL_COMMUNICATOR_HPP