  5      #include "mpiSimulation.hpp"
  6      #include "simError.h"
  7
  8  +   mpiSimulation* mpiSim;
  9
  9  -
 10      mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
 11      {
 12        entryPlug = the_entryPlug;
 13  +     mpiPlug = new MpiSimData;
 14
 15  <     numberProcessors = MPI::COMM_WORLD.Get_size();
 16  <     myNode = worldRank;
 15  >     mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
 16  >     mpiPlug->myNode = worldRank;
 17
 18  <     // let the simulation know were there.
 18  <     entryPlug->mpiSim = this;
 18  >
 19      }
 20
 21
 22      mpiSimulation::~mpiSimulation(){
 23
 24  <     // empty for now
 24  >     delete mpiPlug;
 25  >     // perhaps we should let fortran know the party is over.
 26
 27      }
 28
 29
 30  <   void mpiSimulation::divideLabor(int nComponents, MoleculeStamp** compStamps, int* componentsNmol ){
 31  <
 30  >   void mpiSimulation::divideLabor( void ){
 31  >
 32  >     int nComponents;
 33  >     MoleculeStamp** compStamps;
 34  >     int* componentsNmol;
 35  >
 36        double numerator;
 37        double denominator;
 38        double precast;
 40        int nTarget;
 41        int molIndex, atomIndex, compIndex, compStart;
 42        int done;
 43  <     int nLocal;
 43  >     int nLocal, molLocal;
 44        int i;
 45        int smallDiff, bigDiff;
 46
 47        int testSum;
 48
 49  +     nComponents = entryPlug->nComponents;
 50  +     compStamps = entryPlug->compStamps;
 51  +     componentsNmol = entryPlug->componentsNmol;
 52  +
 53  +     mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
 54  +     mpiPlug->nBondsGlobal = entryPlug->n_bonds;
 55  +     mpiPlug->nBendsGlobal = entryPlug->n_bends;
 56  +     mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
 57  +     mpiPlug->nSRIGlobal = entryPlug->n_SRI;
 58  +     mpiPlug->nMolGlobal = entryPlug->n_nmol;
 59  +
 60        numerator = (double) entryPlug->n_atoms;
 61  <     denominator = (double) numberProcessors;
 61  >     denominator = (double) mpiPlug->numberProcessors;
 62        precast = numerator / denominator;
 63        nTarget = (int)( precast + 0.5 );
 64
 66        atomIndex = 0;
 67        compIndex = 0;
 68        compStart = 0;
 69  <     for( i=0; i<(numberProcessors-1); i++){
 69  >     for( i=0; i<(mpiPlug->numberProcessors-1); i++){
 70
 71          done = 0;
 72          nLocal = 0;
 73  +       molLocal = 0;
 74
 75  <       if( i == myNode ){
 76  <         myMolStart = molIndex;
 77  <         myAtomStart = atomIndex;
 75  >       if( i == mpiPlug->myNode ){
 76  >         mpiPlug->myMolStart = molIndex;
 77  >         mpiPlug->myAtomStart = atomIndex;
 78          }
 79
 80          while( !done ){
 88            nLocal += compStamps[compIndex]->getNAtoms();
 89            atomIndex += compStamps[compIndex]->getNAtoms();
 90            molIndex++;
 91  +         molLocal++;
 92
 93            if ( nLocal == nTarget ) done = 1;
 94
101            if( bigDiff < smallDiff ) done = 1;
102            else{
103              molIndex--;
104  +           molLocal--;
105              atomIndex -= compStamps[compIndex]->getNAtoms();
106              nLocal -= compStamps[compIndex]->getNAtoms();
107              done = 1;
109            }
110          }
111
112  <       if( i == myNode ){
113  <         myMolEnd = (molIndex - 1);
114  <         myAtomEnd = (atomIndex - 1);
115  <         myNlocal = nLocal;
112  >       if( i == mpiPlug->myNode ){
113  >         mpiPlug->myMolEnd = (molIndex - 1);
114  >         mpiPlug->myAtomEnd = (atomIndex - 1);
115  >         mpiPlug->myNlocal = nLocal;
116  >         mpiPlug->myMol = molLocal;
117          }
118
119          numerator = (double)( entryPlug->n_atoms - atomIndex );
120  <       denominator = (double)( numberProcessors - (i+1) );
120  >       denominator = (double)( mpiPlug->numberProcessors - (i+1) );
121          precast = numerator / denominator;
122          nTarget = (int)( precast + 0.5 );
123        }
124
125  <     if( myNode == numberProcessors-1 ){
126  <       myMolStart = molIndex;
127  <       myAtomStart = atomIndex;
125  >     if( mpiPlug->myNode == mpiPlug->numberProcessors-1 ){
126  >       mpiPlug->myMolStart = molIndex;
127  >       mpiPlug->myAtomStart = atomIndex;
128
129          nLocal = 0;
130  +       molLocal = 0;
131          while( compIndex < nComponents ){
132
133            if( (molIndex-compStart) >= componentsNmol[compIndex] ){
139            nLocal += compStamps[compIndex]->getNAtoms();
140            atomIndex += compStamps[compIndex]->getNAtoms();
141            molIndex++;
142  +         molLocal++;
143          }
144
145  <       myMolEnd = (molIndex - 1);
146  <       myAtomEnd = (atomIndex - 1);
147  <       myNlocal = nLocal;
145  >       mpiPlug->myMolEnd = (molIndex - 1);
146  >       mpiPlug->myAtomEnd = (atomIndex - 1);
147  >       mpiPlug->myNlocal = nLocal;
148  >       mpiPlug->myMol = molLocal;
149        }
150
151
152  <     MPI_Allreduce( &Nlocal, &testSum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
152  >     MPI_Allreduce( &nLocal, &testSum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
153
154  <     if( myNode == 0 ){
154  >     if( mpiPlug->myNode == 0 ){
155          if( testSum != entryPlug->n_atoms ){
156            sprintf( painCave.errMsg,
157                     "The summ of all nLocals, %d, did not equal the total number of atoms, %d.\n",
164        sprintf( checkPointMsg,
165                 "Successfully divided the molecules among the processors.\n" );
166        MPIcheckPoint();
167  +
168  +     // lets create the identity array
169      }
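
For reference, the balancing rule this diff puts into divideLabor(): each node is handed whole molecules until its atom count reaches a target of (remaining atoms)/(remaining nodes), rounded to the nearest integer; if the last molecule added overshoots the target it is kept or handed back depending on which choice gives the smaller error, and the final node absorbs whatever is left. The sketch below only illustrates that arithmetic under simplified assumptions: a plain vector of per-molecule atom counts stands in for SimInfo/MoleculeStamp, there is no MPI, and the names partitionMolecules, molAtoms, and NodeRange are hypothetical rather than part of this file.

  #include <vector>

  // Hypothetical, self-contained illustration of the balancing rule used by
  // divideLabor(): assign whole molecules, aiming each node at
  // nTarget = round(remaining atoms / remaining nodes).
  struct NodeRange { int molStart; int molEnd; int nAtoms; };

  std::vector<NodeRange> partitionMolecules( const std::vector<int>& molAtoms,
                                             int nProcessors ){
    std::vector<NodeRange> ranges( nProcessors );
    int molIndex  = 0;
    int atomsLeft = 0;
    for( int a : molAtoms ) atomsLeft += a;

    for( int node = 0; node < nProcessors; node++ ){

      // target recomputed from whatever is still unassigned
      int nodesLeft = nProcessors - node;
      int nTarget   = (int)( (double)atomsLeft / (double)nodesLeft + 0.5 );

      int start  = molIndex;
      int nLocal = 0;

      // the last node takes everything that remains
      while( molIndex < (int)molAtoms.size() &&
             ( node == nProcessors-1 || nLocal < nTarget ) ){
        nLocal += molAtoms[molIndex];
        molIndex++;
      }

      // if the final molecule overshot the target, hand it back when the
      // undershoot would be the smaller error (mirrors smallDiff/bigDiff)
      if( node != nProcessors-1 && nLocal > nTarget && molIndex > start ){
        int bigDiff   = nLocal - nTarget;
        int smallDiff = nTarget - ( nLocal - molAtoms[molIndex-1] );
        if( bigDiff >= smallDiff ){
          molIndex--;
          nLocal -= molAtoms[molIndex];
        }
      }

      ranges[node].molStart = start;
      ranges[node].molEnd   = molIndex - 1;
      ranges[node].nAtoms   = nLocal;
      atomsLeft            -= nLocal;
    }
    return ranges;
  }

For example, with molAtoms = {3, 3, 4, 2} and nProcessors = 2, the first node receives the first two molecules (6 atoms against a target of 6) and the second node receives the remaining 6 atoms.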