135      #ifdef IS_MPI
136        // in parallel, we need to add up the contributions from all
137        // processors:
138 <      MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &totalFrc, 1, MPI::REALTYPE,
139 <                                MPI::SUM);
138 >      MPI_Allreduce(MPI_IN_PLACE, &totalFrc, 1, MPI_REALTYPE,
139 >                    MPI_SUM, MPI_COMM_WORLD);
140
141        if (constrainRegions_) {
142 <        MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &regionForce_[0],
143 <                                  regionForce_.size(), MPI::REALTYPE, MPI::SUM);
144 <        MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &regionCharges_[0],
145 <                                  regionCharges_.size(), MPI::INT, MPI::SUM);
142 >        MPI_Allreduce(MPI_IN_PLACE, &regionForce_[0],
143 >                      regionForce_.size(), MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
144 >        MPI_Allreduce(MPI_IN_PLACE, &regionCharges_[0],
145 >                      regionCharges_.size(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
146        }
147
148      #endif
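The hunk above migrates from the MPI C++ bindings (MPI::COMM_WORLD.Allreduce), which were deprecated in MPI-2.2 and removed in MPI-3.0, to the C API: the communicator moves from the invoking object to the final argument of MPI_Allreduce, and MPI_IN_PLACE sums each rank's contribution into the same buffer. A minimal sketch of the same in-place reduction pattern, assuming MPI_REALTYPE is a project-level alias for the floating-point datatype (MPI_DOUBLE stands in for it here):

    #include <mpi.h>
    #include <cstdio>
    #include <vector>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);

      // Each rank holds a partial force; MPI_IN_PLACE replaces it with the
      // sum over all ranks, leaving the result in the same buffer everywhere.
      double totalFrc = 1.0;  // stand-in for this rank's partial sum
      MPI_Allreduce(MPI_IN_PLACE, &totalFrc, 1, MPI_DOUBLE,
                    MPI_SUM, MPI_COMM_WORLD);

      // The same call sums whole vectors, as with regionForce_ above:
      std::vector<double> regionForce(4, 1.0);
      MPI_Allreduce(MPI_IN_PLACE, &regionForce[0],
                    static_cast<int>(regionForce.size()),
                    MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      if (rank == 0) std::printf("totalFrc = %g\n", totalFrc);

      MPI_Finalize();
      return 0;
    }

Compiled with mpicxx and run under mpirun with N ranks, the printed total is N times the per-rank value, since every rank contributes its partial sum.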
152
153        // do the same in the regions:
154        if (constrainRegions_) {
155 <        for (int i = 0; i < regionForce_.size(); ++i) {
155 >        for (unsigned int i = 0; i < regionForce_.size(); ++i) {
156            regionForce_[ i ] /= regionCharges_[ i ];
157          }
158        }
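This hunk only widens the loop index: std::vector::size() returns an unsigned type, so comparing it against a signed int draws a -Wsign-compare warning. The loop itself turns each region's summed force into a per-charge average. A standalone sketch of that averaging step, with regionCharges_ assumed (as in the surrounding code) to count the fluctuating charges contributing to each region:

    #include <vector>

    // Divide each region's accumulated force by the number of charges in
    // that region, leaving the per-charge average force in regionForce.
    void averageRegionForces(std::vector<double>& regionForce,
                             const std::vector<int>& regionCharges) {
      for (unsigned int i = 0; i < regionForce.size(); ++i) {
        regionForce[i] /= regionCharges[i];
      }
    }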
183
184        for (atom = mol->beginFluctuatingCharge(j); atom != NULL;
185             atom = mol->nextFluctuatingCharge(j)) {
186 <        //constrainedFrc = atom->getFlucQFrc() - totalFrc - totalMolFrc;
186 >        constrainedFrc = atom->getFlucQFrc() - totalFrc - totalMolFrc;
187
188 <        constrainedFrc = atom->getFlucQFrc() - totalMolFrc;
188 >        //constrainedFrc = atom->getFlucQFrc() - totalMolFrc;
189
190          if (constrainRegions_)
191            constrainedFrc -= regionFrc;
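The final hunk swaps which projection is active: the constrained force now subtracts both the system-wide average (totalFrc) and the per-molecule average (totalMolFrc) from each atom's fluctuating-charge force, rather than the molecular average alone, with the regional average removed on top when constrainRegions_ is set. A hypothetical standalone version of that projection, using plain doubles in place of the class's accessors:

    // Remove the global, molecular, and (optionally) regional average
    // forces so the constrained charge feels only the remainder.
    double constrainFlucQForce(double flucQFrc, double totalFrc,
                               double totalMolFrc, double regionFrc,
                               bool constrainRegions) {
      double constrainedFrc = flucQFrc - totalFrc - totalMolFrc;
      if (constrainRegions) constrainedFrc -= regionFrc;
      return constrainedFrc;
    }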