| 13 |  |  | 
| 14 |  | #ifdef IS_MPI | 
| 15 |  | #include <mpi.h> | 
| 16 | - | #include <mpi++.h> | 
| 16 |  | #include "mpiSimulation.hpp" | 
| 17 |  | #define TAKE_THIS_TAG_CHAR 0 | 
| 18 |  | #define TAKE_THIS_TAG_INT 1 | 
| 153 |  | int myStatus; // 1 = wakeup & success; 0 = error; -1 = AllDone | 
| 154 |  | int haveError; | 
| 155 |  |  | 
| 156 | < | MPI::Status istatus; | 
| 156 | > | MPI_Status istatus; | 
| 157 |  | int *AtomToProcMap = mpiSim->getAtomToProcMap(); | 
| 158 |  |  | 
| 159 |  |  | 
| 227 |  | else { | 
| 228 |  |  | 
| 229 |  | myStatus = 1; | 
| 230 | < | MPI::COMM_WORLD.Send(&myStatus, 1, MPI_INT, which_node, | 
| 231 | < | TAKE_THIS_TAG_INT); | 
| 232 | < | MPI::COMM_WORLD.Send(read_buffer, BUFFERSIZE, MPI_CHAR, which_node, | 
| 233 | < | TAKE_THIS_TAG_CHAR); | 
| 234 | < | MPI::COMM_WORLD.Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT); | 
| 235 | < | MPI::COMM_WORLD.Recv(&myStatus, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT, istatus); | 
| 230 | > | MPI_Send(&myStatus, 1, MPI_INT, which_node, | 
| 231 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); | 
| 232 | > | MPI_Send(read_buffer, BUFFERSIZE, MPI_CHAR, which_node, | 
| 233 | > | TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD); | 
| 234 | > | MPI_Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT, | 
| 235 | > | MPI_COMM_WORLD); | 
| 236 | > | MPI_Recv(&myStatus, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT, | 
| 237 | > | MPI_COMM_WORLD, &istatus); | 
| 238 |  |  | 
| 239 |  | if(!myStatus) nodeZeroError(); | 
| 240 |  | } | 
| 241 |  | } | 
| 242 |  | myStatus = -1; | 
| 243 |  | for (j = 0; j < mpiSim->getNumberProcessors(); j++) { | 
| 244 | < | MPI::COMM_WORLD.Send( &myStatus, 1, MPI_INT, j, | 
| 245 | < | TAKE_THIS_TAG_INT); | 
| 244 | > | MPI_Send( &myStatus, 1, MPI_INT, j, | 
| 245 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); | 
| 246 |  | } | 
| 247 |  |  | 
| 248 |  | } else { | 
| 250 |  | done = 0; | 
| 251 |  | while (!done) { | 
| 252 |  |  | 
| 253 | < | MPI::COMM_WORLD.Recv(&myStatus, 1, MPI_INT, 0, | 
| 254 | < | TAKE_THIS_TAG_INT, istatus); | 
| 253 | > | MPI_Recv(&myStatus, 1, MPI_INT, 0, | 
| 254 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); | 
| 255 |  |  | 
| 256 |  | if(!myStatus) anonymousNodeDie(); | 
| 257 |  |  | 
| 258 |  | if(myStatus < 0) break; | 
| 259 |  |  | 
| 260 | < | MPI::COMM_WORLD.Recv(read_buffer, BUFFERSIZE, MPI_CHAR, 0, | 
| 261 | < | TAKE_THIS_TAG_CHAR, istatus); | 
| 262 | < | MPI::COMM_WORLD.Recv(&which_atom, 1, MPI_INT, 0, | 
| 263 | < | TAKE_THIS_TAG_INT, istatus); | 
| 260 | > | MPI_Recv(read_buffer, BUFFERSIZE, MPI_CHAR, 0, | 
| 261 | > | TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus); | 
| 262 | > | MPI_Recv(&which_atom, 1, MPI_INT, 0, | 
| 263 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); | 
| 264 |  |  | 
| 265 |  | myStatus = 1; | 
| 266 |  | parseErr = parseDumpLine( read_buffer, which_atom ); | 
| 270 |  | simError(); | 
| 271 |  | } | 
| 272 |  |  | 
| 273 | < | MPI::COMM_WORLD.Send( &myStatus, 1, MPI_INT, 0, | 
| 274 | < | TAKE_THIS_TAG_INT); | 
| 273 | > | MPI_Send( &myStatus, 1, MPI_INT, 0, | 
| 274 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); | 
| 275 |  |  | 
| 276 |  | } | 
| 277 |  | } | 
| 516 |  |  | 
| 517 |  | myStatus = 0; | 
| 518 |  | for (j = 0; j < mpiSim->getNumberProcessors(); j++) { | 
| 519 | < | MPI::COMM_WORLD.Send( &myStatus, 1, MPI_INT, j, | 
| 520 | < | TAKE_THIS_TAG_INT); | 
| 519 | > | MPI_Send( &myStatus, 1, MPI_INT, j, | 
| 520 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); | 
| 521 |  | } | 
| 522 |  |  | 
| 523 |  |  |
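
The hunks above replace the MPI C++ bindings (MPI::COMM_WORLD.Send/Recv, MPI::Status) with the C bindings (MPI_Send/MPI_Recv, MPI_Status, explicit MPI_COMM_WORLD and &istatus arguments). As a reading aid only, here is a minimal, self-contained sketch of the same node-0/worker handshake using the C bindings. It is not the project's actual reader code: BUFFERSIZE, which_atom, the dummy line contents, and the single-worker setup are assumptions, and the parse/error helpers (parseDumpLine, nodeZeroError, anonymousNodeDie) are omitted.

```cpp
// Sketch of the node-0 / worker handshake with the MPI C bindings.
// Tags mirror TAKE_THIS_TAG_INT / TAKE_THIS_TAG_CHAR above; BUFFERSIZE is an assumption.
#include <mpi.h>
#include <cstdio>

#define TAKE_THIS_TAG_CHAR 0
#define TAKE_THIS_TAG_INT  1
#define BUFFERSIZE 2048            // assumed line-buffer size

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int rank, nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  char read_buffer[BUFFERSIZE];
  int myStatus;                    // 1 = wakeup & success; 0 = error; -1 = AllDone
  MPI_Status istatus;

  if (rank == 0) {
    // Node 0: hand one line to worker 1 (if present), then tell every worker to stop.
    if (nprocs > 1) {
      int which_atom = 42;         // hypothetical atom index
      std::snprintf(read_buffer, BUFFERSIZE, "example dump line");

      myStatus = 1;
      MPI_Send(&myStatus, 1, MPI_INT, 1, TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
      MPI_Send(read_buffer, BUFFERSIZE, MPI_CHAR, 1, TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD);
      MPI_Send(&which_atom, 1, MPI_INT, 1, TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
      MPI_Recv(&myStatus, 1, MPI_INT, 1, TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);
    }

    // AllDone: a negative status releases each worker from its receive loop.
    myStatus = -1;
    for (int j = 1; j < nprocs; j++)
      MPI_Send(&myStatus, 1, MPI_INT, j, TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
  } else {
    // Workers: block on a status word; a negative value means quit, otherwise take a line.
    int done = 0;
    while (!done) {
      MPI_Recv(&myStatus, 1, MPI_INT, 0, TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);
      if (myStatus < 0) break;

      int which_atom;
      MPI_Recv(read_buffer, BUFFERSIZE, MPI_CHAR, 0, TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus);
      MPI_Recv(&which_atom, 1, MPI_INT, 0, TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);

      myStatus = 1;                // would be 0 if parsing the received line failed
      MPI_Send(&myStatus, 1, MPI_INT, 0, TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
    }
  }

  MPI_Finalize();
  return 0;
}
```

Unlike the original loop at lines 243-246, the sketch starts the shutdown loop at j = 1 to avoid node 0 sending to itself; the actual code keeps the j = 0 send.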