#include <algorithm>
#include <cstdio>
#include <iostream>
#include <vector>

#include "ZConsWriter.hpp"
#include "simError.h"

using namespace std;

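// ZConsWriter writes z-constraint data to a single text file.  In the MPI
// build it uses a master/slave scheme: only the master node (worldRank == 0)
// opens and writes the file, gathering values from the other nodes over MPI.
//
// File layout, matching the header lines written by the constructor:
//   <number of z-constrained molecules>
//   <global molecule index>\t<target zPos>       (one line per molecule)
// then, for every frame:
//   <time (fs)>
//   <number of fixed z-constrained molecules>
//   <global molecule index>\t<z-constraint force>\t<current zPos>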
ZConsWriter::ZConsWriter(const char* filename, vector<ZConsParaItem>* thePara)
{
  // master/slave mode: only the master node writes to disk
#ifdef IS_MPI
  if(worldRank == 0){
#endif

  output.open(filename);

  if(!output){
    sprintf(painCave.errMsg,
            "Could not open \"%s\" for z-constraint output.\n",
            filename);
    painCave.isFatal = 1;
    simError();
  }
   output << "#number of z constrain molecules" << endl; | 
| 27 | 
   output << "#global Index of molecule\tzPos" << endl; | 
| 28 | 
   output << "every frame will contain below data" <<endl; | 
| 29 | 
   output << "#time(fs)" << endl; | 
| 30 | 
   output << "#number of fixed z-constrain molecules" << endl; | 
| 31 | 
   output << "#global Index of molecule\tzconstrain force\tcurrentZPos" << endl; | 
| 32 | 
 | 
| 33 | 
  parameters = thePara;
  writeZPos();

#ifdef IS_MPI
  }
#endif

}

ZConsWriter::~ZConsWriter()
{
#ifdef IS_MPI
  if(worldRank == 0){
#endif

  output.close();

#ifdef IS_MPI
  }
#endif
}

/**
 * Write one frame of z-constraint data: the time, the number of fixed
 * z-constrained molecules, and one line per molecule with its global index,
 * constraint force, and current z position.  In the MPI build the master
 * node gathers these values from the nodes that own each molecule.
 */
void ZConsWriter::writeFZ(double time, int num, int* index, double* fz, double* curZPos){

#ifndef IS_MPI
  // single-process build: write the frame directly
  output << time << endl;
  output << num << endl;

  for(int i = 0; i < num; i++)
    output << index[i] << "\t" << fz[i] << "\t" << curZPos[i] << endl;

#else
  // total up the z-constrained molecules across all nodes
  int totalNum;
  MPI_Allreduce(&num, &totalNum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  if(worldRank == 0){
    output << time << endl;
    output << totalNum << endl;
  }

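  // Gather protocol: the master walks the z-constraint list in order.  For
  // molecules it owns it reads the values locally; for remote molecules it
  // sends a RequestPosAndForce message (plus the global molecule index) to
  // the owning node and receives the force and current z position back.
  // Once every molecule has been written, EndOfRequest releases the slaves.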
  int whichNode;
  enum CommType { RequestPosAndForce, EndOfRequest } status;
  double pos;
  double force;
  int localIndex = -1;   // local storage index of the molecule being queried
  MPI_Status recvStatus;
  int tag = 0;

  if(worldRank == 0){

    int globalIndexOfCurMol;
    int* MolToProcMap = mpiSim->getMolToProcMap();

    for(int i = 0; i < (int) parameters->size(); i++){

      globalIndexOfCurMol = (*parameters)[i].zconsIndex;
      whichNode = MolToProcMap[globalIndexOfCurMol];

      if(whichNode == 0){
        // the master owns this molecule: look up its local storage index
        for(int j = 0; j < num; j++)
          if(index[j] == globalIndexOfCurMol){
            localIndex = j;
            break;
          }

        force = fz[localIndex];
        pos = curZPos[localIndex];
      }
      else{
        // ask the owning node for the force and current z position
        status = RequestPosAndForce;
        MPI_Send(&status, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
        MPI_Send(&globalIndexOfCurMol, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
        MPI_Recv(&force, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &recvStatus);
        MPI_Recv(&pos, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &recvStatus);
      }

      output << globalIndexOfCurMol << "\t" << force << "\t" << pos << endl;

    } // end of request loop

    // send the ending request message to the slave nodes
    status = EndOfRequest;
    for(int i = 1; i < mpiSim->getNumberProcessors(); i++)
      MPI_Send(&status, 1, MPI_INT, i, tag, MPI_COMM_WORLD);

  }
  else{

    // slave nodes answer RequestPosAndForce queries for molecules they own
    // until the master sends EndOfRequest
    int whichMol;
    bool done = false;

    while (!done){

      MPI_Recv(&status, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &recvStatus);

      switch (status){

      case RequestPosAndForce:

        MPI_Recv(&whichMol, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &recvStatus);

        // find the local storage index of the requested molecule
        for(int i = 0; i < num; i++)
          if(index[i] == whichMol){
            localIndex = i;
            break;
          }

        MPI_Send(&fz[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
        MPI_Send(&curZPos[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
        break;

      case EndOfRequest:

        done = true;
        break;
      }

    }

  }
| 158 | 
 | 
| 159 | 
#endif | 
| 160 | 
 | 
| 161 | 
} | 
| 162 | 
 | 
| 163 | 
/**
 * Write the z-constraint header block: the number of z-constrained
 * molecules, followed by each molecule's global index and target z position.
 */
void ZConsWriter::writeZPos(){

#ifdef IS_MPI
  if(worldRank == 0){
#endif

    output << parameters->size() << endl;

    for(int i = 0; i < (int) parameters->size(); i++)
      output << (*parameters)[i].zconsIndex << "\t" << (*parameters)[i].zPos << endl;

#ifdef IS_MPI
  }
#endif
}