root/OpenMD/branches/development/src/UseTheForce/DarkSide/simParallel.F90
Revision: 1496
Committed: Wed Sep 8 20:42:24 2010 UTC by chuckv
File size: 32204 byte(s)
Log Message:
Somewhat working cmake build system now. Does not yet build a parallel target. Builds all non-mpi programs.

File Contents

1 !!
2 !! Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
3 !!
4 !! The University of Notre Dame grants you ("Licensee") a
5 !! non-exclusive, royalty free, license to use, modify and
6 !! redistribute this software in source and binary code form, provided
7 !! that the following conditions are met:
8 !!
9 !! 1. Redistributions of source code must retain the above copyright
10 !! notice, this list of conditions and the following disclaimer.
11 !!
12 !! 2. Redistributions in binary form must reproduce the above copyright
13 !! notice, this list of conditions and the following disclaimer in the
14 !! documentation and/or other materials provided with the
15 !! distribution.
16 !!
17 !! This software is provided "AS IS," without a warranty of any
18 !! kind. All express or implied conditions, representations and
19 !! warranties, including any implied warranty of merchantability,
20 !! fitness for a particular purpose or non-infringement, are hereby
21 !! excluded. The University of Notre Dame and its licensors shall not
22 !! be liable for any damages suffered by licensee as a result of
23 !! using, modifying or distributing the software or its
24 !! derivatives. In no event will the University of Notre Dame or its
25 !! licensors be liable for any lost revenue, profit or data, or for
26 !! direct, indirect, special, consequential, incidental or punitive
27 !! damages, however caused and regardless of the theory of liability,
28 !! arising out of the use of or inability to use software, even if the
29 !! University of Notre Dame has been advised of the possibility of
30 !! such damages.
31 !!
32 !! SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
33 !! research, please cite the appropriate papers when you publish your
34 !! work. Good starting points are:
35 !!
36 !! [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
37 !! [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
38 !! [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
39 !! [4] Vardeman & Gezelter, in progress (2009).
40 !!
41
42
43 !! MPI support for long range forces using force decomposition
44 !! on a square grid of processors.
45 !! Corresponds to mpiSimulation.cpp on the C++ side.
46 !! This module also contains a private interface for MPI Fortran 90 routines.
47 !!
48 !! @author Charles F. Vardeman II
49 !! @author Matthew Meineke
50 !! @version $Id$, $Date$, $Revision$
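!!
!! Illustration (added as a reading aid, not part of the decomposition code
!! itself): on a hypothetical 6-processor run, make_Force_Grid below picks
!! nColumns = 2 and nRows = 3, and places each rank r at
!! row = r/nColumns, column = mod(r,nColumns):
!!
!!              col 0    col 1
!!    row 0    rank 0   rank 1
!!    row 1    rank 2   rank 3
!!    row 2    rank 4   rank 5
!!
!! rowComm then groups the ranks that share a row, and columnComm groups
!! the ranks that share a column.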
51
52 module mpiSimulation
53 use definitions
54 use status
55 #ifdef IS_MPI
56 use OpenMDMPI
57 implicit none
58 PRIVATE
59 #endif
60
61
62 !! Include mpiComponentPlan type. mpiComponentPlan is a
63 !! dual header file for both C and Fortran.
64 #define __FORTRAN90
65 #include "UseTheForce/mpiComponentPlan.h"
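!! For reference, the fields of mpiComponentPlan used in this module (all
!! declared in the header included above) are: nAtomsLocal, nAtomsGlobal,
!! nGroupsLocal, nMolGlobal, nProcessors, myNode, rowComm, columnComm,
!! rowIndex, columnIndex, nRows, nColumns, nAtomsInRow, nAtomsInColumn,
!! nGroupsInRow and nGroupsInColumn.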
66 public :: setupSimParallel
67
68 #ifdef IS_MPI
69
70 !! PUBLIC Subroutines contained in this module
71 !! gather and scatter are generic interfaces
72 !! to the gather and scatter routines
73 public :: gather, scatter
74 public :: setupSimParallel
75 public :: replanSimParallel
76 public :: getNatomsInCol
77 public :: getNatomsInRow
78 public :: getNgroupsInCol
79 public :: getNgroupsInRow
80 public :: isMPISimSet
81 public :: printComponentPlan
82 public :: getMyNode
83
84 !! PUBLIC subroutines re-exported from the MPI library
85 public :: mpi_bcast
86 public :: mpi_allreduce
87 ! public :: mpi_reduce
88 public :: mpi_send
89 public :: mpi_recv
90 public :: mpi_get_processor_name
91 public :: mpi_finalize
92
93 !! PUBLIC mpi variables
94 public :: mpi_comm_world
95 public :: mpi_character
96 public :: mpi_integer
97 public :: mpi_lor
98 public :: mpi_logical
99 public :: mpi_real
100 public :: mpi_double_precision
101 public :: mpi_sum
102 public :: mpi_max
103 public :: mpi_status_size
104 public :: mpi_any_source
105
106
107
108 !! Safety logical to prevent access to ComponentPlan until
109 !! set by C++.
110 logical, save :: ComponentPlanSet = .false.
111
112
113 !! generic mpi error declaration.
114 integer, public :: mpi_err
115 character(len = statusMsgSize) :: errMsg
116
117 #ifdef PROFILE
118 public :: printCommTime
119 public :: getCommTime
120 real,save :: commTime = 0.0
121 real :: commTimeInitial,commTimeFinal
122 #endif
123
124
125 !! Tags used during force loop for parallel simulation
126 integer, public, allocatable, dimension(:) :: AtomLocalToGlobal
127 integer, public, allocatable, dimension(:) :: AtomRowToGlobal
128 integer, public, allocatable, dimension(:) :: AtomColToGlobal
129 integer, public, allocatable, dimension(:) :: AtomGlobalToLocal
130 integer, public, allocatable, dimension(:) :: GroupLocalToGlobal
131 integer, public, allocatable, dimension(:) :: GroupRowToGlobal
132 integer, public, allocatable, dimension(:) :: GroupColToGlobal
133
134 !! Logical set true if mpiSimulation has been initialized
135 logical, save :: isSimSet = .false.
136
137
138 type (mpiComponentPlan), save :: mpiSim
139
140 !! gs_plan contains plans for gather and scatter routines
141 type, public :: gs_plan
142 private
143 type (mpiComponentPlan), pointer :: gsComponentPlan => NULL()
144 integer :: gsPlanSize !! size of this plan (nDim*nComponents)
145 integer :: globalPlanSize !! size of all components in plan
146 integer, dimension(:), pointer :: displs !! Displacements array for mpi indexed from 0.
147 integer, dimension(:), pointer :: counts !! Counts array for mpi indexed from 0.
148 integer :: myPlanComm !! My communicator for this plan
149 integer :: myPlanRank !! My rank in this plan
150 integer :: planNprocs !! Number of processors in this plan
151 end type gs_plan
152
153 ! plans for different decompositions
154 type (gs_plan), public, save :: plan_atom_row
155 type (gs_plan), public, save :: plan_atom_row_3d
156 type (gs_plan), public, save :: plan_atom_col
157 type (gs_plan), public, save :: plan_atom_col_3d
158 type (gs_plan), public, save :: plan_atom_row_Rotation
159 type (gs_plan), public, save :: plan_atom_col_Rotation
160 type (gs_plan), public, save :: plan_group_row
161 type (gs_plan), public, save :: plan_group_col
162 type (gs_plan), public, save :: plan_group_row_3d
163 type (gs_plan), public, save :: plan_group_col_3d
164
165 type (mpiComponentPlan), pointer :: simComponentPlan
166
167 ! interface for gather scatter routines
168 !! Generic interface for gather.
169 !! Gathers a local array into a row or column array.
170 !! Interface provided for integer, double, and double
171 !! rank-2 arrays.
172 interface gather
173 module procedure gather_integer
174 module procedure gather_double
175 module procedure gather_double_2d
176 end interface
177
178 !! Generic interface for scatter.
179 !! Scatters a row or column array, adding components
180 !! and reducing them to a local nComponent array.
181 !! Interface provided for double and double rank-2 arrays.
182
183 interface scatter
184 module procedure scatter_double
185 module procedure scatter_double_2d
186 end interface
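!!
!! Illustrative use of the two generic interfaces above (these variable
!! names are hypothetical and do not appear elsewhere in this file):
!!
!!    real(kind=dp), dimension(nDim, nAtomsLocal) :: qLocal, fLocal
!!    real(kind=dp), dimension(nDim, nAtomsInRow) :: qRow, fRow
!!
!!    call gather(qLocal, qRow, plan_atom_row_3d)   ! replicate local data
!!                                                  ! across this row
!!    ! ... accumulate force contributions into fRow ...
!!    call scatter(fRow, fLocal, plan_atom_row_3d)  ! reduce row contributions
!!                                                  ! back onto local atoms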
187
188
189
190 contains
191
192 !! Sets up mpiComponentPlan with structure passed from C++.
193 subroutine setupSimParallel(thisComponentPlan, nAtomTags, atomTags, &
194 nGroupTags, groupTags, status)
195 !! Passed Arguments
196 !! mpiComponentPlan struct from C
197 type (mpiComponentPlan), intent(inout) :: thisComponentPlan
198 !! Number of tags passed
199 integer, intent(in) :: nAtomTags, nGroupTags
200 !! Result status, 0 = normal, -1 = error
201 integer, intent(out) :: status
202 integer :: localStatus
203 !! Global reference tag for local particles
204 integer, dimension(nAtomTags), intent(inout) :: atomTags
205 integer, dimension(nGroupTags), intent(inout) :: groupTags
206
207 !write(*,*) 'mpiSim_mod thinks node', thisComponentPlan%myNode, &
208 ! ' has atomTags(1) = ', atomTags(1)
209
210 status = 0
211 if (componentPlanSet) then
212 return
213 endif
214 componentPlanSet = .true.
215
216 !! copy the C component plan to Fortran
217 mpiSim = thisComponentPlan
218
219 call make_Force_Grid(mpiSim, localStatus)
220 if (localStatus /= 0) then
221 write(errMsg, *) 'An error in making the force grid has occurred'
222 call handleError("setupSimParallel", errMsg)
223 status = -1
224 return
225 endif
226
227 call updateGridComponents(mpiSim, localStatus)
228 if (localStatus /= 0) then
229 write(errMsg,*) "Error updating grid components"
230 call handleError("setupSimParallel", errMsg)
231 status = -1
232 return
233 endif
234
235 !! initialize gather and scatter plans used in this simulation
236 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
237 mpiSim, mpiSim%rowComm, plan_atom_row)
238 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
239 mpiSim, mpiSim%rowComm, plan_atom_row_3d)
240 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
241 mpiSim, mpiSim%rowComm, plan_atom_row_Rotation)
242 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
243 mpiSim, mpiSim%rowComm, plan_group_row)
244 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
245 mpiSim, mpiSim%rowComm, plan_group_row_3d)
246
247 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
248 mpiSim, mpiSim%columnComm, plan_atom_col)
249 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
250 mpiSim, mpiSim%columnComm, plan_atom_col_3d)
251 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
252 mpiSim, mpiSim%columnComm, plan_atom_col_Rotation)
253 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
254 mpiSim, mpiSim%columnComm, plan_group_col)
255 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
256 mpiSim, mpiSim%columnComm, plan_group_col_3d)
257
258 ! Initialize tags
259
260 call setAtomTags(atomTags,localStatus)
261 if (localStatus /= 0) then
262 write(errMsg, *) 'An error in setting Atom Tags has occurred'
263 call handleError("setupSimParallel", errMsg)
264 status = -1
265 return
266 endif
267
268
269 call setGroupTags(groupTags,localStatus)
270 if (localStatus /= 0) then
271 write(errMsg, *) 'An error in setting Group Tags has occurred'
272 call handleError("setupSimParallel", errMsg)
273 status = -1
274 return
275 endif
276
277 isSimSet = .true.
278
279 ! call printComponentPlan(mpiSim,0)
280 end subroutine setupSimParallel
281
282 subroutine replanSimParallel(thisComponentPlan,status)
283 ! Passed Arguments
284 !! mpiComponentPlan struct from C
285 type (mpiComponentPlan), intent(inout) :: thisComponentPlan
286 integer, intent(out) :: status
287 integer :: localStatus
288 integer :: mpierror
289 status = 0
290
291 call updateGridComponents(thisComponentPlan,localStatus)
292 if (localStatus /= 0) then
293 status = -1
294 return
295 endif
296
297 !! Unplan Gather Scatter plans
298 call unplan_gather_scatter(plan_atom_row)
299 call unplan_gather_scatter(plan_atom_row_3d)
300 call unplan_gather_scatter(plan_atom_row_Rotation)
301 call unplan_gather_scatter(plan_group_row)
302 call unplan_gather_scatter(plan_group_row_3d)
303
304 call unplan_gather_scatter(plan_atom_col)
305 call unplan_gather_scatter(plan_atom_col_3d)
306 call unplan_gather_scatter(plan_atom_col_Rotation)
307 call unplan_gather_scatter(plan_group_col)
308 call unplan_gather_scatter(plan_group_col_3d)
309
310 !! initialize gather and scatter plans used in this simulation
311 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
312 mpiSim, mpiSim%rowComm, plan_atom_row)
313 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
314 mpiSim, mpiSim%rowComm, plan_atom_row_3d)
315 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
316 mpiSim, mpiSim%rowComm, plan_atom_row_Rotation)
317 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
318 mpiSim, mpiSim%rowComm, plan_group_row)
319 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
320 mpiSim, mpiSim%rowComm, plan_group_row_3d)
321
322 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
323 mpiSim, mpiSim%columnComm, plan_atom_col)
324 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
325 mpiSim, mpiSim%columnComm, plan_atom_col_3d)
326 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
327 mpiSim, mpiSim%columnComm, plan_atom_col_Rotation)
328 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
329 mpiSim, mpiSim%columnComm, plan_group_col)
330 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
331 mpiSim, mpiSim%columnComm, plan_group_col_3d)
332
333 end subroutine replanSimParallel
334
335 !! Updates the numbers of atoms and groups in this processor's row and column (summed over the row and column communicators) for long-range forces.
336 subroutine updateGridComponents(thisComponentPlan, status)
337 type (mpiComponentPlan) :: thisComponentPlan !! mpiComponentPlan
338
339 !! Status return
340 !! - 0 Success
341 !! - -1 Failure
342 integer, intent(out) :: status
343 integer :: nAtomsLocal
344 integer :: nAtomsInRow = 0
345 integer :: nAtomsInColumn = 0
346 integer :: nGroupsLocal
347 integer :: nGroupsInRow = 0
348 integer :: nGroupsInColumn = 0
349 integer :: mpiErrors
350
351 status = 0
352 if (.not. componentPlanSet) return
353
354 if (thisComponentPlan%nAtomsLocal == 0) then
355 status = -1
356 return
357 endif
358 if (thisComponentPlan%nGroupsLocal == 0) then
359 write(*,*) 'tcp%ngl = ', thisComponentPlan%nGroupsLocal
360 status = -1
361 return
362 endif
363
364 nAtomsLocal = thisComponentPlan%nAtomsLocal
365 nGroupsLocal = thisComponentPlan%nGroupsLocal
366
367 call mpi_allreduce(nAtomsLocal, nAtomsInRow, 1, mpi_integer, &
368 mpi_sum, thisComponentPlan%rowComm, mpiErrors)
369 if (mpiErrors /= 0) then
370 status = -1
371 return
372 endif
373
374 call mpi_allreduce(nAtomsLocal, nAtomsInColumn, 1, mpi_integer, &
375 mpi_sum, thisComponentPlan%columnComm, mpiErrors)
376 if (mpiErrors /= 0) then
377 status = -1
378 return
379 endif
380
381 call mpi_allreduce(nGroupsLocal, nGroupsInRow, 1, mpi_integer, &
382 mpi_sum, thisComponentPlan%rowComm, mpiErrors)
383 if (mpiErrors /= 0) then
384 status = -1
385 return
386 endif
387
388 call mpi_allreduce(nGroupsLocal, nGroupsInColumn, 1, mpi_integer, &
389 mpi_sum, thisComponentPlan%columnComm, mpiErrors)
390 if (mpiErrors /= 0) then
391 status = -1
392 return
393 endif
394
395 thisComponentPlan%nAtomsInRow = nAtomsInRow
396 thisComponentPlan%nAtomsInColumn = nAtomsInColumn
397 thisComponentPlan%nGroupsInRow = nGroupsInRow
398 thisComponentPlan%nGroupsInColumn = nGroupsInColumn
399
400 end subroutine updateGridComponents
401
402
403 !! Creates a square force decomposition of processors into row and column
404 !! communicators.
405 subroutine make_Force_Grid(thisComponentPlan,status)
406 type (mpiComponentPlan) :: thisComponentPlan
407 integer, intent(out) :: status !! status returns -1 if error
408 integer :: nColumnsMax !! Maximum number of columns
409 integer :: nWorldProcessors !! Total number of processors in World comm.
410 integer :: rowIndex !! Row for this processor.
411 integer :: columnIndex !! Column for this processor.
412 integer :: nRows !! Total number of rows.
413 integer :: nColumns !! Total number of columns.
414 integer :: mpiErrors !! MPI errors.
415 integer :: rowCommunicator !! MPI row communicator.
416 integer :: columnCommunicator
417 integer :: myWorldRank
418 integer :: i
419
420
421 if (.not. ComponentPlanSet) return
422 status = 0
423
424 !! We make a dangerous assumption here that if nProcessors is
425 !! zero, then we need to get the information from MPI.
426 if (thisComponentPlan%nProcessors == 0 ) then
427 call mpi_comm_size( MPI_COMM_WORLD, nWorldProcessors,mpiErrors)
428 if ( mpiErrors /= 0 ) then
429 status = -1
430 return
431 endif
432 call mpi_comm_rank( MPI_COMM_WORLD,myWorldRank,mpiErrors)
433 if ( mpiErrors /= 0 ) then
434 status = -1
435 return
436 endif
437
438 else
439 nWorldProcessors = thisComponentPlan%nProcessors
440 myWorldRank = thisComponentPlan%myNode
441 endif
442
443 nColumnsMax = nint(sqrt(real(nWorldProcessors,kind=dp)))
444
445 do i = 1, nColumnsMax
446 if (mod(nWorldProcessors,i) == 0) nColumns = i
447 end do
448
449 nRows = nWorldProcessors/nColumns
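!! Worked example (illustration only): with 12 processors, nColumnsMax =
!! nint(sqrt(12.0)) = 3, the largest divisor of 12 not exceeding 3 is 3,
!! so nColumns = 3 and nRows = 4.  With a prime count such as 7, only the
!! divisor 1 is found, giving nColumns = 1 and nRows = 7.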
450
451 rowIndex = myWorldRank/nColumns
452
453 call mpi_comm_split(mpi_comm_world,rowIndex,0,rowCommunicator,mpiErrors)
454 if ( mpiErrors /= 0 ) then
455 write(errMsg, *) 'An error ', mpiErrors, ' occurred in splitting the row communicator'
456 call handleError("makeForceGrid", errMsg)
457 status = -1
458 return
459 endif
460
461 columnIndex = mod(myWorldRank,nColumns)
462 call mpi_comm_split(mpi_comm_world,columnIndex,0,columnCommunicator,mpiErrors)
463 if ( mpiErrors /= 0 ) then
464 write(errMsg, *) "MPI comm split failed at columnCommunicator with error ", mpiErrors
465 call handleError("makeForceGrid", errMsg)
466 status = -1
467 return
468 endif
469
470 ! Set appropriate components of thisComponentPlan
471 thisComponentPlan%rowComm = rowCommunicator
472 thisComponentPlan%columnComm = columnCommunicator
473 thisComponentPlan%rowIndex = rowIndex
474 thisComponentPlan%columnIndex = columnIndex
475 thisComponentPlan%nRows = nRows
476 thisComponentPlan%nColumns = nColumns
477
478 end subroutine make_Force_Grid
479
480 !! Initializes a gather/scatter plan.
481 subroutine plan_gather_scatter( nDim, nObjects, thisComponentPlan, &
482 thisComm, this_plan, status)
483 integer, intent(in) :: nDim !! Number of dimensions for gather scatter plan
484 integer, intent(in) :: nObjects
485 type (mpiComponentPlan), intent(in), target :: thisComponentPlan
486 type (gs_plan), intent(out) :: this_plan !! gather/scatter plan to initialize
487 integer, intent(in) :: thisComm !! MPI communicator for this plan
488
489 integer :: arraySize !! size to allocate plan for
490 integer, intent(out), optional :: status
491 integer :: ierror
492 integer :: i,junk
493
494 if (present(status)) status = 0
495
496 !! Set gsComponentPlan pointer
497 !! to the component plan we want to use for this gather/scatter plan.
498 !! WARNING: this could be dangerous since thisComponentPlan was originally
499 !! allocated in C++ and there is a significant difference between C and
500 !! Fortran 95 pointers....
501 this_plan%gsComponentPlan => thisComponentPlan
502
503 ! Set this plan size for displs array.
504 this_plan%gsPlanSize = nDim * nObjects
505
506 ! Duplicate communicator for this plan
507 call mpi_comm_dup(thisComm, this_plan%myPlanComm, mpi_err)
508 if (mpi_err /= 0) then
509 if (present(status)) status = -1
510 return
511 end if
512 call mpi_comm_rank(this_plan%myPlanComm, this_plan%myPlanRank, mpi_err)
513 if (mpi_err /= 0) then
514 if (present(status)) status = -1
515 return
516 end if
517
518 call mpi_comm_size(this_plan%myPlanComm, this_plan%planNprocs, mpi_err)
519
520 if (mpi_err /= 0) then
521 if (present(status)) status = -1
522 return
523 end if
524
525 !! counts and displacements arrays are indexed from 0 to be compatible
526 !! with MPI arrays.
527 allocate (this_plan%counts(0:this_plan%planNprocs-1),STAT=ierror)
528 if (ierror /= 0) then
529 if (present(status)) status = -1
530 return
531 end if
532
533 allocate (this_plan%displs(0:this_plan%planNprocs-1),STAT=ierror)
534 if (ierror /= 0) then
535 if (present(status)) status = -1
536 return
537 end if
538
539 !! gather all the local plan sizes into an array of length planNprocs.
540 call mpi_allgather(this_plan%gsPlanSize,1,mpi_integer,this_plan%counts, &
541 1,mpi_integer,thisComm,mpi_err)
542
543 if (mpi_err /= 0) then
544 if (present(status)) status = -1
545 return
546 end if
547
548 !! figure out the total number of elements in this plan
549 this_plan%globalPlanSize = sum(this_plan%counts)
550
551 !! initialize plan displacements.
552 this_plan%displs(0) = 0
553 do i = 1, this_plan%planNprocs - 1,1
554 this_plan%displs(i) = this_plan%displs(i-1) + this_plan%counts(i-1)
555 end do
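!! Example (illustration only): if the three ranks in a plan contribute
!! counts = (/ 6, 9, 3 /), then globalPlanSize = 18 and
!! displs = (/ 0, 6, 15 /).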
556 end subroutine plan_gather_scatter
557
558 subroutine unplan_gather_scatter(this_plan)
559 type (gs_plan), intent(inout) :: this_plan
560
561 this_plan%gsComponentPlan => null()
562 call mpi_comm_free(this_plan%myPlanComm,mpi_err)
563
564 deallocate(this_plan%counts)
565 deallocate(this_plan%displs)
566
567 end subroutine unplan_gather_scatter
568
569 subroutine gather_integer( sbuffer, rbuffer, this_plan, status)
570
571 type (gs_plan), intent(inout) :: this_plan
572 integer, dimension(:), intent(inout) :: sbuffer
573 integer, dimension(:), intent(inout) :: rbuffer
574 integer :: noffset
575 integer, intent(out), optional :: status
576 integer :: i
577
578 if (present(status)) status = 0
579 noffset = this_plan%displs(this_plan%myPlanRank)
580
581 ! if (getmyNode() == 1) then
582 ! write(*,*) "Node 0 printing allgatherv vars"
583 ! write(*,*) "Noffset: ", noffset
584 ! write(*,*) "PlanSize: ", this_plan%gsPlanSize
585 ! write(*,*) "PlanComm: ", this_plan%myPlanComm
586 ! end if
587
588 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_integer, &
589 rbuffer,this_plan%counts,this_plan%displs,mpi_integer, &
590 this_plan%myPlanComm, mpi_err)
591
592 if (mpi_err /= 0) then
593 write(errMsg, *) "mpi_allgatherv failed with error ", mpi_err
594 call handleError("gather_integer", errMsg)
595 if (present(status)) status = -1
596 endif
597
598 end subroutine gather_integer
599
600 subroutine gather_double( sbuffer, rbuffer, this_plan, status)
601
602 type (gs_plan), intent(in) :: this_plan
603 real( kind = DP ), dimension(:), intent(inout) :: sbuffer
604 real( kind = DP ), dimension(:), intent(inout) :: rbuffer
605 integer :: noffset
606 integer, intent(out), optional :: status
607
608
609 if (present(status)) status = 0
610 noffset = this_plan%displs(this_plan%myPlanRank)
611 #ifdef PROFILE
612 call cpu_time(commTimeInitial)
613 #endif
614 #ifdef SINGLE_PRECISION
615 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_real, &
616 rbuffer,this_plan%counts,this_plan%displs,mpi_real, &
617 this_plan%myPlanComm, mpi_err)
618 #else
619 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_double_precision, &
620 rbuffer,this_plan%counts,this_plan%displs,mpi_double_precision, &
621 this_plan%myPlanComm, mpi_err)
622 #endif
623 #ifdef PROFILE
624 call cpu_time(commTimeFinal)
625 commTime = commTime + commTimeFinal - commTimeInitial
626 #endif
627
628 if (mpi_err /= 0) then
629 write(errMsg, *) "mpi_allgatherv failed with error ", mpi_err
630 call handleError("gather_double", errMsg)
631 if (present(status)) status = -1
632 endif
633
634 end subroutine gather_double
635
636 subroutine gather_double_2d( sbuffer, rbuffer, this_plan, status)
637
638 type (gs_plan), intent(in) :: this_plan
639 real( kind = DP ), dimension(:,:), intent(inout) :: sbuffer
640 real( kind = DP ), dimension(:,:), intent(inout) :: rbuffer
641 integer :: noffset,i,ierror
642 integer, intent(out), optional :: status
643
644 external mpi_allgatherv
645
646 if (present(status)) status = 0
647
648 ! noffset = this_plan%displs(this_plan%me)
649 #ifdef PROFILE
650 call cpu_time(commTimeInitial)
651 #endif
652
653 #ifdef SINGLE_PRECISION
654 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_real, &
655 rbuffer,this_plan%counts,this_plan%displs,mpi_real, &
656 this_plan%myPlanComm, mpi_err)
657 #else
658 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_double_precision, &
659 rbuffer,this_plan%counts,this_plan%displs,mpi_double_precision, &
660 this_plan%myPlanComm, mpi_err)
661 #endif
662
663 #ifdef PROFILE
664 call cpu_time(commTimeFinal)
665 commTime = commTime + commTimeFinal - commTimeInitial
666 #endif
667
668 if (mpi_err /= 0) then
669 write(errMsg, *) "mpi_allgatherv failed with error ", mpi_err
670 call handleError("gather_double_2d", errMsg)
671 if (present(status)) status = -1
672 endif
673
674 end subroutine gather_double_2d
675
676 subroutine scatter_double( sbuffer, rbuffer, this_plan, status)
677
678 type (gs_plan), intent(in) :: this_plan
679 real( kind = DP ), dimension(:), intent(inout) :: sbuffer
680 real( kind = DP ), dimension(:), intent(inout) :: rbuffer
681 integer, intent(out), optional :: status
682 external mpi_reduce_scatter
683
684 if (present(status)) status = 0
685
686 #ifdef PROFILE
687 call cpu_time(commTimeInitial)
688 #endif
689 #ifdef SINGLE_PRECISION
690 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
691 mpi_real, MPI_SUM, this_plan%myPlanComm, mpi_err)
692 #else
693 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
694 mpi_double_precision, MPI_SUM, this_plan%myPlanComm, mpi_err)
695 #endif
696 #ifdef PROFILE
697 call cpu_time(commTimeFinal)
698 commTime = commTime + commTimeFinal - commTimeInitial
699 #endif
700
701 if (mpi_err /= 0) then
702 write(errMsg, *) "mpi_reduce_scatter failed with error ", mpi_err
703 call handleError("scatter_double", errMsg)
704 if (present(status)) status = -1
705 endif
706
707 end subroutine scatter_double
708
709 subroutine scatter_double_2d( sbuffer, rbuffer, this_plan, status)
710
711 type (gs_plan), intent(in) :: this_plan
712 real( kind = DP ), dimension(:,:), intent(inout) :: sbuffer
713 real( kind = DP ), dimension(:,:), intent(inout) :: rbuffer
714 integer, intent(out), optional :: status
715 external mpi_reduce_scatter
716
717 if (present(status)) status = 0
718 #ifdef PROFILE
719 call cpu_time(commTimeInitial)
720 #endif
721 #ifdef SINGLE_PRECISION
722 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
723 mpi_real, MPI_SUM, this_plan%myPlanComm, mpi_err)
724 #else
725 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
726 mpi_double_precision, MPI_SUM, this_plan%myPlanComm, mpi_err)
727 #endif
728 #ifdef PROFILE
729 call cpu_time(commTimeFinal)
730 commTime = commTime + commTimeFinal - commTimeInitial
731 #endif
732
733 if (mpi_err /= 0) then
734 write(errMsg, *) "mpi_reduce_scatter failed with error ", mpi_err
735 call handleError("scatter_double_2d", errMsg)
736 if (present(status)) status = -1
737 endif
738
739 end subroutine scatter_double_2d
740
741 subroutine setAtomTags(tags, status)
742 integer, dimension(:) :: tags
743 integer :: status
744
745 integer :: alloc_stat
746
747 integer :: nAtomsInCol
748 integer :: nAtomsInRow
749 integer :: i, glob, nAtomTags, maxGlobal
750
751
752 status = 0
753 ! allocate row arrays
754 nAtomsInRow = getNatomsInRow(plan_atom_row)
755 nAtomsInCol = getNatomsInCol(plan_atom_col)
756
757 if(.not. allocated(AtomLocalToGlobal)) then
758 allocate(AtomLocalToGlobal(size(tags)),STAT=alloc_stat)
759 if (alloc_stat /= 0 ) then
760 status = -1
761 return
762 endif
763 else
764 deallocate(AtomLocalToGlobal)
765 allocate(AtomLocalToGlobal(size(tags)),STAT=alloc_stat)
766 if (alloc_stat /= 0 ) then
767 status = -1
768 return
769 endif
770
771 endif
772
773 AtomLocalToGlobal = tags
774
775 if (.not. allocated(AtomRowToGlobal)) then
776 allocate(AtomRowToGlobal(nAtomsInRow),STAT=alloc_stat)
777 if (alloc_stat /= 0 ) then
778 status = -1
779 return
780 endif
781 else
782 deallocate(AtomRowToGlobal)
783 allocate(AtomRowToGlobal(nAtomsInRow),STAT=alloc_stat)
784 if (alloc_stat /= 0 ) then
785 status = -1
786 return
787 endif
788
789 endif
790 ! allocate column arrays
791 if (.not. allocated(AtomColToGlobal)) then
792 allocate(AtomColToGlobal(nAtomsInCol),STAT=alloc_stat)
793 if (alloc_stat /= 0 ) then
794 status = -1
795 return
796 endif
797 else
798 deallocate(AtomColToGlobal)
799 allocate(AtomColToGlobal(nAtomsInCol),STAT=alloc_stat)
800 if (alloc_stat /= 0 ) then
801 status = -1
802 return
803 endif
804 endif
805
806 call gather(tags, AtomRowToGlobal, plan_atom_row)
807 call gather(tags, AtomColToGlobal, plan_atom_col)
808
809 nAtomTags = size(tags)
810 maxGlobal = -1
811 do i = 1, nAtomTags
812 if (tags(i).gt.maxGlobal) maxGlobal = tags(i)
813 enddo
814
815 if(.not. allocated(AtomGlobalToLocal)) then
816 allocate(AtomGlobalToLocal(maxGlobal),STAT=alloc_stat)
817 if (alloc_stat /= 0 ) then
818 status = -1
819 return
820 endif
821 else
822 deallocate(AtomGlobalToLocal)
823 allocate(AtomGlobalToLocal(maxGlobal),STAT=alloc_stat)
824 if (alloc_stat /= 0 ) then
825 status = -1
826 return
827 endif
828
829 endif
830
831 do i = 1, nAtomTags
832 glob = AtomLocalToGlobal(i)
833 AtomGlobalToLocal(glob) = i
834 enddo
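!! Example (illustration only): if this processor owns tags = (/ 7, 3, 9 /),
!! then AtomLocalToGlobal = (/ 7, 3, 9 /), maxGlobal = 9, and
!! AtomGlobalToLocal(7) = 1, AtomGlobalToLocal(3) = 2, AtomGlobalToLocal(9) = 3.
!! Entries of AtomGlobalToLocal for atoms not owned locally remain undefined.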
835
836 end subroutine setAtomTags
837
838 subroutine setGroupTags(tags, status)
839 integer, dimension(:) :: tags
840 integer :: status
841
842 integer :: alloc_stat
843
844 integer :: nGroupsInCol
845 integer :: nGroupsInRow
846
847 status = 0
848
849 nGroupsInRow = getNgroupsInRow(plan_group_row)
850 nGroupsInCol = getNgroupsInCol(plan_group_col)
851
852 if(allocated(GroupLocalToGlobal)) then
853 deallocate(GroupLocalToGlobal)
854 endif
855 allocate(GroupLocalToGlobal(size(tags)),STAT=alloc_stat)
856 if (alloc_stat /= 0 ) then
857 status = -1
858 return
859 endif
860
861 GroupLocalToGlobal = tags
862
863 if(allocated(GroupRowToGlobal)) then
864 deallocate(GroupRowToGlobal)
865 endif
866 allocate(GroupRowToGlobal(nGroupsInRow),STAT=alloc_stat)
867 if (alloc_stat /= 0 ) then
868 status = -1
869 return
870 endif
871
872 if(allocated(GroupColToGlobal)) then
873 deallocate(GroupColToGlobal)
874 endif
875 allocate(GroupColToGlobal(nGroupsInCol),STAT=alloc_stat)
876 if (alloc_stat /= 0 ) then
877 status = -1
878 return
879 endif
880
881 call gather(tags, GroupRowToGlobal, plan_group_row)
882 call gather(tags, GroupColToGlobal, plan_group_col)
883
884 end subroutine setGroupTags
885
886 function getNatomsInCol(thisplan) result(nInCol)
887 type (gs_plan), intent(in) :: thisplan
888 integer :: nInCol
889 nInCol = thisplan%gsComponentPlan%nAtomsInColumn
890 end function getNatomsInCol
891
892 function getNatomsInRow(thisplan) result(nInRow)
893 type (gs_plan), intent(in) :: thisplan
894 integer :: nInRow
895 nInRow = thisplan%gsComponentPlan%nAtomsInRow
896 end function getNatomsInRow
897
898 function getNgroupsInCol(thisplan) result(nInCol)
899 type (gs_plan), intent(in) :: thisplan
900 integer :: nInCol
901 nInCol = thisplan%gsComponentPlan%nGroupsInColumn
902 end function getNgroupsInCol
903
904 function getNgroupsInRow(thisplan) result(nInRow)
905 type (gs_plan), intent(in) :: thisplan
906 integer :: nInRow
907 nInRow = thisplan%gsComponentPlan%nGroupsInRow
908 end function getNgroupsInRow
909
910 function isMPISimSet() result(isthisSimSet)
911 logical :: isthisSimSet
912 if (isSimSet) then
913 isthisSimSet = .true.
914 else
915 isthisSimSet = .false.
916 endif
917 end function isMPISimSet
918
919 subroutine printComponentPlan(this_plan,printNode)
920
921 type (mpiComponentPlan), intent(in) :: this_plan
922 integer, optional :: printNode
923 logical :: print_me
924 print_me = .false. !! assigned at run time; an initializer in the declaration would imply the SAVE attribute
925 if (present(printNode)) then
926 if (printNode == mpiSim%myNode) print_me = .true.
927 else
928 print_me = .true.
929 endif
930
931 if (print_me) then
932 write(default_error,*) "SetupSimParallel: writing component plan"
933
934 write(default_error,*) "nMolGlobal: ", mpiSim%nMolGlobal
935 write(default_error,*) "nAtomsGlobal: ", mpiSim%nAtomsGlobal
936 write(default_error,*) "nAtomsLocal: ", mpiSim%nAtomsLocal
937 write(default_error,*) "myNode: ", mpiSim%myNode
938 write(default_error,*) "nProcessors: ", mpiSim%nProcessors
939 write(default_error,*) "rowComm: ", mpiSim%rowComm
940 write(default_error,*) "columnComm: ", mpiSim%columnComm
941 write(default_error,*) "nRows: ", mpiSim%nRows
942 write(default_error,*) "nColumns: ", mpiSim%nColumns
943 write(default_error,*) "nAtomsInRow: ", mpiSim%nAtomsInRow
944 write(default_error,*) "nAtomsInColumn: ", mpiSim%nAtomsInColumn
945 write(default_error,*) "rowIndex: ", mpiSim%rowIndex
946 write(default_error,*) "columnIndex: ", mpiSim%columnIndex
947 endif
948 end subroutine printComponentPlan
949
950 function getMyNode() result(myNode)
951 integer :: myNode
952 myNode = mpiSim%myNode
953 end function getMyNode
954
955 #ifdef PROFILE
956 subroutine printCommTime()
957 write(*,*) "MPI communication time is: ", commTime
958 end subroutine printCommTime
959
960 function getCommTime() result(comm_time)
961 real :: comm_time
962 comm_time = commTime
963 end function getCommTime
964
965 #endif
966
967 #else
968 contains
969 subroutine setupSimParallel(thisComponentPlan, nAtomTags, atomTags, &
970 nGroupTags, groupTags, status)
971 !! Passed Arguments
972 !! mpiComponentPlan struct from C
973 type (mpiComponentPlan), intent(inout) :: thisComponentPlan
974 !! Number of tags passed
975 integer, intent(in) :: nAtomTags, nGroupTags
976 !! Result status, 0 = normal, -1 = error
977 integer, intent(out) :: status
978 integer :: localStatus
979 !! Global reference tag for local particles
980 integer, dimension(nAtomTags), intent(inout) :: atomTags
981 integer, dimension(nGroupTags), intent(inout) :: groupTags
982 status = 0 !! stub for non-MPI builds: report success
983 end subroutine setupSimParallel
984
985 #endif
986
987
988 end module mpiSimulation
989

Properties

svn:keywords: Author Id Revision Date