root/OpenMD/branches/heatflux/src/UseTheForce/doForces.F90
Revision: 1681
Committed: Tue Feb 28 16:30:45 2012 UTC (13 years, 6 months ago) by chuckv
File size: 69619 byte(s)
Log Message:
More bug fixes....

File Contents

1 !!
2 !! Copyright (c) 2005, 2009 The University of Notre Dame. All Rights Reserved.
3 !!
4 !! The University of Notre Dame grants you ("Licensee") a
5 !! non-exclusive, royalty free, license to use, modify and
6 !! redistribute this software in source and binary code form, provided
7 !! that the following conditions are met:
8 !!
9 !! 1. Redistributions of source code must retain the above copyright
10 !! notice, this list of conditions and the following disclaimer.
11 !!
12 !! 2. Redistributions in binary form must reproduce the above copyright
13 !! notice, this list of conditions and the following disclaimer in the
14 !! documentation and/or other materials provided with the
15 !! distribution.
16 !!
17 !! This software is provided "AS IS," without a warranty of any
18 !! kind. All express or implied conditions, representations and
19 !! warranties, including any implied warranty of merchantability,
20 !! fitness for a particular purpose or non-infringement, are hereby
21 !! excluded. The University of Notre Dame and its licensors shall not
22 !! be liable for any damages suffered by licensee as a result of
23 !! using, modifying or distributing the software or its
24 !! derivatives. In no event will the University of Notre Dame or its
25 !! licensors be liable for any lost revenue, profit or data, or for
26 !! direct, indirect, special, consequential, incidental or punitive
27 !! damages, however caused and regardless of the theory of liability,
28 !! arising out of the use of or inability to use software, even if the
29 !! University of Notre Dame has been advised of the possibility of
30 !! such damages.
31 !!
32 !! SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
33 !! research, please cite the appropriate papers when you publish your
34 !! work. Good starting points are:
35 !!
36 !! [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
37 !! [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
38 !! [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
39 !! [4] Vardeman & Gezelter, in progress (2009).
40 !!
41
42 !! doForces.F90
43 !! module doForces
44 !! Calculates Long Range forces.
45
46 !! @author Charles F. Vardeman II
47 !! @author Matthew Meineke
48 !! @version $Id$, $Date$, $Revision$
49
50
51 module doForces
52 use force_globals
53 use fForceOptions
54 use simulation
55 use definitions
56 use atype_module
57 use switcheroo
58 use neighborLists
59 use lj
60 use sticky
61 use electrostatic_module
62 use gayberne
63 use shapes
64 use vector_class
65 use eam
66 use MetalNonMetal
67 use suttonchen
68 use status
69 #ifdef IS_MPI
70 use mpiSimulation
71 #endif
72
73 implicit none
74 PRIVATE
75
76 #define __FORTRAN90
77 #include "UseTheForce/fCutoffPolicy.h"
78 #include "UseTheForce/DarkSide/fInteractionMap.h"
79 #include "UseTheForce/DarkSide/fElectrostaticSummationMethod.h"
80
81 INTEGER, PARAMETER:: PREPAIR_LOOP = 1
82 INTEGER, PARAMETER:: PAIR_LOOP = 2
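!! The force evaluation in do_force_loop below is organized as up to two
!! passes over the neighbor list: PREPAIR_LOOP is taken only when
!! density-dependent potentials (EAM or Sutton-Chen) are present and
!! accumulates the local electron densities, which do_preforce converts
!! into embedding energies before the normal PAIR_LOOP computes pair forces.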
83
84 logical, save :: haveNeighborList = .false.
85 logical, save :: haveSIMvariables = .false.
86 logical, save :: haveSaneForceField = .false.
87 logical, save :: haveInteractionHash = .false.
88 logical, save :: haveGtypeCutoffMap = .false.
89 logical, save :: haveDefaultCutoffs = .false.
90 logical, save :: haveSkinThickness = .false.
91 logical, save :: haveElectrostaticSummationMethod = .false.
92 logical, save :: haveCutoffPolicy = .false.
93 logical, save :: VisitCutoffsAfterComputing = .false.
94 logical, save :: do_box_dipole = .false.
95
96 logical, save :: FF_uses_DirectionalAtoms
97 logical, save :: FF_uses_Dipoles
98 logical, save :: FF_uses_GayBerne
99 logical, save :: FF_uses_EAM
100 logical, save :: FF_uses_SC
101 logical, save :: FF_uses_MNM
102
103
104 logical, save :: SIM_uses_DirectionalAtoms
105 logical, save :: SIM_uses_EAM
106 logical, save :: SIM_uses_SC
107 logical, save :: SIM_uses_MNM
108 logical, save :: SIM_requires_postpair_calc
109 logical, save :: SIM_requires_prepair_calc
110 logical, save :: SIM_uses_PBC
111 logical, save :: SIM_uses_AtomicVirial
112
113 integer, save :: electrostaticSummationMethod
114 integer, save :: cutoffPolicy = TRADITIONAL_CUTOFF_POLICY
115
116 real(kind=dp), save :: defaultRcut, defaultRsw, largestRcut
117 real(kind=dp), save :: skinThickness
118 logical, save :: defaultDoShiftPot
119 logical, save :: defaultDoShiftFrc
120
121 public :: init_FF
122 public :: setCutoffs
123 public :: cWasLame
124 public :: setElectrostaticMethod
125 public :: setBoxDipole
126 public :: getBoxDipole
127 public :: setCutoffPolicy
128 public :: setSkinThickness
129 public :: do_force_loop
130
131 #ifdef PROFILE
132 public :: getforcetime
133 real, save :: forceTime = 0
134 real :: forceTimeInitial, forceTimeFinal
135 integer :: nLoops
136 #endif
137
138 !! Variables for cutoff mapping and interaction mapping
139 ! Bit hash to determine pair-pair interactions.
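! Each entry packs all interaction types shared by a pair of atom types
! into one integer: e.g. an atype pair that is both Lennard-Jones and
! electrostatic stores ior(LJ_PAIR, ELECTROSTATIC_PAIR), and the pair
! routines later test individual bits with iand(iHash, LJ_PAIR) .ne. 0.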
140 integer, dimension(:,:), allocatable :: InteractionHash
141 real(kind=dp), dimension(:), allocatable :: atypeMaxCutoff
142 real(kind=dp), dimension(:), allocatable, target :: groupMaxCutoffRow
143 real(kind=dp), dimension(:), pointer :: groupMaxCutoffCol
144
145 integer, dimension(:), allocatable, target :: groupToGtypeRow
146 integer, dimension(:), pointer :: groupToGtypeCol => null()
147
148 real(kind=dp), dimension(:), allocatable, target :: gtypeMaxCutoffRow
149 real(kind=dp), dimension(:), pointer :: gtypeMaxCutoffCol
150 type :: gtypeCutoffs
151 real(kind=dp) :: rcut
152 real(kind=dp) :: rcutsq
153 real(kind=dp) :: rlistsq
154 end type gtypeCutoffs
155 type(gtypeCutoffs), dimension(:,:), allocatable :: gtypeCutoffMap
156
157 real(kind=dp), dimension(3) :: boxDipole
158
159 contains
160
161 subroutine createInteractionHash()
162 integer :: nAtypes
163 integer :: i
164 integer :: j
165 integer :: iHash
166 !! Test Types
167 logical :: i_is_LJ
168 logical :: i_is_Elect
169 logical :: i_is_Sticky
170 logical :: i_is_StickyP
171 logical :: i_is_GB
172 logical :: i_is_EAM
173 logical :: i_is_Shape
174 logical :: i_is_SC
175 logical :: j_is_LJ
176 logical :: j_is_Elect
177 logical :: j_is_Sticky
178 logical :: j_is_StickyP
179 logical :: j_is_GB
180 logical :: j_is_EAM
181 logical :: j_is_Shape
182 logical :: j_is_SC
183 real(kind=dp) :: myRcut
184
185 if (.not. associated(atypes)) then
186 call handleError("doForces", "atypes was not present before call of createInteractionHash!")
187 return
188 endif
189
190 nAtypes = getSize(atypes)
191
192 if (nAtypes == 0) then
193 call handleError("doForces", "nAtypes was zero during call of createInteractionHash!")
194 return
195 end if
196
197 if (.not. allocated(InteractionHash)) then
198 allocate(InteractionHash(nAtypes,nAtypes))
199 else
200 deallocate(InteractionHash)
201 allocate(InteractionHash(nAtypes,nAtypes))
202 endif
203
204 if (.not. allocated(atypeMaxCutoff)) then
205 allocate(atypeMaxCutoff(nAtypes))
206 else
207 deallocate(atypeMaxCutoff)
208 allocate(atypeMaxCutoff(nAtypes))
209 endif
210
211 do i = 1, nAtypes
212 call getElementProperty(atypes, i, "is_LennardJones", i_is_LJ)
213 call getElementProperty(atypes, i, "is_Electrostatic", i_is_Elect)
214 call getElementProperty(atypes, i, "is_Sticky", i_is_Sticky)
215 call getElementProperty(atypes, i, "is_StickyPower", i_is_StickyP)
216 call getElementProperty(atypes, i, "is_GayBerne", i_is_GB)
217 call getElementProperty(atypes, i, "is_EAM", i_is_EAM)
218 call getElementProperty(atypes, i, "is_Shape", i_is_Shape)
219 call getElementProperty(atypes, i, "is_SC", i_is_SC)
220
221 do j = i, nAtypes
222
223 iHash = 0
224 myRcut = 0.0_dp
225
226 call getElementProperty(atypes, j, "is_LennardJones", j_is_LJ)
227 call getElementProperty(atypes, j, "is_Electrostatic", j_is_Elect)
228 call getElementProperty(atypes, j, "is_Sticky", j_is_Sticky)
229 call getElementProperty(atypes, j, "is_StickyPower", j_is_StickyP)
230 call getElementProperty(atypes, j, "is_GayBerne", j_is_GB)
231 call getElementProperty(atypes, j, "is_EAM", j_is_EAM)
232 call getElementProperty(atypes, j, "is_Shape", j_is_Shape)
233 call getElementProperty(atypes, j, "is_SC", j_is_SC)
234
235 if (i_is_LJ .and. j_is_LJ) then
236 iHash = ior(iHash, LJ_PAIR)
237 endif
238
239 if (i_is_Elect .and. j_is_Elect) then
240 iHash = ior(iHash, ELECTROSTATIC_PAIR)
241 endif
242
243 if (i_is_Sticky .and. j_is_Sticky) then
244 iHash = ior(iHash, STICKY_PAIR)
245 endif
246
247 if (i_is_StickyP .and. j_is_StickyP) then
248 iHash = ior(iHash, STICKYPOWER_PAIR)
249 endif
250
251 if (i_is_EAM .and. j_is_EAM) then
252 iHash = ior(iHash, EAM_PAIR)
253 endif
254
255 if (i_is_SC .and. j_is_SC) then
256 iHash = ior(iHash, SC_PAIR)
257 endif
258
259 if (i_is_GB .and. j_is_GB) iHash = ior(iHash, GAYBERNE_PAIR)
260 if (i_is_GB .and. j_is_LJ) iHash = ior(iHash, GAYBERNE_LJ)
261 if (i_is_LJ .and. j_is_GB) iHash = ior(iHash, GAYBERNE_LJ)
262
263 if ((i_is_EAM.or.i_is_SC).and.(.not.(j_is_EAM.or.j_is_SC))) iHash = ior(iHash, MNM_PAIR)
264 if ((j_is_EAM.or.j_is_SC).and.(.not.(i_is_EAM.or.i_is_SC))) iHash = ior(iHash, MNM_PAIR)
265
266 if (i_is_Shape .and. j_is_Shape) iHash = ior(iHash, SHAPE_PAIR)
267 if (i_is_Shape .and. j_is_LJ) iHash = ior(iHash, SHAPE_LJ)
268 if (i_is_LJ .and. j_is_Shape) iHash = ior(iHash, SHAPE_LJ)
269
270
271 InteractionHash(i,j) = iHash
272 InteractionHash(j,i) = iHash
273
274 end do
275
276 end do
277
278 haveInteractionHash = .true.
279 end subroutine createInteractionHash
280
281 subroutine createGtypeCutoffMap()
282
283 logical :: i_is_LJ
284 logical :: i_is_Elect
285 logical :: i_is_Sticky
286 logical :: i_is_StickyP
287 logical :: i_is_GB
288 logical :: i_is_EAM
289 logical :: i_is_Shape
290 logical :: i_is_SC
291 logical :: GtypeFound
292
293 integer :: myStatus, nAtypes, i, j, istart, iend, jstart, jend
294 integer :: n_in_i, me_i, ia, g, atom1, ja, n_in_j,me_j
295 integer :: nGroupsInRow
296 integer :: nGroupsInCol
297 integer :: nGroupTypesRow,nGroupTypesCol
298 real(kind=dp):: thisSigma, bigSigma, thisRcut, tradRcut, tol
299 real(kind=dp) :: biggestAtypeCutoff
300
301 if (.not. haveInteractionHash) then
302 call createInteractionHash()
303 endif
304 #ifdef IS_MPI
305 nGroupsInRow = getNgroupsInRow(plan_group_row)
306 nGroupsInCol = getNgroupsInCol(plan_group_col)
307 #endif
308 nAtypes = getSize(atypes)
309 ! Set all of the initial cutoffs to zero.
310 atypeMaxCutoff = 0.0_dp
311 biggestAtypeCutoff = 0.0_dp
312 do i = 1, nAtypes
313 if (SimHasAtype(i)) then
314 call getElementProperty(atypes, i, "is_LennardJones", i_is_LJ)
315 call getElementProperty(atypes, i, "is_Electrostatic", i_is_Elect)
316 call getElementProperty(atypes, i, "is_Sticky", i_is_Sticky)
317 call getElementProperty(atypes, i, "is_StickyPower", i_is_StickyP)
318 call getElementProperty(atypes, i, "is_GayBerne", i_is_GB)
319 call getElementProperty(atypes, i, "is_EAM", i_is_EAM)
320 call getElementProperty(atypes, i, "is_Shape", i_is_Shape)
321 call getElementProperty(atypes, i, "is_SC", i_is_SC)
322
323 if (haveDefaultCutoffs) then
324 atypeMaxCutoff(i) = defaultRcut
325 else
326 if (i_is_LJ) then
327 thisRcut = getSigma(i) * 2.5_dp
328 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
329 endif
330 if (i_is_Elect) then
331 thisRcut = defaultRcut
332 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
333 endif
334 if (i_is_Sticky) then
335 thisRcut = getStickyCut(i)
336 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
337 endif
338 if (i_is_StickyP) then
339 thisRcut = getStickyPowerCut(i)
340 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
341 endif
342 if (i_is_GB) then
343 thisRcut = getGayBerneCut(i)
344 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
345 endif
346 if (i_is_EAM) then
347 thisRcut = getEAMCut(i)
348 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
349 endif
350 if (i_is_Shape) then
351 thisRcut = getShapeCut(i)
352 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
353 endif
354 if (i_is_SC) then
355 thisRcut = getSCCut(i)
356 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
357 endif
358 endif
359
360 if (atypeMaxCutoff(i).gt.biggestAtypeCutoff) then
361 biggestAtypeCutoff = atypeMaxCutoff(i)
362 endif
363
364 endif
365 enddo
366
367 istart = 1
368 jstart = 1
369 #ifdef IS_MPI
370 iend = nGroupsInRow
371 jend = nGroupsInCol
372 #else
373 iend = nGroups
374 jend = nGroups
375 #endif
376
377 !! allocate the groupToGtype and gtypeMaxCutoff here.
378 if(.not.allocated(groupToGtypeRow)) then
379 ! allocate(groupToGtype(iend))
380 allocate(groupToGtypeRow(iend))
381 else
382 deallocate(groupToGtypeRow)
383 allocate(groupToGtypeRow(iend))
384 endif
385 if(.not.allocated(groupMaxCutoffRow)) then
386 allocate(groupMaxCutoffRow(iend))
387 else
388 deallocate(groupMaxCutoffRow)
389 allocate(groupMaxCutoffRow(iend))
390 end if
391
392 if(.not.allocated(gtypeMaxCutoffRow)) then
393 allocate(gtypeMaxCutoffRow(iend))
394 else
395 deallocate(gtypeMaxCutoffRow)
396 allocate(gtypeMaxCutoffRow(iend))
397 endif
398
399
400 #ifdef IS_MPI
401 ! We only allocate new storage if we are in MPI because Ncol /= Nrow
402 if(.not.associated(groupToGtypeCol)) then
403 allocate(groupToGtypeCol(jend))
404 else
405 deallocate(groupToGtypeCol)
406 allocate(groupToGtypeCol(jend))
407 end if
408
409 if(.not.associated(groupMaxCutoffCol)) then
410 allocate(groupMaxCutoffCol(jend))
411 else
412 deallocate(groupMaxCutoffCol)
413 allocate(groupMaxCutoffCol(jend))
414 end if
415 if(.not.associated(gtypeMaxCutoffCol)) then
416 allocate(gtypeMaxCutoffCol(jend))
417 else
418 deallocate(gtypeMaxCutoffCol)
419 allocate(gtypeMaxCutoffCol(jend))
420 end if
421
422 groupMaxCutoffCol = 0.0_dp
423 gtypeMaxCutoffCol = 0.0_dp
424
425 #endif
426 groupMaxCutoffRow = 0.0_dp
427 gtypeMaxCutoffRow = 0.0_dp
428
429
430 !! first we do a single loop over the cutoff groups to find the
431 !! largest cutoff for any atypes present in this group. We also
432 !! create gtypes at this point.
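!! A gtype is an equivalence class of cutoff groups: two groups whose
!! largest member cutoffs agree to within tol share a gtype, so the
!! pairwise cutoff map below needs only nGroupTypesRow x nGroupTypesCol
!! entries instead of one entry per pair of groups.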
433
434 tol = 1.0e-6_dp
435 nGroupTypesRow = 0
436 nGroupTypesCol = 0
437 do i = istart, iend
438 n_in_i = groupStartRow(i+1) - groupStartRow(i)
439 groupMaxCutoffRow(i) = 0.0_dp
440 do ia = groupStartRow(i), groupStartRow(i+1)-1
441 atom1 = groupListRow(ia)
442 #ifdef IS_MPI
443 me_i = atid_row(atom1)
444 #else
445 me_i = atid(atom1)
446 #endif
447 if (atypeMaxCutoff(me_i).gt.groupMaxCutoffRow(i)) then
448 groupMaxCutoffRow(i)=atypeMaxCutoff(me_i)
449 endif
450 enddo
451 if (nGroupTypesRow.eq.0) then
452 nGroupTypesRow = nGroupTypesRow + 1
453 gtypeMaxCutoffRow(nGroupTypesRow) = groupMaxCutoffRow(i)
454 groupToGtypeRow(i) = nGroupTypesRow
455 else
456 GtypeFound = .false.
457 do g = 1, nGroupTypesRow
458 if ( abs(groupMaxCutoffRow(i) - gtypeMaxCutoffRow(g)).lt.tol) then
459 groupToGtypeRow(i) = g
460 GtypeFound = .true.
461 endif
462 enddo
463 if (.not.GtypeFound) then
464 nGroupTypesRow = nGroupTypesRow + 1
465 gtypeMaxCutoffRow(nGroupTypesRow) = groupMaxCutoffRow(i)
466 groupToGtypeRow(i) = nGroupTypesRow
467 endif
468 endif
469 enddo
470
471 #ifdef IS_MPI
472 do j = jstart, jend
473 n_in_j = groupStartCol(j+1) - groupStartCol(j)
474 groupMaxCutoffCol(j) = 0.0_dp
475 do ja = groupStartCol(j), groupStartCol(j+1)-1
476 atom1 = groupListCol(ja)
477
478 me_j = atid_col(atom1)
479
480 if (atypeMaxCutoff(me_j).gt.groupMaxCutoffCol(j)) then
481 groupMaxCutoffCol(j)=atypeMaxCutoff(me_j)
482 endif
483 enddo
484
485 if (nGroupTypesCol.eq.0) then
486 nGroupTypesCol = nGroupTypesCol + 1
487 gtypeMaxCutoffCol(nGroupTypesCol) = groupMaxCutoffCol(j)
488 groupToGtypeCol(j) = nGroupTypesCol
489 else
490 GtypeFound = .false.
491 do g = 1, nGroupTypesCol
492 if ( abs(groupMaxCutoffCol(j) - gtypeMaxCutoffCol(g)).lt.tol) then
493 groupToGtypeCol(j) = g
494 GtypeFound = .true.
495 endif
496 enddo
497 if (.not.GtypeFound) then
498 nGroupTypesCol = nGroupTypesCol + 1
499 gtypeMaxCutoffCol(nGroupTypesCol) = groupMaxCutoffCol(j)
500 groupToGtypeCol(j) = nGroupTypesCol
501 endif
502 endif
503 enddo
504
505 #else
506 ! Set pointers to information we just found
507 nGroupTypesCol = nGroupTypesRow
508 groupToGtypeCol => groupToGtypeRow
509 gtypeMaxCutoffCol => gtypeMaxCutoffRow
510 groupMaxCutoffCol => groupMaxCutoffRow
511 #endif
512
513 !! allocate the gtypeCutoffMap here.
514 allocate(gtypeCutoffMap(nGroupTypesRow,nGroupTypesCol))
515 !! then we do a double loop over all the group TYPES to find the cutoff
516 !! map between groups of two types
517 tradRcut = max(maxval(gtypeMaxCutoffRow),maxval(gtypeMaxCutoffCol))
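!! Worked example for two gtypes with cutoffs of 9 and 12: TRADITIONAL
!! uses tradRcut = 12 for every pair, MIX uses the arithmetic mean
!! (10.5 for the mixed pair), and MAX uses the larger of the two
!! (12 for the mixed pair).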
518
519 do i = 1, nGroupTypesRow
520 do j = 1, nGroupTypesCol
521
522 select case(cutoffPolicy)
523 case(TRADITIONAL_CUTOFF_POLICY)
524 thisRcut = tradRcut
525 case(MIX_CUTOFF_POLICY)
526 thisRcut = 0.5_dp * (gtypeMaxCutoffRow(i) + gtypeMaxCutoffCol(j))
527 case(MAX_CUTOFF_POLICY)
528 thisRcut = max(gtypeMaxCutoffRow(i), gtypeMaxCutoffCol(j))
529 case default
530 call handleError("createGtypeCutoffMap", "Unknown Cutoff Policy")
531 return
532 end select
533 gtypeCutoffMap(i,j)%rcut = thisRcut
534
535 if (thisRcut.gt.largestRcut) largestRcut = thisRcut
536
537 gtypeCutoffMap(i,j)%rcutsq = thisRcut*thisRcut
538
539 if (.not.haveSkinThickness) then
540 skinThickness = 1.0_dp
541 endif
542
543 gtypeCutoffMap(i,j)%rlistsq = (thisRcut + skinThickness)**2
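!! rlistsq is the squared Verlet neighbor-list radius: candidate pairs are
!! stored out to rcut + skinThickness and the list is reused until
!! checkNeighborList signals that something has moved far enough to
!! invalidate the skin, while forces are still evaluated only inside rcutsq.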
544
545 ! sanity check
546
547 if (haveDefaultCutoffs) then
548 if (abs(gtypeCutoffMap(i,j)%rcut - defaultRcut).gt.0.0001) then
549 call handleError("createGtypeCutoffMap", "user-specified rCut does not match computed group Cutoff")
550 endif
551 endif
552 enddo
553 enddo
554
555 if(allocated(gtypeMaxCutoffRow)) deallocate(gtypeMaxCutoffRow)
556 if(allocated(groupMaxCutoffRow)) deallocate(groupMaxCutoffRow)
557 if(allocated(atypeMaxCutoff)) deallocate(atypeMaxCutoff)
558 #ifdef IS_MPI
559 if(associated(groupMaxCutoffCol)) deallocate(groupMaxCutoffCol)
560 if(associated(gtypeMaxCutoffCol)) deallocate(gtypeMaxCutoffCol)
561 #endif
562 groupMaxCutoffCol => null()
563 gtypeMaxCutoffCol => null()
564
565 haveGtypeCutoffMap = .true.
566 end subroutine createGtypeCutoffMap
567
568 subroutine setCutoffs(defRcut, defRsw, defSP, defSF)
569
570 real(kind=dp),intent(in) :: defRcut, defRsw
571 integer, intent(in) :: defSP, defSF
572 character(len = statusMsgSize) :: errMsg
573 integer :: localError
574
575 defaultRcut = defRcut
576 defaultRsw = defRsw
577
578 if (defSP .ne. 0) then
579 defaultDoShiftPot = .true.
580 else
581 defaultDoShiftPot = .false.
582 endif
583 if (defSF .ne. 0) then
584 defaultDoShiftFrc = .true.
585 else
586 defaultDoShiftFrc = .false.
587 endif
588
589 if (abs(defaultRcut-defaultRsw) .lt. 0.0001) then
590 if (defaultDoShiftFrc) then
591 write(errMsg, *) &
592 'cutoffRadius and switchingRadius are set to the', newline &
593 // tab, 'same value. OpenMD will use shifted force', newline &
594 // tab, 'potentials instead of switching functions.'
595
596 call handleInfo("setCutoffs", errMsg)
597 else
598 write(errMsg, *) &
599 'cutoffRadius and switchingRadius are set to the', newline &
600 // tab, 'same value. OpenMD will use shifted', newline &
601 // tab, 'potentials instead of switching functions.'
602
603 call handleInfo("setCutoffs", errMsg)
604
605 defaultDoShiftPot = .true.
606 endif
607
608 endif
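!! General background (not specific to this module): a shifted potential
!! subtracts V(rc) so the pair energy vanishes at the cutoff, while a
!! shifted force additionally subtracts (r - rc)*dV/dr evaluated at rc so
!! the force also vanishes there; either removes the need for a separate
!! switching region when switchingRadius equals cutoffRadius.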
609
610 localError = 0
611 call setLJDefaultCutoff( defaultRcut, defaultDoShiftPot, &
612 defaultDoShiftFrc )
613 call setElectrostaticCutoffRadius( defaultRcut, defaultRsw )
614 call setCutoffEAM( defaultRcut )
615 call setCutoffSC( defaultRcut )
616 call setMnMDefaultCutoff( defaultRcut, defaultDoShiftPot, &
617 defaultDoShiftFrc )
618 call set_switch(defaultRsw, defaultRcut)
619 call setHmatDangerousRcutValue(defaultRcut)
620
621 haveDefaultCutoffs = .true.
622 haveGtypeCutoffMap = .false.
623
624 end subroutine setCutoffs
625
626 subroutine cWasLame()
627
628 VisitCutoffsAfterComputing = .true.
629 return
630
631 end subroutine cWasLame
632
633 subroutine setCutoffPolicy(cutPolicy)
634
635 integer, intent(in) :: cutPolicy
636
637 cutoffPolicy = cutPolicy
638 haveCutoffPolicy = .true.
639 haveGtypeCutoffMap = .false.
640
641 end subroutine setCutoffPolicy
642
643 subroutine setBoxDipole()
644
645 do_box_dipole = .true.
646
647 end subroutine setBoxDipole
648
649 subroutine getBoxDipole( box_dipole )
650
651 real(kind=dp), intent(inout), dimension(3) :: box_dipole
652
653 box_dipole = boxDipole
654
655 end subroutine getBoxDipole
656
657 subroutine setElectrostaticMethod( thisESM )
658
659 integer, intent(in) :: thisESM
660
661 electrostaticSummationMethod = thisESM
662 haveElectrostaticSummationMethod = .true.
663
664 end subroutine setElectrostaticMethod
665
666 subroutine setSkinThickness( thisSkin )
667
668 real(kind=dp), intent(in) :: thisSkin
669
670 skinThickness = thisSkin
671 haveSkinThickness = .true.
672 haveGtypeCutoffMap = .false.
673
674 end subroutine setSkinThickness
675
676 subroutine setSimVariables()
677 SIM_uses_DirectionalAtoms = SimUsesDirectionalAtoms()
678 SIM_uses_EAM = SimUsesEAM()
679 SIM_requires_postpair_calc = SimRequiresPostpairCalc()
680 SIM_requires_prepair_calc = SimRequiresPrepairCalc()
681 SIM_uses_PBC = SimUsesPBC()
682 SIM_uses_SC = SimUsesSC()
683 SIM_uses_AtomicVirial = SimUsesAtomicVirial()
684
685 haveSIMvariables = .true.
686
687 return
688 end subroutine setSimVariables
689
690 subroutine doReadyCheck(error)
691 integer, intent(out) :: error
692 integer :: myStatus
693
694 error = 0
695
696 if (.not. haveInteractionHash) then
697 call createInteractionHash()
698 endif
699
700 if (.not. haveGtypeCutoffMap) then
701 call createGtypeCutoffMap()
702 endif
703
704 if (VisitCutoffsAfterComputing) then
705 call set_switch(largestRcut, largestRcut)
706 call setHmatDangerousRcutValue(largestRcut)
707 call setCutoffEAM(largestRcut)
708 call setCutoffSC(largestRcut)
709 VisitCutoffsAfterComputing = .false.
710 endif
711
712 if (.not. haveSIMvariables) then
713 call setSimVariables()
714 endif
715
716 if (.not. haveNeighborList) then
717 write(default_error, *) 'neighbor list has not been initialized in doForces!'
718 error = -1
719 return
720 end if
721
722 if (.not. haveSaneForceField) then
723 write(default_error, *) 'Force Field is not sane in doForces!'
724 error = -1
725 return
726 end if
727
728 #ifdef IS_MPI
729 if (.not. isMPISimSet()) then
730 write(default_error,*) "ERROR: mpiSimulation has not been initialized!"
731 error = -1
732 return
733 endif
734 #endif
735 return
736 end subroutine doReadyCheck
737
738
739 subroutine init_FF(thisStat)
740
741 integer, intent(out) :: thisStat
742 integer :: my_status, nMatches
743 integer, pointer :: MatchList(:) => null()
744
745 !! assume things are copacetic, unless they aren't
746 thisStat = 0
747
748 !! init_FF is called *after* all of the atom types have been
749 !! defined in atype_module using the new_atype subroutine.
750 !!
751 !! this will scan through the known atypes and figure out what
752 !! interactions are used by the force field.
753
754 FF_uses_DirectionalAtoms = .false.
755 FF_uses_Dipoles = .false.
756 FF_uses_GayBerne = .false.
757 FF_uses_EAM = .false.
758 FF_uses_SC = .false.
759
760 call getMatchingElementList(atypes, "is_Directional", .true., &
761 nMatches, MatchList)
762 if (nMatches .gt. 0) FF_uses_DirectionalAtoms = .true.
763
764 call getMatchingElementList(atypes, "is_Dipole", .true., &
765 nMatches, MatchList)
766 if (nMatches .gt. 0) FF_uses_Dipoles = .true.
767
768 call getMatchingElementList(atypes, "is_GayBerne", .true., &
769 nMatches, MatchList)
770 if (nMatches .gt. 0) FF_uses_GayBerne = .true.
771
772 call getMatchingElementList(atypes, "is_EAM", .true., nMatches, MatchList)
773 if (nMatches .gt. 0) FF_uses_EAM = .true.
774
775 call getMatchingElementList(atypes, "is_SC", .true., nMatches, MatchList)
776 if (nMatches .gt. 0) FF_uses_SC = .true.
777
778
779 haveSaneForceField = .true.
780
781
782 if (.not. haveNeighborList) then
783 !! Create neighbor lists
784 call expandNeighborList(nLocal, my_status)
785 if (my_Status /= 0) then
786 write(default_error,*) "SimSetup: ExpandNeighborList returned error."
787 thisStat = -1
788 return
789 endif
790 haveNeighborList = .true.
791 endif
792
793 end subroutine init_FF
794
795
796 !! Does the force loop over i,j pairs. Calls do_pair to calculate forces.
797 !------------------------------------------------------------->
798 subroutine do_force_loop(q,vel, q_group, A, eFrame, f, t, tau, S, pot, particle_pot, &
799 error)
800 !! Position array provided by C, dimensioned by getNlocal
801 real ( kind = dp ), dimension(3, nLocal) :: q
802 real ( kind = dp ), dimension(3, nlocal) :: vel
803 !! molecular center-of-mass position array
804 real ( kind = dp ), dimension(3, nGroups) :: q_group
805 !! Rotation Matrix for each long range particle in simulation.
806 real( kind = dp), dimension(9, nLocal) :: A
807 !! Unit vectors for dipoles (lab frame)
808 real( kind = dp ), dimension(9,nLocal) :: eFrame
809 !! Force array provided by C, dimensioned by getNlocal
810 real ( kind = dp ), dimension(3,nLocal) :: f
811 !! Torque array provided by C, dimensioned by getNlocal
812 real( kind = dp ), dimension(3,nLocal) :: t
813
814 !! Stress Tensor
815 real( kind = dp), dimension(9) :: tau
816 !! Heat Flux component S
817 real(kind=dp), dimension(3) :: S
818 real ( kind = dp ),dimension(LR_POT_TYPES) :: pot
819 real( kind = dp ), dimension(nLocal) :: particle_pot
820
821 logical :: in_switching_region
822 #ifdef IS_MPI
823 real( kind = DP ), dimension(LR_POT_TYPES) :: pot_local
824 integer :: nAtomsInRow
825 integer :: nAtomsInCol
826 integer :: nprocs
827 integer :: nGroupsInRow
828 integer :: nGroupsInCol
829 #endif
830 integer :: natoms
831 logical :: update_nlist
832 integer :: i, j, jstart, jend, jnab
833 integer :: istart, iend
834 integer :: ia, jb, atom1, atom2
835 integer :: nlist
836 real( kind = DP ) :: ratmsq, rgrpsq, rgrp, rag, vpair, vij
837 real( kind = DP ) :: sw, dswdr, swderiv, mf
838 real( kind = DP ) :: rVal
839 real(kind=dp),dimension(3) :: d_atm, d_grp, fpair, fij, fg, dag, vel_j
840 real(kind=dp),dimension(3) :: S_local,vel_grp_j
841 real(kind=dp) :: rfpot, mu_i
842 real(kind=dp):: rCut
843 integer :: me_i, me_j, n_in_i, n_in_j, iG, j1
844 logical :: is_dp_i
845 integer :: neighborListSize
846 integer :: listerror, error
847 integer :: localError
848 integer :: propPack_i, propPack_j
849 integer :: loopStart, loopEnd, loop
850 integer :: iHash, jHash
851 integer :: i1, topoDist
852
853 !! the variables for the box dipole moment
854 #ifdef IS_MPI
855 integer :: pChgCount_local
856 integer :: nChgCount_local
857 real(kind=dp) :: pChg_local
858 real(kind=dp) :: nChg_local
859 real(kind=dp), dimension(3) :: pChgPos_local
860 real(kind=dp), dimension(3) :: nChgPos_local
861 real(kind=dp), dimension(3) :: dipVec_local
862 #endif
863 integer :: pChgCount
864 integer :: nChgCount
865 real(kind=dp) :: pChg
866 real(kind=dp) :: nChg
867 real(kind=dp) :: chg_value
868 real(kind=dp), dimension(3) :: pChgPos
869 real(kind=dp), dimension(3) :: nChgPos
870 real(kind=dp), dimension(3) :: dipVec
871 real(kind=dp), dimension(3) :: chgVec
872 real(kind=dp) :: skch
873
874 !! initialize the dummy group velocity variables and the local heat flux accumulator
875 vel_j = 0.0_dp
876 vel_grp_j = 0.0_dp
877 S_local = 0.0_dp
878
879 !! initialize box dipole variables
880 if (do_box_dipole) then
881 #ifdef IS_MPI
882 pChg_local = 0.0_dp
883 nChg_local = 0.0_dp
884 pChgCount_local = 0
885 nChgCount_local = 0
886 do i=1, 3
887 pChgPos_local(i) = 0.0_dp
888 nChgPos_local(i) = 0.0_dp
889 dipVec_local(i) = 0.0_dp
890 enddo
891 #endif
892 pChg = 0.0_dp
893 nChg = 0.0_dp
894 pChgCount = 0
895 nChgCount = 0
896 chg_value = 0.0_dp
897
898 do i=1, 3
899 pChgPos(i) = 0.0_dp
900 nChgPos(i) = 0.0_dp
901 dipVec(i) = 0.0_dp
902 chgVec(i) = 0.0_dp
903 boxDipole(i) = 0.0_dp
904 enddo
905 endif
906
907 !! initialize local variables
908 S_local = 0.0_dp
909
910 #ifdef IS_MPI
911 pot_local = 0.0_dp
912 nAtomsInRow = getNatomsInRow(plan_atom_row)
913 nAtomsInCol = getNatomsInCol(plan_atom_col)
914 nGroupsInRow = getNgroupsInRow(plan_group_row)
915 nGroupsInCol = getNgroupsInCol(plan_group_col)
916 #else
917 natoms = nlocal
918 #endif
919
920 call doReadyCheck(localError)
921 if ( localError .ne. 0 ) then
922 call handleError("do_force_loop", "Not Initialized")
923 error = -1
924 return
925 end if
926 call zero_work_arrays()
927
928 ! Gather all information needed by all force loops
929
930 #ifdef IS_MPI
931
932 call gather(q, q_Row, plan_atom_row_3d)
933 call gather(q, q_Col, plan_atom_col_3d)
934
935
936
937 call gather(vel, vel_Col, plan_atom_col_3d)
938
939 call gather(q_group, q_group_Row, plan_group_row_3d)
940 call gather(q_group, q_group_Col, plan_group_col_3d)
941
942 if (FF_UsesDirectionalAtoms() .and. SIM_uses_DirectionalAtoms) then
943 call gather(eFrame, eFrame_Row, plan_atom_row_rotation)
944 call gather(eFrame, eFrame_Col, plan_atom_col_rotation)
945
946 call gather(A, A_Row, plan_atom_row_rotation)
947 call gather(A, A_Col, plan_atom_col_rotation)
948 endif
949
950 #endif
951
952 !! Begin force loop timing:
953 #ifdef PROFILE
954 call cpu_time(forceTimeInitial)
955 nloops = nloops + 1
956 #endif
957
958 loopEnd = PAIR_LOOP
959 if (FF_RequiresPrepairCalc() .and. SIM_requires_prepair_calc) then
960 loopStart = PREPAIR_LOOP
961 else
962 loopStart = PAIR_LOOP
963 endif
964
965 do loop = loopStart, loopEnd
966
967 ! See if we need to update neighbor lists
968 ! (but only on the first time through):
969 if (loop .eq. loopStart) then
970 #ifdef IS_MPI
971 call checkNeighborList(nGroupsInRow, q_group_row, skinThickness, &
972 update_nlist)
973 #else
974 call checkNeighborList(nGroups, q_group, skinThickness, &
975 update_nlist)
976 #endif
977 endif
978
979 if (update_nlist) then
980 !! save current configuration and construct neighbor list
981 #ifdef IS_MPI
982 call saveNeighborList(nGroupsInRow, q_group_row)
983 #else
984 call saveNeighborList(nGroups, q_group)
985 #endif
986 neighborListSize = size(list)
987 nlist = 0
988 endif
989
990 istart = 1
991 #ifdef IS_MPI
992 iend = nGroupsInRow
993 #else
994 iend = nGroups - 1
995 #endif
996 outer: do i = istart, iend
997
998 if (update_nlist) point(i) = nlist + 1
999
1000 n_in_i = groupStartRow(i+1) - groupStartRow(i)
1001
1002 if (update_nlist) then
1003 #ifdef IS_MPI
1004 jstart = 1
1005 jend = nGroupsInCol
1006 #else
1007 jstart = i+1
1008 jend = nGroups
1009 #endif
1010 else
1011 jstart = point(i)
1012 jend = point(i+1) - 1
1013 ! make sure group i has neighbors
1014 if (jstart .gt. jend) cycle outer
1015 endif
1016
1017 do jnab = jstart, jend
1018 if (update_nlist) then
1019 j = jnab
1020 else
1021 j = list(jnab)
1022 endif
1023
1024 #ifdef IS_MPI
1025 me_j = atid_col(j)
1026 call get_interatomic_vector(q_group_Row(:,i), &
1027 q_group_Col(:,j), d_grp, rgrpsq)
1028 #else
1029 me_j = atid(j)
1030 call get_interatomic_vector(q_group(:,i), &
1031 q_group(:,j), d_grp, rgrpsq)
1032 #endif
1033
1034 if (rgrpsq < gtypeCutoffMap(groupToGtypeRow(i),groupToGtypeCol(j))%rListsq) then
1035 if (update_nlist) then
1036 nlist = nlist + 1
1037
1038 if (nlist > neighborListSize) then
1039 #ifdef IS_MPI
1040 call expandNeighborList(nGroupsInRow, listerror)
1041 #else
1042 call expandNeighborList(nGroups, listerror)
1043 #endif
1044 if (listerror /= 0) then
1045 error = -1
1046 write(DEFAULT_ERROR,*) "ERROR: nlist > list size and max allocations exceeded."
1047 return
1048 end if
1049 neighborListSize = size(list)
1050 endif
1051
1052 list(nlist) = j
1053 endif
1054
1055 if (rgrpsq < gtypeCutoffMap(groupToGtypeRow(i),groupToGtypeCol(j))%rCutsq) then
1056
1057 rCut = gtypeCutoffMap(groupToGtypeRow(i),groupToGtypeCol(j))%rCut
1058 if (loop .eq. PAIR_LOOP) then
1059 vij = 0.0_dp
1060 fij(1) = 0.0_dp
1061 fij(2) = 0.0_dp
1062 fij(3) = 0.0_dp
1063 endif
1064
1065 call get_switch(rgrpsq, sw, dswdr,rgrp, in_switching_region)
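!! sw is the switching function evaluated at the group-group separation
!! rgrp; each atom-atom pair energy in this group pair is scaled by sw
!! inside do_pair, and dswdr generates the additional group-based forces
!! that are spread over the member atoms (weighted by mfact) later in
!! this block.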
1066
1067 n_in_j = groupStartCol(j+1) - groupStartCol(j)
1068
1069 do ia = groupStartRow(i), groupStartRow(i+1)-1
1070
1071 atom1 = groupListRow(ia)
1072
1073 inner: do jb = groupStartCol(j), groupStartCol(j+1)-1
1074
1075 atom2 = groupListCol(jb)
1076
1077 if (skipThisPair(atom1, atom2)) cycle inner
1078
1079 if ((n_in_i .eq. 1).and.(n_in_j .eq. 1)) then
1080 d_atm(1) = d_grp(1)
1081 d_atm(2) = d_grp(2)
1082 d_atm(3) = d_grp(3)
1083 ratmsq = rgrpsq
1084 else
1085 #ifdef IS_MPI
1086 call get_interatomic_vector(q_Row(:,atom1), &
1087 q_Col(:,atom2), d_atm, ratmsq)
1088 #else
1089 call get_interatomic_vector(q(:,atom1), &
1090 q(:,atom2), d_atm, ratmsq)
1091 #endif
1092 endif
1093
1094 topoDist = getTopoDistance(atom1, atom2)
1095
1096
1097 if (loop .eq. PREPAIR_LOOP) then
1098 #ifdef IS_MPI
1099 call do_prepair(atom1, atom2, ratmsq, d_atm, sw, &
1100 rgrpsq, d_grp, rCut, &
1101 eFrame, A, f, t, pot_local)
1102 #else
1103 call do_prepair(atom1, atom2, ratmsq, d_atm, sw, &
1104 rgrpsq, d_grp, rCut, &
1105 eFrame, A, f, t, pot)
1106 #endif
1107 else
1108 #ifdef IS_MPI
1109 call do_pair(atom1, atom2, ratmsq, d_atm, sw, &
1110 eFrame, A, f, t, pot_local, particle_pot, vpair, &
1111 fpair, d_grp, rgrp, rCut, topoDist)
1112 vel_j = vel_Col(:,atom2)
1113 ! particle_pot will be accumulated from row & column
1114 ! arrays later
1115 #else
1116 call do_pair(atom1, atom2, ratmsq, d_atm, sw, &
1117 eFrame, A, f, t, pot, particle_pot, vpair, &
1118 fpair, d_grp, rgrp, rCut, topoDist)
1119 vel_j = vel(:,atom2)
1120 #endif
1121
1122 vij = vij + vpair
1123 fij(1) = fij(1) + fpair(1)
1124 fij(2) = fij(2) + fpair(2)
1125 fij(3) = fij(3) + fpair(3)
1126 ! accumulate stress tensor and heat flux (S) contributions from this pair
1127 call add_stress_tensor(d_atm, fpair, tau, vel_j, S_local)
1128 !!call add_heat_flux(d_atm, fpair,vel_j,S_local)
1129 endif
1130 enddo inner
1131 enddo
1132
1133 if (loop .eq. PAIR_LOOP) then
1134 if (in_switching_region) then
1135 swderiv = vij*dswdr/rgrp
1136 fg = swderiv*d_grp
1137 fij(1) = fij(1) + fg(1)
1138 fij(2) = fij(2) + fg(2)
1139 fij(3) = fij(3) + fg(3)
1140
1141 if ((n_in_i .eq. 1).and.(n_in_j .eq. 1)) then
1142 ! call add_stress_tensor(d_atm, fg, tau)
1143 call add_stress_tensor(d_atm, fg, tau, vel_j, S_local)
1144 endif
1145
1146 do ia=groupStartRow(i), groupStartRow(i+1)-1
1147 atom1=groupListRow(ia)
1148 mf = mfactRow(atom1)
1149 ! fg is the force on atom ia due to cutoff group's
1150 ! presence in switching region
1151 fg = swderiv*d_grp*mf
1152 #ifdef IS_MPI
1153 f_Row(1,atom1) = f_Row(1,atom1) + fg(1)
1154 f_Row(2,atom1) = f_Row(2,atom1) + fg(2)
1155 f_Row(3,atom1) = f_Row(3,atom1) + fg(3)
1156 #else
1157 f(1,atom1) = f(1,atom1) + fg(1)
1158 f(2,atom1) = f(2,atom1) + fg(2)
1159 f(3,atom1) = f(3,atom1) + fg(3)
1160 #endif
1161 if (n_in_i .gt. 1) then
1162 if (SIM_uses_AtomicVirial) then
1163 ! find the distance between the atom
1164 ! and the center of the cutoff group:
1165 #ifdef IS_MPI
1166 call get_interatomic_vector(q_Row(:,atom1), &
1167 q_group_Row(:,i), dag, rag)
1168 #else
1169 call get_interatomic_vector(q(:,atom1), &
1170 q_group(:,i), dag, rag)
1171 #endif
1172
1173 call add_stress_tensor(dag, fg, tau, vel_grp_j, S_local)
1174 !
1175 !call add_stress_tensor(dag,fg,tau)
1176
1177 endif
1178 endif
1179 enddo
1180
1181 do jb=groupStartCol(j), groupStartCol(j+1)-1
1182 atom2=groupListCol(jb)
1183 mf = mfactCol(atom2)
1184 ! fg is the force on atom jb due to cutoff group's
1185 ! presence in switching region
1186 fg = -swderiv*d_grp*mf
1187 #ifdef IS_MPI
1188 f_Col(1,atom2) = f_Col(1,atom2) + fg(1)
1189 f_Col(2,atom2) = f_Col(2,atom2) + fg(2)
1190 f_Col(3,atom2) = f_Col(3,atom2) + fg(3)
1191 #else
1192 f(1,atom2) = f(1,atom2) + fg(1)
1193 f(2,atom2) = f(2,atom2) + fg(2)
1194 f(3,atom2) = f(3,atom2) + fg(3)
1195 #endif
1196 if (n_in_j .gt. 1) then
1197 if (SIM_uses_AtomicVirial) then
1198 ! find the distance between the atom
1199 ! and the center of the cutoff group:
1200 #ifdef IS_MPI
1201 call get_interatomic_vector(q_Col(:,atom2), &
1202 q_group_Col(:,j), dag, rag)
1203 vel_j = vel_Col(:,atom2)
1204 #else
1205 call get_interatomic_vector(q(:,atom2), &
1206 q_group(:,j), dag, rag)
1207 vel_j = vel(:,atom2)
1208 #endif
1209 ! call add_stress_tensor(dag,fg,tau)
1210
1211 call add_stress_tensor(dag, fg, tau, vel_grp_j, S_local)
1212 ! call add_heat_flux(d_atm, fpair,vel_j,S_local)
1213 endif
1214 endif
1215 enddo
1216 endif
1217 !if (.not.SIM_uses_AtomicVirial) then
1218 ! call add_stress_tensor(d_grp, fij, tau)
1219 !endif
1220 endif
1221 endif
1222 endif
1223 enddo
1224
1225 enddo outer
1226
1227 if (update_nlist) then
1228 #ifdef IS_MPI
1229 point(nGroupsInRow + 1) = nlist + 1
1230 #else
1231 point(nGroups) = nlist + 1
1232 #endif
1233 if (loop .eq. PREPAIR_LOOP) then
1234 ! we just did the neighbor list update on the first
1235 ! pass, so we don't need to do it
1236 ! again on the second pass
1237 update_nlist = .false.
1238 endif
1239 endif
1240
1241 if (loop .eq. PREPAIR_LOOP) then
1242 #ifdef IS_MPI
1243 call do_preforce(nlocal, pot_local, particle_pot)
1244 #else
1245 call do_preforce(nlocal, pot, particle_pot)
1246 #endif
1247 endif
1248
1249 enddo
1250
1251 !! Do timing
1252 #ifdef PROFILE
1253 call cpu_time(forceTimeFinal)
1254 forceTime = forceTime + forceTimeFinal - forceTimeInitial
1255 #endif
1256
1257 #ifdef IS_MPI
1258 !!distribute forces
1259
1260 f_temp = 0.0_dp
1261 call scatter(f_Row,f_temp,plan_atom_row_3d)
1262 do i = 1,nlocal
1263 f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
1264 end do
1265
1266 f_temp = 0.0_dp
1267 call scatter(f_Col,f_temp,plan_atom_col_3d)
1268 do i = 1,nlocal
1269 f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
1270 end do
1271
1272 if (FF_UsesDirectionalAtoms() .and. SIM_uses_DirectionalAtoms) then
1273 t_temp = 0.0_dp
1274 call scatter(t_Row,t_temp,plan_atom_row_3d)
1275 do i = 1,nlocal
1276 t(1:3,i) = t(1:3,i) + t_temp(1:3,i)
1277 end do
1278 t_temp = 0.0_dp
1279 call scatter(t_Col,t_temp,plan_atom_col_3d)
1280
1281 do i = 1,nlocal
1282 t(1:3,i) = t(1:3,i) + t_temp(1:3,i)
1283 end do
1284 endif
1285
1286 ! scatter/gather pot_row into the members of my column
1287 do i = 1,LR_POT_TYPES
1288 call scatter(pot_Row(i,:), pot_Temp(i,:), plan_atom_row)
1289 end do
1290 ! scatter/gather pot_local into all other procs
1291 ! add resultant to get total pot
1292 do i = 1, nlocal
1293 pot_local(1:LR_POT_TYPES) = pot_local(1:LR_POT_TYPES) &
1294 + pot_Temp(1:LR_POT_TYPES,i)
1295 enddo
1296
1297 ! factor of two is because the total potential terms are divided by 2 in parallel
1298 ! due to row/ column scatter
1299 do i = 1,LR_POT_TYPES
1300 particle_pot(1:nlocal) = particle_pot(1:nlocal) + 2.0 * pot_Temp(i,1:nlocal)
1301 enddo
1302
1303
1304 pot_Temp = 0.0_DP
1305
1306 do i = 1,LR_POT_TYPES
1307 call scatter(pot_Col(i,:), pot_Temp(i,:), plan_atom_col)
1308 end do
1309
1310 do i = 1, nlocal
1311 pot_local(1:LR_POT_TYPES) = pot_local(1:LR_POT_TYPES)&
1312 + pot_Temp(1:LR_POT_TYPES,i)
1313 enddo
1314
1315 ! factor of two is because the total potential terms are divided by 2 in parallel
1316 ! due to row/ column scatter
1317 do i = 1,LR_POT_TYPES
1318 particle_pot(1:nlocal) = particle_pot(1:nlocal) + 2.0 * pot_Temp(i,1:nlocal)
1319 enddo
1320
1321 ppot_Temp = 0.0_DP
1322
1323 call scatter(ppot_Row(:), ppot_Temp(:), plan_atom_row)
1324 do i = 1, nlocal
1325 particle_pot(i) = particle_pot(i) + ppot_Temp(i)
1326 enddo
1327
1328 ppot_Temp = 0.0_DP
1329
1330 call scatter(ppot_Col(:), ppot_Temp(:), plan_atom_col)
1331 do i = 1, nlocal
1332 particle_pot(i) = particle_pot(i) + ppot_Temp(i)
1333 enddo
1334
1335 !! In parallel we need to accumulate S for the entire system
1336
1337 call mpi_allreduce(S_local, S, 3, mpi_double_precision, mpi_sum, &
1338 plan_atom_col%myPlanComm, mpi_err)
1339 #else
1340 S = S_Local
1341 #endif
1342
1343
1344
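!! Post-pair corrections (summarizing the block below): this loop runs
!! only over local atoms and adds (i) the electrostatic self-interaction
!! term, using skch, the total charge on excluded neighbors inside the
!! cutoff sphere, (ii) reaction-field contributions from excluded pairs
!! when that summation method is active, and (iii) the box dipole
!! accumulation when it has been requested.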
1345 if (SIM_requires_postpair_calc) then
1346 do i = 1, nlocal
1347
1348 ! we loop only over the local atoms, so we don't need row and column
1349 ! lookups for the types
1350
1351 me_i = atid(i)
1352
1353 ! is the atom electrostatic? See if it would have an
1354 ! electrostatic interaction with itself
1355 iHash = InteractionHash(me_i,me_i)
1356
1357 if ( iand(iHash, ELECTROSTATIC_PAIR).ne.0 ) then
1358
1359 ! loop over the excludes to accumulate charge in the
1360 ! cutoff sphere that we've left out of the normal pair loop
1361 skch = 0.0_dp
1362
1363 do i1 = 1, nSkipsForLocalAtom(i)
1364 j = skipsForLocalAtom(i, i1)
1365 me_j = atid(j)
1366 jHash = InteractionHash(me_i,me_j)
1367 if ( iand(jHash, ELECTROSTATIC_PAIR).ne.0 ) then
1368 skch = skch + getCharge(me_j)
1369 endif
1370 enddo
1371
1372 #ifdef IS_MPI
1373 call self_self(i, eFrame, skch, pot_local(ELECTROSTATIC_POT), t)
1374 #else
1375 call self_self(i, eFrame, skch, pot(ELECTROSTATIC_POT), t)
1376 #endif
1377 endif
1378
1379
1380 if (electrostaticSummationMethod.eq.REACTION_FIELD) then
1381
1382 ! loop over the excludes to accumulate RF stuff we've
1383 ! left out of the normal pair loop
1384
1385 do i1 = 1, nSkipsForLocalAtom(i)
1386 j = skipsForLocalAtom(i, i1)
1387
1388 ! prevent overcounting of the skips
1389 if (i.lt.j) then
1390 call get_interatomic_vector(q(:,i), q(:,j), d_atm, ratmsq)
1391 rVal = sqrt(ratmsq)
1392 call get_switch(ratmsq, sw, dswdr, rVal,in_switching_region)
1393 #ifdef IS_MPI
1394 call rf_self_excludes(i, j, sw, 1.0_dp, eFrame, d_atm, rVal, &
1395 vpair, pot_local(ELECTROSTATIC_POT), f, t)
1396 #else
1397 call rf_self_excludes(i, j, sw, 1.0_dp, eFrame, d_atm, rVal, &
1398 vpair, pot(ELECTROSTATIC_POT), f, t)
1399 #endif
1400 endif
1401 enddo
1402 endif
1403
1404 if (do_box_dipole) then
1405 #ifdef IS_MPI
1406 call accumulate_box_dipole(i, eFrame, q(:,i), pChg_local, &
1407 nChg_local, pChgPos_local, nChgPos_local, dipVec_local, &
1408 pChgCount_local, nChgCount_local)
1409 #else
1410 call accumulate_box_dipole(i, eFrame, q(:,i), pChg, nChg, &
1411 pChgPos, nChgPos, dipVec, pChgCount, nChgCount)
1412 #endif
1413 endif
1414 enddo
1415 endif
1416
1417 #ifdef IS_MPI
1418 #ifdef SINGLE_PRECISION
1419 call mpi_allreduce(pot_local, pot, LR_POT_TYPES,mpi_real,mpi_sum, &
1420 mpi_comm_world,mpi_err)
1421 #else
1422 call mpi_allreduce(pot_local, pot, LR_POT_TYPES,mpi_double_precision, &
1423 mpi_sum, mpi_comm_world,mpi_err)
1424 #endif
1425
1426 if (do_box_dipole) then
1427
1428 #ifdef SINGLE_PRECISION
1429 call mpi_allreduce(pChg_local, pChg, 1, mpi_real, mpi_sum, &
1430 mpi_comm_world, mpi_err)
1431 call mpi_allreduce(nChg_local, nChg, 1, mpi_real, mpi_sum, &
1432 mpi_comm_world, mpi_err)
1433 call mpi_allreduce(pChgCount_local, pChgCount, 1, mpi_integer, mpi_sum,&
1434 mpi_comm_world, mpi_err)
1435 call mpi_allreduce(nChgCount_local, nChgCount, 1, mpi_integer, mpi_sum,&
1436 mpi_comm_world, mpi_err)
1437 call mpi_allreduce(pChgPos_local, pChgPos, 3, mpi_real, mpi_sum, &
1438 mpi_comm_world, mpi_err)
1439 call mpi_allreduce(nChgPos_local, nChgPos, 3, mpi_real, mpi_sum, &
1440 mpi_comm_world, mpi_err)
1441 call mpi_allreduce(dipVec_local, dipVec, 3, mpi_real, mpi_sum, &
1442 mpi_comm_world, mpi_err)
1443 #else
1444 call mpi_allreduce(pChg_local, pChg, 1, mpi_double_precision, mpi_sum, &
1445 mpi_comm_world, mpi_err)
1446 call mpi_allreduce(nChg_local, nChg, 1, mpi_double_precision, mpi_sum, &
1447 mpi_comm_world, mpi_err)
1448 call mpi_allreduce(pChgCount_local, pChgCount, 1, mpi_integer,&
1449 mpi_sum, mpi_comm_world, mpi_err)
1450 call mpi_allreduce(nChgCount_local, nChgCount, 1, mpi_integer,&
1451 mpi_sum, mpi_comm_world, mpi_err)
1452 call mpi_allreduce(pChgPos_local, pChgPos, 3, mpi_double_precision, &
1453 mpi_sum, mpi_comm_world, mpi_err)
1454 call mpi_allreduce(nChgPos_local, nChgPos, 3, mpi_double_precision, &
1455 mpi_sum, mpi_comm_world, mpi_err)
1456 call mpi_allreduce(dipVec_local, dipVec, 3, mpi_double_precision, &
1457 mpi_sum, mpi_comm_world, mpi_err)
1458 #endif
1459
1460 endif
1461
1462 #endif
1463
1464 if (do_box_dipole) then
1465 ! first load the accumulated dipole moment (if dipoles were present)
1466 boxDipole(1) = dipVec(1)
1467 boxDipole(2) = dipVec(2)
1468 boxDipole(3) = dipVec(3)
1469
1470 ! now include the dipole moment due to charges
1471 ! use the lesser of the positive and negative charge totals
1472 if (nChg .le. pChg) then
1473 chg_value = nChg
1474 else
1475 chg_value = pChg
1476 endif
1477
1478 ! find the average positions
1479 if (pChgCount .gt. 0 .and. nChgCount .gt. 0) then
1480 pChgPos = pChgPos / pChgCount
1481 nChgPos = nChgPos / nChgCount
1482 endif
1483
1484 ! dipole is from the negative to the positive (physics notation)
1485 chgVec(1) = pChgPos(1) - nChgPos(1)
1486 chgVec(2) = pChgPos(2) - nChgPos(2)
1487 chgVec(3) = pChgPos(3) - nChgPos(3)
1488
1489 boxDipole(1) = boxDipole(1) + chgVec(1) * chg_value
1490 boxDipole(2) = boxDipole(2) + chgVec(2) * chg_value
1491 boxDipole(3) = boxDipole(3) + chgVec(3) * chg_value
1492
1493 endif
1494
1495 end subroutine do_force_loop
1496
1497 subroutine do_pair(i, j, rijsq, d, sw, &
1498 eFrame, A, f, t, pot, particle_pot, vpair, &
1499 fpair, d_grp, r_grp, rCut, topoDist)
1500
1501 real( kind = dp ) :: vpair, sw
1502 real( kind = dp ), dimension(LR_POT_TYPES) :: pot
1503 real( kind = dp ), dimension(nLocal) :: particle_pot
1504 real( kind = dp ), dimension(3) :: fpair
1505 real( kind = dp ), dimension(nLocal) :: mfact
1506 real( kind = dp ), dimension(9,nLocal) :: eFrame
1507 real( kind = dp ), dimension(9,nLocal) :: A
1508 real( kind = dp ), dimension(3,nLocal) :: f
1509 real( kind = dp ), dimension(3,nLocal) :: t
1510
1511 integer, intent(in) :: i, j
1512 real ( kind = dp ), intent(inout) :: rijsq
1513 real ( kind = dp ), intent(inout) :: r_grp
1514 real ( kind = dp ), intent(inout) :: d(3)
1515 real ( kind = dp ), intent(inout) :: d_grp(3)
1516 real ( kind = dp ), intent(inout) :: rCut
1517 integer, intent(inout) :: topoDist
1518 real ( kind = dp ) :: r, pair_pot, vdwMult, electroMult
1519 real ( kind = dp ) :: a_k, b_k, c_k, d_k, dx
1520
1521 real( kind = dp), dimension(3) :: f1, t1, t2
1522 real( kind = dp), dimension(9) :: A1, A2, eF1, eF2
1523 real( kind = dp) :: dfrhodrho_i, dfrhodrho_j
1524 real( kind = dp) :: rho_i, rho_j
1525 real( kind = dp) :: fshift_i, fshift_j
1526 real( kind = dp) :: p_vdw, p_elect, p_hb, p_met
1527 integer :: atid_i, atid_j, id1, id2, idx
1528 integer :: k
1529
1530 integer :: iHash
1531
1532 r = sqrt(rijsq)
1533
1534 vpair = 0.0_dp
1535 fpair(1:3) = 0.0_dp
1536
1537 p_vdw = 0.0_dp
1538 p_elect = 0.0_dp
1539 p_hb = 0.0_dp
1540 p_met = 0.0_dp
1541
1542 f1(1:3) = 0.0_dp
1543
1544 #ifdef IS_MPI
1545 atid_i = atid_row(i)
1546 atid_j = atid_col(j)
1547 #else
1548 atid_i = atid(i)
1549 atid_j = atid(j)
1550 #endif
1551
1552 iHash = InteractionHash(atid_i, atid_j)
1553
1554 vdwMult = vdwScale(topoDist)
1555 electroMult = electrostaticScale(topoDist)
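!! Assumed behavior of the scaling lookups: topoDist is the topological
!! (bond-path) distance between i and j, and vdwScale / electrostaticScale
!! presumably return the force field's 1-2, 1-3 and 1-4 scaling factors,
!! so directly bonded or nearly bonded pairs can have these terms reduced
!! or switched off entirely.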
1556
1557 if ( iand(iHash, LJ_PAIR).ne.0 ) then
1558 call do_lj_pair(atid_i, atid_j, d, r, rijsq, rcut, sw, vdwMult, vpair, fpair, &
1559 p_vdw, f1)
1560 endif
1561
1562 if ( iand(iHash, ELECTROSTATIC_PAIR).ne.0 ) then
1563 #ifdef IS_MPI
1564 call doElectrostaticPair(atid_i, atid_j, d, r, rijsq, rcut, sw, electroMult, &
1565 vpair, fpair, p_elect, eFrame_Row(:,i), eFrame_Col(:,j), &
1566 f1, t_Row(:,i), t_Col(:,j))
1567 #else
1568 call doElectrostaticPair(atid_i, atid_j, d, r, rijsq, rcut, sw, electroMult, &
1569 vpair, fpair, p_elect, eFrame(:,i), eFrame(:,j), f1, t(:,i), t(:,j))
1570 #endif
1571 endif
1572
1573 if ( iand(iHash, STICKY_PAIR).ne.0 ) then
1574 #ifdef IS_MPI
1575 call do_sticky_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1576 p_hb, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1577 #else
1578 call do_sticky_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1579 p_hb, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1580 #endif
1581 endif
1582
1583 if ( iand(iHash, STICKYPOWER_PAIR).ne.0 ) then
1584 #ifdef IS_MPI
1585 call do_sticky_power_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1586 p_hb, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1587 #else
1588 call do_sticky_power_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1589 p_hb, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1590 #endif
1591 endif
1592
1593 if ( iand(iHash, GAYBERNE_PAIR).ne.0 ) then
1594 #ifdef IS_MPI
1595 call do_gb_pair(atid_i, atid_j, d, r, rijsq, sw, vdwMult, vpair, fpair, &
1596 p_vdw, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1597 #else
1598 call do_gb_pair(atid_i, atid_j, d, r, rijsq, sw, vdwMult, vpair, fpair, &
1599 p_vdw, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1600 #endif
1601 endif
1602
1603 if ( iand(iHash, GAYBERNE_LJ).ne.0 ) then
1604 #ifdef IS_MPI
1605 call do_gb_pair(atid_i, atid_j, d, r, rijsq, sw, vdwMult, vpair, fpair, &
1606 p_vdw, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1607 #else
1608 call do_gb_pair(atid_i, atid_j, d, r, rijsq, sw, vdwMult, vpair, fpair, &
1609 p_vdw, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1610 #endif
1611 endif
1612
1613 if ( iand(iHash, SHAPE_PAIR).ne.0 ) then
1614 #ifdef IS_MPI
1615 call do_shape_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1616 p_vdw, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1617 #else
1618 call do_shape_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1619 p_vdw, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1620 #endif
1621 endif
1622
1623 if ( iand(iHash, SHAPE_LJ).ne.0 ) then
1624 #ifdef IS_MPI
1625 call do_shape_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1626 p_vdw, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1627 #else
1628 call do_shape_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1629 p_vdw, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1630 #endif
1631 endif
1632
1633 if ( iand(iHash, EAM_PAIR).ne.0 ) then
1634 #ifdef IS_MPI
1635 call do_eam_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, &
1636 fpair, p_met, f1, rho_row(i), rho_col(j), dfrhodrho_row(i), dfrhodrho_col(j), &
1637 fshift_i, fshift_j)
1638 #else
1639 call do_eam_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, &
1640 fpair, p_met, f1, rho(i), rho(j), dfrhodrho(i), dfrhodrho(j), fshift_i, fshift_j)
1641 #endif
1642 endif
1643
1644 if ( iand(iHash, SC_PAIR).ne.0 ) then
1645 #ifdef IS_MPI
1646 call do_SC_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, &
1647 fpair, p_met, f1, rho_row(i), rho_col(j), dfrhodrho_row(i), dfrhodrho_col(j), &
1648 fshift_i, fshift_j)
1649 #else
1650 call do_SC_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, &
1651 fpair, p_met, f1, rho(i), rho(j), dfrhodrho(i), dfrhodrho(j), fshift_i, fshift_j)
1652 #endif
1653 endif
1654
1655 if ( iand(iHash, MNM_PAIR).ne.0 ) then
1656 #ifdef IS_MPI
1657 call do_mnm_pair(atid_i, atid_j, d, r, rijsq, rcut, sw, vdwMult, vpair, fpair, &
1658 p_vdw, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1659 #else
1660 call do_mnm_pair(atid_i, atid_j, d, r, rijsq, rcut, sw, vdwMult, vpair, fpair, &
1661 p_vdw, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1662 #endif
1663 endif
1664
1665
1666 #ifdef IS_MPI
1667 id1 = AtomRowToGlobal(i)
1668 id2 = AtomColToGlobal(j)
1669
1670 pot_row(VDW_POT,i) = pot_row(VDW_POT,i) + 0.5*p_vdw
1671 pot_col(VDW_POT,j) = pot_col(VDW_POT,j) + 0.5*p_vdw
1672 pot_row(ELECTROSTATIC_POT,i) = pot_row(ELECTROSTATIC_POT,i) + 0.5*p_elect
1673 pot_col(ELECTROSTATIC_POT,j) = pot_col(ELECTROSTATIC_POT,j) + 0.5*p_elect
1674 pot_row(HB_POT,i) = pot_row(HB_POT,i) + 0.5*p_hb
1675 pot_col(HB_POT,j) = pot_col(HB_POT,j) + 0.5*p_hb
1676 pot_Row(METALLIC_POT,i) = pot_Row(METALLIC_POT,i) + 0.5*p_met
1677 pot_Col(METALLIC_POT,j) = pot_Col(METALLIC_POT,j) + 0.5*p_met
1678
1679 do idx = 1, 3
1680 f_Row(idx,i) = f_Row(idx,i) + f1(idx)
1681 f_Col(idx,j) = f_Col(idx,j) - f1(idx)
1682 enddo
1683 ! particle_pot is the difference between the full potential
1684 ! and the full potential without the presence of a particular
1685 ! particle (atom1).
1686 !
1687 ! This reduces the density at other particle locations, so
1688 ! we need to recompute the density at atom2 assuming atom1
1689 ! didn't contribute. This then requires recomputing the
1690 ! density functional for atom2 as well.
1691 !
1692 ! Most of the particle_pot heavy lifting comes from the
1693 ! pair interaction, and will be handled by vpair. Parallel version.
1694
1695 if ( (iand(iHash, EAM_PAIR).ne.0) .or. (iand(iHash, SC_PAIR).ne.0) ) then
1696 ppot_row(i) = ppot_row(i) - frho_row(j) + fshift_j
1697 ppot_col(j) = ppot_col(j) - frho_col(i) + fshift_i
1698 end if
1699
1700 #else
1701 id1 = i
1702 id2 = j
1703
1704 pot(VDW_POT) = pot(VDW_POT) + p_vdw
1705 pot(ELECTROSTATIC_POT) = pot(ELECTROSTATIC_POT) + p_elect
1706 pot(HB_POT) = pot(HB_POT) + p_hb
1707 pot(METALLIC_POT) = pot(METALLIC_POT) + p_met
1708
1709 ! only done for single processor. In Parallel, the particle_pot
1710 ! is constructed from the row and column potentials.
1711
1712 particle_pot(i) = particle_pot(i) + vpair*sw
1713 particle_pot(j) = particle_pot(j) + vpair*sw
1714
1715 do idx = 1, 3
1716 f(idx,i) = f(idx,i) + f1(idx)
1717 f(idx,j) = f(idx,j) - f1(idx)
1718 enddo
1719 ! particle_pot is the difference between the full potential
1720 ! and the full potential without the presence of a particular
1721 ! particle (atom1).
1722 !
1723 ! This reduces the density at other particle locations, so
1724 ! we need to recompute the density at atom2 assuming atom1
1725 ! didn't contribute. This then requires recomputing the
1726 ! density functional for atom2 as well.
1727 !
1728 ! Most of the particle_pot heavy lifting comes from the
1729 ! pair interaction, and will be handled by vpair. NonParallel version.
1730
1731 if ( (iand(iHash, EAM_PAIR).ne.0) .or. (iand(iHash, SC_PAIR).ne.0) ) then
1732 particle_pot(i) = particle_pot(i) - frho(j) + fshift_j
1733 particle_pot(j) = particle_pot(j) - frho(i) + fshift_i
1734 end if
1735
1736
1737 #endif
1738
1739 if (molMembershipList(id1) .ne. molMembershipList(id2)) then
1740
1741 fpair(1) = fpair(1) + f1(1)
1742 fpair(2) = fpair(2) + f1(2)
1743 fpair(3) = fpair(3) + f1(3)
1744
1745 endif
1746
1747
1748 end subroutine do_pair
1749
1750 subroutine do_prepair(i, j, rijsq, d, sw, rcijsq, dc, rCut, &
1751 eFrame, A, f, t, pot)
1752
1753 real( kind = dp ) :: sw
1754 real( kind = dp ), dimension(LR_POT_TYPES) :: pot
1755 real( kind = dp ), dimension(9,nLocal) :: eFrame
1756 real (kind=dp), dimension(9,nLocal) :: A
1757 real (kind=dp), dimension(3,nLocal) :: f
1758 real (kind=dp), dimension(3,nLocal) :: t
1759
1760 integer, intent(in) :: i, j
1761 real ( kind = dp ), intent(inout) :: rijsq, rcijsq, rCut
1762 real ( kind = dp ) :: r, rc
1763 real ( kind = dp ), intent(inout) :: d(3), dc(3)
1764 real ( kind = dp ) :: rho_i_at_j, rho_j_at_i
1765 integer :: atid_i, atid_j, iHash
1766
1767 r = sqrt(rijsq)
1768
1769 #ifdef IS_MPI
1770 atid_i = atid_row(i)
1771 atid_j = atid_col(j)
1772 #else
1773 atid_i = atid(i)
1774 atid_j = atid(j)
1775 #endif
1776 rho_i_at_j = 0.0_dp
1777 rho_j_at_i = 0.0_dp
1778
1779 iHash = InteractionHash(atid_i, atid_j)
1780
1781 if ( iand(iHash, EAM_PAIR).ne.0 ) then
1782 call calc_EAM_prepair_rho(atid_i, atid_j, d, r, rijsq, rho_i_at_j, rho_j_at_i)
1783 endif
1784
1785 if ( iand(iHash, SC_PAIR).ne.0 ) then
1786 call calc_SC_prepair_rho(atid_i, atid_j, d, r, rijsq, rho_i_at_j, rho_j_at_i)
1787 endif
1788
1789 if ( iand(iHash, EAM_PAIR).ne.0 .or. iand(iHash, SC_PAIR).ne.0 ) then
1790 #ifdef IS_MPI
1791 rho_col(j) = rho_col(j) + rho_i_at_j
1792 rho_row(i) = rho_row(i) + rho_j_at_i
1793 #else
1794 rho(j) = rho(j) + rho_i_at_j
1795 rho(i) = rho(i) + rho_j_at_i
1796 #endif
1797 endif
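!! First half of the two-pass EAM / Sutton-Chen evaluation: the prepair
!! loop only accumulates the electron density each atom feels from its
!! neighbors. do_preforce then evaluates the embedding functional F(rho)
!! and its derivative, which the later PAIR_LOOP pass uses (through
!! dfrhodrho) when computing the pair forces.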
1798
1799 end subroutine do_prepair
1800
1801
1802 subroutine do_preforce(nlocal, pot, particle_pot)
1803 integer :: nlocal
1804 real( kind = dp ),dimension(LR_POT_TYPES) :: pot
1805 real( kind = dp ),dimension(nlocal) :: particle_pot
1806 integer :: sc_err = 0
1807
1808 #ifdef IS_MPI
1809 if ((FF_uses_EAM .and. SIM_uses_EAM) .or. (FF_uses_SC .and. SIM_uses_SC)) then
1810 call scatter(rho_row,rho,plan_atom_row,sc_err)
1811 if (sc_err /= 0 ) then
1812 call handleError("do_preforce()", "Error scattering rho_row into rho")
1813 endif
1814 call scatter(rho_col,rho_tmp,plan_atom_col,sc_err)
1815 if (sc_err /= 0 ) then
1816 call handleError("do_preforce()", "Error scattering rho_col into rho")
1817 endif
1818 rho(1:nlocal) = rho(1:nlocal) + rho_tmp(1:nlocal)
1819 end if
1820 #endif
1821
1822
1823
1824 if (FF_uses_EAM .and. SIM_uses_EAM) then
1825 call calc_EAM_preforce_Frho(nlocal, pot(METALLIC_POT), particle_pot)
1826 endif
1827 if (FF_uses_SC .and. SIM_uses_SC) then
1828 call calc_SC_preforce_Frho(nlocal, pot(METALLIC_POT), particle_pot)
1829 endif
1830
1831 #ifdef IS_MPI
1832 if ((FF_uses_EAM .and. SIM_uses_EAM) .or. (FF_uses_SC .and. SIM_uses_SC)) then
1833 !! communicate f(rho) and derivatives back into row and column arrays
1834 call gather(frho,frho_row,plan_atom_row, sc_err)
1835 if (sc_err /= 0) then
1836 call handleError("do_preforce()","MPI gather frho_row failure")
1837 endif
1838 call gather(dfrhodrho,dfrhodrho_row,plan_atom_row, sc_err)
1839 if (sc_err /= 0) then
1840 call handleError("do_preforce()","MPI gather dfrhodrho_row failure")
1841 endif
1842 call gather(frho,frho_col,plan_atom_col, sc_err)
1843 if (sc_err /= 0) then
1844 call handleError("do_preforce()","MPI gather frho_col failure")
1845 endif
1846 call gather(dfrhodrho,dfrhodrho_col,plan_atom_col, sc_err)
1847 if (sc_err /= 0) then
1848 call handleError("do_preforce()","MPI gather dfrhodrho_col failure")
1849 endif
1850 end if
1851 #endif
1852
1853 end subroutine do_preforce
1854
1855
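  !! get_interatomic_vector: returns the minimum-image displacement
  !! d = q_j - q_i and its squared length r_sq.  Under periodic boundary
  !! conditions the displacement is wrapped using the full H matrix for
  !! non-orthorhombic cells, or the diagonal elements only for
  !! orthorhombic cells.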
1856 subroutine get_interatomic_vector(q_i, q_j, d, r_sq)
1857
1858 real (kind = dp), dimension(3) :: q_i
1859 real (kind = dp), dimension(3) :: q_j
1860 real ( kind = dp ), intent(out) :: r_sq
1861 real( kind = dp ) :: d(3), scaled(3)
1862 real(kind=dp)::t
1863 integer i
1864
1865 d(1) = q_j(1) - q_i(1)
1866 d(2) = q_j(2) - q_i(2)
1867 d(3) = q_j(3) - q_i(3)
1868
1869 ! Wrap back into periodic box if necessary
1870 if ( SIM_uses_PBC ) then
1871
1872 if( .not.boxIsOrthorhombic ) then
1873 ! calc the scaled coordinates.
1874 ! unwrap the matmul and do things explicitly
1875 ! scaled = matmul(HmatInv, d)
1876
1877 scaled(1) = HmatInv(1,1)*d(1) + HmatInv(1,2)*d(2) + HmatInv(1,3)*d(3)
1878 scaled(2) = HmatInv(2,1)*d(1) + HmatInv(2,2)*d(2) + HmatInv(2,3)*d(3)
1879 scaled(3) = HmatInv(3,1)*d(1) + HmatInv(3,2)*d(2) + HmatInv(3,3)*d(3)
1880
1881 ! wrap the scaled coordinates (but don't use anint for speed)
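           ! (the floor / ceiling branches below are equivalent to
           !  scaled = scaled - anint(scaled), mapping each scaled component
           !  to the nearest image; for an orthorhombic box of edge Lx this
           !  is the familiar  dx = dx - Lx*anint(dx/Lx)  form)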
1882
1883 t = scaled(1)
1884 if (t .gt. 0.0) then
1885 scaled(1) = t - floor(t + 0.5)
1886 else
1887 scaled(1) = t - ceiling(t - 0.5)
1888 endif
1889
1890 t = scaled(2)
1891 if (t .gt. 0.0) then
1892 scaled(2) = t - floor(t + 0.5)
1893 else
1894 scaled(2) = t - ceiling(t - 0.5)
1895 endif
1896
1897 t = scaled(3)
1898 if (t .gt. 0.0) then
1899 scaled(3) = t - floor(t + 0.5)
1900 else
1901 scaled(3) = t - ceiling(t - 0.5)
1902 endif
1903
1904 ! calc the wrapped real coordinates from the wrapped scaled
1905 ! coordinates
1906 ! d = matmul(Hmat,scaled)
1907 d(1)= Hmat(1,1)*scaled(1) + Hmat(1,2)*scaled(2) + Hmat(1,3)*scaled(3)
1908 d(2)= Hmat(2,1)*scaled(1) + Hmat(2,2)*scaled(2) + Hmat(2,3)*scaled(3)
1909 d(3)= Hmat(3,1)*scaled(1) + Hmat(3,2)*scaled(2) + Hmat(3,3)*scaled(3)
1910
1911 else
1912 ! calc the scaled coordinates
1913 scaled(1) = d(1) * HmatInv(1,1)
1914 scaled(2) = d(2) * HmatInv(2,2)
1915 scaled(3) = d(3) * HmatInv(3,3)
1916
1917 ! wrap the scaled coordinates
1918
1919 t = scaled(1)
1920 if (t .gt. 0.0) then
1921 scaled(1) = t - floor(t + 0.5)
1922 else
1923 scaled(1) = t - ceiling(t - 0.5)
1924 endif
1925
1926 t = scaled(2)
1927 if (t .gt. 0.0) then
1928 scaled(2) = t - floor(t + 0.5)
1929 else
1930 scaled(2) = t - ceiling(t - 0.5)
1931 endif
1932
1933 t = scaled(3)
1934 if (t .gt. 0.0) then
1935 scaled(3) = t - floor(t + 0.5)
1936 else
1937 scaled(3) = t - ceiling(t - 0.5)
1938 endif
1939
1940 ! calc the wrapped real coordinates from the wrapped scaled
1941 ! coordinates
1942
1943 d(1) = scaled(1)*Hmat(1,1)
1944 d(2) = scaled(2)*Hmat(2,2)
1945 d(3) = scaled(3)*Hmat(3,3)
1946
1947 endif
1948
1949 endif
1950
1951 r_sq = d(1)*d(1) + d(2)*d(2) + d(3)*d(3)
1952
1953 end subroutine get_interatomic_vector
1954
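  !! zero_work_arrays: resets the per-step accumulators before the force
  !! loop begins: under MPI the row, column and temporary copies of the
  !! positions, frames, forces, torques, potentials and densities, and in
  !! all cases the local rho, frho and dfrhodrho arrays.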
1955 subroutine zero_work_arrays()
1956
1957 #ifdef IS_MPI
1958
1959 q_Row = 0.0_dp
1960 q_Col = 0.0_dp
1961
1962 q_group_Row = 0.0_dp
1963 q_group_Col = 0.0_dp
1964
1965 eFrame_Row = 0.0_dp
1966 eFrame_Col = 0.0_dp
1967
1968 A_Row = 0.0_dp
1969 A_Col = 0.0_dp
1970
1971 f_Row = 0.0_dp
1972 f_Col = 0.0_dp
1973 f_Temp = 0.0_dp
1974
1975 t_Row = 0.0_dp
1976 t_Col = 0.0_dp
1977 t_Temp = 0.0_dp
1978
1979 pot_Row = 0.0_dp
1980 pot_Col = 0.0_dp
1981 pot_Temp = 0.0_dp
1982 ppot_Temp = 0.0_dp
1983
1984 frho_row = 0.0_dp
1985 frho_col = 0.0_dp
1986 rho_row = 0.0_dp
1987 rho_col = 0.0_dp
1988 rho_tmp = 0.0_dp
1989 dfrhodrho_row = 0.0_dp
1990 dfrhodrho_col = 0.0_dp
1991
1992 #endif
1993 rho = 0.0_dp
1994 frho = 0.0_dp
1995 dfrhodrho = 0.0_dp
1996
1997 end subroutine zero_work_arrays
1998
1999 function skipThisPair(atom1, atom2) result(skip_it)
2000 integer, intent(in) :: atom1
2001 integer, intent(in), optional :: atom2
2002 logical :: skip_it
2003 integer :: unique_id_1, unique_id_2
2004 integer :: me_i,me_j
2005 integer :: i
2006
2007 skip_it = .false.
2008
2009       !! There are a number of reasons to skip a pair or a particle.
2010       !! Mostly we do this to exclude atoms that are involved in short-
2011       !! range interactions (bonds, bends, torsions), but we also need
2012       !! to exclude some overcounted interactions that result from
2013       !! the parallel decomposition.
2014
2015 #ifdef IS_MPI
2016 !! in MPI, we have to look up the unique IDs for each atom
2017 unique_id_1 = AtomRowToGlobal(atom1)
2018 unique_id_2 = AtomColToGlobal(atom2)
2019 !! this situation should only arise in MPI simulations
2020 if (unique_id_1 == unique_id_2) then
2021 skip_it = .true.
2022 return
2023 end if
2024
2025 !! this prevents us from doing the pair on multiple processors
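       !! (each unordered pair appears twice in the row/column decomposition,
       !! once with atom1 in the row arrays and once with it in the columns;
       !! keeping the pair only when the parity of the id sum matches the
       !! ordering guarantees it is computed exactly once)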
2026 if (unique_id_1 < unique_id_2) then
2027 if (mod(unique_id_1 + unique_id_2,2) == 0) then
2028 skip_it = .true.
2029 return
2030 endif
2031 else
2032 if (mod(unique_id_1 + unique_id_2,2) == 1) then
2033 skip_it = .true.
2034 return
2035 endif
2036 endif
2037 #else
2038 !! in the normal loop, the atom numbers are unique
2039 unique_id_1 = atom1
2040 unique_id_2 = atom2
2041 #endif
2042
2043 #ifdef IS_MPI
2044 do i = 1, nSkipsForRowAtom(atom1)
2045 if (skipsForRowAtom(atom1, i) .eq. unique_id_2) then
2046 skip_it = .true.
2047 return
2048 endif
2049 end do
2050 #else
2051 do i = 1, nSkipsForLocalAtom(atom1)
2052 if (skipsForLocalAtom(atom1, i) .eq. unique_id_2) then
2053 skip_it = .true.
2054 return
2055 endif
2056 end do
2057 #endif
2058
2059 return
2060 end function skipThisPair
2061
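  !! getTopoDistance: returns the topological (bond-path) distance stored
  !! for the pair (atom1, atom2), or 0 if atom2 is not in atom1's topology
  !! list (i.e. a normal, unconnected pair).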
2062 function getTopoDistance(atom1, atom2) result(topoDist)
2063 integer, intent(in) :: atom1
2064 integer, intent(in) :: atom2
2065 integer :: topoDist
2066 integer :: unique_id_2
2067 integer :: i
2068
2069 #ifdef IS_MPI
2070 unique_id_2 = AtomColToGlobal(atom2)
2071 #else
2072 unique_id_2 = atom2
2073 #endif
2074
2075 ! zero is default for unconnected (i.e. normal) pair interactions
2076
2077 topoDist = 0
2078
2079 do i = 1, nTopoPairsForAtom(atom1)
2080 if (toposForAtom(atom1, i) .eq. unique_id_2) then
2081 topoDist = topoDistance(atom1, i)
2082 return
2083 endif
2084 end do
2085
2086 return
2087 end function getTopoDistance
2088
2089 function FF_UsesDirectionalAtoms() result(doesit)
2090 logical :: doesit
2091 doesit = FF_uses_DirectionalAtoms
2092 end function FF_UsesDirectionalAtoms
2093
2094 function FF_RequiresPrepairCalc() result(doesit)
2095 logical :: doesit
2096 doesit = FF_uses_EAM .or. FF_uses_SC
2097 end function FF_RequiresPrepairCalc
2098
2099 #ifdef PROFILE
2100 function getforcetime() result(totalforcetime)
2101 real(kind=dp) :: totalforcetime
2102 totalforcetime = forcetime
2103 end function getforcetime
2104 #endif
2105
2106   !! This cleans components of force arrays belonging only to Fortran
2107
2108 subroutine add_stress_tensor(dpair, fpair, tau, v_j, J_v)
2109
2110 real( kind = dp ), dimension(3), intent(in) :: dpair, fpair, v_j
2111 real( kind = dp ), dimension(9), intent(inout) :: tau
2112
2113 real( kind = dp ), dimension(3), intent(inout) :: J_v
2114 ! because the d vector is the rj - ri vector, and
2115 ! because fx, fy, fz are the force on atom i, we need a
2116 ! negative sign here:
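    ! in index form:  tau(3*(a-1)+b) = tau(3*(a-1)+b) - d(a)*f(b),
    ! i.e. the pairwise virial contribution  -r_ij (x) f_i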
2117
2118 tau(1) = tau(1) - dpair(1) * fpair(1)
2119 tau(2) = tau(2) - dpair(1) * fpair(2)
2120 tau(3) = tau(3) - dpair(1) * fpair(3)
2121 tau(4) = tau(4) - dpair(2) * fpair(1)
2122 tau(5) = tau(5) - dpair(2) * fpair(2)
2123 tau(6) = tau(6) - dpair(2) * fpair(3)
2124 tau(7) = tau(7) - dpair(3) * fpair(1)
2125 tau(8) = tau(8) - dpair(3) * fpair(2)
2126 tau(9) = tau(9) - dpair(3) * fpair(3)
2127
2128
2129
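    ! the same pair quantities also accumulate into the heat-flux vector:
    !   J_v(a) = J_v(a) + d(a) * (f . v_j)      (cf. add_heat_flux below)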
2130 J_v(1) = J_v(1) + dpair(1)*(fpair(1)*v_j(1) + fpair(2)*v_j(2) + fpair(3)*v_j(3))
2131 J_v(2) = J_v(2) + dpair(2)*(fpair(1)*v_j(1) + fpair(2)*v_j(2) + fpair(3)*v_j(3))
2132 J_v(3) = J_v(3) + dpair(3)*(fpair(1)*v_j(1) + fpair(2)*v_j(2) + fpair(3)*v_j(3))
2133 ! J_v(1) = J_v(1) + tau(1)*v_j(1) + tau(4)*v_j(2) + tau(7)*v_j(3)
2134 ! J_v(2) = J_v(2) + tau(2)*v_j(1) + tau(5)*v_j(2) + tau(8)*v_j(3)
2135 ! J_v(3) = J_v(3) + tau(3)*v_j(1) + tau(6)*v_j(2) + tau(9)*v_j(3)
2136
2137
2138
2139 end subroutine add_stress_tensor
2140
2141 !! Calculates the \sum r_ji*(f_ji *DOT* vj) component of the heat flux S
2142 subroutine add_heat_flux(dpair, fpair, v_j, S)
2143 real(kind=dp), dimension(3), intent(in) :: dpair,fpair, v_j
2144 real(kind=dp), dimension(3), intent(inout) :: S
2145 S(1) = S(1) + dpair(1) * (fpair(1)*v_j(1) + fpair(2)*v_j(2) + fpair(3)*v_j(3))
2146 S(2) = S(2) + dpair(2) * (fpair(1)*v_j(1) + fpair(2)*v_j(2) + fpair(3)*v_j(3))
2147 S(3) = S(3) + dpair(3) * (fpair(1)*v_j(1) + fpair(2)*v_j(2) + fpair(3)*v_j(3))
2148 !!S(1) = S(1) + fpair(1)*v_j(1)*dpair(1)
2149 !!S(2) = S(2) + fpair(2)*v_j(2)*dpair(2)
2150 !!S(3) = S(3) + fpair(3)*v_j(3)*dpair(3)
2151
2152
2153 end subroutine add_heat_flux
2154 end module doForces

Properties

Name Value
svn:keywords Author Id Revision Date