root/OpenMD/branches/heatflux/src/UseTheForce/doForces.F90
Revision: 1679
Committed: Thu Feb 23 20:28:56 2012 UTC by chuckv
File size: 69317 byte(s)
Log Message:
Changed sign of force in stress tensor J_v calculation.

File Contents

1 !!
2 !! Copyright (c) 2005, 2009 The University of Notre Dame. All Rights Reserved.
3 !!
4 !! The University of Notre Dame grants you ("Licensee") a
5 !! non-exclusive, royalty free, license to use, modify and
6 !! redistribute this software in source and binary code form, provided
7 !! that the following conditions are met:
8 !!
9 !! 1. Redistributions of source code must retain the above copyright
10 !! notice, this list of conditions and the following disclaimer.
11 !!
12 !! 2. Redistributions in binary form must reproduce the above copyright
13 !! notice, this list of conditions and the following disclaimer in the
14 !! documentation and/or other materials provided with the
15 !! distribution.
16 !!
17 !! This software is provided "AS IS," without a warranty of any
18 !! kind. All express or implied conditions, representations and
19 !! warranties, including any implied warranty of merchantability,
20 !! fitness for a particular purpose or non-infringement, are hereby
21 !! excluded. The University of Notre Dame and its licensors shall not
22 !! be liable for any damages suffered by licensee as a result of
23 !! using, modifying or distributing the software or its
24 !! derivatives. In no event will the University of Notre Dame or its
25 !! licensors be liable for any lost revenue, profit or data, or for
26 !! direct, indirect, special, consequential, incidental or punitive
27 !! damages, however caused and regardless of the theory of liability,
28 !! arising out of the use of or inability to use software, even if the
29 !! University of Notre Dame has been advised of the possibility of
30 !! such damages.
31 !!
32 !! SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
33 !! research, please cite the appropriate papers when you publish your
34 !! work. Good starting points are:
35 !!
36 !! [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
37 !! [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
38 !! [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
39 !! [4] Vardeman & Gezelter, in progress (2009).
40 !!
41
42 !! doForces.F90
43 !! module doForces
44 !! Calculates Long Range forces.
45
46 !! @author Charles F. Vardeman II
47 !! @author Matthew Meineke
48 !! @version $Id$, $Date$, $Name: not supported by cvs2svn $, $Revision$
49
50
51 module doForces
52 use force_globals
53 use fForceOptions
54 use simulation
55 use definitions
56 use atype_module
57 use switcheroo
58 use neighborLists
59 use lj
60 use sticky
61 use electrostatic_module
62 use gayberne
63 use shapes
64 use vector_class
65 use eam
66 use MetalNonMetal
67 use suttonchen
68 use status
69 #ifdef IS_MPI
70 use mpiSimulation
71 #endif
72
73 implicit none
74 PRIVATE
75
76 #define __FORTRAN90
77 #include "UseTheForce/fCutoffPolicy.h"
78 #include "UseTheForce/DarkSide/fInteractionMap.h"
79 #include "UseTheForce/DarkSide/fElectrostaticSummationMethod.h"
80
81 INTEGER, PARAMETER:: PREPAIR_LOOP = 1
82 INTEGER, PARAMETER:: PAIR_LOOP = 2
83
84 logical, save :: haveNeighborList = .false.
85 logical, save :: haveSIMvariables = .false.
86 logical, save :: haveSaneForceField = .false.
87 logical, save :: haveInteractionHash = .false.
88 logical, save :: haveGtypeCutoffMap = .false.
89 logical, save :: haveDefaultCutoffs = .false.
90 logical, save :: haveSkinThickness = .false.
91 logical, save :: haveElectrostaticSummationMethod = .false.
92 logical, save :: haveCutoffPolicy = .false.
93 logical, save :: VisitCutoffsAfterComputing = .false.
94 logical, save :: do_box_dipole = .false.
95
96 logical, save :: FF_uses_DirectionalAtoms
97 logical, save :: FF_uses_Dipoles
98 logical, save :: FF_uses_GayBerne
99 logical, save :: FF_uses_EAM
100 logical, save :: FF_uses_SC
101 logical, save :: FF_uses_MNM
102
103
104 logical, save :: SIM_uses_DirectionalAtoms
105 logical, save :: SIM_uses_EAM
106 logical, save :: SIM_uses_SC
107 logical, save :: SIM_uses_MNM
108 logical, save :: SIM_requires_postpair_calc
109 logical, save :: SIM_requires_prepair_calc
110 logical, save :: SIM_uses_PBC
111 logical, save :: SIM_uses_AtomicVirial
112
113 integer, save :: electrostaticSummationMethod
114 integer, save :: cutoffPolicy = TRADITIONAL_CUTOFF_POLICY
115
116 real(kind=dp), save :: defaultRcut, defaultRsw, largestRcut
117 real(kind=dp), save :: skinThickness
118 logical, save :: defaultDoShiftPot
119 logical, save :: defaultDoShiftFrc
120
121 public :: init_FF
122 public :: setCutoffs
123 public :: cWasLame
124 public :: setElectrostaticMethod
125 public :: setBoxDipole
126 public :: getBoxDipole
127 public :: setCutoffPolicy
128 public :: setSkinThickness
129 public :: do_force_loop
130
131 #ifdef PROFILE
132 public :: getforcetime
133 real, save :: forceTime = 0
134 real :: forceTimeInitial, forceTimeFinal
135 integer :: nLoops
136 #endif
137
138 !! Variables for cutoff mapping and interaction mapping
139 ! Bit hash to determine pair-pair interactions.
140 integer, dimension(:,:), allocatable :: InteractionHash
141 real(kind=dp), dimension(:), allocatable :: atypeMaxCutoff
142 real(kind=dp), dimension(:), allocatable, target :: groupMaxCutoffRow
143 real(kind=dp), dimension(:), pointer :: groupMaxCutoffCol
144
145 integer, dimension(:), allocatable, target :: groupToGtypeRow
146 integer, dimension(:), pointer :: groupToGtypeCol => null()
147
148 real(kind=dp), dimension(:), allocatable,target :: gtypeMaxCutoffRow
149 real(kind=dp), dimension(:), pointer :: gtypeMaxCutoffCol
150     type :: gtypeCutoffs
151 real(kind=dp) :: rcut
152 real(kind=dp) :: rcutsq
153 real(kind=dp) :: rlistsq
154 end type gtypeCutoffs
155 type(gtypeCutoffs), dimension(:,:), allocatable :: gtypeCutoffMap
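  !! For a pair of group types (g1,g2), gtypeCutoffMap(g1,g2) carries the
  !! pair cutoff, its square, and the squared neighbor-list radius. A
  !! minimal sketch of how the force loop below consumes it:
  !!
  !!   if (rgrpsq < gtypeCutoffMap(groupToGtypeRow(i),groupToGtypeCol(j))%rlistsq) then
  !!      ! group pair (i,j) belongs on the neighbor list
  !!   endif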
156
157 real(kind=dp), dimension(3) :: boxDipole
158
159 contains
160
161 subroutine createInteractionHash()
162 integer :: nAtypes
163 integer :: i
164 integer :: j
165 integer :: iHash
166 !! Test Types
167 logical :: i_is_LJ
168 logical :: i_is_Elect
169 logical :: i_is_Sticky
170 logical :: i_is_StickyP
171 logical :: i_is_GB
172 logical :: i_is_EAM
173 logical :: i_is_Shape
174 logical :: i_is_SC
175 logical :: j_is_LJ
176 logical :: j_is_Elect
177 logical :: j_is_Sticky
178 logical :: j_is_StickyP
179 logical :: j_is_GB
180 logical :: j_is_EAM
181 logical :: j_is_Shape
182 logical :: j_is_SC
183 real(kind=dp) :: myRcut
184
185 if (.not. associated(atypes)) then
186 call handleError("doForces", "atypes was not present before call of createInteractionHash!")
187 return
188 endif
189
190 nAtypes = getSize(atypes)
191
192 if (nAtypes == 0) then
193 call handleError("doForces", "nAtypes was zero during call of createInteractionHash!")
194 return
195 end if
196
197 if (.not. allocated(InteractionHash)) then
198 allocate(InteractionHash(nAtypes,nAtypes))
199 else
200 deallocate(InteractionHash)
201 allocate(InteractionHash(nAtypes,nAtypes))
202 endif
203
204 if (.not. allocated(atypeMaxCutoff)) then
205 allocate(atypeMaxCutoff(nAtypes))
206 else
207 deallocate(atypeMaxCutoff)
208 allocate(atypeMaxCutoff(nAtypes))
209 endif
210
211 do i = 1, nAtypes
212 call getElementProperty(atypes, i, "is_LennardJones", i_is_LJ)
213 call getElementProperty(atypes, i, "is_Electrostatic", i_is_Elect)
214 call getElementProperty(atypes, i, "is_Sticky", i_is_Sticky)
215 call getElementProperty(atypes, i, "is_StickyPower", i_is_StickyP)
216 call getElementProperty(atypes, i, "is_GayBerne", i_is_GB)
217 call getElementProperty(atypes, i, "is_EAM", i_is_EAM)
218 call getElementProperty(atypes, i, "is_Shape", i_is_Shape)
219 call getElementProperty(atypes, i, "is_SC", i_is_SC)
220
221 do j = i, nAtypes
222
223 iHash = 0
224 myRcut = 0.0_dp
225
226 call getElementProperty(atypes, j, "is_LennardJones", j_is_LJ)
227 call getElementProperty(atypes, j, "is_Electrostatic", j_is_Elect)
228 call getElementProperty(atypes, j, "is_Sticky", j_is_Sticky)
229 call getElementProperty(atypes, j, "is_StickyPower", j_is_StickyP)
230 call getElementProperty(atypes, j, "is_GayBerne", j_is_GB)
231 call getElementProperty(atypes, j, "is_EAM", j_is_EAM)
232 call getElementProperty(atypes, j, "is_Shape", j_is_Shape)
233 call getElementProperty(atypes, j, "is_SC", j_is_SC)
234
235 if (i_is_LJ .and. j_is_LJ) then
236 iHash = ior(iHash, LJ_PAIR)
237 endif
238
239 if (i_is_Elect .and. j_is_Elect) then
240 iHash = ior(iHash, ELECTROSTATIC_PAIR)
241 endif
242
243 if (i_is_Sticky .and. j_is_Sticky) then
244 iHash = ior(iHash, STICKY_PAIR)
245 endif
246
247 if (i_is_StickyP .and. j_is_StickyP) then
248 iHash = ior(iHash, STICKYPOWER_PAIR)
249 endif
250
251 if (i_is_EAM .and. j_is_EAM) then
252 iHash = ior(iHash, EAM_PAIR)
253 endif
254
255 if (i_is_SC .and. j_is_SC) then
256 iHash = ior(iHash, SC_PAIR)
257 endif
258
259 if (i_is_GB .and. j_is_GB) iHash = ior(iHash, GAYBERNE_PAIR)
260 if (i_is_GB .and. j_is_LJ) iHash = ior(iHash, GAYBERNE_LJ)
261 if (i_is_LJ .and. j_is_GB) iHash = ior(iHash, GAYBERNE_LJ)
262
263 if ((i_is_EAM.or.i_is_SC).and.(.not.(j_is_EAM.or.j_is_SC))) iHash = ior(iHash, MNM_PAIR)
264 if ((j_is_EAM.or.j_is_SC).and.(.not.(i_is_EAM.or.i_is_SC))) iHash = ior(iHash, MNM_PAIR)
265
266 if (i_is_Shape .and. j_is_Shape) iHash = ior(iHash, SHAPE_PAIR)
267 if (i_is_Shape .and. j_is_LJ) iHash = ior(iHash, SHAPE_LJ)
268 if (i_is_LJ .and. j_is_Shape) iHash = ior(iHash, SHAPE_LJ)
269
270
271 InteractionHash(i,j) = iHash
272 InteractionHash(j,i) = iHash
273
274 end do
275
276 end do
277
278 haveInteractionHash = .true.
279 end subroutine createInteractionHash
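  !! A minimal sketch of how the hash is consumed later (this mirrors the
  !! tests in do_pair below; the *_PAIR bit masks come from
  !! UseTheForce/DarkSide/fInteractionMap.h):
  !!
  !!   iHash = InteractionHash(atid(i), atid(j))
  !!   if ( iand(iHash, LJ_PAIR).ne.0 ) then
  !!      ! atoms i and j interact through Lennard-Jones
  !!   endif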
280
281 subroutine createGtypeCutoffMap()
282
283 logical :: i_is_LJ
284 logical :: i_is_Elect
285 logical :: i_is_Sticky
286 logical :: i_is_StickyP
287 logical :: i_is_GB
288 logical :: i_is_EAM
289 logical :: i_is_Shape
290 logical :: i_is_SC
291 logical :: GtypeFound
292
293 integer :: myStatus, nAtypes, i, j, istart, iend, jstart, jend
294 integer :: n_in_i, me_i, ia, g, atom1, ja, n_in_j,me_j
295 integer :: nGroupsInRow
296 integer :: nGroupsInCol
297 integer :: nGroupTypesRow,nGroupTypesCol
298 real(kind=dp):: thisSigma, bigSigma, thisRcut, tradRcut, tol
299 real(kind=dp) :: biggestAtypeCutoff
300
301 if (.not. haveInteractionHash) then
302 call createInteractionHash()
303 endif
304 #ifdef IS_MPI
305 nGroupsInRow = getNgroupsInRow(plan_group_row)
306 nGroupsInCol = getNgroupsInCol(plan_group_col)
307 #endif
308 nAtypes = getSize(atypes)
309 ! Set all of the initial cutoffs to zero.
310 atypeMaxCutoff = 0.0_dp
311 biggestAtypeCutoff = 0.0_dp
312 do i = 1, nAtypes
313 if (SimHasAtype(i)) then
314 call getElementProperty(atypes, i, "is_LennardJones", i_is_LJ)
315 call getElementProperty(atypes, i, "is_Electrostatic", i_is_Elect)
316 call getElementProperty(atypes, i, "is_Sticky", i_is_Sticky)
317 call getElementProperty(atypes, i, "is_StickyPower", i_is_StickyP)
318 call getElementProperty(atypes, i, "is_GayBerne", i_is_GB)
319 call getElementProperty(atypes, i, "is_EAM", i_is_EAM)
320 call getElementProperty(atypes, i, "is_Shape", i_is_Shape)
321 call getElementProperty(atypes, i, "is_SC", i_is_SC)
322
323 if (haveDefaultCutoffs) then
324 atypeMaxCutoff(i) = defaultRcut
325 else
326 if (i_is_LJ) then
327 thisRcut = getSigma(i) * 2.5_dp
328 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
329 endif
330 if (i_is_Elect) then
331 thisRcut = defaultRcut
332 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
333 endif
334 if (i_is_Sticky) then
335 thisRcut = getStickyCut(i)
336 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
337 endif
338 if (i_is_StickyP) then
339 thisRcut = getStickyPowerCut(i)
340 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
341 endif
342 if (i_is_GB) then
343 thisRcut = getGayBerneCut(i)
344 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
345 endif
346 if (i_is_EAM) then
347 thisRcut = getEAMCut(i)
348 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
349 endif
350 if (i_is_Shape) then
351 thisRcut = getShapeCut(i)
352 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
353 endif
354 if (i_is_SC) then
355 thisRcut = getSCCut(i)
356 if (thisRCut .gt. atypeMaxCutoff(i)) atypeMaxCutoff(i) = thisRCut
357 endif
358 endif
359
360 if (atypeMaxCutoff(i).gt.biggestAtypeCutoff) then
361 biggestAtypeCutoff = atypeMaxCutoff(i)
362 endif
363
364 endif
365 enddo
366
367 istart = 1
368 jstart = 1
369 #ifdef IS_MPI
370 iend = nGroupsInRow
371 jend = nGroupsInCol
372 #else
373 iend = nGroups
374 jend = nGroups
375 #endif
376
377 !! allocate the groupToGtype and gtypeMaxCutoff here.
378 if(.not.allocated(groupToGtypeRow)) then
379 ! allocate(groupToGtype(iend))
380 allocate(groupToGtypeRow(iend))
381 else
382 deallocate(groupToGtypeRow)
383 allocate(groupToGtypeRow(iend))
384 endif
385 if(.not.allocated(groupMaxCutoffRow)) then
386 allocate(groupMaxCutoffRow(iend))
387 else
388 deallocate(groupMaxCutoffRow)
389 allocate(groupMaxCutoffRow(iend))
390 end if
391
392 if(.not.allocated(gtypeMaxCutoffRow)) then
393 allocate(gtypeMaxCutoffRow(iend))
394 else
395 deallocate(gtypeMaxCutoffRow)
396 allocate(gtypeMaxCutoffRow(iend))
397 endif
398
399
400 #ifdef IS_MPI
401 ! We only allocate new storage if we are in MPI because Ncol /= Nrow
402 if(.not.associated(groupToGtypeCol)) then
403 allocate(groupToGtypeCol(jend))
404 else
405 deallocate(groupToGtypeCol)
406 allocate(groupToGtypeCol(jend))
407 end if
408
409 if(.not.associated(groupMaxCutoffCol)) then
410 allocate(groupMaxCutoffCol(jend))
411 else
412 deallocate(groupMaxCutoffCol)
413 allocate(groupMaxCutoffCol(jend))
414 end if
415 if(.not.associated(gtypeMaxCutoffCol)) then
416 allocate(gtypeMaxCutoffCol(jend))
417 else
418 deallocate(gtypeMaxCutoffCol)
419 allocate(gtypeMaxCutoffCol(jend))
420 end if
421
422 groupMaxCutoffCol = 0.0_dp
423 gtypeMaxCutoffCol = 0.0_dp
424
425 #endif
426 groupMaxCutoffRow = 0.0_dp
427 gtypeMaxCutoffRow = 0.0_dp
428
429
430 !! first we do a single loop over the cutoff groups to find the
431 !! largest cutoff for any atypes present in this group. We also
432 !! create gtypes at this point.
433
434 tol = 1.0e-6_dp
435 nGroupTypesRow = 0
436 nGroupTypesCol = 0
437 do i = istart, iend
438 n_in_i = groupStartRow(i+1) - groupStartRow(i)
439 groupMaxCutoffRow(i) = 0.0_dp
440 do ia = groupStartRow(i), groupStartRow(i+1)-1
441 atom1 = groupListRow(ia)
442 #ifdef IS_MPI
443 me_i = atid_row(atom1)
444 #else
445 me_i = atid(atom1)
446 #endif
447 if (atypeMaxCutoff(me_i).gt.groupMaxCutoffRow(i)) then
448 groupMaxCutoffRow(i)=atypeMaxCutoff(me_i)
449 endif
450 enddo
451 if (nGroupTypesRow.eq.0) then
452 nGroupTypesRow = nGroupTypesRow + 1
453 gtypeMaxCutoffRow(nGroupTypesRow) = groupMaxCutoffRow(i)
454 groupToGtypeRow(i) = nGroupTypesRow
455 else
456 GtypeFound = .false.
457 do g = 1, nGroupTypesRow
458 if ( abs(groupMaxCutoffRow(i) - gtypeMaxCutoffRow(g)).lt.tol) then
459 groupToGtypeRow(i) = g
460 GtypeFound = .true.
461 endif
462 enddo
463 if (.not.GtypeFound) then
464 nGroupTypesRow = nGroupTypesRow + 1
465 gtypeMaxCutoffRow(nGroupTypesRow) = groupMaxCutoffRow(i)
466 groupToGtypeRow(i) = nGroupTypesRow
467 endif
468 endif
469 enddo
470
471 #ifdef IS_MPI
472 do j = jstart, jend
473 n_in_j = groupStartCol(j+1) - groupStartCol(j)
474 groupMaxCutoffCol(j) = 0.0_dp
475 do ja = groupStartCol(j), groupStartCol(j+1)-1
476 atom1 = groupListCol(ja)
477
478 me_j = atid_col(atom1)
479
480 if (atypeMaxCutoff(me_j).gt.groupMaxCutoffCol(j)) then
481 groupMaxCutoffCol(j)=atypeMaxCutoff(me_j)
482 endif
483 enddo
484
485 if (nGroupTypesCol.eq.0) then
486 nGroupTypesCol = nGroupTypesCol + 1
487 gtypeMaxCutoffCol(nGroupTypesCol) = groupMaxCutoffCol(j)
488 groupToGtypeCol(j) = nGroupTypesCol
489 else
490 GtypeFound = .false.
491 do g = 1, nGroupTypesCol
492 if ( abs(groupMaxCutoffCol(j) - gtypeMaxCutoffCol(g)).lt.tol) then
493 groupToGtypeCol(j) = g
494 GtypeFound = .true.
495 endif
496 enddo
497 if (.not.GtypeFound) then
498 nGroupTypesCol = nGroupTypesCol + 1
499 gtypeMaxCutoffCol(nGroupTypesCol) = groupMaxCutoffCol(j)
500 groupToGtypeCol(j) = nGroupTypesCol
501 endif
502 endif
503 enddo
504
505 #else
506 ! Set pointers to information we just found
507 nGroupTypesCol = nGroupTypesRow
508 groupToGtypeCol => groupToGtypeRow
509 gtypeMaxCutoffCol => gtypeMaxCutoffRow
510 groupMaxCutoffCol => groupMaxCutoffRow
511 #endif
512
513 !! allocate the gtypeCutoffMap here.
514 allocate(gtypeCutoffMap(nGroupTypesRow,nGroupTypesCol))
515 !! then we do a double loop over all the group TYPES to find the cutoff
516 !! map between groups of two types
517 tradRcut = max(maxval(gtypeMaxCutoffRow),maxval(gtypeMaxCutoffCol))
518
519 do i = 1, nGroupTypesRow
520 do j = 1, nGroupTypesCol
521
522 select case(cutoffPolicy)
523 case(TRADITIONAL_CUTOFF_POLICY)
524 thisRcut = tradRcut
525 case(MIX_CUTOFF_POLICY)
526 thisRcut = 0.5_dp * (gtypeMaxCutoffRow(i) + gtypeMaxCutoffCol(j))
527 case(MAX_CUTOFF_POLICY)
528 thisRcut = max(gtypeMaxCutoffRow(i), gtypeMaxCutoffCol(j))
529 case default
530 call handleError("createGtypeCutoffMap", "Unknown Cutoff Policy")
531 return
532 end select
533 gtypeCutoffMap(i,j)%rcut = thisRcut
534
535 if (thisRcut.gt.largestRcut) largestRcut = thisRcut
536
537 gtypeCutoffMap(i,j)%rcutsq = thisRcut*thisRcut
538
539 if (.not.haveSkinThickness) then
540 skinThickness = 1.0_dp
541 endif
542
543 gtypeCutoffMap(i,j)%rlistsq = (thisRcut + skinThickness)**2
544
545 ! sanity check
546
547 if (haveDefaultCutoffs) then
548 if (abs(gtypeCutoffMap(i,j)%rcut - defaultRcut).gt.0.0001) then
549 call handleError("createGtypeCutoffMap", "user-specified rCut does not match computed group Cutoff")
550 endif
551 endif
552 enddo
553 enddo
554
555 if(allocated(gtypeMaxCutoffRow)) deallocate(gtypeMaxCutoffRow)
556 if(allocated(groupMaxCutoffRow)) deallocate(groupMaxCutoffRow)
557 if(allocated(atypeMaxCutoff)) deallocate(atypeMaxCutoff)
558 #ifdef IS_MPI
559 if(associated(groupMaxCutoffCol)) deallocate(groupMaxCutoffCol)
560 if(associated(gtypeMaxCutoffCol)) deallocate(gtypeMaxCutoffCol)
561 #endif
562 groupMaxCutoffCol => null()
563 gtypeMaxCutoffCol => null()
564
565 haveGtypeCutoffMap = .true.
566 end subroutine createGtypeCutoffMap
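  !! Worked example of the three cutoff policies above, assuming two
  !! hypothetical group types with gtypeMaxCutoff values of 9.0 and 12.0
  !! angstroms:
  !!
  !!   TRADITIONAL_CUTOFF_POLICY : rcut = tradRcut (largest anywhere) = 12.0
  !!   MIX_CUTOFF_POLICY         : rcut = 0.5*(9.0 + 12.0)            = 10.5
  !!   MAX_CUTOFF_POLICY         : rcut = max(9.0, 12.0)              = 12.0
  !!
  !! In every case the neighbor-list radius for the pair satisfies
  !!   rlistsq = (rcut + skinThickness)**2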
567
568 subroutine setCutoffs(defRcut, defRsw, defSP, defSF)
569
570 real(kind=dp),intent(in) :: defRcut, defRsw
571 integer, intent(in) :: defSP, defSF
572 character(len = statusMsgSize) :: errMsg
573 integer :: localError
574
575 defaultRcut = defRcut
576 defaultRsw = defRsw
577
578 if (defSP .ne. 0) then
579 defaultDoShiftPot = .true.
580 else
581 defaultDoShiftPot = .false.
582 endif
583 if (defSF .ne. 0) then
584 defaultDoShiftFrc = .true.
585 else
586 defaultDoShiftFrc = .false.
587 endif
588
589 if (abs(defaultRcut-defaultRsw) .lt. 0.0001) then
590 if (defaultDoShiftFrc) then
591 write(errMsg, *) &
592 'cutoffRadius and switchingRadius are set to the', newline &
593 // tab, 'same value. OpenMD will use shifted force', newline &
594 // tab, 'potentials instead of switching functions.'
595
596 call handleInfo("setCutoffs", errMsg)
597 else
598 write(errMsg, *) &
599 'cutoffRadius and switchingRadius are set to the', newline &
600 // tab, 'same value. OpenMD will use shifted', newline &
601 // tab, 'potentials instead of switching functions.'
602
603 call handleInfo("setCutoffs", errMsg)
604
605 defaultDoShiftPot = .true.
606 endif
607
608 endif
609
610 localError = 0
611 call setLJDefaultCutoff( defaultRcut, defaultDoShiftPot, &
612 defaultDoShiftFrc )
613 call setElectrostaticCutoffRadius( defaultRcut, defaultRsw )
614 call setCutoffEAM( defaultRcut )
615 call setCutoffSC( defaultRcut )
616 call setMnMDefaultCutoff( defaultRcut, defaultDoShiftPot, &
617 defaultDoShiftFrc )
618 call set_switch(defaultRsw, defaultRcut)
619 call setHmatDangerousRcutValue(defaultRcut)
620
621 haveDefaultCutoffs = .true.
622 haveGtypeCutoffMap = .false.
623
624 end subroutine setCutoffs
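  !! A hypothetical call from the C-side interface (the numbers are examples
  !! only, not defaults): a 12.0 angstrom cutoff, a 10.0 angstrom switching
  !! radius, shifted potentials on, shifted forces off:
  !!
  !!   call setCutoffs(12.0_dp, 10.0_dp, 1, 0)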
625
626 subroutine cWasLame()
627
628 VisitCutoffsAfterComputing = .true.
629 return
630
631 end subroutine cWasLame
632
633 subroutine setCutoffPolicy(cutPolicy)
634
635 integer, intent(in) :: cutPolicy
636
637 cutoffPolicy = cutPolicy
638 haveCutoffPolicy = .true.
639 haveGtypeCutoffMap = .false.
640
641 end subroutine setCutoffPolicy
642
643 subroutine setBoxDipole()
644
645 do_box_dipole = .true.
646
647 end subroutine setBoxDipole
648
649 subroutine getBoxDipole( box_dipole )
650
651 real(kind=dp), intent(inout), dimension(3) :: box_dipole
652
653 box_dipole = boxDipole
654
655 end subroutine getBoxDipole
656
657 subroutine setElectrostaticMethod( thisESM )
658
659 integer, intent(in) :: thisESM
660
661 electrostaticSummationMethod = thisESM
662 haveElectrostaticSummationMethod = .true.
663
664 end subroutine setElectrostaticMethod
665
666 subroutine setSkinThickness( thisSkin )
667
668 real(kind=dp), intent(in) :: thisSkin
669
670 skinThickness = thisSkin
671 haveSkinThickness = .true.
672 haveGtypeCutoffMap = .false.
673
674 end subroutine setSkinThickness
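  !! The skin thickness only controls how far beyond rcut groups are kept on
  !! the Verlet list (and therefore how often checkNeighborList forces a
  !! rebuild); interactions are still truncated at rcut. For example, with
  !! rcut = 12.0 and thisSkin = 1.0, group pairs are listed out to
  !! sqrt(rlistsq) = 13.0 angstroms.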
675
676 subroutine setSimVariables()
677 SIM_uses_DirectionalAtoms = SimUsesDirectionalAtoms()
678 SIM_uses_EAM = SimUsesEAM()
679 SIM_requires_postpair_calc = SimRequiresPostpairCalc()
680 SIM_requires_prepair_calc = SimRequiresPrepairCalc()
681 SIM_uses_PBC = SimUsesPBC()
682 SIM_uses_SC = SimUsesSC()
683 SIM_uses_AtomicVirial = SimUsesAtomicVirial()
684
685 haveSIMvariables = .true.
686
687 return
688 end subroutine setSimVariables
689
690 subroutine doReadyCheck(error)
691 integer, intent(out) :: error
692 integer :: myStatus
693
694 error = 0
695
696 if (.not. haveInteractionHash) then
697 call createInteractionHash()
698 endif
699
700 if (.not. haveGtypeCutoffMap) then
701 call createGtypeCutoffMap()
702 endif
703
704 if (VisitCutoffsAfterComputing) then
705 call set_switch(largestRcut, largestRcut)
706 call setHmatDangerousRcutValue(largestRcut)
707 call setCutoffEAM(largestRcut)
708 call setCutoffSC(largestRcut)
709 VisitCutoffsAfterComputing = .false.
710 endif
711
712 if (.not. haveSIMvariables) then
713 call setSimVariables()
714 endif
715
716 if (.not. haveNeighborList) then
717 write(default_error, *) 'neighbor list has not been initialized in doForces!'
718 error = -1
719 return
720 end if
721
722 if (.not. haveSaneForceField) then
723 write(default_error, *) 'Force Field is not sane in doForces!'
724 error = -1
725 return
726 end if
727
728 #ifdef IS_MPI
729 if (.not. isMPISimSet()) then
730 write(default_error,*) "ERROR: mpiSimulation has not been initialized!"
731 error = -1
732 return
733 endif
734 #endif
735 return
736 end subroutine doReadyCheck
737
738
739 subroutine init_FF(thisStat)
740
741 integer, intent(out) :: thisStat
742 integer :: my_status, nMatches
743 integer, pointer :: MatchList(:) => null()
744
745 !! assume things are copacetic, unless they aren't
746 thisStat = 0
747
748 !! init_FF is called *after* all of the atom types have been
749 !! defined in atype_module using the new_atype subroutine.
750 !!
751 !! this will scan through the known atypes and figure out what
752 !! interactions are used by the force field.
753
754 FF_uses_DirectionalAtoms = .false.
755 FF_uses_Dipoles = .false.
756 FF_uses_GayBerne = .false.
757 FF_uses_EAM = .false.
758 FF_uses_SC = .false.
759
760 call getMatchingElementList(atypes, "is_Directional", .true., &
761 nMatches, MatchList)
762 if (nMatches .gt. 0) FF_uses_DirectionalAtoms = .true.
763
764 call getMatchingElementList(atypes, "is_Dipole", .true., &
765 nMatches, MatchList)
766 if (nMatches .gt. 0) FF_uses_Dipoles = .true.
767
768 call getMatchingElementList(atypes, "is_GayBerne", .true., &
769 nMatches, MatchList)
770 if (nMatches .gt. 0) FF_uses_GayBerne = .true.
771
772 call getMatchingElementList(atypes, "is_EAM", .true., nMatches, MatchList)
773 if (nMatches .gt. 0) FF_uses_EAM = .true.
774
775 call getMatchingElementList(atypes, "is_SC", .true., nMatches, MatchList)
776 if (nMatches .gt. 0) FF_uses_SC = .true.
777
778
779 haveSaneForceField = .true.
780
781
782 if (.not. haveNeighborList) then
783 !! Create neighbor lists
784 call expandNeighborList(nLocal, my_status)
785 if (my_Status /= 0) then
786 write(default_error,*) "SimSetup: ExpandNeighborList returned error."
787 thisStat = -1
788 return
789 endif
790 haveNeighborList = .true.
791 endif
792
793 end subroutine init_FF
794
795
796   !! Does force loop over i,j pairs. Calls do_pair to calculate forces.
797 !------------------------------------------------------------->
798 subroutine do_force_loop(q,vel, q_group, A, eFrame, f, t, tau, S, pot, particle_pot, &
799 error)
800 !! Position array provided by C, dimensioned by getNlocal
801 real ( kind = dp ), dimension(3, nLocal) :: q
802 real ( kind = dp ), dimension(3, nlocal) :: vel
803 !! molecular center-of-mass position array
804 real ( kind = dp ), dimension(3, nGroups) :: q_group
805 !! Rotation Matrix for each long range particle in simulation.
806 real( kind = dp), dimension(9, nLocal) :: A
807 !! Unit vectors for dipoles (lab frame)
808 real( kind = dp ), dimension(9,nLocal) :: eFrame
809 !! Force array provided by C, dimensioned by getNlocal
810 real ( kind = dp ), dimension(3,nLocal) :: f
812     !! Torque array provided by C, dimensioned by getNlocal
812 real( kind = dp ), dimension(3,nLocal) :: t
813
814 !! Stress Tensor
815 real( kind = dp), dimension(9) :: tau
816 !! Heat Flux component S
817 real(kind=dp), dimension(3) :: S
818 real ( kind = dp ),dimension(LR_POT_TYPES) :: pot
819 real( kind = dp ), dimension(nLocal) :: particle_pot
820
821 logical :: in_switching_region
822 #ifdef IS_MPI
823 real( kind = DP ), dimension(LR_POT_TYPES) :: pot_local
824 integer :: nAtomsInRow
825 integer :: nAtomsInCol
826 integer :: nprocs
827 integer :: nGroupsInRow
828 integer :: nGroupsInCol
829 #endif
830 integer :: natoms
831 logical :: update_nlist
832 integer :: i, j, jstart, jend, jnab
833 integer :: istart, iend
834 integer :: ia, jb, atom1, atom2
835 integer :: nlist
836 real( kind = DP ) :: ratmsq, rgrpsq, rgrp, rag, vpair, vij
837 real( kind = DP ) :: sw, dswdr, swderiv, mf
838 real( kind = DP ) :: rVal
839 real(kind=dp),dimension(3) :: d_atm, d_grp, fpair, fij, fg, dag, vel_j
840 real(kind=dp),dimension(3) :: S_local,vel_grp_j
841 real(kind=dp) :: rfpot, mu_i
842 real(kind=dp):: rCut
843 integer :: me_i, me_j, n_in_i, n_in_j, iG, j1
844 logical :: is_dp_i
845 integer :: neighborListSize
846 integer :: listerror, error
847 integer :: localError
848 integer :: propPack_i, propPack_j
849 integer :: loopStart, loopEnd, loop
850 integer :: iHash, jHash
851 integer :: i1, topoDist
852
853 !! the variables for the box dipole moment
854 #ifdef IS_MPI
855 integer :: pChgCount_local
856 integer :: nChgCount_local
857 real(kind=dp) :: pChg_local
858 real(kind=dp) :: nChg_local
859 real(kind=dp), dimension(3) :: pChgPos_local
860 real(kind=dp), dimension(3) :: nChgPos_local
861 real(kind=dp), dimension(3) :: dipVec_local
862 #endif
863 integer :: pChgCount
864 integer :: nChgCount
865 real(kind=dp) :: pChg
866 real(kind=dp) :: nChg
867 real(kind=dp) :: chg_value
868 real(kind=dp), dimension(3) :: pChgPos
869 real(kind=dp), dimension(3) :: nChgPos
870 real(kind=dp), dimension(3) :: dipVec
871 real(kind=dp), dimension(3) :: chgVec
872 real(kind=dp) :: skch
873
874     !! initialize dummy group velocity variables and the heat flux accumulator
875 vel_j = 0.0_dp
876 vel_grp_j = 0.0_dp
877 S_local = 0.0_dp
878
879 !! initialize box dipole variables
880 if (do_box_dipole) then
881 #ifdef IS_MPI
882 pChg_local = 0.0_dp
883 nChg_local = 0.0_dp
884 pChgCount_local = 0
885 nChgCount_local = 0
886 do i=1, 3
887        pChgPos_local(i) = 0.0_dp
888        nChgPos_local(i) = 0.0_dp
889        dipVec_local(i) = 0.0_dp
890 enddo
891 #endif
892 pChg = 0.0_dp
893 nChg = 0.0_dp
894 pChgCount = 0
895 nChgCount = 0
896 chg_value = 0.0_dp
897
898 do i=1, 3
899 pChgPos(i) = 0.0_dp
900 nChgPos(i) = 0.0_dp
901 dipVec(i) = 0.0_dp
902 chgVec(i) = 0.0_dp
903 boxDipole(i) = 0.0_dp
904 enddo
905 endif
906
907 !! initialize local variables
908 S_local = 0.0_dp
909
910 #ifdef IS_MPI
911 pot_local = 0.0_dp
912 nAtomsInRow = getNatomsInRow(plan_atom_row)
913 nAtomsInCol = getNatomsInCol(plan_atom_col)
914 nGroupsInRow = getNgroupsInRow(plan_group_row)
915 nGroupsInCol = getNgroupsInCol(plan_group_col)
916 #else
917 natoms = nlocal
918 #endif
919
920 call doReadyCheck(localError)
921 if ( localError .ne. 0 ) then
922 call handleError("do_force_loop", "Not Initialized")
923 error = -1
924 return
925 end if
926 call zero_work_arrays()
927
928 ! Gather all information needed by all force loops:
929
930 #ifdef IS_MPI
931
932 call gather(q, q_Row, plan_atom_row_3d)
933 call gather(q, q_Col, plan_atom_col_3d)
934
935 call gather(vel, vel_Col, plan_atom_col_3d)
936
937 call gather(q_group, q_group_Row, plan_group_row_3d)
938 call gather(q_group, q_group_Col, plan_group_col_3d)
939
940 if (FF_UsesDirectionalAtoms() .and. SIM_uses_DirectionalAtoms) then
941 call gather(eFrame, eFrame_Row, plan_atom_row_rotation)
942 call gather(eFrame, eFrame_Col, plan_atom_col_rotation)
943
944 call gather(A, A_Row, plan_atom_row_rotation)
945 call gather(A, A_Col, plan_atom_col_rotation)
946 endif
947
948 #endif
949
950 !! Begin force loop timing:
951 #ifdef PROFILE
952 call cpu_time(forceTimeInitial)
953 nloops = nloops + 1
954 #endif
955
956 loopEnd = PAIR_LOOP
957 if (FF_RequiresPrepairCalc() .and. SIM_requires_prepair_calc) then
958 loopStart = PREPAIR_LOOP
959 else
960 loopStart = PAIR_LOOP
961 endif
962
963 do loop = loopStart, loopEnd
964
965 ! See if we need to update neighbor lists
966 ! (but only on the first time through):
967 if (loop .eq. loopStart) then
968 #ifdef IS_MPI
969 call checkNeighborList(nGroupsInRow, q_group_row, skinThickness, &
970 update_nlist)
971 #else
972 call checkNeighborList(nGroups, q_group, skinThickness, &
973 update_nlist)
974 #endif
975 endif
976
977 if (update_nlist) then
978 !! save current configuration and construct neighbor list
979 #ifdef IS_MPI
980 call saveNeighborList(nGroupsInRow, q_group_row)
981 #else
982 call saveNeighborList(nGroups, q_group)
983 #endif
984 neighborListSize = size(list)
985 nlist = 0
986 endif
987
988 istart = 1
989 #ifdef IS_MPI
990 iend = nGroupsInRow
991 #else
992 iend = nGroups - 1
993 #endif
994 outer: do i = istart, iend
995
996 if (update_nlist) point(i) = nlist + 1
997
998 n_in_i = groupStartRow(i+1) - groupStartRow(i)
999
1000 if (update_nlist) then
1001 #ifdef IS_MPI
1002 jstart = 1
1003 jend = nGroupsInCol
1004 #else
1005 jstart = i+1
1006 jend = nGroups
1007 #endif
1008 else
1009 jstart = point(i)
1010 jend = point(i+1) - 1
1011 ! make sure group i has neighbors
1012 if (jstart .gt. jend) cycle outer
1013 endif
1014
1015 do jnab = jstart, jend
1016 if (update_nlist) then
1017 j = jnab
1018 else
1019 j = list(jnab)
1020 endif
1021
1022 #ifdef IS_MPI
1023 me_j = atid_col(j)
1024 call get_interatomic_vector(q_group_Row(:,i), &
1025 q_group_Col(:,j), d_grp, rgrpsq)
1026 #else
1027 me_j = atid(j)
1028 call get_interatomic_vector(q_group(:,i), &
1029 q_group(:,j), d_grp, rgrpsq)
1030 #endif
1031
1032 if (rgrpsq < gtypeCutoffMap(groupToGtypeRow(i),groupToGtypeCol(j))%rListsq) then
1033 if (update_nlist) then
1034 nlist = nlist + 1
1035
1036 if (nlist > neighborListSize) then
1037 #ifdef IS_MPI
1038 call expandNeighborList(nGroupsInRow, listerror)
1039 #else
1040 call expandNeighborList(nGroups, listerror)
1041 #endif
1042 if (listerror /= 0) then
1043 error = -1
1044 write(DEFAULT_ERROR,*) "ERROR: nlist > list size and max allocations exceeded."
1045 return
1046 end if
1047 neighborListSize = size(list)
1048 endif
1049
1050 list(nlist) = j
1051 endif
1052
1053 if (rgrpsq < gtypeCutoffMap(groupToGtypeRow(i),groupToGtypeCol(j))%rCutsq) then
1054
1055 rCut = gtypeCutoffMap(groupToGtypeRow(i),groupToGtypeCol(j))%rCut
1056 if (loop .eq. PAIR_LOOP) then
1057 vij = 0.0_dp
1058 fij(1) = 0.0_dp
1059 fij(2) = 0.0_dp
1060 fij(3) = 0.0_dp
1061 endif
1062
1063 call get_switch(rgrpsq, sw, dswdr,rgrp, in_switching_region)
1064
1065 n_in_j = groupStartCol(j+1) - groupStartCol(j)
1066
1067 do ia = groupStartRow(i), groupStartRow(i+1)-1
1068
1069 atom1 = groupListRow(ia)
1070
1071 inner: do jb = groupStartCol(j), groupStartCol(j+1)-1
1072
1073 atom2 = groupListCol(jb)
1074
1075 if (skipThisPair(atom1, atom2)) cycle inner
1076
1077 if ((n_in_i .eq. 1).and.(n_in_j .eq. 1)) then
1078 d_atm(1) = d_grp(1)
1079 d_atm(2) = d_grp(2)
1080 d_atm(3) = d_grp(3)
1081 ratmsq = rgrpsq
1082 else
1083 #ifdef IS_MPI
1084 call get_interatomic_vector(q_Row(:,atom1), &
1085 q_Col(:,atom2), d_atm, ratmsq)
1086 #else
1087 call get_interatomic_vector(q(:,atom1), &
1088 q(:,atom2), d_atm, ratmsq)
1089 #endif
1090 endif
1091
1092 topoDist = getTopoDistance(atom1, atom2)
1093
1094 if (loop .eq. PREPAIR_LOOP) then
1095 #ifdef IS_MPI
1096 call do_prepair(atom1, atom2, ratmsq, d_atm, sw, &
1097 rgrpsq, d_grp, rCut, &
1098 eFrame, A, f, t, pot_local)
1099 vel_j = vel_Col(:,atom2)
1100 #else
1101 call do_prepair(atom1, atom2, ratmsq, d_atm, sw, &
1102 rgrpsq, d_grp, rCut, &
1103 eFrame, A, f, t, pot)
1104 vel_j = vel(:,atom2)
1105 #endif
1106 else
1107 #ifdef IS_MPI
1108 call do_pair(atom1, atom2, ratmsq, d_atm, sw, &
1109 eFrame, A, f, t, pot_local, particle_pot, vpair, &
1110 fpair, d_grp, rgrp, rCut, topoDist)
1111 ! particle_pot will be accumulated from row & column
1112 ! arrays later
1113 #else
1114 call do_pair(atom1, atom2, ratmsq, d_atm, sw, &
1115 eFrame, A, f, t, pot, particle_pot, vpair, &
1116 fpair, d_grp, rgrp, rCut, topoDist)
1117 #endif
1118 vij = vij + vpair
1119 fij(1) = fij(1) + fpair(1)
1120 fij(2) = fij(2) + fpair(2)
1121 fij(3) = fij(3) + fpair(3)
1122 call add_stress_tensor(d_atm, fpair, tau, vel_j, S_local)
1123 !!call add_heat_flux(d_atm, fpair,vel_j,S_local)
1124 endif
1125 enddo inner
1126 enddo
1127
1128 if (loop .eq. PAIR_LOOP) then
1129 if (in_switching_region) then
1130 swderiv = vij*dswdr/rgrp
1131 fg = swderiv*d_grp
1132 fij(1) = fij(1) + fg(1)
1133 fij(2) = fij(2) + fg(2)
1134 fij(3) = fij(3) + fg(3)
1135
1136 if ((n_in_i .eq. 1).and.(n_in_j .eq. 1)) then
1137 ! call add_stress_tensor(d_atm, fg, tau)
1138 call add_stress_tensor(d_atm, fg, tau, vel_grp_j, S_local)
1139 endif
1140
1141 do ia=groupStartRow(i), groupStartRow(i+1)-1
1142 atom1=groupListRow(ia)
1143 mf = mfactRow(atom1)
1144 ! fg is the force on atom ia due to cutoff group's
1145 ! presence in switching region
1146 fg = swderiv*d_grp*mf
1147 #ifdef IS_MPI
1148 f_Row(1,atom1) = f_Row(1,atom1) + fg(1)
1149 f_Row(2,atom1) = f_Row(2,atom1) + fg(2)
1150 f_Row(3,atom1) = f_Row(3,atom1) + fg(3)
1151 #else
1152 f(1,atom1) = f(1,atom1) + fg(1)
1153 f(2,atom1) = f(2,atom1) + fg(2)
1154 f(3,atom1) = f(3,atom1) + fg(3)
1155 #endif
1156 if (n_in_i .gt. 1) then
1157 if (SIM_uses_AtomicVirial) then
1158 ! find the distance between the atom
1159 ! and the center of the cutoff group:
1160 #ifdef IS_MPI
1161 call get_interatomic_vector(q_Row(:,atom1), &
1162 q_group_Row(:,i), dag, rag)
1163 #else
1164 call get_interatomic_vector(q(:,atom1), &
1165 q_group(:,i), dag, rag)
1166 #endif
1167 call add_stress_tensor(dag, fg, tau, vel_grp_j, S_local)
1168 !
1169 !call add_stress_tensor(dag,fg,tau)
1170
1171 endif
1172 endif
1173 enddo
1174
1175 do jb=groupStartCol(j), groupStartCol(j+1)-1
1176 atom2=groupListCol(jb)
1177 mf = mfactCol(atom2)
1178 ! fg is the force on atom jb due to cutoff group's
1179 ! presence in switching region
1180 fg = -swderiv*d_grp*mf
1181 #ifdef IS_MPI
1182 f_Col(1,atom2) = f_Col(1,atom2) + fg(1)
1183 f_Col(2,atom2) = f_Col(2,atom2) + fg(2)
1184 f_Col(3,atom2) = f_Col(3,atom2) + fg(3)
1185 #else
1186 f(1,atom2) = f(1,atom2) + fg(1)
1187 f(2,atom2) = f(2,atom2) + fg(2)
1188 f(3,atom2) = f(3,atom2) + fg(3)
1189 #endif
1190 if (n_in_j .gt. 1) then
1191 if (SIM_uses_AtomicVirial) then
1192 ! find the distance between the atom
1193 ! and the center of the cutoff group:
1194 #ifdef IS_MPI
1195 call get_interatomic_vector(q_Col(:,atom2), &
1196 q_group_Col(:,j), dag, rag)
1197 vel_j = vel_Col(:,atom2)
1198 #else
1199 call get_interatomic_vector(q(:,atom2), &
1200 q_group(:,j), dag, rag)
1201 vel_j = vel(:,atom2)
1202 #endif
1203 ! call add_stress_tensor(dag,fg,tau)
1204 call add_stress_tensor(dag, fg, tau, vel_grp_j, S_local)
1205 ! call add_heat_flux(d_atm, fpair,vel_j,S_local)
1206 endif
1207 endif
1208 enddo
1209 endif
1210 !if (.not.SIM_uses_AtomicVirial) then
1211 ! call add_stress_tensor(d_grp, fij, tau)
1212 !endif
1213 endif
1214 endif
1215 endif
1216 enddo
1217
1218 enddo outer
1219
1220 if (update_nlist) then
1221 #ifdef IS_MPI
1222 point(nGroupsInRow + 1) = nlist + 1
1223 #else
1224 point(nGroups) = nlist + 1
1225 #endif
1226 if (loop .eq. PREPAIR_LOOP) then
1227 ! we just did the neighbor list update on the first
1228 ! pass, so we don't need to do it
1229 ! again on the second pass
1230 update_nlist = .false.
1231 endif
1232 endif
1233
1234 if (loop .eq. PREPAIR_LOOP) then
1235 #ifdef IS_MPI
1236 call do_preforce(nlocal, pot_local, particle_pot)
1237 #else
1238 call do_preforce(nlocal, pot, particle_pot)
1239 #endif
1240 endif
1241
1242 enddo
1243
1244 !! Do timing
1245 #ifdef PROFILE
1246 call cpu_time(forceTimeFinal)
1247 forceTime = forceTime + forceTimeFinal - forceTimeInitial
1248 #endif
1249
1250 #ifdef IS_MPI
1251 !!distribute forces
1252
1253 f_temp = 0.0_dp
1254 call scatter(f_Row,f_temp,plan_atom_row_3d)
1255 do i = 1,nlocal
1256 f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
1257 end do
1258
1259 f_temp = 0.0_dp
1260 call scatter(f_Col,f_temp,plan_atom_col_3d)
1261 do i = 1,nlocal
1262 f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
1263 end do
1264
1265 if (FF_UsesDirectionalAtoms() .and. SIM_uses_DirectionalAtoms) then
1266 t_temp = 0.0_dp
1267 call scatter(t_Row,t_temp,plan_atom_row_3d)
1268 do i = 1,nlocal
1269 t(1:3,i) = t(1:3,i) + t_temp(1:3,i)
1270 end do
1271 t_temp = 0.0_dp
1272 call scatter(t_Col,t_temp,plan_atom_col_3d)
1273
1274 do i = 1,nlocal
1275 t(1:3,i) = t(1:3,i) + t_temp(1:3,i)
1276 end do
1277 endif
1278
1279 ! scatter/gather pot_row into the members of my column
1280 do i = 1,LR_POT_TYPES
1281 call scatter(pot_Row(i,:), pot_Temp(i,:), plan_atom_row)
1282 end do
1283 ! scatter/gather pot_local into all other procs
1284 ! add resultant to get total pot
1285 do i = 1, nlocal
1286 pot_local(1:LR_POT_TYPES) = pot_local(1:LR_POT_TYPES) &
1287 + pot_Temp(1:LR_POT_TYPES,i)
1288 enddo
1289
1290 ! factor of two is because the total potential terms are divided by 2 in parallel
1291     ! due to the row/column scatter
1292 do i = 1,LR_POT_TYPES
1293 particle_pot(1:nlocal) = particle_pot(1:nlocal) + 2.0 * pot_Temp(i,1:nlocal)
1294 enddo
1295
1296
1297 pot_Temp = 0.0_DP
1298
1299 do i = 1,LR_POT_TYPES
1300 call scatter(pot_Col(i,:), pot_Temp(i,:), plan_atom_col)
1301 end do
1302
1303 do i = 1, nlocal
1304 pot_local(1:LR_POT_TYPES) = pot_local(1:LR_POT_TYPES)&
1305 + pot_Temp(1:LR_POT_TYPES,i)
1306 enddo
1307
1308 ! factor of two is because the total potential terms are divided by 2 in parallel
1309     ! due to the row/column scatter
1310 do i = 1,LR_POT_TYPES
1311 particle_pot(1:nlocal) = particle_pot(1:nlocal) + 2.0 * pot_Temp(i,1:nlocal)
1312 enddo
1313
1314 ppot_Temp = 0.0_DP
1315
1316 call scatter(ppot_Row(:), ppot_Temp(:), plan_atom_row)
1317 do i = 1, nlocal
1318 particle_pot(i) = particle_pot(i) + ppot_Temp(i)
1319 enddo
1320
1321 ppot_Temp = 0.0_DP
1322
1323 call scatter(ppot_Col(:), ppot_Temp(:), plan_atom_col)
1324 do i = 1, nlocal
1325 particle_pot(i) = particle_pot(i) + ppot_Temp(i)
1326 enddo
1327
1328 !! In parallel we need to accumulate S for the entire system
1329
1330     call mpi_allreduce(S_local, S, 3, mpi_double_precision, mpi_sum, &
1331          plan_atom_col%myPlanComm, mpi_err)
1332 #else
1333 S = S_Local
1334 #endif
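    !! A hedged reading of the S accumulation above: each call to
    !! add_stress_tensor passes the pair separation, the pair force, and the
    !! velocity of the j-side atom, so S is assumed to collect the pairwise
    !! (virial) part of the heat flux,
    !!   S ~ sum over pairs of  d_atm * ( fpair . vel_j )
    !! The kinetic/convective part and the volume normalization are assumed
    !! to be applied elsewhere; the actual contraction lives in
    !! add_stress_tensor, not in this routine.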
1335
1336
1337
1338 if (SIM_requires_postpair_calc) then
1339 do i = 1, nlocal
1340
1341 ! we loop only over the local atoms, so we don't need row and column
1342 ! lookups for the types
1343
1344 me_i = atid(i)
1345
1346 ! is the atom electrostatic? See if it would have an
1347 ! electrostatic interaction with itself
1348 iHash = InteractionHash(me_i,me_i)
1349
1350 if ( iand(iHash, ELECTROSTATIC_PAIR).ne.0 ) then
1351
1352 ! loop over the excludes to accumulate charge in the
1353 ! cutoff sphere that we've left out of the normal pair loop
1354 skch = 0.0_dp
1355
1356 do i1 = 1, nSkipsForLocalAtom(i)
1357 j = skipsForLocalAtom(i, i1)
1358 me_j = atid(j)
1359 jHash = InteractionHash(me_i,me_j)
1360 if ( iand(jHash, ELECTROSTATIC_PAIR).ne.0 ) then
1361 skch = skch + getCharge(me_j)
1362 endif
1363 enddo
1364
1365 #ifdef IS_MPI
1366 call self_self(i, eFrame, skch, pot_local(ELECTROSTATIC_POT), t)
1367 #else
1368 call self_self(i, eFrame, skch, pot(ELECTROSTATIC_POT), t)
1369 #endif
1370 endif
1371
1372
1373 if (electrostaticSummationMethod.eq.REACTION_FIELD) then
1374
1375 ! loop over the excludes to accumulate RF stuff we've
1376 ! left out of the normal pair loop
1377
1378 do i1 = 1, nSkipsForLocalAtom(i)
1379 j = skipsForLocalAtom(i, i1)
1380
1381 ! prevent overcounting of the skips
1382 if (i.lt.j) then
1383 call get_interatomic_vector(q(:,i), q(:,j), d_atm, ratmsq)
1384 rVal = sqrt(ratmsq)
1385 call get_switch(ratmsq, sw, dswdr, rVal,in_switching_region)
1386 #ifdef IS_MPI
1387 call rf_self_excludes(i, j, sw, 1.0_dp, eFrame, d_atm, rVal, &
1388 vpair, pot_local(ELECTROSTATIC_POT), f, t)
1389 #else
1390 call rf_self_excludes(i, j, sw, 1.0_dp, eFrame, d_atm, rVal, &
1391 vpair, pot(ELECTROSTATIC_POT), f, t)
1392 #endif
1393 endif
1394 enddo
1395 endif
1396
1397 if (do_box_dipole) then
1398 #ifdef IS_MPI
1399 call accumulate_box_dipole(i, eFrame, q(:,i), pChg_local, &
1400 nChg_local, pChgPos_local, nChgPos_local, dipVec_local, &
1401 pChgCount_local, nChgCount_local)
1402 #else
1403 call accumulate_box_dipole(i, eFrame, q(:,i), pChg, nChg, &
1404 pChgPos, nChgPos, dipVec, pChgCount, nChgCount)
1405 #endif
1406 endif
1407 enddo
1408 endif
1409
1410 #ifdef IS_MPI
1411 #ifdef SINGLE_PRECISION
1412 call mpi_allreduce(pot_local, pot, LR_POT_TYPES,mpi_real,mpi_sum, &
1413 mpi_comm_world,mpi_err)
1414 #else
1415 call mpi_allreduce(pot_local, pot, LR_POT_TYPES,mpi_double_precision, &
1416 mpi_sum, mpi_comm_world,mpi_err)
1417 #endif
1418
1419 if (do_box_dipole) then
1420
1421 #ifdef SINGLE_PRECISION
1422 call mpi_allreduce(pChg_local, pChg, 1, mpi_real, mpi_sum, &
1423 mpi_comm_world, mpi_err)
1424 call mpi_allreduce(nChg_local, nChg, 1, mpi_real, mpi_sum, &
1425 mpi_comm_world, mpi_err)
1426 call mpi_allreduce(pChgCount_local, pChgCount, 1, mpi_integer, mpi_sum,&
1427 mpi_comm_world, mpi_err)
1428 call mpi_allreduce(nChgCount_local, nChgCount, 1, mpi_integer, mpi_sum,&
1429 mpi_comm_world, mpi_err)
1430 call mpi_allreduce(pChgPos_local, pChgPos, 3, mpi_real, mpi_sum, &
1431 mpi_comm_world, mpi_err)
1432 call mpi_allreduce(nChgPos_local, nChgPos, 3, mpi_real, mpi_sum, &
1433 mpi_comm_world, mpi_err)
1434 call mpi_allreduce(dipVec_local, dipVec, 3, mpi_real, mpi_sum, &
1435 mpi_comm_world, mpi_err)
1436 #else
1437 call mpi_allreduce(pChg_local, pChg, 1, mpi_double_precision, mpi_sum, &
1438 mpi_comm_world, mpi_err)
1439 call mpi_allreduce(nChg_local, nChg, 1, mpi_double_precision, mpi_sum, &
1440 mpi_comm_world, mpi_err)
1441 call mpi_allreduce(pChgCount_local, pChgCount, 1, mpi_integer,&
1442 mpi_sum, mpi_comm_world, mpi_err)
1443 call mpi_allreduce(nChgCount_local, nChgCount, 1, mpi_integer,&
1444 mpi_sum, mpi_comm_world, mpi_err)
1445 call mpi_allreduce(pChgPos_local, pChgPos, 3, mpi_double_precision, &
1446 mpi_sum, mpi_comm_world, mpi_err)
1447 call mpi_allreduce(nChgPos_local, nChgPos, 3, mpi_double_precision, &
1448 mpi_sum, mpi_comm_world, mpi_err)
1449 call mpi_allreduce(dipVec_local, dipVec, 3, mpi_double_precision, &
1450 mpi_sum, mpi_comm_world, mpi_err)
1451 #endif
1452
1453 endif
1454
1455 #endif
1456
1457 if (do_box_dipole) then
1458 ! first load the accumulated dipole moment (if dipoles were present)
1459 boxDipole(1) = dipVec(1)
1460 boxDipole(2) = dipVec(2)
1461 boxDipole(3) = dipVec(3)
1462
1463 ! now include the dipole moment due to charges
1464 ! use the lesser of the positive and negative charge totals
1465 if (nChg .le. pChg) then
1466 chg_value = nChg
1467 else
1468 chg_value = pChg
1469 endif
1470
1471 ! find the average positions
1472 if (pChgCount .gt. 0 .and. nChgCount .gt. 0) then
1473 pChgPos = pChgPos / pChgCount
1474 nChgPos = nChgPos / nChgCount
1475 endif
1476
1477 ! dipole is from the negative to the positive (physics notation)
1478 chgVec(1) = pChgPos(1) - nChgPos(1)
1479 chgVec(2) = pChgPos(2) - nChgPos(2)
1480 chgVec(3) = pChgPos(3) - nChgPos(3)
1481
1482 boxDipole(1) = boxDipole(1) + chgVec(1) * chg_value
1483 boxDipole(2) = boxDipole(2) + chgVec(2) * chg_value
1484 boxDipole(3) = boxDipole(3) + chgVec(3) * chg_value
1485
1486 endif
1487
1488 end subroutine do_force_loop
1489
1490 subroutine do_pair(i, j, rijsq, d, sw, &
1491 eFrame, A, f, t, pot, particle_pot, vpair, &
1492 fpair, d_grp, r_grp, rCut, topoDist)
1493
1494 real( kind = dp ) :: vpair, sw
1495 real( kind = dp ), dimension(LR_POT_TYPES) :: pot
1496 real( kind = dp ), dimension(nLocal) :: particle_pot
1497 real( kind = dp ), dimension(3) :: fpair
1498 real( kind = dp ), dimension(nLocal) :: mfact
1499 real( kind = dp ), dimension(9,nLocal) :: eFrame
1500 real( kind = dp ), dimension(9,nLocal) :: A
1501 real( kind = dp ), dimension(3,nLocal) :: f
1502 real( kind = dp ), dimension(3,nLocal) :: t
1503
1504 integer, intent(in) :: i, j
1505 real ( kind = dp ), intent(inout) :: rijsq
1506 real ( kind = dp ), intent(inout) :: r_grp
1507 real ( kind = dp ), intent(inout) :: d(3)
1508 real ( kind = dp ), intent(inout) :: d_grp(3)
1509 real ( kind = dp ), intent(inout) :: rCut
1510 integer, intent(inout) :: topoDist
1511 real ( kind = dp ) :: r, pair_pot, vdwMult, electroMult
1512 real ( kind = dp ) :: a_k, b_k, c_k, d_k, dx
1513
1514 real( kind = dp), dimension(3) :: f1, t1, t2
1515 real( kind = dp), dimension(9) :: A1, A2, eF1, eF2
1516 real( kind = dp) :: dfrhodrho_i, dfrhodrho_j
1517 real( kind = dp) :: rho_i, rho_j
1518 real( kind = dp) :: fshift_i, fshift_j
1519 real( kind = dp) :: p_vdw, p_elect, p_hb, p_met
1520 integer :: atid_i, atid_j, id1, id2, idx
1521 integer :: k
1522
1523 integer :: iHash
1524
1525 r = sqrt(rijsq)
1526
1527 vpair = 0.0_dp
1528 fpair(1:3) = 0.0_dp
1529
1530 p_vdw = 0.0
1531 p_elect = 0.0
1532 p_hb = 0.0
1533 p_met = 0.0
1534
1535 f1(1:3) = 0.0
1536
1537 #ifdef IS_MPI
1538 atid_i = atid_row(i)
1539 atid_j = atid_col(j)
1540 #else
1541 atid_i = atid(i)
1542 atid_j = atid(j)
1543 #endif
1544
1545 iHash = InteractionHash(atid_i, atid_j)
1546
1547 vdwMult = vdwScale(topoDist)
1548 electroMult = electrostaticScale(topoDist)
1549
1550 if ( iand(iHash, LJ_PAIR).ne.0 ) then
1551 call do_lj_pair(atid_i, atid_j, d, r, rijsq, rcut, sw, vdwMult, vpair, fpair, &
1552 p_vdw, f1)
1553 endif
1554
1555 if ( iand(iHash, ELECTROSTATIC_PAIR).ne.0 ) then
1556 #ifdef IS_MPI
1557 call doElectrostaticPair(atid_i, atid_j, d, r, rijsq, rcut, sw, electroMult, &
1558 vpair, fpair, p_elect, eFrame_Row(:,i), eFrame_Col(:,j), &
1559 f1, t_Row(:,i), t_Col(:,j))
1560 #else
1561 call doElectrostaticPair(atid_i, atid_j, d, r, rijsq, rcut, sw, electroMult, &
1562 vpair, fpair, p_elect, eFrame(:,i), eFrame(:,j), f1, t(:,i), t(:,j))
1563 #endif
1564 endif
1565
1566 if ( iand(iHash, STICKY_PAIR).ne.0 ) then
1567 #ifdef IS_MPI
1568 call do_sticky_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1569 p_hb, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1570 #else
1571 call do_sticky_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1572 p_hb, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1573 #endif
1574 endif
1575
1576 if ( iand(iHash, STICKYPOWER_PAIR).ne.0 ) then
1577 #ifdef IS_MPI
1578 call do_sticky_power_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1579 p_hb, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1580 #else
1581 call do_sticky_power_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1582 p_hb, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1583 #endif
1584 endif
1585
1586 if ( iand(iHash, GAYBERNE_PAIR).ne.0 ) then
1587 #ifdef IS_MPI
1588 call do_gb_pair(atid_i, atid_j, d, r, rijsq, sw, vdwMult, vpair, fpair, &
1589 p_vdw, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1590 #else
1591 call do_gb_pair(atid_i, atid_j, d, r, rijsq, sw, vdwMult, vpair, fpair, &
1592 p_vdw, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1593 #endif
1594 endif
1595
1596 if ( iand(iHash, GAYBERNE_LJ).ne.0 ) then
1597 #ifdef IS_MPI
1598 call do_gb_pair(atid_i, atid_j, d, r, rijsq, sw, vdwMult, vpair, fpair, &
1599 p_vdw, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1600 #else
1601 call do_gb_pair(atid_i, atid_j, d, r, rijsq, sw, vdwMult, vpair, fpair, &
1602 p_vdw, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1603 #endif
1604 endif
1605
1606 if ( iand(iHash, SHAPE_PAIR).ne.0 ) then
1607 #ifdef IS_MPI
1608 call do_shape_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1609 p_vdw, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1610 #else
1611 call do_shape_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1612 p_vdw, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1613 #endif
1614 endif
1615
1616 if ( iand(iHash, SHAPE_LJ).ne.0 ) then
1617 #ifdef IS_MPI
1618 call do_shape_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1619 p_vdw, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1620 #else
1621 call do_shape_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, fpair, &
1622 p_vdw, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1623 #endif
1624 endif
1625
1626 if ( iand(iHash, EAM_PAIR).ne.0 ) then
1627 #ifdef IS_MPI
1628 call do_eam_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, &
1629 fpair, p_met, f1, rho_row(i), rho_col(j), dfrhodrho_row(i), dfrhodrho_col(j), &
1630 fshift_i, fshift_j)
1631 #else
1632 call do_eam_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, &
1633 fpair, p_met, f1, rho(i), rho(j), dfrhodrho(i), dfrhodrho(j), fshift_i, fshift_j)
1634 #endif
1635 endif
1636
1637 if ( iand(iHash, SC_PAIR).ne.0 ) then
1638 #ifdef IS_MPI
1639 call do_SC_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, &
1640 fpair, p_met, f1, rho_row(i), rho_col(j), dfrhodrho_row(i), dfrhodrho_col(j), &
1641 fshift_i, fshift_j)
1642 #else
1643 call do_SC_pair(atid_i, atid_j, d, r, rijsq, sw, vpair, &
1644 fpair, p_met, f1, rho(i), rho(j), dfrhodrho(i), dfrhodrho(j), fshift_i, fshift_j)
1645 #endif
1646 endif
1647
1648 if ( iand(iHash, MNM_PAIR).ne.0 ) then
1649 #ifdef IS_MPI
1650 call do_mnm_pair(atid_i, atid_j, d, r, rijsq, rcut, sw, vdwMult, vpair, fpair, &
1651 p_vdw, A_Row(:,i), A_Col(:,j), f1, t_Row(:,i), t_Col(:,j))
1652 #else
1653 call do_mnm_pair(atid_i, atid_j, d, r, rijsq, rcut, sw, vdwMult, vpair, fpair, &
1654 p_vdw, A(:,i), A(:,j), f1, t(:,i), t(:,j))
1655 #endif
1656 endif
1657
1658
1659 #ifdef IS_MPI
1660 id1 = AtomRowToGlobal(i)
1661 id2 = AtomColToGlobal(j)
1662
1663 pot_row(VDW_POT,i) = pot_row(VDW_POT,i) + 0.5*p_vdw
1664 pot_col(VDW_POT,j) = pot_col(VDW_POT,j) + 0.5*p_vdw
1665 pot_row(ELECTROSTATIC_POT,i) = pot_row(ELECTROSTATIC_POT,i) + 0.5*p_elect
1666 pot_col(ELECTROSTATIC_POT,j) = pot_col(ELECTROSTATIC_POT,j) + 0.5*p_elect
1667 pot_row(HB_POT,i) = pot_row(HB_POT,i) + 0.5*p_hb
1668 pot_col(HB_POT,j) = pot_col(HB_POT,j) + 0.5*p_hb
1669 pot_Row(METALLIC_POT,i) = pot_Row(METALLIC_POT,i) + 0.5*p_met
1670 pot_Col(METALLIC_POT,j) = pot_Col(METALLIC_POT,j) + 0.5*p_met
1671
1672 do idx = 1, 3
1673 f_Row(idx,i) = f_Row(idx,i) + f1(idx)
1674 f_Col(idx,j) = f_Col(idx,j) - f1(idx)
1675 enddo
1676 ! particle_pot is the difference between the full potential
1677 ! and the full potential without the presence of a particular
1678 ! particle (atom1).
1679 !
1680 ! This reduces the density at other particle locations, so
1681 ! we need to recompute the density at atom2 assuming atom1
1682 ! didn't contribute. This then requires recomputing the
1683 ! density functional for atom2 as well.
1684 !
1685 ! Most of the particle_pot heavy lifting comes from the
1686 ! pair interaction, and will be handled by vpair. Parallel version.
1687
1688 if ( (iand(iHash, EAM_PAIR).ne.0) .or. (iand(iHash, SC_PAIR).ne.0) ) then
1689 ppot_row(i) = ppot_row(i) - frho_row(j) + fshift_j
1690 ppot_col(j) = ppot_col(j) - frho_col(i) + fshift_i
1691 end if
1692
1693 #else
1694 id1 = i
1695 id2 = j
1696
1697 pot(VDW_POT) = pot(VDW_POT) + p_vdw
1698 pot(ELECTROSTATIC_POT) = pot(ELECTROSTATIC_POT) + p_elect
1699 pot(HB_POT) = pot(HB_POT) + p_hb
1700 pot(METALLIC_POT) = pot(METALLIC_POT) + p_met
1701
1702 ! only done for single processor. In Parallel, the particle_pot
1703 ! is constructed from the row and column potentials.
1704
1705 particle_pot(i) = particle_pot(i) + vpair*sw
1706 particle_pot(j) = particle_pot(j) + vpair*sw
1707
1708 do idx = 1, 3
1709 f(idx,i) = f(idx,i) + f1(idx)
1710 f(idx,j) = f(idx,j) - f1(idx)
1711 enddo
1712 ! particle_pot is the difference between the full potential
1713 ! and the full potential without the presence of a particular
1714 ! particle (atom1).
1715 !
1716 ! This reduces the density at other particle locations, so
1717 ! we need to recompute the density at atom2 assuming atom1
1718 ! didn't contribute. This then requires recomputing the
1719 ! density functional for atom2 as well.
1720 !
1721 ! Most of the particle_pot heavy lifting comes from the
1722 ! pair interaction, and will be handled by vpair. NonParallel version.
1723
1724 if ( (iand(iHash, EAM_PAIR).ne.0) .or. (iand(iHash, SC_PAIR).ne.0) ) then
1725 particle_pot(i) = particle_pot(i) - frho(j) + fshift_j
1726 particle_pot(j) = particle_pot(j) - frho(i) + fshift_i
1727 end if
1728
1729
1730 #endif
1731
1732 if (molMembershipList(id1) .ne. molMembershipList(id2)) then
1733
1734 fpair(1) = fpair(1) + f1(1)
1735 fpair(2) = fpair(2) + f1(2)
1736 fpair(3) = fpair(3) + f1(3)
1737
1738 endif
1739
1740
1741 end subroutine do_pair
1742
1743 subroutine do_prepair(i, j, rijsq, d, sw, rcijsq, dc, rCut, &
1744 eFrame, A, f, t, pot)
1745
1746 real( kind = dp ) :: sw
1747 real( kind = dp ), dimension(LR_POT_TYPES) :: pot
1748 real( kind = dp ), dimension(9,nLocal) :: eFrame
1749 real (kind=dp), dimension(9,nLocal) :: A
1750 real (kind=dp), dimension(3,nLocal) :: f
1751 real (kind=dp), dimension(3,nLocal) :: t
1752
1753 integer, intent(in) :: i, j
1754 real ( kind = dp ), intent(inout) :: rijsq, rcijsq, rCut
1755 real ( kind = dp ) :: r, rc
1756 real ( kind = dp ), intent(inout) :: d(3), dc(3)
1757 real ( kind = dp ) :: rho_i_at_j, rho_j_at_i
1758 integer :: atid_i, atid_j, iHash
1759
1760 r = sqrt(rijsq)
1761
1762 #ifdef IS_MPI
1763 atid_i = atid_row(i)
1764 atid_j = atid_col(j)
1765 #else
1766 atid_i = atid(i)
1767 atid_j = atid(j)
1768 #endif
1769 rho_i_at_j = 0.0_dp
1770 rho_j_at_i = 0.0_dp
1771
1772 iHash = InteractionHash(atid_i, atid_j)
1773
1774 if ( iand(iHash, EAM_PAIR).ne.0 ) then
1775 call calc_EAM_prepair_rho(atid_i, atid_j, d, r, rijsq, rho_i_at_j, rho_j_at_i)
1776 endif
1777
1778 if ( iand(iHash, SC_PAIR).ne.0 ) then
1779 call calc_SC_prepair_rho(atid_i, atid_j, d, r, rijsq, rho_i_at_j, rho_j_at_i)
1780 endif
1781
1782 if ( iand(iHash, EAM_PAIR).ne.0 .or. iand(iHash, SC_PAIR).ne.0 ) then
1783 #ifdef IS_MPI
1784 rho_col(j) = rho_col(j) + rho_i_at_j
1785 rho_row(i) = rho_row(i) + rho_j_at_i
1786 #else
1787 rho(j) = rho(j) + rho_i_at_j
1788 rho(i) = rho(i) + rho_j_at_i
1789 #endif
1790 endif
1791
1792 end subroutine do_prepair
1793
1794
1795 subroutine do_preforce(nlocal, pot, particle_pot)
1796 integer :: nlocal
1797 real( kind = dp ),dimension(LR_POT_TYPES) :: pot
1798 real( kind = dp ),dimension(nlocal) :: particle_pot
1799 integer :: sc_err = 0
1800
1801 #ifdef IS_MPI
1802 if ((FF_uses_EAM .and. SIM_uses_EAM) .or. (FF_uses_SC .and. SIM_uses_SC)) then
1803 call scatter(rho_row,rho,plan_atom_row,sc_err)
1804 if (sc_err /= 0 ) then
1805 call handleError("do_preforce()", "Error scattering rho_row into rho")
1806 endif
1807 call scatter(rho_col,rho_tmp,plan_atom_col,sc_err)
1808 if (sc_err /= 0 ) then
1809 call handleError("do_preforce()", "Error scattering rho_col into rho")
1810 endif
1811 rho(1:nlocal) = rho(1:nlocal) + rho_tmp(1:nlocal)
1812 end if
1813 #endif
1814
1815
1816
1817 if (FF_uses_EAM .and. SIM_uses_EAM) then
1818 call calc_EAM_preforce_Frho(nlocal, pot(METALLIC_POT), particle_pot)
1819 endif
1820 if (FF_uses_SC .and. SIM_uses_SC) then
1821 call calc_SC_preforce_Frho(nlocal, pot(METALLIC_POT), particle_pot)
1822 endif
1823
1824 #ifdef IS_MPI
1825 if ((FF_uses_EAM .and. SIM_uses_EAM) .or. (FF_uses_SC .and. SIM_uses_SC)) then
1826 !! communicate f(rho) and derivatives back into row and column arrays
1827 call gather(frho,frho_row,plan_atom_row, sc_err)
1828 if (sc_err /= 0) then
1829 call handleError("do_preforce()","MPI gather frho_row failure")
1830 endif
1831 call gather(dfrhodrho,dfrhodrho_row,plan_atom_row, sc_err)
1832 if (sc_err /= 0) then
1833 call handleError("do_preforce()","MPI gather dfrhodrho_row failure")
1834 endif
1835 call gather(frho,frho_col,plan_atom_col, sc_err)
1836 if (sc_err /= 0) then
1837 call handleError("do_preforce()","MPI gather frho_col failure")
1838 endif
1839 call gather(dfrhodrho,dfrhodrho_col,plan_atom_col, sc_err)
1840 if (sc_err /= 0) then
1841 call handleError("do_preforce()","MPI gather dfrhodrho_col failure")
1842 endif
1843 end if
1844 #endif
1845
1846 end subroutine do_preforce
1847
1848
1849 subroutine get_interatomic_vector(q_i, q_j, d, r_sq)
1850
1851 real (kind = dp), dimension(3) :: q_i
1852 real (kind = dp), dimension(3) :: q_j
1853 real ( kind = dp ), intent(out) :: r_sq
1854 real( kind = dp ) :: d(3), scaled(3)
1855 real(kind=dp)::t
1856 integer i
1857
1858 d(1) = q_j(1) - q_i(1)
1859 d(2) = q_j(2) - q_i(2)
1860 d(3) = q_j(3) - q_i(3)
1861
1862 ! Wrap back into periodic box if necessary
1863 if ( SIM_uses_PBC ) then
1864
1865 if( .not.boxIsOrthorhombic ) then
1866 ! calc the scaled coordinates;
1867 ! unroll the matmul and do the multiplications explicitly
1868 ! scaled = matmul(HmatInv, d)
1869
1870 scaled(1) = HmatInv(1,1)*d(1) + HmatInv(1,2)*d(2) + HmatInv(1,3)*d(3)
1871 scaled(2) = HmatInv(2,1)*d(1) + HmatInv(2,2)*d(2) + HmatInv(2,3)*d(3)
1872 scaled(3) = HmatInv(3,1)*d(1) + HmatInv(3,2)*d(2) + HmatInv(3,3)*d(3)
1873
1874 ! wrap the scaled coordinates (but don't use anint for speed)
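! (note: the floor/ceiling construct below is algebraically identical to
!  scaled = scaled - anint(scaled); for a diagonal Hmat = diag(Lx,Ly,Lz)
!  the whole procedure reduces to the familiar minimum-image form
!  d = d - L*anint(d/L))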
1875
1876 t = scaled(1)
1877 if (t .gt. 0.0) then
1878 scaled(1) = t - floor(t + 0.5)
1879 else
1880 scaled(1) = t - ceiling(t - 0.5)
1881 endif
1882
1883 t = scaled(2)
1884 if (t .gt. 0.0) then
1885 scaled(2) = t - floor(t + 0.5)
1886 else
1887 scaled(2) = t - ceiling(t - 0.5)
1888 endif
1889
1890 t = scaled(3)
1891 if (t .gt. 0.0) then
1892 scaled(3) = t - floor(t + 0.5)
1893 else
1894 scaled(3) = t - ceiling(t - 0.5)
1895 endif
1896
1897 ! calc the wrapped real coordinates from the wrapped scaled
1898 ! coordinates
1899 ! d = matmul(Hmat,scaled)
1900 d(1)= Hmat(1,1)*scaled(1) + Hmat(1,2)*scaled(2) + Hmat(1,3)*scaled(3)
1901 d(2)= Hmat(2,1)*scaled(1) + Hmat(2,2)*scaled(2) + Hmat(2,3)*scaled(3)
1902 d(3)= Hmat(3,1)*scaled(1) + Hmat(3,2)*scaled(2) + Hmat(3,3)*scaled(3)
1903
1904 else
1905 ! calc the scaled coordinates
1906 scaled(1) = d(1) * HmatInv(1,1)
1907 scaled(2) = d(2) * HmatInv(2,2)
1908 scaled(3) = d(3) * HmatInv(3,3)
1909
1910 ! wrap the scaled coordinates
1911
1912 t = scaled(1)
1913 if (t .gt. 0.0) then
1914 scaled(1) = t - floor(t + 0.5)
1915 else
1916 scaled(1) = t - ceiling(t - 0.5)
1917 endif
1918
1919 t = scaled(2)
1920 if (t .gt. 0.0) then
1921 scaled(2) = t - floor(t + 0.5)
1922 else
1923 scaled(2) = t - ceiling(t - 0.5)
1924 endif
1925
1926 t = scaled(3)
1927 if (t .gt. 0.0) then
1928 scaled(3) = t - floor(t + 0.5)
1929 else
1930 scaled(3) = t - ceiling(t - 0.5)
1931 endif
1932
1933 ! calc the wrapped real coordinates from the wrapped scaled
1934 ! coordinates
1935
1936 d(1) = scaled(1)*Hmat(1,1)
1937 d(2) = scaled(2)*Hmat(2,2)
1938 d(3) = scaled(3)*Hmat(3,3)
1939
1940 endif
1941
1942 endif
1943
1944 r_sq = d(1)*d(1) + d(2)*d(2) + d(3)*d(3)
1945
1946 end subroutine get_interatomic_vector
1947
1948 subroutine zero_work_arrays()
1949
1950 #ifdef IS_MPI
1951
1952 q_Row = 0.0_dp
1953 q_Col = 0.0_dp
1954
1955 q_group_Row = 0.0_dp
1956 q_group_Col = 0.0_dp
1957
1958 eFrame_Row = 0.0_dp
1959 eFrame_Col = 0.0_dp
1960
1961 A_Row = 0.0_dp
1962 A_Col = 0.0_dp
1963
1964 f_Row = 0.0_dp
1965 f_Col = 0.0_dp
1966 f_Temp = 0.0_dp
1967
1968 t_Row = 0.0_dp
1969 t_Col = 0.0_dp
1970 t_Temp = 0.0_dp
1971
1972 pot_Row = 0.0_dp
1973 pot_Col = 0.0_dp
1974 pot_Temp = 0.0_dp
1975 ppot_Temp = 0.0_dp
1976
1977 frho_row = 0.0_dp
1978 frho_col = 0.0_dp
1979 rho_row = 0.0_dp
1980 rho_col = 0.0_dp
1981 rho_tmp = 0.0_dp
1982 dfrhodrho_row = 0.0_dp
1983 dfrhodrho_col = 0.0_dp
1984
1985 #endif
1986 rho = 0.0_dp
1987 frho = 0.0_dp
1988 dfrhodrho = 0.0_dp
1989
1990 end subroutine zero_work_arrays
1991
1992 function skipThisPair(atom1, atom2) result(skip_it)
1993 integer, intent(in) :: atom1
1994 integer, intent(in), optional :: atom2
1995 logical :: skip_it
1996 integer :: unique_id_1, unique_id_2
1997 integer :: me_i,me_j
1998 integer :: i
1999
2000 skip_it = .false.
2001
2002 !! There are a number of reasons to skip a pair or a particle.
2003 !! Mostly we do this to exclude atoms that are involved in short-
2004 !! range interactions (bonds, bends, torsions), but we also need
2005 !! to exclude interactions that would otherwise be double-counted
2006 !! because of the parallel decomposition.
2007
2008 #ifdef IS_MPI
2009 !! in MPI, we have to look up the unique IDs for each atom
2010 unique_id_1 = AtomRowToGlobal(atom1)
2011 unique_id_2 = AtomColToGlobal(atom2)
2012 !! this situation should only arise in MPI simulations
2013 if (unique_id_1 == unique_id_2) then
2014 skip_it = .true.
2015 return
2016 end if
2017
2018 !! this prevents us from doing the pair on multiple processors
2019 if (unique_id_1 < unique_id_2) then
2020 if (mod(unique_id_1 + unique_id_2,2) == 0) then
2021 skip_it = .true.
2022 return
2023 endif
2024 else
2025 if (mod(unique_id_1 + unique_id_2,2) == 1) then
2026 skip_it = .true.
2027 return
2028 endif
2029 endif
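!! (each (i,j) pair appears on two processors: once with i in the row
!! arrays and j in the column arrays, and once with the roles reversed.
!! Keeping the pair only where the parity test above does not trigger a
!! skip means every pair is computed exactly once, and the work is split
!! roughly evenly between the two candidate processors.)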
2030 #else
2031 !! in the normal loop, the atom numbers are unique
2032 unique_id_1 = atom1
2033 unique_id_2 = atom2
2034 #endif
2035
2036 #ifdef IS_MPI
2037 do i = 1, nSkipsForRowAtom(atom1)
2038 if (skipsForRowAtom(atom1, i) .eq. unique_id_2) then
2039 skip_it = .true.
2040 return
2041 endif
2042 end do
2043 #else
2044 do i = 1, nSkipsForLocalAtom(atom1)
2045 if (skipsForLocalAtom(atom1, i) .eq. unique_id_2) then
2046 skip_it = .true.
2047 return
2048 endif
2049 end do
2050 #endif
2051
2052 return
2053 end function skipThisPair
2054
2055 function getTopoDistance(atom1, atom2) result(topoDist)
2056 integer, intent(in) :: atom1
2057 integer, intent(in) :: atom2
2058 integer :: topoDist
2059 integer :: unique_id_2
2060 integer :: i
2061
2062 #ifdef IS_MPI
2063 unique_id_2 = AtomColToGlobal(atom2)
2064 #else
2065 unique_id_2 = atom2
2066 #endif
2067
2068 ! zero is the default for unconnected (i.e. normal) pair interactions
2069
2070 topoDist = 0
2071
2072 do i = 1, nTopoPairsForAtom(atom1)
2073 if (toposForAtom(atom1, i) .eq. unique_id_2) then
2074 topoDist = topoDistance(atom1, i)
2075 return
2076 endif
2077 end do
2078
2079 return
2080 end function getTopoDistance
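!! (the topological distance returned above is presumably what the caller
!! uses to apply 1-2, 1-3, and 1-4 scaling to the pair interaction)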
2081
2082 function FF_UsesDirectionalAtoms() result(doesit)
2083 logical :: doesit
2084 doesit = FF_uses_DirectionalAtoms
2085 end function FF_UsesDirectionalAtoms
2086
2087 function FF_RequiresPrepairCalc() result(doesit)
2088 logical :: doesit
2089 doesit = FF_uses_EAM .or. FF_uses_SC
2090 end function FF_RequiresPrepairCalc
2091
2092 #ifdef PROFILE
2093 function getforcetime() result(totalforcetime)
2094 real(kind=dp) :: totalforcetime
2095 totalforcetime = forcetime
2096 end function getforcetime
2097 #endif
2098
2099 !! Accumulate the current pair's contribution to the stress tensor tau and to J_v
2100
2101 subroutine add_stress_tensor(dpair, fpair, tau, v_j, J_v)
2102
2103 real( kind = dp ), dimension(3), intent(in) :: dpair, fpair, v_j
2104 real( kind = dp ), dimension(9), intent(inout) :: tau
2105
2106 real( kind = dp ), dimension(3), intent(inout) :: J_v
2107 ! because the d vector is the rj - ri vector, and
2108 ! because fx, fy, fz are the force on atom i, we need a
2109 ! negative sign here:
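! In index form: tau((a-1)*3 + b) accumulates -d(a)*f(b), which equals
! r_ij(a)*f_ij(b) with r_ij = r_i - r_j and f_ij the force on atom i due
! to j, i.e. the usual pairwise virial contribution.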
2110
2111 tau(1) = tau(1) - dpair(1) * fpair(1)
2112 tau(2) = tau(2) - dpair(1) * fpair(2)
2113 tau(3) = tau(3) - dpair(1) * fpair(3)
2114 tau(4) = tau(4) - dpair(2) * fpair(1)
2115 tau(5) = tau(5) - dpair(2) * fpair(2)
2116 tau(6) = tau(6) - dpair(2) * fpair(3)
2117 tau(7) = tau(7) - dpair(3) * fpair(1)
2118 tau(8) = tau(8) - dpair(3) * fpair(2)
2119 tau(9) = tau(9) - dpair(3) * fpair(3)
2120
2121 J_v(1) = J_v(1) - tau(1)*v_j(1) + tau(4)*v_j(2) + tau(7)*v_j(3)
2122 J_v(2) = J_v(2) - tau(2)*v_j(1) + tau(5)*v_j(2) + tau(8)*v_j(3)
2123 J_v(3) = J_v(3) - tau(3)*v_j(1) + tau(6)*v_j(2) + tau(9)*v_j(3)
2124 end subroutine add_stress_tensor
2125
2126 !! Calculates the sum over pairs of r_ji * (f_ji . v_j), this pair's contribution to the heat flux vector S
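!! dpair and fpair are assumed to be the same r_j - r_i and force-on-atom-i
!! vectors used in add_stress_tensor; only this configurational (virial)
!! part of the heat flux is accumulated here, so any kinetic/convective
!! term is presumably added elsewhere.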
2127 subroutine add_heat_flux(dpair, fpair, v_j, S)
2128 real(kind=dp), dimension(3), intent(in) :: dpair,fpair, v_j
2129 real(kind=dp), dimension(3), intent(inout) :: S
2130 S(1) = S(1) + dpair(1) * (fpair(1)*v_j(1) + fpair(2)*v_j(2) + fpair(3)*v_j(3))
2131 S(2) = S(2) + dpair(2) * (fpair(1)*v_j(1) + fpair(2)*v_j(2) + fpair(3)*v_j(3))
2132 S(3) = S(3) + dpair(3) * (fpair(1)*v_j(1) + fpair(2)*v_j(2) + fpair(3)*v_j(3))
2133 !!S(1) = S(1) + fpair(1)*v_j(1)*dpair(1)
2134 !!S(2) = S(2) + fpair(2)*v_j(2)*dpair(2)
2135 !!S(3) = S(3) + fpair(3)*v_j(3)*dpair(3)
2136
2137 end subroutine add_heat_flux
2138 end module doForces

Properties

Name          Value
svn:keywords  Author Id Revision Date