!!
!! Copyright (c) 2005, 2009 The University of Notre Dame. All Rights Reserved.
!!
!! The University of Notre Dame grants you ("Licensee") a
!! non-exclusive, royalty free, license to use, modify and
!! redistribute this software in source and binary code form, provided
!! that the following conditions are met:
!!
!! 1. Redistributions of source code must retain the above copyright
!!    notice, this list of conditions and the following disclaimer.
!!
!! 2. Redistributions in binary form must reproduce the above copyright
!!    notice, this list of conditions and the following disclaimer in the
!!    documentation and/or other materials provided with the
!!    distribution.
!!
!! This software is provided "AS IS," without a warranty of any
!! kind. All express or implied conditions, representations and
!! warranties, including any implied warranty of merchantability,
!! fitness for a particular purpose or non-infringement, are hereby
!! excluded. The University of Notre Dame and its licensors shall not
!! be liable for any damages suffered by licensee as a result of
!! using, modifying or distributing the software or its
!! derivatives. In no event will the University of Notre Dame or its
!! licensors be liable for any lost revenue, profit or data, or for
!! direct, indirect, special, consequential, incidental or punitive
!! damages, however caused and regardless of the theory of liability,
!! arising out of the use of or inability to use software, even if the
!! University of Notre Dame has been advised of the possibility of
!! such damages.
!!
!! SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
!! research, please cite the appropriate papers when you publish your
!! work. Good starting points are:
!!
!! [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
!! [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
!! [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
!! [4] Vardeman & Gezelter, in progress (2009).
!!

!! doForces.F90
!! module doForces
!! Calculates long-range forces.

!! @author Charles F. Vardeman II
!! @author Matthew Meineke
!! @version $Id$, $Date$, $Name: not supported by cvs2svn $, $Revision$


module doForces

  use force_globals
  use simulation
  use definitions
  use neighborLists
  use status
  use ISO_C_BINDING

#ifdef IS_MPI
  use mpiSimulation
#endif

  implicit none
  PRIVATE

  real(kind=dp), external :: get_cutoff

#define __FORTRAN90
#include "UseTheForce/DarkSide/fInteractionMap.h"

  INTEGER, PARAMETER :: PREPAIR_LOOP = 1
  INTEGER, PARAMETER :: PAIR_LOOP    = 2

  logical, save :: haveNeighborList  = .false.
  logical, save :: haveSIMvariables  = .false.
  logical, save :: haveCutoffs       = .false.
  logical, save :: haveSkinThickness = .false.

  logical, save :: SIM_uses_DirectionalAtoms
  logical, save :: SIM_uses_MetallicAtoms
  logical, save :: SIM_requires_skip_correction
  logical, save :: SIM_requires_self_correction
  logical, save :: SIM_uses_PBC
  logical, save :: SIM_uses_AtomicVirial

  real(kind=dp), save :: rCut, rSwitch, rList, rListSq, rCutSq
  real(kind=dp), save :: skinThickness
  logical, save :: shifted_pot
  logical, save :: shifted_force

  public :: init_FF
  public :: setCutoffs
  public :: setSkinThickness
  public :: do_force_loop

#ifdef PROFILE
  public :: getforcetime
  real, save :: forceTime = 0
  real :: forceTimeInitial, forceTimeFinal
  integer, save :: nLoops = 0
#endif

contains

  subroutine setCutoffs(rc, rsw, sp, sf)

    real(kind=dp), intent(in) :: rc, rsw
    integer, intent(in) :: sp, sf
    character(len = statusMsgSize) :: errMsg
    integer :: localError

    rCut = rc
    rSwitch = rsw
    rCutSq = rCut * rCut

    if (haveSkinThickness) then
       rList = rCut + skinThickness
       rListSq = rList * rList
    endif

    if (sp .ne. 0) then
       shifted_pot = .true.
    else
       shifted_pot = .false.
    endif
    if (sf .ne. 0) then
       shifted_force = .true.
    else
       shifted_force = .false.
    endif

    if (abs(rCut - rSwitch) .lt. 0.0001) then
       if (shifted_force) then
          write(errMsg, *) &
               'cutoffRadius and switchingRadius are set to the', newline &
               // tab, 'same value. OpenMD will use shifted force', newline &
               // tab, 'potentials instead of switching functions.'

          call handleInfo("setCutoffs", errMsg)
       else
          write(errMsg, *) &
               'cutoffRadius and switchingRadius are set to the', newline &
               // tab, 'same value. OpenMD will use shifted', newline &
               // tab, 'potentials instead of switching functions.'

          call handleInfo("setCutoffs", errMsg)

          shifted_pot = .true.
       endif

    endif

    localError = 0
    call setHmatDangerousRcutValue(rCut)

    haveCutoffs = .true.
  end subroutine setCutoffs
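
  !! Note on the shifted_pot / shifted_force flags set above: they are only
  !! bookkeeping here; the pair modules that consume them apply the usual
  !! cutoff conventions (a sketch of the standard forms, not a description
  !! of any particular downstream implementation):
  !!
  !!   shifted potential:  V_sp(r) = V(r) - V(rc)
  !!   shifted force:      V_sf(r) = V(r) - V(rc) - (dV/dr)|_rc * (r - rc)
  !!
  !! so that the potential (and, for shifted force, also the force) goes
  !! smoothly to zero at the cutoff radius rc, which is why no switching
  !! function is needed when rSwitch == rCut.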

  subroutine setSkinThickness( thisSkin )

    real(kind=dp), intent(in) :: thisSkin

    skinThickness = thisSkin
    haveSkinThickness = .true.

    if (haveCutoffs) then
       rList = rCut + skinThickness
       rListSq = rList * rList
    endif

  end subroutine setSkinThickness
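
  !! The skin thickness implements a standard Verlet neighbor list: pairs
  !! are stored out to rList = rCut + skinThickness, and the list only
  !! needs rebuilding once particles have moved far enough (typically more
  !! than half the skin) for a missed pair to become possible; the
  !! checkNeighborList call in do_force_loop makes that decision.
  !!
  !! A typical call sequence from the C++ side (illustrative only; the
  !! actual driver and the variable names here are assumptions, not part
  !! of this file):
  !!
  !!   call setCutoffs(12.0_dp, 10.0_dp, 0, 1)   ! rc, rsw, sp, sf
  !!   call setSkinThickness(1.0_dp)
  !!   call init_FF(status)
  !!   ! ... then do_force_loop(...) every integration step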

  subroutine setSimVariables()
    SIM_uses_DirectionalAtoms = SimUsesDirectionalAtoms()
    SIM_uses_MetallicAtoms = SimUsesMetallicAtoms()
    SIM_requires_skip_correction = SimRequiresSkipCorrection()
    SIM_requires_self_correction = SimRequiresSelfCorrection()
    SIM_uses_PBC = SimUsesPBC()
    SIM_uses_AtomicVirial = SimUsesAtomicVirial()

    haveSIMvariables = .true.
    return
  end subroutine setSimVariables

  subroutine doReadyCheck(error)
    integer, intent(out) :: error
    integer :: myStatus

    error = 0

    if (.not. haveSIMvariables) then
       call setSimVariables()
    endif

    if (.not. haveCutoffs) then
       write(default_error, *) 'cutoffs have not been set in doForces!'
       error = -1
       return
    endif

    if (.not. haveSkinThickness) then
       write(default_error, *) 'skin thickness has not been set in doForces!'
       error = -1
       return
    endif

    if (.not. haveNeighborList) then
       write(default_error, *) 'neighbor list has not been initialized in doForces!'
       error = -1
       return
    end if


#ifdef IS_MPI
    if (.not. isMPISimSet()) then
       write(default_error,*) "ERROR: mpiSimulation has not been initialized!"
       error = -1
       return
    endif
#endif
    return
  end subroutine doReadyCheck

  subroutine init_FF(thisStat)

    integer :: my_status
    integer, intent(out) :: thisStat

    !! assume things are copacetic, unless they aren't
    thisStat = 0

    if (.not. haveNeighborList) then
       !! Create neighbor lists
       call expandNeighborList(nLocal, my_status)
       if (my_status /= 0) then
          write(default_error,*) "SimSetup: ExpandNeighborList returned error."
          thisStat = -1
          return
       endif
       haveNeighborList = .true.
    endif

  end subroutine init_FF


  !! Does the force loop over i,j pairs.  Calls do_pair to calculate forces.
  !-------------------------------------------------------------->
  subroutine do_force_loop(q, q_group, A, eFrame, f, t, tau, pot, particle_pot, &
       error)
    !! Position array provided by C, dimensioned by getNlocal
    real ( kind = dp ), dimension(3, nLocal) :: q
    !! molecular center-of-mass position array
    real ( kind = dp ), dimension(3, nGroups) :: q_group
    !! Rotation Matrix for each long range particle in simulation.
    real( kind = dp ), dimension(9, nLocal) :: A
    !! Unit vectors for dipoles (lab frame)
    real( kind = dp ), dimension(9, nLocal) :: eFrame
    !! Force array provided by C, dimensioned by getNlocal
    real ( kind = dp ), dimension(3, nLocal) :: f
    !! Torque array provided by C, dimensioned by getNlocal
    real( kind = dp ), dimension(3, nLocal) :: t

    !! Stress Tensor
    real( kind = dp ), dimension(9) :: tau
    real ( kind = dp ), dimension(LR_POT_TYPES) :: pot
    real( kind = dp ), dimension(nLocal) :: particle_pot
    real( kind = dp ), dimension(nLocal) :: skipped_charge

    logical :: in_switching_region
#ifdef IS_MPI
    real( kind = DP ), dimension(LR_POT_TYPES) :: pot_local
    integer :: nAtomsInRow
    integer :: nAtomsInCol
    integer :: nprocs
    integer :: nGroupsInRow
    integer :: nGroupsInCol
#endif
    integer :: natoms
    logical :: update_nlist
    integer :: i, j, jstart, jend, jnab
    integer :: istart, iend
    integer :: ia, jb, atom1, atom2
    integer :: nlist
    real( kind = DP ) :: ratmsq, rgrpsq, rgrp, rag, vpair, vij
    real( kind = DP ) :: sw, dswdr, swderiv, mf
    real( kind = DP ) :: rVal
    real(kind=dp), dimension(3) :: d_atm, d_grp, fpair, fij, fg, dag
    real(kind=dp) :: rfpot, mu_i
    integer :: n_in_i, n_in_j, iG, j1
    logical :: is_dp_i
    integer :: neighborListSize
    integer :: listerror, error
    integer :: localError
    integer :: propPack_i, propPack_j
    integer :: loopStart, loopEnd, loop
    integer :: i1, topoDist

    real(kind=dp) :: skch

    !! initialize local variables

#ifdef IS_MPI
    pot_local = 0.0_dp
    nAtomsInRow = getNatomsInRow(plan_atom_row)
    nAtomsInCol = getNatomsInCol(plan_atom_col)
    nGroupsInRow = getNgroupsInRow(plan_group_row)
    nGroupsInCol = getNgroupsInCol(plan_group_col)
#else
    natoms = nlocal
#endif

    call doReadyCheck(localError)
    if ( localError .ne. 0 ) then
       call handleError("do_force_loop", "Not Initialized")
       error = -1
       return
    end if
    call zero_work_arrays()

    ! Gather all information needed by all force loops:

#ifdef IS_MPI

    call gather(q, q_Row, plan_atom_row_3d)
    call gather(q, q_Col, plan_atom_col_3d)

    call gather(q_group, q_group_Row, plan_group_row_3d)
    call gather(q_group, q_group_Col, plan_group_col_3d)

    if (SIM_uses_DirectionalAtoms) then
       call gather(eFrame, eFrame_Row, plan_atom_row_rotation)
       call gather(eFrame, eFrame_Col, plan_atom_col_rotation)

       call gather(A, A_Row, plan_atom_row_rotation)
       call gather(A, A_Col, plan_atom_col_rotation)
    endif

#endif
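
    ! In parallel runs this module uses a force-decomposition scheme:
    ! atomic and group positions are gathered into row and column arrays
    ! (q_Row/q_Col, q_group_Row/q_group_Col, ...), each process computes
    ! the interactions in its block of the (row, column) pair matrix, and
    ! the partial forces, torques, and potentials are scattered back onto
    ! the locally owned atoms and reduced after the loop below.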

    !! Begin force loop timing:
#ifdef PROFILE
    call cpu_time(forceTimeInitial)
    nLoops = nLoops + 1
#endif

    loopEnd = PAIR_LOOP
    if (SIM_uses_MetallicAtoms) then
       loopStart = PREPAIR_LOOP
    else
       loopStart = PAIR_LOOP
    endif
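
    ! Two-pass structure: when metallic (density-dependent, EAM / Sutton-Chen
    ! style) atoms are present, a PREPAIR_LOOP pass first accumulates the
    ! electron densities rho(i) via f_do_prepair, and f_do_preforce then
    ! evaluates the embedding energy F(rho) and its derivative before the
    ! PAIR_LOOP pass computes the actual pair forces.  Otherwise only the
    ! PAIR_LOOP pass is executed.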

    do loop = loopStart, loopEnd

       ! See if we need to update neighbor lists
       ! (but only on the first time through):
       if (loop .eq. loopStart) then
#ifdef IS_MPI
          call checkNeighborList(nGroupsInRow, q_group_row, skinThickness, &
               update_nlist)
#else
          call checkNeighborList(nGroups, q_group, skinThickness, &
               update_nlist)
#endif
       endif

       if (update_nlist) then
          !! save current configuration and construct neighbor list
#ifdef IS_MPI
          call saveNeighborList(nGroupsInRow, q_group_row)
#else
          call saveNeighborList(nGroups, q_group)
#endif
          neighborListSize = size(list)
          nlist = 0
       endif

       istart = 1
#ifdef IS_MPI
       iend = nGroupsInRow
#else
       iend = nGroups - 1
#endif
       outer: do i = istart, iend

          if (update_nlist) point(i) = nlist + 1

          n_in_i = groupStartRow(i+1) - groupStartRow(i)

          if (update_nlist) then
#ifdef IS_MPI
             jstart = 1
             jend = nGroupsInCol
#else
             jstart = i+1
             jend = nGroups
#endif
          else
             jstart = point(i)
             jend = point(i+1) - 1
             ! make sure group i has neighbors
             if (jstart .gt. jend) cycle outer
          endif

          do jnab = jstart, jend
             if (update_nlist) then
                j = jnab
             else
                j = list(jnab)
             endif

#ifdef IS_MPI
             call get_interatomic_vector(q_group_Row(:,i), &
                  q_group_Col(:,j), d_grp, rgrpsq)
#else
             call get_interatomic_vector(q_group(:,i), &
                  q_group(:,j), d_grp, rgrpsq)
#endif

             if (rgrpsq < rListSq) then
                if (update_nlist) then
                   nlist = nlist + 1

                   if (nlist > neighborListSize) then
#ifdef IS_MPI
                      call expandNeighborList(nGroupsInRow, listerror)
#else
                      call expandNeighborList(nGroups, listerror)
#endif
                      if (listerror /= 0) then
                         error = -1
                         write(DEFAULT_ERROR,*) "ERROR: nlist > list size and max allocations exceeded."
                         return
                      end if
                      neighborListSize = size(list)
                   endif

                   list(nlist) = j
                endif

                if (rgrpsq < rCutSq) then

                   if (loop .eq. PAIR_LOOP) then
                      vij = 0.0_dp
                      fij(1) = 0.0_dp
                      fij(2) = 0.0_dp
                      fij(3) = 0.0_dp
                   endif

                   call get_switch(rgrpsq, sw, dswdr, rgrp, in_switching_region)

                   n_in_j = groupStartCol(j+1) - groupStartCol(j)

                   do ia = groupStartRow(i), groupStartRow(i+1)-1

                      atom1 = groupListRow(ia)

                      inner: do jb = groupStartCol(j), groupStartCol(j+1)-1

                         atom2 = groupListCol(jb)

                         if (skipThisPair(atom1, atom2)) cycle inner

                         if ((n_in_i .eq. 1).and.(n_in_j .eq. 1)) then
                            d_atm(1) = d_grp(1)
                            d_atm(2) = d_grp(2)
                            d_atm(3) = d_grp(3)
                            ratmsq = rgrpsq
                         else
#ifdef IS_MPI
                            call get_interatomic_vector(q_Row(:,atom1), &
                                 q_Col(:,atom2), d_atm, ratmsq)
#else
                            call get_interatomic_vector(q(:,atom1), &
                                 q(:,atom2), d_atm, ratmsq)
#endif
                         endif

                         topoDist = getTopoDistance(atom1, atom2)

                         if (loop .eq. PREPAIR_LOOP) then
#ifdef IS_MPI
                            call f_do_prepair(atom1, atom2, ratmsq, d_atm, sw, &
                                 rgrpsq, d_grp, rCut, &
                                 eFrame, A, f, t, pot_local)
#else
                            call f_do_prepair(atom1, atom2, ratmsq, d_atm, sw, &
                                 rgrpsq, d_grp, rCut, &
                                 eFrame, A, f, t, pot)
#endif
                         else
#ifdef IS_MPI
                            call f_do_pair(atom1, atom2, ratmsq, d_atm, sw, &
                                 eFrame, A, f, t, pot_local, particle_pot, vpair, &
                                 fpair, d_grp, rgrp, rCut, topoDist)
                            ! particle_pot will be accumulated from row & column
                            ! arrays later
#else
                            call f_do_pair(atom1, atom2, ratmsq, d_atm, sw, &
                                 eFrame, A, f, t, pot, particle_pot, vpair, &
                                 fpair, d_grp, rgrp, rCut, topoDist)
#endif

                            vij = vij + vpair
                            fij(1) = fij(1) + fpair(1)
                            fij(2) = fij(2) + fpair(2)
                            fij(3) = fij(3) + fpair(3)
                            call add_stress_tensor(d_atm, fpair, tau)
                         endif
                      enddo inner
                   enddo

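                   ! When the group pair lies in the switching region, the pair
                   ! energy vij has been scaled by the switching function S(rgrp).
                   ! The gradient of S contributes an extra group-group force,
                   !
                   !    f_i += vij * (dS/dr) * d_grp / rgrp   (and f_j -= the same),
                   !
                   ! which the block below spreads over the member atoms of each
                   ! cutoff group using their mass factors mfactRow / mfactCol.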
                   if (loop .eq. PAIR_LOOP) then
                      if (in_switching_region) then
                         swderiv = vij*dswdr/rgrp
                         fg = swderiv*d_grp

                         fij(1) = fij(1) + fg(1)
                         fij(2) = fij(2) + fg(2)
                         fij(3) = fij(3) + fg(3)

                         if ((n_in_i .eq. 1).and.(n_in_j .eq. 1)) then
                            call add_stress_tensor(d_atm, fg, tau)
                         endif

                         do ia = groupStartRow(i), groupStartRow(i+1)-1
                            atom1 = groupListRow(ia)
                            mf = mfactRow(atom1)
                            ! fg is the force on atom ia due to cutoff group's
                            ! presence in switching region
                            fg = swderiv*d_grp*mf
#ifdef IS_MPI
                            f_Row(1,atom1) = f_Row(1,atom1) + fg(1)
                            f_Row(2,atom1) = f_Row(2,atom1) + fg(2)
                            f_Row(3,atom1) = f_Row(3,atom1) + fg(3)
#else
                            f(1,atom1) = f(1,atom1) + fg(1)
                            f(2,atom1) = f(2,atom1) + fg(2)
                            f(3,atom1) = f(3,atom1) + fg(3)
#endif
                            if (n_in_i .gt. 1) then
                               if (SIM_uses_AtomicVirial) then
                                  ! find the distance between the atom
                                  ! and the center of the cutoff group:
#ifdef IS_MPI
                                  call get_interatomic_vector(q_Row(:,atom1), &
                                       q_group_Row(:,i), dag, rag)
#else
                                  call get_interatomic_vector(q(:,atom1), &
                                       q_group(:,i), dag, rag)
#endif
                                  call add_stress_tensor(dag, fg, tau)
                               endif
                            endif
                         enddo

                         do jb = groupStartCol(j), groupStartCol(j+1)-1
                            atom2 = groupListCol(jb)
                            mf = mfactCol(atom2)
                            ! fg is the force on atom jb due to cutoff group's
                            ! presence in switching region
                            fg = -swderiv*d_grp*mf
#ifdef IS_MPI
                            f_Col(1,atom2) = f_Col(1,atom2) + fg(1)
                            f_Col(2,atom2) = f_Col(2,atom2) + fg(2)
                            f_Col(3,atom2) = f_Col(3,atom2) + fg(3)
#else
                            f(1,atom2) = f(1,atom2) + fg(1)
                            f(2,atom2) = f(2,atom2) + fg(2)
                            f(3,atom2) = f(3,atom2) + fg(3)
#endif
                            if (n_in_j .gt. 1) then
                               if (SIM_uses_AtomicVirial) then
                                  ! find the distance between the atom
                                  ! and the center of the cutoff group:
#ifdef IS_MPI
                                  call get_interatomic_vector(q_Col(:,atom2), &
                                       q_group_Col(:,j), dag, rag)
#else
                                  call get_interatomic_vector(q(:,atom2), &
                                       q_group(:,j), dag, rag)
#endif
                                  call add_stress_tensor(dag, fg, tau)
                               endif
                            endif
                         enddo
                      endif
                      !if (.not.SIM_uses_AtomicVirial) then
                      !   call add_stress_tensor(d_grp, fij, tau)
                      !endif
                   endif
                endif
             endif
          enddo

       enddo outer

       if (update_nlist) then
#ifdef IS_MPI
          point(nGroupsInRow + 1) = nlist + 1
#else
          point(nGroups) = nlist + 1
#endif
          if (loop .eq. PREPAIR_LOOP) then
             ! we just did the neighbor list update on the first
             ! pass, so we don't need to do it
             ! again on the second pass
             update_nlist = .false.
          endif
       endif

       if (loop .eq. PREPAIR_LOOP) then
#ifdef IS_MPI
          call f_do_preforce(nlocal, pot_local, particle_pot)
#else
          call f_do_preforce(nlocal, pot, particle_pot)
#endif
       endif

    enddo

    !! Do timing
#ifdef PROFILE
    call cpu_time(forceTimeFinal)
    forceTime = forceTime + forceTimeFinal - forceTimeInitial
#endif

#ifdef IS_MPI
    !! distribute forces

    f_temp = 0.0_dp
    call scatter(f_Row, f_temp, plan_atom_row_3d)
    do i = 1, nlocal
       f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
    end do

    f_temp = 0.0_dp
    call scatter(f_Col, f_temp, plan_atom_col_3d)
    do i = 1, nlocal
       f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
    end do

    if (SIM_uses_DirectionalAtoms) then
       t_temp = 0.0_dp
       call scatter(t_Row, t_temp, plan_atom_row_3d)
       do i = 1, nlocal
          t(1:3,i) = t(1:3,i) + t_temp(1:3,i)
       end do
       t_temp = 0.0_dp
       call scatter(t_Col, t_temp, plan_atom_col_3d)

       do i = 1, nlocal
          t(1:3,i) = t(1:3,i) + t_temp(1:3,i)
       end do
    endif

    ! scatter/gather pot_row into the members of my column
    do i = 1, LR_POT_TYPES
       call scatter(pot_Row(i,:), pot_Temp(i,:), plan_atom_row)
    end do
    ! scatter/gather pot_local into all other procs
    ! add resultant to get total pot
    do i = 1, nlocal
       pot_local(1:LR_POT_TYPES) = pot_local(1:LR_POT_TYPES) &
            + pot_Temp(1:LR_POT_TYPES,i)
    enddo

    do i = 1, LR_POT_TYPES
       particle_pot(1:nlocal) = particle_pot(1:nlocal) + pot_Temp(i,1:nlocal)
    enddo

    pot_Temp = 0.0_DP

    do i = 1, LR_POT_TYPES
       call scatter(pot_Col(i,:), pot_Temp(i,:), plan_atom_col)
    end do

    do i = 1, nlocal
       pot_local(1:LR_POT_TYPES) = pot_local(1:LR_POT_TYPES) &
            + pot_Temp(1:LR_POT_TYPES,i)
    enddo

    do i = 1, LR_POT_TYPES
       particle_pot(1:nlocal) = particle_pot(1:nlocal) + pot_Temp(i,1:nlocal)
    enddo

    ppot_Temp = 0.0_DP

    call scatter(ppot_Row(:), ppot_Temp(:), plan_atom_row)
    do i = 1, nlocal
       particle_pot(i) = particle_pot(i) + ppot_Temp(i)
    enddo

    ppot_Temp = 0.0_DP

    call scatter(ppot_Col(:), ppot_Temp(:), plan_atom_col)
    do i = 1, nlocal
       particle_pot(i) = particle_pot(i) + ppot_Temp(i)
    enddo

#endif
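
    ! Excluded (bonded) pairs and single-site self terms still need
    ! electrostatic corrections: real-space damped or shifted
    ! electrostatics do not simply vanish for excluded pairs, so
    ! do_skip_correction removes the spurious contribution for each
    ! skipped pair, and do_self_correction handles the per-atom self
    ! term.  (This is a summary of what the electrostatics module is
    ! being asked to do here, not a specification of its internals.)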

    if (SIM_requires_skip_correction) then
       do i = 1, nlocal

          do i1 = 1, nSkipsForLocalAtom(i)
             j = skipsForLocalAtom(i, i1)

             ! prevent overcounting the skips
             if (i .lt. j) then

                call get_interatomic_vector(q(:,i), q(:,j), d_atm, ratmsq)
                rVal = sqrt(ratmsq)
                call get_switch(ratmsq, sw, dswdr, rVal, in_switching_region)
#ifdef IS_MPI
                call do_skip_correction(c_idents_local(i), c_idents_local(j), &
                     d_atm, rVal, skipped_charge(i), skipped_charge(j), sw, &
                     1.0_dp, pot_local(ELECTROSTATIC_POT), vpair, f, t(:,i), t(:,j))
#else
                call do_skip_correction(c_idents_local(i), c_idents_local(j), &
                     d_atm, rVal, skipped_charge(i), skipped_charge(j), sw, &
                     1.0_dp, pot(ELECTROSTATIC_POT), vpair, f, t(:,i), t(:,j))
#endif
             endif
          enddo
       enddo
    endif

    if (SIM_requires_self_correction) then

       do i = 1, nlocal

#ifdef IS_MPI
          call do_self_correction(c_idents_local(i), eFrame(:,i), &
               skipped_charge(i), pot_local(ELECTROSTATIC_POT), t(:,i))
#else
          call do_self_correction(c_idents_local(i), eFrame(:,i), &
               skipped_charge(i), pot(ELECTROSTATIC_POT), t(:,i))
#endif
       enddo
    endif

#ifdef IS_MPI
#ifdef SINGLE_PRECISION
    call mpi_allreduce(pot_local, pot, LR_POT_TYPES, mpi_real, mpi_sum, &
         mpi_comm_world, mpi_err)
#else
    call mpi_allreduce(pot_local, pot, LR_POT_TYPES, mpi_double_precision, &
         mpi_sum, mpi_comm_world, mpi_err)
#endif
#endif

  end subroutine do_force_loop

  subroutine f_do_pair(i, j, rijsq, d, sw, &
       eFrame, A, f, t, pot, particle_pot, vpair, &
       fpair, d_grp, r_grp, rCut, topoDist)

    real( kind = dp ) :: vpair, sw
    real( kind = dp ), dimension(LR_POT_TYPES) :: pot, ptmp
    real( kind = dp ), dimension(nLocal) :: particle_pot
    real( kind = dp ), dimension(3) :: fpair
    real( kind = dp ), dimension(nLocal) :: mfact
    real( kind = dp ), dimension(9,nLocal) :: eFrame
    real( kind = dp ), dimension(9,nLocal) :: A
    real( kind = dp ), dimension(3,nLocal) :: f
    real( kind = dp ), dimension(3,nLocal) :: t

    integer, intent(in) :: i, j
    real ( kind = dp ), intent(inout) :: rijsq
    real ( kind = dp ), intent(inout) :: r_grp
    real ( kind = dp ), intent(inout) :: d(3)
    real ( kind = dp ), intent(inout) :: d_grp(3)
    real ( kind = dp ), intent(inout) :: rCut
    integer, intent(inout) :: topoDist
    real ( kind = dp ) :: r, pair_pot
    real ( kind = dp ) :: a_k, b_k, c_k, d_k, dx

    real( kind = dp ), dimension(3) :: f1, t1, t2
    real( kind = dp ), dimension(9) :: A1, A2, eF1, eF2
    real( kind = dp ) :: dfrhodrho_i, dfrhodrho_j
    real( kind = dp ) :: rho_i, rho_j
    real( kind = dp ) :: fshift_i, fshift_j
    real( kind = dp ) :: p_vdw, p_elect, p_hb, p_met
    integer :: id1, id2, idx
    integer :: k
    integer :: c_ident_i, c_ident_j

    integer :: iHash

    r = sqrt(rijsq)

    vpair = 0.0_dp
    fpair(1:3) = 0.0_dp

    ptmp(1:4) = 0.0_dp

    f1(1:3) = 0.0
    t1(1:3) = 0.0
    t2(1:3) = 0.0

#ifdef IS_MPI
    c_ident_i = c_idents_row(i)
    c_ident_j = c_idents_col(j)

    A1(:) = A_Row(:,i)
    A2(:) = A_Col(:,j)
    eF1(:) = eFrame_Row(:,i)
    eF2(:) = eFrame_Col(:,j)

    dfrhodrho_i = dfrhodrho_row(i)
    dfrhodrho_j = dfrhodrho_col(j)
    rho_i = rho_row(i)
    rho_j = rho_col(j)
#else
    c_ident_i = c_idents_local(i)
    c_ident_j = c_idents_local(j)

    A1(:) = A(:,i)
    A2(:) = A(:,j)
    eF1(:) = eFrame(:,i)
    eF2(:) = eFrame(:,j)

    dfrhodrho_i = dfrhodrho(i)
    dfrhodrho_j = dfrhodrho(j)
    rho_i = rho(i)
    rho_j = rho(j)
#endif

    call do_pair(c_ident_i, c_ident_j, d, r, rijsq, &
         sw, topoDist, A1, A2, eF1, eF2, &
         vpair, ptmp, f1, t1, t2, &
         rho_i, rho_j, dfrhodrho_i, dfrhodrho_j, fshift_i, fshift_j)


#ifdef IS_MPI
    id1 = AtomRowToGlobal(i)
    id2 = AtomColToGlobal(j)

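    ! Half of the pair energy is assigned to the row image of atom i and
    ! half to the column image of atom j; when the row and column
    ! contributions are scattered back and summed in do_force_loop, each
    ! pair is therefore counted exactly once.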
    pot_row(VDW_POT,i) = pot_row(VDW_POT,i) + 0.5*ptmp(VDW_POT)
    pot_col(VDW_POT,j) = pot_col(VDW_POT,j) + 0.5*ptmp(VDW_POT)
    pot_row(ELECTROSTATIC_POT,i) = pot_row(ELECTROSTATIC_POT,i) + 0.5*ptmp(ELECTROSTATIC_POT)
    pot_col(ELECTROSTATIC_POT,j) = pot_col(ELECTROSTATIC_POT,j) + 0.5*ptmp(ELECTROSTATIC_POT)
    pot_row(HB_POT,i) = pot_row(HB_POT,i) + 0.5*ptmp(HB_POT)
    pot_col(HB_POT,j) = pot_col(HB_POT,j) + 0.5*ptmp(HB_POT)
    pot_Row(METALLIC_POT,i) = pot_Row(METALLIC_POT,i) + 0.5*ptmp(METALLIC_POT)
    pot_Col(METALLIC_POT,j) = pot_Col(METALLIC_POT,j) + 0.5*ptmp(METALLIC_POT)

    do idx = 1, 3
       f_Row(idx,i) = f_Row(idx,i) + f1(idx)
       f_Col(idx,j) = f_Col(idx,j) - f1(idx)

       t_Row(idx,i) = t_Row(idx,i) + t1(idx)
       t_Col(idx,j) = t_Col(idx,j) + t2(idx)
    enddo
    ! particle_pot is the difference between the full potential
    ! and the full potential without the presence of a particular
    ! particle (atom1).
    !
    ! This reduces the density at other particle locations, so
    ! we need to recompute the density at atom2 assuming atom1
    ! didn't contribute.  This then requires recomputing the
    ! density functional for atom2 as well.
    !
    ! Most of the particle_pot heavy lifting comes from the
    ! pair interaction, and will be handled by vpair.  Parallel version.

    if ( (iand(iHash, EAM_PAIR).ne.0) .or. (iand(iHash, SC_PAIR).ne.0) ) then
       ppot_row(i) = ppot_row(i) - frho_row(j) + fshift_j
       ppot_col(j) = ppot_col(j) - frho_col(i) + fshift_i
    end if

#else
    id1 = i
    id2 = j

    pot(VDW_POT) = pot(VDW_POT) + ptmp(VDW_POT)
    pot(ELECTROSTATIC_POT) = pot(ELECTROSTATIC_POT) + ptmp(ELECTROSTATIC_POT)
    pot(HB_POT) = pot(HB_POT) + ptmp(HB_POT)
    pot(METALLIC_POT) = pot(METALLIC_POT) + ptmp(METALLIC_POT)

    do idx = 1, 3
       f(idx,i) = f(idx,i) + f1(idx)
       f(idx,j) = f(idx,j) - f1(idx)

       t(idx,i) = t(idx,i) + t1(idx)
       t(idx,j) = t(idx,j) + t2(idx)
    enddo
    ! particle_pot is the difference between the full potential
    ! and the full potential without the presence of a particular
    ! particle (atom1).
    !
    ! This reduces the density at other particle locations, so
    ! we need to recompute the density at atom2 assuming atom1
    ! didn't contribute.  This then requires recomputing the
    ! density functional for atom2 as well.
    !
    ! Most of the particle_pot heavy lifting comes from the
    ! pair interaction, and will be handled by vpair.  NonParallel version.

    if ( (iand(iHash, EAM_PAIR).ne.0) .or. (iand(iHash, SC_PAIR).ne.0) ) then
       particle_pot(i) = particle_pot(i) - frho(j) + fshift_j
       particle_pot(j) = particle_pot(j) - frho(i) + fshift_i
    end if


#endif

    if (molMembershipList(id1) .ne. molMembershipList(id2)) then

       fpair(1) = fpair(1) + f1(1)
       fpair(2) = fpair(2) + f1(2)
       fpair(3) = fpair(3) + f1(3)

    endif
  end subroutine f_do_pair

  subroutine f_do_prepair(i, j, rijsq, d, sw, rcijsq, dc, rCut, &
       eFrame, A, f, t, pot)

    real( kind = dp ) :: sw
    real( kind = dp ), dimension(LR_POT_TYPES) :: pot
    real( kind = dp ), dimension(9,nLocal) :: eFrame
    real (kind=dp), dimension(9,nLocal) :: A
    real (kind=dp), dimension(3,nLocal) :: f
    real (kind=dp), dimension(3,nLocal) :: t

    integer, intent(in) :: i, j
    real ( kind = dp ), intent(inout) :: rijsq, rcijsq, rCut
    real ( kind = dp ) :: r, rc
    real ( kind = dp ), intent(inout) :: d(3), dc(3)
    real ( kind = dp ) :: rho_i_at_j, rho_j_at_i
    integer :: c_ident_i, c_ident_j

    r = sqrt(rijsq)

#ifdef IS_MPI
    c_ident_i = c_idents_row(i)
    c_ident_j = c_idents_col(j)
#else
    c_ident_i = c_idents_local(i)
    c_ident_j = c_idents_local(j)
#endif
    rho_i_at_j = 0.0_dp
    rho_j_at_i = 0.0_dp

    call do_prepair(c_ident_i, c_ident_j, r, &
         rho_i_at_j, rho_j_at_i)

#ifdef IS_MPI
    rho_col(j) = rho_col(j) + rho_i_at_j
    rho_row(i) = rho_row(i) + rho_j_at_i
#else
    rho(j) = rho(j) + rho_i_at_j
    rho(i) = rho(i) + rho_j_at_i
#endif

  end subroutine f_do_prepair


  subroutine f_do_preforce(nlocal, pot, particle_pot)
    integer :: nlocal
    real( kind = dp ), dimension(LR_POT_TYPES) :: pot
    real( kind = dp ), dimension(nlocal) :: particle_pot
    integer :: sc_err = 0
    integer :: atom, c_ident1

    if (SIM_uses_MetallicAtoms) then

#ifdef IS_MPI
       call scatter(rho_row, rho, plan_atom_row, sc_err)
       if (sc_err /= 0 ) then
          call handleError("do_preforce()", "Error scattering rho_row into rho")
       endif
       call scatter(rho_col, rho_tmp, plan_atom_col, sc_err)
       if (sc_err /= 0 ) then
          call handleError("do_preforce()", "Error scattering rho_col into rho")
       endif
       rho(1:nlocal) = rho(1:nlocal) + rho_tmp(1:nlocal)
#endif


       do atom = 1, nlocal
          c_ident1 = c_idents_local(atom)

          call do_preforce(c_ident1, rho(atom), frho(atom), dfrhodrho(atom))
          pot(METALLIC_POT) = pot(METALLIC_POT) + frho(atom)
          particle_pot(atom) = particle_pot(atom) + frho(atom)
       end do

#ifdef IS_MPI
       !! communicate f(rho) and derivatives back into row and column arrays
       call gather(frho, frho_row, plan_atom_row, sc_err)
       if (sc_err /= 0) then
          call handleError("do_preforce()", "MPI gather frho_row failure")
       endif
       call gather(dfrhodrho, dfrhodrho_row, plan_atom_row, sc_err)
       if (sc_err /= 0) then
          call handleError("do_preforce()", "MPI gather dfrhodrho_row failure")
       endif
       call gather(frho, frho_col, plan_atom_col, sc_err)
       if (sc_err /= 0) then
          call handleError("do_preforce()", "MPI gather frho_col failure")
       endif
       call gather(dfrhodrho, dfrhodrho_col, plan_atom_col, sc_err)
       if (sc_err /= 0) then
          call handleError("do_preforce()", "MPI gather dfrhodrho_col failure")
       endif
#endif

    end if
  end subroutine f_do_preforce


  subroutine get_interatomic_vector(q_i, q_j, d, r_sq)

    real(kind = dp), dimension(3) :: q_i
    real(kind = dp), dimension(3) :: q_j
    real(kind = dp), intent(out) :: r_sq
    real(kind = dp) :: d(3), scaled(3)
    real(kind = dp) :: t
    integer :: i

    d(1) = q_j(1) - q_i(1)
    d(2) = q_j(2) - q_i(2)
    d(3) = q_j(3) - q_i(3)

    ! Wrap back into periodic box if necessary
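    ! Minimum image convention: in scaled (box) coordinates the separation
    ! is shifted by the nearest integer,
    !
    !    s -> s - anint(s),    d = Hmat * s
    !
    ! For non-orthorhombic boxes the full HmatInv / Hmat matrices are used;
    ! for orthorhombic boxes only the diagonal elements are needed.  The
    ! floor/ceiling branches below reproduce anint() without calling it.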
    if ( SIM_uses_PBC ) then

       if ( .not. boxIsOrthorhombic ) then
          ! calc the scaled coordinates.
          ! scaled = matmul(HmatInv, d)
          ! unwrap the matmul and do things explicitly:

          scaled(1) = HmatInv(1,1)*d(1) + HmatInv(1,2)*d(2) + HmatInv(1,3)*d(3)
          scaled(2) = HmatInv(2,1)*d(1) + HmatInv(2,2)*d(2) + HmatInv(2,3)*d(3)
          scaled(3) = HmatInv(3,1)*d(1) + HmatInv(3,2)*d(2) + HmatInv(3,3)*d(3)

          ! wrap the scaled coordinates (but don't use anint for speed)

          t = scaled(1)
          if (t .ge. 0.0) then
             scaled(1) = t - floor(t + 0.5)
          else
             scaled(1) = t - ceiling(t - 0.5)
          endif

          t = scaled(2)
          if (t .ge. 0.0) then
             scaled(2) = t - floor(t + 0.5)
          else
             scaled(2) = t - ceiling(t - 0.5)
          endif

          t = scaled(3)
          if (t .ge. 0.0) then
             scaled(3) = t - floor(t + 0.5)
          else
             scaled(3) = t - ceiling(t - 0.5)
          endif

          ! calc the wrapped real coordinates from the wrapped scaled
          ! coordinates
          ! d = matmul(Hmat,scaled)
          ! unwrap the matmul and do things explicitly:

          d(1) = Hmat(1,1)*scaled(1) + Hmat(1,2)*scaled(2) + Hmat(1,3)*scaled(3)
          d(2) = Hmat(2,1)*scaled(1) + Hmat(2,2)*scaled(2) + Hmat(2,3)*scaled(3)
          d(3) = Hmat(3,1)*scaled(1) + Hmat(3,2)*scaled(2) + Hmat(3,3)*scaled(3)

       else
          ! calc the scaled coordinates.

          scaled(1) = d(1) * HmatInv(1,1)
          scaled(2) = d(2) * HmatInv(2,2)
          scaled(3) = d(3) * HmatInv(3,3)

          ! wrap the scaled coordinates (but don't use anint for speed)

          t = scaled(1)
          if (t .ge. 0.0) then
             scaled(1) = t - floor(t + 0.5)
          else
             scaled(1) = t - ceiling(t - 0.5)
          endif

          t = scaled(2)
          if (t .ge. 0.0) then
             scaled(2) = t - floor(t + 0.5)
          else
             scaled(2) = t - ceiling(t - 0.5)
          endif

          t = scaled(3)
          if (t .ge. 0.0) then
             scaled(3) = t - floor(t + 0.5)
          else
             scaled(3) = t - ceiling(t - 0.5)
          endif

          ! calc the wrapped real coordinates from the wrapped scaled
          ! coordinates

          d(1) = scaled(1)*Hmat(1,1)
          d(2) = scaled(2)*Hmat(2,2)
          d(3) = scaled(3)*Hmat(3,3)

       endif

    endif

    r_sq = d(1)*d(1) + d(2)*d(2) + d(3)*d(3)

  end subroutine get_interatomic_vector

  subroutine zero_work_arrays()

#ifdef IS_MPI

    q_Row = 0.0_dp
    q_Col = 0.0_dp

    q_group_Row = 0.0_dp
    q_group_Col = 0.0_dp

    eFrame_Row = 0.0_dp
    eFrame_Col = 0.0_dp

    A_Row = 0.0_dp
    A_Col = 0.0_dp

    f_Row = 0.0_dp
    f_Col = 0.0_dp
    f_Temp = 0.0_dp

    t_Row = 0.0_dp
    t_Col = 0.0_dp
    t_Temp = 0.0_dp

    pot_Row = 0.0_dp
    pot_Col = 0.0_dp
    pot_Temp = 0.0_dp
    ppot_Temp = 0.0_dp

    frho_row = 0.0_dp
    frho_col = 0.0_dp
    rho_row = 0.0_dp
    rho_col = 0.0_dp
    rho_tmp = 0.0_dp
    dfrhodrho_row = 0.0_dp
    dfrhodrho_col = 0.0_dp

#endif
    rho = 0.0_dp
    frho = 0.0_dp
    dfrhodrho = 0.0_dp

  end subroutine zero_work_arrays

  function skipThisPair(atom1, atom2) result(skip_it)
    integer, intent(in) :: atom1
    integer, intent(in), optional :: atom2
    logical :: skip_it
    integer :: unique_id_1, unique_id_2
    integer :: i

    skip_it = .false.

    !! there are a number of reasons to skip a pair or a particle
    !! mostly we do this to exclude atoms that are involved in short
    !! range interactions (bonds, bends, torsions), but we also need
    !! to exclude some overcounted interactions that result from
    !! the parallel decomposition

#ifdef IS_MPI
    !! in MPI, we have to look up the unique IDs for each atom
    unique_id_1 = AtomRowToGlobal(atom1)
    unique_id_2 = AtomColToGlobal(atom2)
    !! this situation should only arise in MPI simulations
    if (unique_id_1 == unique_id_2) then
       skip_it = .true.
       return
    end if

    !! this prevents us from doing the pair on multiple processors
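    !! Each global pair appears twice in the force decomposition: once as
    !! (row=i, col=j) and once as (row=j, col=i), generally on different
    !! processors.  The parity test on unique_id_1 + unique_id_2 keeps
    !! exactly one of the two orderings -- the one with the smaller id in
    !! the row when the sum is odd, the other when it is even -- so no
    !! pair is double counted.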
    if (unique_id_1 < unique_id_2) then
       if (mod(unique_id_1 + unique_id_2, 2) == 0) then
          skip_it = .true.
          return
       endif
    else
       if (mod(unique_id_1 + unique_id_2, 2) == 1) then
          skip_it = .true.
          return
       endif
    endif
#else
    !! in the normal loop, the atom numbers are unique
    unique_id_1 = atom1
    unique_id_2 = atom2
#endif

#ifdef IS_MPI
    do i = 1, nSkipsForRowAtom(atom1)
       if (skipsForRowAtom(atom1, i) .eq. unique_id_2) then
          skip_it = .true.
          return
       endif
    end do
#else
    do i = 1, nSkipsForLocalAtom(atom1)
       if (skipsForLocalAtom(atom1, i) .eq. unique_id_2) then
          skip_it = .true.
          return
       endif
    end do
#endif

    return
  end function skipThisPair
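
  !! getTopoDistance returns the topological (bond-graph) separation
  !! between two atoms as recorded in toposForAtom / topoDistance, so that
  !! the downstream pair routines can scale bonded-neighbor (e.g. 1-2,
  !! 1-3, 1-4) van der Waals and electrostatic interactions as the force
  !! field dictates; a value of 0 means the atoms are not topologically
  !! connected and the pair is treated at full strength.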

  function getTopoDistance(atom1, atom2) result(topoDist)
    integer, intent(in) :: atom1
    integer, intent(in) :: atom2
    integer :: topoDist
    integer :: unique_id_2
    integer :: i

#ifdef IS_MPI
    unique_id_2 = AtomColToGlobal(atom2)
#else
    unique_id_2 = atom2
#endif

    ! zero is default for unconnected (i.e. normal) pair interactions

    topoDist = 0

    do i = 1, nTopoPairsForAtom(atom1)
       if (toposForAtom(atom1, i) .eq. unique_id_2) then
          topoDist = topoDistance(atom1, i)
          return
       endif
    end do

    return
  end function getTopoDistance

#ifdef PROFILE
  function getforcetime() result(totalforcetime)
    real(kind=dp) :: totalforcetime
    totalforcetime = forceTime
  end function getforcetime
#endif

  !! Accumulates a pair contribution into the stress (virial) tensor.

  subroutine add_stress_tensor(dpair, fpair, tau)

    real( kind = dp ), dimension(3), intent(in) :: dpair, fpair
    real( kind = dp ), dimension(9), intent(inout) :: tau

    ! because the d vector is the rj - ri vector, and
    ! because fx, fy, fz are the force on atom i, we need a
    ! negative sign here:
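    !
    ! Each call accumulates the pair virial as the outer product
    !
    !    tau(3*(a-1)+b) = tau(3*(a-1)+b) - dpair(a) * fpair(b),  a,b = 1..3
    !
    ! i.e. tau <- tau - d (x) f, stored row-major in the 9-component
    ! stress/virial accumulator.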

    tau(1) = tau(1) - dpair(1) * fpair(1)
    tau(2) = tau(2) - dpair(1) * fpair(2)
    tau(3) = tau(3) - dpair(1) * fpair(3)
    tau(4) = tau(4) - dpair(2) * fpair(1)
    tau(5) = tau(5) - dpair(2) * fpair(2)
    tau(6) = tau(6) - dpair(2) * fpair(3)
    tau(7) = tau(7) - dpair(3) * fpair(1)
    tau(8) = tau(8) - dpair(3) * fpair(2)
    tau(9) = tau(9) - dpair(3) * fpair(3)

  end subroutine add_stress_tensor

end module doForces