root/OpenMD/branches/development/src/parallel/ForceDecomposition.cpp
Revision: 1538
Committed: Tue Jan 11 18:58:12 2011 UTC (14 years, 3 months ago) by chuckv
File size: 4037 byte(s)
Log Message:
Adding parallel classes...

File Contents

/**
 * @file ForceDecomposition.cpp
 * @author Charles Vardeman <cvardema.at.nd.edu>
 * @date 08/18/2010
 * @time 11:56am
 * @version 1.0
 *
 * @section LICENSE
 * Copyright (c) 2010 The University of Notre Dame. All Rights Reserved.
 *
 * The University of Notre Dame grants you ("Licensee") a
 * non-exclusive, royalty free, license to use, modify and
 * redistribute this software in source and binary code form, provided
 * that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the
 * distribution.
 *
 * This software is provided "AS IS," without a warranty of any
 * kind. All express or implied conditions, representations and
 * warranties, including any implied warranty of merchantability,
 * fitness for a particular purpose or non-infringement, are hereby
 * excluded. The University of Notre Dame and its licensors shall not
 * be liable for any damages suffered by licensee as a result of
 * using, modifying or distributing the software or its
 * derivatives. In no event will the University of Notre Dame or its
 * licensors be liable for any lost revenue, profit or data, or for
 * direct, indirect, special, consequential, incidental or punitive
 * damages, however caused and regardless of the theory of liability,
 * arising out of the use of or inability to use software, even if the
 * University of Notre Dame has been advised of the possibility of
 * such damages.
 *
 * SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
 * research, please cite the appropriate papers when you publish your
 * work. Good starting points are:
 *
 * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
 * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
 * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
 * [4] Vardeman & Gezelter, in progress (2009).
 */



/* -*- c++ -*- */
#include "config.h"
#include <stdlib.h>
#ifdef IS_MPI
#include <mpi.h>
#endif

#include <iostream>
#include <vector>
#include <algorithm>
#include <cmath>
#include "parallel/ForceDecomposition.hpp"


using namespace std;
using namespace OpenMD;

// static communicators shared by all ForceDecomposition instances
// (MPI::Intracomm is used because MPI::Comm is abstract and cannot be
// stored by value in a vector)
#ifdef IS_MPI
static vector<MPI::Intracomm> communicators;
#endif

//____ MPITypeTraits
template<typename T>
struct MPITypeTraits;

#ifdef IS_MPI
template<>
struct MPITypeTraits<RealType> {
  static const MPI::Datatype datatype;
};
const MPI::Datatype MPITypeTraits<RealType>::datatype = MY_MPI_REAL;

template<>
struct MPITypeTraits<int> {
  static const MPI::Datatype datatype;
};
const MPI::Datatype MPITypeTraits<int>::datatype = MPI_INT;
#endif
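
/*
 * Illustrative sketch (assumed usage, not part of the original file): the
 * MPITypeTraits specializations above let communication helpers stay
 * generic over the element type.  A hypothetical helper might look like:
 *
 *   template<typename T>
 *   void allReduceSum(vector<T>& sendbuf, vector<T>& recvbuf) {
 *     MPI::COMM_WORLD.Allreduce(&sendbuf[0], &recvbuf[0],
 *                               (int) sendbuf.size(),
 *                               MPITypeTraits<T>::datatype, MPI::SUM);
 *   }
 *
 * so the same call handles both RealType and int buffers.
 */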

/**
 * Constructor for the ForceDecomposition parallel decomposition method.
 * Will try to construct a symmetric grid of processors. Ideally, the
 * number of processors will be a perfect square, e.g. 4, 9, 16, 25.
 *
 */

ForceDecomposition::ForceDecomposition() {

#ifdef IS_MPI
  int nProcs = MPI::COMM_WORLD.Get_size();
  int worldRank = MPI::COMM_WORLD.Get_rank();

  // First time through, construct the column stride.
  if (communicators.size() == 0) {
    // Pick the largest divisor of nProcs that does not exceed sqrt(nProcs),
    // so that the processor grid is as close to square as possible.
    int nColumnsMax = (int) round(sqrt((float) nProcs));
    for (int i = 1; i <= nColumnsMax; ++i) {
      if (nProcs % i == 0) nColumns = i;
    }

    int nRows = nProcs / nColumns;
    myRank_ = worldRank % nColumns;
  } else {
    myRank_ = worldRank / nColumns;
  }

  MPI::Intracomm newComm = MPI::COMM_WORLD.Split(myRank_, 0);
  // store the new communicator so gather/scatter can find it later
  communicators.push_back(newComm);
#endif

  isColumn_ = false;
}
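
/*
 * Illustrative note (not in the original file): the divisor search above
 * settles on the largest factor of nProcs that does not exceed
 * round(sqrt(nProcs)).  For example, with nProcs = 12 the candidates
 * 1, 2, 3 are tried, giving nColumns = 3 and nRows = 12 / 3 = 4, a 3 x 4
 * grid; with a perfect square such as nProcs = 16 the grid is 4 x 4.
 * MPI::COMM_WORLD.Split then places all processes that passed the same
 * color (myRank_) into a common communicator.
 */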

// NOTE: the parameter types below are assumed (vectors of RealType); the
// declarations in parallel/ForceDecomposition.hpp are authoritative.
void ForceDecomposition::gather(vector<RealType>& sendbuf,
                                vector<RealType>& receivebuf) {
  // The Allgatherv buffer, count, and displacement arguments still need to
  // be filled in from the decomposition plan; see the sketch below.
  communicators[myIndex_].Allgatherv();
}



void ForceDecomposition::scatter(vector<RealType>& sbuffer,
                                 vector<RealType>& rbuffer) {
  // recvcounts (per-processor receive counts) must also come from the
  // decomposition plan; see the sketch below.
  communicators[myIndex_].Reduce_scatter(&sbuffer[0], &rbuffer[0], recvcounts,
                                         MPI::DOUBLE, MPI::SUM);
}
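
/*
 * Illustrative sketch (assumed, not part of the original file): once the
 * per-processor receive counts are known, the collective calls above could
 * be completed along these lines, here with equal-sized chunks for
 * simplicity and the MPITypeTraits datatype instead of a hard-coded
 * MPI::DOUBLE:
 *
 *   int nInComm   = communicators[myIndex_].Get_size();
 *   int sendCount = (int) sendbuf.size();
 *   vector<int> recvCounts(nInComm, sendCount);
 *   vector<int> displs(nInComm, 0);
 *   for (int i = 1; i < nInComm; ++i)
 *     displs[i] = displs[i-1] + recvCounts[i-1];
 *
 *   communicators[myIndex_].Allgatherv(&sendbuf[0], sendCount,
 *                                      MPITypeTraits<RealType>::datatype,
 *                                      &receivebuf[0], &recvCounts[0],
 *                                      &displs[0],
 *                                      MPITypeTraits<RealType>::datatype);
 *
 *   communicators[myIndex_].Reduce_scatter(&sbuffer[0], &rbuffer[0],
 *                                          &recvCounts[0],
 *                                          MPITypeTraits<RealType>::datatype,
 *                                          MPI::SUM);
 */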