PeriDEM 0.2.0
PeriDEM -- Peridynamics-based high-fidelity model for granular media
anonymous_namespace{testParallelCompLib.cpp} Namespace Reference

Functions

void printMsg(std::string msg, int mpiRank, int printMpiRank)

double f1(const double &x)

double f2(const double &x)

void setupOwnerAndGhost(size_t mpiSize, size_t mpiRank, const std::vector<size_t> &nodePartition, const std::vector<std::vector<size_t>> &nodeNeighs, std::vector<size_t> &ownedNodes, std::vector<size_t> &ownedInternalNodes, std::vector<size_t> &ownedBdryNodes, std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>> &ghostData)

void exchangeDispData(size_t mpiSize, size_t mpiRank, const std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>> &ghostData, std::vector<std::pair<std::vector<util::Point>, std::vector<util::Point>>> &dispGhostData, std::vector<util::Point> &dispNodes)

Function Documentation

◆ exchangeDispData()

void anonymous_namespace{testParallelCompLib.cpp}::exchangeDispData(size_t mpiSize,
    size_t mpiRank,
    const std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>> &ghostData,
    std::vector<std::pair<std::vector<util::Point>, std::vector<util::Point>>> &dispGhostData,
    std::vector<util::Point> &dispNodes)

Definition at line 164 of file testParallelCompLib.cpp.

{
  // resize dispGhostData if not done
  dispGhostData.resize(mpiSize);
  for (size_t j_proc = 0; j_proc < mpiSize; j_proc++) {
    auto j_data_size = ghostData[j_proc].first.size(); // same size for second
    dispGhostData[j_proc].first.resize(j_data_size);
    dispGhostData[j_proc].second.resize(j_data_size);
  }

  // exchange data
  util::io::print("\n\nBegin exchange data\n\n");
  util::io::print(fmt::format("\n\nThis processor's rank = {}\n\n", mpiRank), util::io::print_default_tab, -1);
  MPI_Request mpiRequests[2*(mpiSize-1)];
  size_t requestCounter = 0;
  for (size_t j_proc=0; j_proc<mpiSize; j_proc++) {
    auto & sendIds = ghostData[j_proc].second;
    auto & recvIds = ghostData[j_proc].first;

    if (j_proc != mpiRank and recvIds.size() != 0) {

      util::io::print(fmt::format("\n\n Processing j_proc = {}\n\n", j_proc), util::io::print_default_tab, -1);

      // fill in ghost displacement data that we are sending from nodal displacement data
      for (size_t k = 0; k<sendIds.size(); k++)
        dispGhostData[j_proc].second[k] = dispNodes[sendIds[k]];

      // send data from this process to j_proc
      MPI_Isend(dispGhostData[j_proc].second.data(), 3*sendIds.size(),
                MPI_DOUBLE, j_proc, 0, MPI_COMM_WORLD,
                &mpiRequests[requestCounter++]);

      // receive data from j_proc
      MPI_Irecv(dispGhostData[j_proc].first.data(), 3*recvIds.size(),
                MPI_DOUBLE, j_proc, 0, MPI_COMM_WORLD,
                &mpiRequests[requestCounter++]);
    }
  } // loop over j_proc

  util::io::print("\n\nCalling MPI_Waitall\n\n");
  MPI_Waitall(requestCounter, mpiRequests, MPI_STATUSES_IGNORE);

  util::io::print("\n\nUpdate dispNodes data\n\n");

  for (size_t j_proc=0; j_proc<mpiSize; j_proc++) {
    auto & recvIds = ghostData[j_proc].first;
    if (j_proc != mpiRank and recvIds.size() != 0) {
      for (size_t k = 0; k<recvIds.size(); k++)
        dispNodes[recvIds[k]] = dispGhostData[j_proc].first[k];
    }
  } // loop over j_proc
} // exchangeDispData()
References util::io::print(), and util::io::print_default_tab.

Referenced by test::testMPI().
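
For orientation, a minimal calling sequence for this routine might look like the sketch below. This is not code from the library: it is assumed to live in the same translation unit as the helpers above (they sit in an anonymous namespace), MPI is assumed to be initialized already, and nodePartition, nodeNeighs, and dispNodes are illustrative inputs built elsewhere. It also relies on util::Point storing three contiguous doubles, which is what the 3*size MPI_DOUBLE counts in the body assume.

// Sketch only (hypothetical driver): build the ownership/ghost lists once,
// then call exchangeDispData() whenever owned entries of dispNodes change.
void runGhostExchangeSketch(const std::vector<size_t> &nodePartition,
                            const std::vector<std::vector<size_t>> &nodeNeighs,
                            std::vector<util::Point> &dispNodes) {
  int mpiSizeInt, mpiRankInt;
  MPI_Comm_size(MPI_COMM_WORLD, &mpiSizeInt);
  MPI_Comm_rank(MPI_COMM_WORLD, &mpiRankInt);
  auto mpiSize = size_t(mpiSizeInt);
  auto mpiRank = size_t(mpiRankInt);

  std::vector<size_t> ownedNodes, ownedInternalNodes, ownedBdryNodes;
  std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>> ghostData;
  std::vector<std::pair<std::vector<util::Point>, std::vector<util::Point>>> dispGhostData;

  setupOwnerAndGhost(mpiSize, mpiRank, nodePartition, nodeNeighs,
                     ownedNodes, ownedInternalNodes, ownedBdryNodes, ghostData);

  // ... update dispNodes at the owned node ids ...

  exchangeDispData(mpiSize, mpiRank, ghostData, dispGhostData, dispNodes);
}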


◆ f1()

double anonymous_namespace{testParallelCompLib.cpp}::f1(const double &x)

Definition at line 45 of file testParallelCompLib.cpp.

{
  return x*x*x + std::exp(x) - std::sin(x);
}

Referenced by test::testTaskflow().


◆ f2()

double anonymous_namespace{testParallelCompLib.cpp}::f2(const double &x)

Definition at line 49 of file testParallelCompLib.cpp.

{
  return 2*(x-0.5)*(x-0.5)*(x-0.5) + std::exp(x-0.5) - std::cos(x-0.5);
}

Referenced by test::testTaskflow().
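
Both f1() and f2() are only cheap but non-trivial per-element workloads for the threading test. As a rough illustration (not the test's actual loop, which drives a Taskflow executor), evaluating such a kernel over a vector with the standard C++17 parallel algorithms could look like the following sketch; the vector size and values are arbitrary, and f1 is assumed visible because the sketch sits in the same file.

// Sketch only: std::execution::par is a stand-in for the Taskflow-based
// loop exercised in test::testTaskflow().
// Needed headers: <algorithm>, <execution>, <vector>.
double sumOfF1(size_t n) {
  std::vector<double> x(n), y(n);
  for (size_t i = 0; i < n; i++)
    x[i] = double(i) / double(n);

  // apply f1 to every entry in parallel
  std::transform(std::execution::par, x.begin(), x.end(), y.begin(),
                 [](const double &v) { return f1(v); });

  double sum = 0.;
  for (auto v : y)
    sum += v;
  return sum;
}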


◆ printMsg()

void anonymous_namespace{testParallelCompLib.cpp}::printMsg(std::string msg,
    int mpiRank,
    int printMpiRank)

Definition at line 36 of file testParallelCompLib.cpp.

{
  if (printMpiRank < 0) {
    std::cout << msg;
  } else {
    if (mpiRank == printMpiRank)
      std::cout << msg;
  }
}

Referenced by test::testMPI().
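
Typical use (illustrative, not taken from the test itself): pass the caller's MPI rank together with either the specific rank that should print or a negative value to print from every rank.

// Sketch only: mpiRank is whatever MPI_Comm_rank returned on this process.
int mpiRank;
MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);

printMsg("setup complete\n", mpiRank, 0);       // printed only on rank 0
printMsg("local step finished\n", mpiRank, -1); // negative filter rank: printed on every rank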


◆ setupOwnerAndGhost()

void anonymous_namespace{testParallelCompLib.cpp}::setupOwnerAndGhost(size_t mpiSize,
    size_t mpiRank,
    const std::vector<size_t> &nodePartition,
    const std::vector<std::vector<size_t>> &nodeNeighs,
    std::vector<size_t> &ownedNodes,
    std::vector<size_t> &ownedInternalNodes,
    std::vector<size_t> &ownedBdryNodes,
    std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>> &ghostData)

Definition at line 53 of file testParallelCompLib.cpp.

{
  auto numNodes = nodePartition.size();

  // clear data
  ownedNodes.clear();
  ownedInternalNodes.clear();
  ownedBdryNodes.clear();
  ghostData.resize(mpiSize);
  for (size_t i_proc=0; i_proc<mpiSize; i_proc++) {
    ghostData[i_proc].first.clear();
    ghostData[i_proc].second.clear();
  }

  // setup
  for (size_t i=0; i<numNodes; i++) {
    if (nodePartition[i] == mpiRank) {
      // this processor owns this node
      ownedNodes.push_back(i);

      // ascertain if this node has neighboring nodes owned by other processors
      bool ghostExist = false;
      for (auto j : nodeNeighs[i]) {
        auto j_proc = nodePartition[j];
        if (j_proc != mpiRank) {
          ghostExist = true;

          // add j to ghost node list (to receive from j_proc)
          ghostData[j_proc].first.push_back(j);

          // add i to ghost node list (to send to j_proc)
          ghostData[j_proc].second.push_back(i);
        }
      } // loop over neighboring nodes

      if (ghostExist)
        ownedBdryNodes.push_back(i);
      else
        ownedInternalNodes.push_back(i);
    } // if this processor owns node i
  } // loop over nodes

  bool debugGhostData = false;
  if (debugGhostData and mpiRank == 0) {
    std::vector<std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>> ghostDataAllProc(mpiSize);
    std::vector<std::vector<size_t>> numGhostDataAllProc(mpiSize);
    for (size_t i_proc = 0; i_proc < mpiSize; i_proc++) {
      numGhostDataAllProc[i_proc].resize(mpiSize);
      ghostDataAllProc[i_proc].resize(mpiSize);
    }

    for (size_t i_proc = 0; i_proc < mpiSize; i_proc++) {
      // assume we are i_proc and then create ghostData for i_proc
      for (size_t i = 0; i < numNodes; i++) {
        if (nodePartition[i] == i_proc) {
          // i_proc processor owns this node
          for (auto j: nodeNeighs[i]) {
            auto j_proc = nodePartition[j];
            if (j_proc != i_proc) {
              // add to ghost node list
              ghostDataAllProc[i_proc][j_proc].first.push_back(j);
            }
          } // loop over neighboring nodes
        } // if i_proc owns node i
      } // loop over nodes

      // total number of ghost nodes from neighboring processors
      for (size_t j_proc = 0; j_proc < mpiSize; j_proc++)
        numGhostDataAllProc[i_proc][j_proc] = ghostDataAllProc[i_proc][j_proc].first.size();
    } // loop over i_proc

    // print the information
    std::cout << "\n\nGhost data debug output\n\n";
    bool found_asym = false;
    for (size_t i_proc = 0; i_proc < mpiSize; i_proc++) {
      for (size_t j_proc = 0; j_proc < mpiSize; j_proc++) {
        std::cout << fmt::format("(i,j) = ({}, {}), num data = {}\n",
                                 i_proc, j_proc,
                                 numGhostDataAllProc[i_proc][j_proc]);

        if (j_proc > i_proc) {
          if (numGhostDataAllProc[i_proc][j_proc] == numGhostDataAllProc[j_proc][i_proc])
            std::cout << fmt::format(" symmetric: data ({}, {}) = data ({}, {})\n",
                                     i_proc, j_proc, j_proc, i_proc);
          else {
            found_asym = true;
            std::cout << fmt::format(
                " asymmetric: data ({}, {}) != data ({}, {})\n",
                i_proc, j_proc, j_proc, i_proc);
          }
        }
      }
    }
    if (found_asym)
      std::cout << "Found asymmetric ghost data\n";
    else
      std::cout << "No asymmetric ghost data\n";
  }
} // setupOwnerAndGhost()

Referenced by test::testMPI().
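
To make the ownership and ghost bookkeeping concrete, here is a small hypothetical input and the result it produces. The 4-node chain, partition, and neighbor lists below are made up for illustration and assume a run on 2 ranks.

// Sketch only: a 4-node chain 0-1-2-3 split across 2 ranks.
std::vector<size_t> nodePartition = {0, 0, 1, 1};
std::vector<std::vector<size_t>> nodeNeighs = {{1}, {0, 2}, {1, 3}, {2}};

std::vector<size_t> ownedNodes, ownedInternalNodes, ownedBdryNodes;
std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>> ghostData;

// on rank 0 (mpiRank == 0) of a 2-rank run this yields:
//   ownedNodes         = {0, 1}
//   ownedInternalNodes = {0}    (all neighbors of node 0 live on this rank)
//   ownedBdryNodes     = {1}    (its neighbor, node 2, lives on rank 1)
//   ghostData[1].first  = {2}   (node to receive from rank 1)
//   ghostData[1].second = {1}   (node to send to rank 1)
setupOwnerAndGhost(2 /*mpiSize*/, mpiRank, nodePartition, nodeNeighs,
                   ownedNodes, ownedInternalNodes, ownedBdryNodes, ghostData);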
