lesson02_init_map_vec.cpp
// This defines useful macros like HAVE_MPI, which is defined if and
// only if Epetra was built with MPI enabled.
#include <Epetra_config.h>

#ifdef HAVE_MPI
#  include <mpi.h>
// Epetra's wrapper for MPI_Comm. This header file only exists if
// Epetra was built with MPI enabled.
#  include <Epetra_MpiComm.h>
#else
#  include <Epetra_SerialComm.h>
#endif // HAVE_MPI

#include <Epetra_Map.h>
#include <Epetra_Vector.h>
#include <Epetra_Version.h>

#include <iostream>
#include <stdexcept>

void
exampleRoutine (const Epetra_Comm& comm,
                std::ostream& out)
{
  using std::endl;

  // Print out the Epetra software version.
  if (comm.MyPID () == 0) {
    out << Epetra_Version () << endl << endl;
  }

  // The type of global indices. You could just set this to int,
  // but we want the example to work for Epetra64 as well.
#ifdef EPETRA_NO_32BIT_GLOBAL_INDICES
  // Epetra was compiled only with 64-bit global index support, so use
  // 64-bit global indices.
  typedef long long global_ordinal_type;
#else
  // Epetra was compiled with 32-bit global index support. If
  // EPETRA_NO_64BIT_GLOBAL_INDICES is defined, it does not also
  // support 64-bit indices.
  typedef int global_ordinal_type;
#endif // EPETRA_NO_32BIT_GLOBAL_INDICES

  //////////////////////////////////////////////////////////////////////
  // Create some Epetra_Map objects
  //////////////////////////////////////////////////////////////////////

  //
  // Epetra has local and global Maps. Local maps describe objects
  // that are replicated over all participating MPI processes. Global
  // maps describe distributed objects. You can do imports and
  // exports between local and global maps; this is how you would turn
  // locally replicated objects into distributed objects and vice
  // versa.
  //

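  // The following is only an illustrative sketch, not executed by
  // this example. It shows roughly how an Epetra_Import (from
  // Epetra_Import.h) redistributes a vector from one Map to another;
  // the names sourceMap, targetMap, src, and tgt are hypothetical.
  //
  //   Epetra_Import importer (targetMap, sourceMap);
  //   Epetra_Vector tgt (targetMap);
  //   // Copy the entries of src (laid out by sourceMap) into tgt.
  //   (void) tgt.Import (src, importer, Insert);
  //
  // "Insert" is one of the Epetra_CombineMode values declared in
  // Epetra_CombineMode.h.
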
  // The total (global, i.e., over all MPI processes) number of
  // entries in the Map. This has the same type as that of global
  // indices, so it can represent very large values if Epetra was
  // built with 64-bit global index support.
  //
  // For this example, we scale the global number of entries in the
  // Map with the number of MPI processes. That way, you can run this
  // example with any number of MPI processes and every process will
  // still have a positive number of entries.
  const global_ordinal_type numGlobalEntries = comm.NumProc () * 5;

  // Epetra can index the entries of a Map starting with 0 (C style),
  // 1 (Fortran style), or any base you want. 1-based indexing is
  // handy when interfacing with Fortran. We choose 0-based indexing
  // here. This also has the same type as that of global indices.
  const global_ordinal_type indexBase = 0;

  // Construct a Map that puts the same number of equations on each
  // (MPI) process. The constructor takes the Epetra_Comm by const
  // reference, and the Map keeps its own copy of the communicator
  // internally, so the Map does not depend on the lifetime of the
  // comm object passed in.
  Epetra_Map contigMap (numGlobalEntries, indexBase, comm);

  // contigMap is contiguous by construction.
  if (! contigMap.LinearMap ()) {
    throw std::logic_error ("The supposedly contiguous Map isn't contiguous.");
  }
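
  // Illustrative sketch (not executed): each process can ask the Map
  // which entries it owns. With 5 entries per process and a
  // contiguous Map, process p owns global indices 5*p through 5*p+4.
  // GID64 is assumed here as the 64-bit variant of GID; GID itself
  // returns an int.
  //
  //   const int numLclEntries = contigMap.NumMyElements ();
  //   for (int lclInd = 0; lclInd < numLclEntries; ++lclInd) {
  //     out << "Process " << comm.MyPID () << " owns global index "
  //         << contigMap.GID64 (lclInd) << endl;
  //   }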

  // Let's create a second Map. It will have the same number of
  // global entries per process, but will distribute them differently,
  // in round-robin (1-D cyclic) fashion instead of contiguously.

  // We'll use the version of the Map constructor that takes, on each
  // MPI process, a list of the global indices in the Map belonging to
  // that process. You can use this constructor to construct an
  // overlapping (also called "not 1-to-1") Map, in which one or more
  // entries are owned by multiple processes. We don't do that here;
  // we make a nonoverlapping (also called "1-to-1") Map. (A sketch
  // of an overlapping Map follows the index list below.)
  const int numGblIndsPerProc = 5;
  global_ordinal_type* gblIndList = new global_ordinal_type [numGblIndsPerProc];

  const int numProcs = comm.NumProc ();
  const int myRank = comm.MyPID ();
  for (int k = 0; k < numGblIndsPerProc; ++k) {
    gblIndList[k] = myRank + k*numProcs;
  }

  Epetra_Map cyclicMap (numGlobalEntries, numGblIndsPerProc,
                        gblIndList, indexBase, comm);
  // The above constructor makes a deep copy of the input index list,
  // so it's safe to deallocate that list after this constructor
  // completes.
  if (gblIndList != NULL) {
    delete [] gblIndList;
    gblIndList = NULL;
  }

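  // Illustrative sketch (not executed): the same constructor can
  // build an overlapping Map simply by listing some global index on
  // more than one process. For instance, if every process prepended
  // global index 0 to its list, index 0 would be owned by all of
  // them. The names below are hypothetical:
  //
  //   // gblIndList2 = {0, myRank, myRank + numProcs, ...};
  //   // Epetra_Map overlapMap (-1, numInds, gblIndList2, indexBase, comm);
  //
  // Passing -1 as the global number of entries asks Epetra to compute
  // it from the lists supplied on each process.
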
  // If there's more than one MPI process in the communicator,
  // then cyclicMap is definitely NOT contiguous.
  if (comm.NumProc () > 1 && cyclicMap.LinearMap ()) {
    throw std::logic_error ("The cyclic Map claims to be contiguous.");
  }

  // contigMap and cyclicMap should always be compatible: they have
  // the same global number of entries, and the same number of entries
  // on each process. However, if the communicator contains more than
  // 1 process, then contigMap and cyclicMap are NOT the same, because
  // they distribute those entries differently. (Tpetra's Map has an
  // isCompatible() method for exactly this kind of check; Epetra_Map
  // does not, so we only test sameness here.)
  if (comm.NumProc () > 1 && contigMap.SameAs (cyclicMap)) {
    throw std::logic_error ("contigMap should not be the same as cyclicMap.");
  }

  //////////////////////////////////////////////////////////////////////
  // We have maps now, so we can create vectors.
  //////////////////////////////////////////////////////////////////////

  // Create an Epetra_Vector with the contiguous Map we created above.
  // This version of the constructor will fill the vector with zeros.
  // The Vector constructor takes the Map by const reference and
  // stores its own copy; that copy is cheap, because Epetra_Map has
  // shallow copy semantics. It uses reference counting internally to
  // avoid copying data unnecessarily.
  Epetra_Vector x (contigMap);

  // The copy constructor performs a deep copy.
  // x and y have the same Map.
  Epetra_Vector y (x);

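  // Illustrative sketch (not executed): since the copy is deep, x and
  // y share a Map but not data. Modifying an entry of y would leave
  // x unchanged. (operator[] takes a *local* index.)
  //
  //   if (! y.Map ().SameAs (x.Map ())) { /* cannot happen here */ }
  //   y[0] = 99.0;  // changes y only; x keeps its old value
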
  // Create a Vector with the 1-D cyclic Map. Calling the constructor
  // with false for the second argument leaves the data uninitialized,
  // so that you can fill it later without paying the cost of
  // initially filling it with zeros.
  Epetra_Vector z (cyclicMap, false);

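  // Illustrative sketch (not executed): one way to fill z's
  // uninitialized entries is to loop over the local entries, again
  // using local indices with operator[].
  //
  //   for (int lclRow = 0; lclRow < z.MyLength (); ++lclRow) {
  //     z[lclRow] = double (lclRow);  // whatever values you want
  //   }
  //
  // Below, we fill z with (pseudo)random numbers instead.
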
  // Set the entries of z to (pseudo)random numbers. Please don't
  // consider this a good parallel pseudorandom number generator.
  (void) z.Random ();

  // Set the entries of x to all ones.
  (void) x.PutScalar (1.0);

  // Define some constants for use below.
  const double alpha = 3.14159;
  const double beta = 2.71828;
  const double gamma = -10.0;

  // x = beta*x + alpha*z
  //
  // This is a legal operation! Even though the Maps of x and z are
  // not the same, their Maps are compatible. Whether it makes sense
  // or not depends on your application.
  (void) x.Update (alpha, z, beta);

  (void) y.PutScalar (42.0); // Set all entries of y to 42.0
  // y = gamma*y + alpha*x + beta*z
  (void) y.Update (alpha, x, beta, z, gamma);

  // Compute the 2-norm of y.
  //
  // Epetra_Vector entries always have type double, so the norm is a
  // double as well. (In Tpetra, where the entry type is a template
  // parameter, the norm's type may differ from the entry type; for
  // example, if the entries are complex, the norm is real.)
  double theNorm = 0.0;
  (void) y.Norm2 (&theNorm);

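  // Illustrative sketch (not executed): other reductions follow the
  // same pattern, each filling in a double per vector.
  //
  //   double theOneNorm = 0.0, theInfNorm = 0.0, theDot = 0.0;
  //   (void) y.Norm1 (&theOneNorm);
  //   (void) y.NormInf (&theInfNorm);
  //   (void) y.Dot (x, &theDot);  // x and y have the same Map here
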
  // Print the norm of y on Proc 0.
  if (comm.MyPID () == 0) {
    out << "Norm of y: " << theNorm << endl;
  }
}

//
// The same main() driver routine as in the first Epetra lesson.
//
int
main (int argc, char *argv[])
{
  using std::cout;
  using std::endl;

#ifdef HAVE_MPI
  MPI_Init (&argc, &argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif // HAVE_MPI

  if (comm.MyPID () == 0) {
    cout << "Total number of processes: " << comm.NumProc () << endl;
  }

  // Do something with the new Epetra communicator.
  exampleRoutine (comm, cout);

  // This tells the Trilinos test framework that the test passed.
  if (comm.MyPID () == 0) {
    cout << "End Result: TEST PASSED" << endl;
  }

#ifdef HAVE_MPI
  // Since you called MPI_Init, you are responsible for calling
  // MPI_Finalize after you are done using MPI.
  (void) MPI_Finalize ();
#endif // HAVE_MPI

  return 0;
}

Epetra symbols referenced in this example:

- std::string Epetra_Version()
- Epetra_Comm (Epetra_Comm.h): the Epetra communication abstract base class.
  - virtual int NumProc() const = 0: returns the total number of processes.
  - virtual int MyPID() const = 0: returns my process ID.
- Epetra_MpiComm: the Epetra MPI communication class (NumProc() and MyPID() as above).
- Epetra_SerialComm: the Epetra serial communication class.
- Epetra_Map (Epetra_Map.h): a class for partitioning vectors and matrices.
  - bool LinearMap() const: returns true if the global ID space is contiguously divided (but not necessarily uniformly) across all processes.
  - bool SameAs(const Epetra_BlockMap& Map) const: returns true if this and Map are identical maps.
- Epetra_Vector: a class for constructing and using dense vectors on a parallel computer.
  - int PutScalar(double ScalarConstant): initialize all values in a multi-vector with a constant value.
  - int Random(): set multi-vector values to random numbers.
  - int Update(double ScalarA, const Epetra_MultiVector& A, double ScalarThis): update multi-vector values with scaled values of A, this = ScalarThis*this + ScalarA*A.
  - int Norm2(double* Result) const: compute the 2-norm of each vector in the multi-vector.