Epetra Package Browser (Single Doxygen Collection) Development
test/Bugs/Bug_6079_DistObject_CombineMode_flags/cxx_main.cpp
// ---------------------------------------------------------------------
// $Id$
//
// Copyright (C) 2014 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------


// check correct behavior of sadd of Trilinos vectors
// if they have different Epetra maps
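//
// The test fills a ghosted vector with 1.0 and a distributed (1-to-1)
// vector with 2.0, imports the ghosted data into the distributed vector
// with the Epetra_AddLocalAlso combine mode, and expects every locally
// owned entry to end up as 3.0, i.e. the imported values are added to
// the values already stored locally.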

#include <algorithm>   // for std::max
#include <iostream>
#include <vector>
#include <Epetra_Import.h>
#include <Epetra_Map.h>
#include <Epetra_FEVector.h>
#ifdef HAVE_MPI
#include <Epetra_MpiComm.h>
#include <mpi.h>
#else
#include <Epetra_SerialComm.h>
#endif

void test ()
{
  int n_proc;
#ifdef HAVE_MPI
  MPI_Comm_size(MPI_COMM_WORLD, &n_proc);
#else
  n_proc = 1;
#endif
  int my_id;
#ifdef HAVE_MPI
  MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
#else
  my_id = 0;
#endif

  // All processes should own 10 entries
  const int entries_per_process = 10;

  const int begin_index = my_id*entries_per_process;
  const int end_index = (my_id+1)*entries_per_process;

  const int local_begin = std::max(0, begin_index-entries_per_process/2);
  const int local_end = entries_per_process*n_proc;
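
  // The "ghosted" range of each process starts half a block (5 entries)
  // before its owned range (clamped at 0) and extends to the end of the
  // global index space, so the ghosted ranges of the processes overlap.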

  // create Epetra maps
  std::vector<unsigned int> ghosted_indices;
  ghosted_indices.reserve(local_end-local_begin);
  for (int i = local_begin; i < local_end; ++i)
    ghosted_indices.push_back(i);
  Epetra_Map map_ghosted
  (-1,
   local_end-local_begin,
   reinterpret_cast<int*>(&ghosted_indices[0]),
   0,
#ifdef HAVE_MPI
   Epetra_MpiComm(MPI_COMM_WORLD));
#else
   Epetra_SerialComm());
#endif

  std::vector<unsigned int> distributed_indices;
  distributed_indices.reserve(entries_per_process*n_proc);
  for (int i = begin_index; i < end_index; ++i)
    distributed_indices.push_back(i);
  Epetra_Map map_distributed
  (entries_per_process*n_proc,
   entries_per_process,
   reinterpret_cast<int*>(&distributed_indices[0]),
   0,
#ifdef HAVE_MPI
   Epetra_MpiComm(MPI_COMM_WORLD));
#else
   Epetra_SerialComm());
#endif

  Epetra_FEVector v_ghosted(map_ghosted);
  Epetra_FEVector v_distributed(map_distributed);

  v_distributed.PutScalar(2.);
  v_ghosted.PutScalar(1.);

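  // Import the ghosted entries (all 1.0) into the distributed vector
  // (all 2.0).  With the Epetra_AddLocalAlso combine mode the received
  // values are added to the values already stored locally, so every
  // locally owned entry should afterwards be 3.0 (checked below).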
  Epetra_Import data_exchange (v_distributed.Map(), v_ghosted.Map());
  v_distributed.Import(v_ghosted, data_exchange, Epetra_AddLocalAlso);

  std::cout << "Distributed:" << std::endl;
  for (int i = begin_index; i < end_index; ++i)
    {
      int trilinos_i = v_distributed.Map().LID(i);
      double value = v_distributed[0][trilinos_i];
      std::cout << "proc " << my_id << " " << i << ": " << value << std::endl;
      if (value != 3)
        std::cerr << "tests FAILED: value = " << value << std::endl;
    }
}


int main (int argc, char **argv)
{
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
#endif
  test();
#ifdef HAVE_MPI
  MPI_Finalize();
#endif
}

Referenced Epetra symbols:

Epetra_AddLocalAlso
    Epetra_CombineMode enumerator; in this test it is expected to add the imported values to the existing local values.
int LID(int GID) const
    Returns the local ID of a global ID; returns -1 if not found on this processor.
const Epetra_BlockMap &Map() const
    Returns the address of the Epetra_BlockMap for this multi-vector.
int Import(const Epetra_SrcDistObject &A, const Epetra_Import &Importer, Epetra_CombineMode CombineMode, const Epetra_OffsetIndex *Indexor=0)
    Imports an Epetra_DistObject using the Epetra_Import object.
int PutScalar(double ScalarConstant)
    Initializes all values in a multi-vector with a constant value.
Epetra_FEVector
    Epetra finite-element vector.
Epetra_Import
    Builds an import object for efficient importing of off-processor elements.
Epetra_Map
    A class for partitioning vectors and matrices.
Epetra_MpiComm
    The Epetra MPI communication class.
Epetra_SerialComm
    The Epetra serial communication class.
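
The sketch below shows, in one place, how the symbols listed above are used together. It is only an illustration, not part of the test: it assumes a Trilinos build without MPI (so Epetra_SerialComm is available), and the names target_map, source_map, target, and source, as well as the 10-entry size, are made up for the example.

// Minimal serial sketch of the Import/CombineMode pattern used by the test.
#include <iostream>
#include <Epetra_SerialComm.h>
#include <Epetra_Map.h>
#include <Epetra_Import.h>
#include <Epetra_FEVector.h>

int main()
{
  Epetra_SerialComm comm;

  // Two maps over the same 10 global indices: target_map owns the entries,
  // source_map describes where the imported data comes from.
  Epetra_Map target_map(10, 0, comm);
  Epetra_Map source_map(10, 0, comm);

  Epetra_FEVector target(target_map);
  Epetra_FEVector source(source_map);
  target.PutScalar(2.);   // existing local values
  source.PutScalar(1.);   // values to be imported

  // Importer describing the communication from source_map into target_map.
  Epetra_Import importer(target_map, source_map);

  // Combine the received values with the existing local values; by the
  // expectation checked in the test above, every entry should become 3.
  target.Import(source, importer, Epetra_AddLocalAlso);

  for (int lid = 0; lid < target_map.NumMyElements(); ++lid)
    std::cout << target_map.GID(lid) << ": " << target[0][lid] << std::endl;

  return 0;
}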