SparseSelfAdjointView.h
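This header implements Eigen's SparseSelfAdjointView, a pseudo expression that lets the stored upper or lower triangle of a sparse matrix act as a full selfadjoint (symmetric/Hermitian) matrix, together with the symmetric permutation helpers (permute_symm_to_fullsymm, permute_symm_to_symm) used by the sparse solver modules. Before the listing, a minimal usage sketch (an editorial addition, not part of the file; the size n and the fill pattern are illustrative assumptions):

    #include <Eigen/SparseCore>
    using namespace Eigen;

    int main()
    {
      const int n = 10;
      SparseMatrix<double> A(n, n);
      A.setIdentity();                      // assumed fill; only the lower triangle is read below
      VectorXd b = VectorXd::Ones(n), y;

      // Treat the stored lower triangle of A as a full symmetric matrix:
      y = A.selfadjointView<Lower>() * b;   // specialized symmetric sparse * dense kernel

      // Apply a symmetric permutation P A P^-1 and recover a full sparse matrix:
      PermutationMatrix<Dynamic, Dynamic, int> P(n);
      P.setIdentity();
      SparseMatrix<double> Afull;
      Afull = A.selfadjointView<Lower>().twistedBy(P);
    }

Dense products against the view dispatch to sparse_selfadjoint_time_dense_product() below, and assigning a twistedBy() expression to a SparseMatrix goes through permute_symm_to_fullsymm().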
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

namespace Eigen {

/** \ingroup SparseCore_Module
  * \class SparseSelfAdjointView
  *
  * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
  *
  * \param MatrixType the type of the dense matrix storing the coefficients
  * \param Mode can be either \c #Lower or \c #Upper
  *
  * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
  * with given dense storage of the coefficients. It is the return type of MatrixBase::selfadjointView()
  * and most of the time this is the only way that it is used.
  *
  * \sa SparseMatrixBase::selfadjointView()
  */
namespace internal {

template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};

template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

} // end namespace internal

template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
  : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
  public:

    enum {
      Mode = _Mode,
      TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0),
      RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
    };

    typedef EigenBase<SparseSelfAdjointView> Base;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

    explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
    {
      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
    }

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    /** \internal \returns a reference to the nested matrix */
    const _MatrixTypeNested& matrix() const { return m_matrix; }
    typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }

    /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs. */
    template<typename OtherDerived>
    Product<SparseSelfAdjointView, OtherDerived>
    operator*(const SparseMatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
    }

    /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs. */
    template<typename OtherDerived> friend
    Product<OtherDerived, SparseSelfAdjointView>
    operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
    template<typename OtherDerived>
    Product<SparseSelfAdjointView,OtherDerived>
    operator*(const MatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
    }

    /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
    template<typename OtherDerived> friend
    Product<OtherDerived,SparseSelfAdjointView>
    operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
      * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
      * \returns a reference to \c *this
      */
    template<typename DerivedU>
    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));

    /** \returns an expression of P H P^-1 */
    // TODO implement twists in a more evaluator friendly fashion
    SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
    {
      return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
    }

    template<typename SrcMatrixType,int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
    {
      internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
      return *this;
    }

    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    // Since we override the copy-assignment operator, we need to explicitly re-declare the copy-constructor
    EIGEN_DEFAULT_COPY_CONSTRUCTOR(SparseSelfAdjointView)

    template<typename SrcMatrixType,unsigned int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    void resize(Index rows, Index cols)
    {
      EIGEN_ONLY_USED_FOR_DEBUG(rows);
      EIGEN_ONLY_USED_FOR_DEBUG(cols);
      eigen_assert(rows == this->rows() && cols == this->cols()
                && "SparseSelfAdjointView::resize() does not actually allow resizing.");
    }

  protected:

    MatrixTypeNested m_matrix;
    //mutable VectorI m_countPerRow;
    //mutable VectorI m_countPerCol;
  private:
    template<typename Dest> void evalTo(Dest &) const;
};

/***************************************************************************
* Implementation of SparseMatrixBase methods
***************************************************************************/

template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
{
  return SparseSelfAdjointView<const Derived, UpLo>(derived());
}

template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
{
  return SparseSelfAdjointView<Derived, UpLo>(derived());
}

/***************************************************************************
* Implementation of SparseSelfAdjointView methods
***************************************************************************/

template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,Mode>&
SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
  SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
  if(alpha==Scalar(0))
    m_matrix = tmp.template triangularView<Mode>();
  else
    m_matrix += alpha * tmp.template triangularView<Mode>();

  return *this;
}

namespace internal {

// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
{
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
  typedef SparseSelfAdjointShape Shape;
};

struct SparseSelfAdjoint2Sparse {};

template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };

template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
{
  typedef typename DstXprType::StorageIndex StorageIndex;
  typedef internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> AssignOpType;

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)
  {
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
  }

  // FIXME: the handling of += and -= in sparse matrices should be cleaned up so that the next two overloads could be reduced to:
  template<typename DestScalar,int StorageOrder,typename AssignFunc>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignFunc& func)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    call_assignment_no_alias_no_transpose(dst, tmp, func);
  }

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
                  const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    dst += tmp;
  }

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
                  const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    dst -= tmp;
  }

  template<typename DestScalar>
  static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&/*func*/)
  {
    // TODO directly evaluate into dst;
    SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
    dst = tmp;
  }
};

} // end namespace internal

/***************************************************************************
* Implementation of sparse self-adjoint times dense matrix product
***************************************************************************/

namespace internal {

template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  EIGEN_ONLY_USED_FOR_DEBUG(alpha);

  typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
  typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
  typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
  typedef typename LhsEval::InnerIterator LhsIterator;
  typedef typename SparseLhsType::Scalar LhsScalar;

  enum {
    LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
    ProcessFirstHalf =
              ((Mode&(Upper|Lower))==(Upper|Lower))
          || ( (Mode&Upper) && !LhsIsRowMajor)
          || ( (Mode&Lower) && LhsIsRowMajor),
    ProcessSecondHalf = !ProcessFirstHalf
  };
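
  // Editorial note: each stored off-diagonal entry (i,j) is visited exactly once
  // and contributes twice, to res(j,k) (direct term) and to res(i,k) (mirrored,
  // conjugated term). ProcessFirstHalf tells whether, in the inner iteration
  // order of an outer vector, the stored off-diagonal entries come before the
  // diagonal entry (e.g. Upper mode with column-major storage) or after it,
  // so that the diagonal coefficient is handled exactly once in either case.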

  SparseLhsTypeNested lhs_nested(lhs);
  LhsEval lhsEval(lhs_nested);

  // work on one column at once
  for (Index k=0; k<rhs.cols(); ++k)
  {
    for (Index j=0; j<lhs.outerSize(); ++j)
    {
      LhsIterator i(lhsEval,j);
      // handle diagonal coeff
      if (ProcessSecondHalf)
      {
        while (i && i.index()<j) ++i;
        if(i && i.index()==j)
        {
          res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
          ++i;
        }
      }

      // premultiplied rhs for scatters
      typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
      // accumulator for partial scalar product
      typename DenseResType::Scalar res_j(0);
      for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
      {
        LhsScalar lhs_ij = i.value();
        if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
        res_j += lhs_ij * rhs.coeff(i.index(),k);
        res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
      }
      res.coeffRef(j,k) += alpha * res_j;

      // handle diagonal coeff
      if (ProcessFirstHalf && i && (i.index()==j))
        res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
    }
  }
}


template<typename LhsView, typename Rhs, int ProductType>
struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
{
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
  {
    typedef typename LhsView::_MatrixTypeNested Lhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhsView.matrix());
    RhsNested rhsNested(rhs);

    internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
  }
};

template<typename Lhs, typename RhsView, int ProductType>
struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
{
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
  {
    typedef typename RhsView::_MatrixTypeNested Rhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhsView.matrix());

    // transpose everything: dense * selfadjoint is computed as (selfadjoint^T * dense^T)^T
    Transpose<Dest> dstT(dst);
    internal::sparse_selfadjoint_time_dense_product<RhsView::TransposeMode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};

// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore
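//
// For example (editorial sketch), an expression like
//   SparseMatrix<double> C = A.selfadjointView<Lower>() * B;   // A, B sparse
// takes this path: the selfadjoint view is first evaluated into a temporary
// full SparseMatrix (m_lhs / m_rhs below), then a regular sparse * sparse
// product evaluator runs on the temporary.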

template<typename LhsView, typename Rhs, int ProductTag>
struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
  : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
{
  typedef Product<LhsView, Rhs, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
  }

protected:
  typename Rhs::PlainObject m_lhs;
  PlainObject m_result;
};

template<typename Lhs, typename RhsView, int ProductTag>
struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
  : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
{
  typedef Product<Lhs, RhsView, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
  }

protected:
  typename Lhs::PlainObject m_rhs;
  PlainObject m_result;
};

} // end namespace internal

/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
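  // Editorial overview: classic two-pass compressed-storage construction.
  // Pass 1 counts how many entries each outer vector of the full (two-sided)
  // destination will receive, optionally applying the permutation `perm`;
  // the counts are then turned into outer index offsets. Pass 2 scatters the
  // values, writing every off-diagonal entry twice (once per triangle, the
  // mirrored copy conjugated).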
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  MatEval matEval(mat);
  Dest& dest(_dest.derived());
  enum {
    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
  };

  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size,size);
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if(Mode==int(Upper|Lower))
        count[StorageOrderMatch ? jp : ip]++;
      else if(r==c)
        count[ip]++;
      else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
      {
        count[ip]++;
        count[jp]++;
      }
    }
  }
  Index nnz = count.sum();

  // reserve space
  dest.resizeNonZeros(nnz);
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // copy data
  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = internal::convert_index<StorageIndex>(it.index());
      Index r = it.row();
      Index c = it.col();

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      if(Mode==int(Upper|Lower))
      {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      }
      else if(r==c)
      {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      }
      else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
      {
        if(!StorageOrderMatch)
          std::swap(ip,jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = numext::conj(it.value());
      }
    }
  }
}

template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
    SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
  };
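
  // Editorial note: the Upper/Lower flips above let the code reason in a
  // storage-agnostic way; the upper triangle of a row-major matrix is
  // enumerated with the same inner/outer pattern as the lower triangle of a
  // column-major one, so a row-major operand simply swaps the roles of
  // Upper and Lower.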

  MatEval matEval(mat);

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
  for(StorageIndex j = 0; j<size; ++j)
  {
    StorageIndex jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex ip = perm ? perm[i] : i;
      count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

      if(!StorageOrderMatch) std::swap(ip,jp);
      if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}

} // end namespace internal

// TODO implement twists in a more evaluator friendly fashion

namespace internal {

template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};

} // end namespace internal

template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
  public:
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    enum {
      RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
    };
  protected:
    typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
  public:
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;

    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
      : m_matrix(mat), m_perm(perm)
    {}

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    const NestedExpression& matrix() const { return m_matrix; }
    const Perm& perm() const { return m_perm; }

  protected:
    MatrixTypeNested m_matrix;
    const Perm& m_perm;
};

namespace internal {

template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
{
  typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
  typedef typename DstXprType::StorageIndex DstIndex;
  template<int Options>
  static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
    SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
    internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
    dst = tmp;
  }

  template<typename DestType,unsigned int DestMode>
  static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
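
A closing usage note (editorial, not part of the file): rankUpdate() accumulates a sparse rank-K update while touching only the stored triangle. A minimal sketch, assuming a square SparseMatrix<double> A whose lower triangle is in use and an illustrative fill for u:

    #include <Eigen/SparseCore>
    using namespace Eigen;

    int main()
    {
      const int n = 5;
      SparseMatrix<double> A(n, n), u(n, 2);
      A.setIdentity();
      u.insert(0, 0) = 1.0;                 // assumed fill pattern, for illustration only
      u.insert(3, 1) = 2.0;
      u.makeCompressed();

      // Lower triangle of A receives A += 1.0 * (u * u.adjoint()):
      A.selfadjointView<Lower>().rankUpdate(u, 1.0);
    }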