Eigen 3.4.0
 
SparseMatrix.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5//
6// This Source Code Form is subject to the terms of the Mozilla
7// Public License v. 2.0. If a copy of the MPL was not distributed
8// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
10#ifndef EIGEN_SPARSEMATRIX_H
11#define EIGEN_SPARSEMATRIX_H
12
13namespace Eigen {
14
45namespace internal {
46template<typename _Scalar, int _Options, typename _StorageIndex>
47struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
48{
49 typedef _Scalar Scalar;
50 typedef _StorageIndex StorageIndex;
51 typedef Sparse StorageKind;
52 typedef MatrixXpr XprKind;
53 enum {
54 RowsAtCompileTime = Dynamic,
55 ColsAtCompileTime = Dynamic,
56 MaxRowsAtCompileTime = Dynamic,
57 MaxColsAtCompileTime = Dynamic,
58 Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
59 SupportedAccessPatterns = InnerRandomAccessPattern
60 };
61};
62
63template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
64struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
65{
66 typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
67 typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
68 typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
69
70 typedef _Scalar Scalar;
71 typedef Dense StorageKind;
72 typedef _StorageIndex StorageIndex;
73 typedef MatrixXpr XprKind;
74
75 enum {
76 RowsAtCompileTime = Dynamic,
77 ColsAtCompileTime = 1,
78 MaxRowsAtCompileTime = Dynamic,
79 MaxColsAtCompileTime = 1,
80 Flags = LvalueBit
81 };
82};
83
84template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
85struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
86 : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
87{
88 enum {
89 Flags = 0
90 };
91};
92
93} // end namespace internal
94
95template<typename _Scalar, int _Options, typename _StorageIndex>
96class SparseMatrix
97 : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
98{
99 typedef SparseCompressedBase<SparseMatrix> Base;
100 using Base::convert_index;
101 friend class SparseVector<_Scalar,0,_StorageIndex>;
102 template<typename, typename, typename, typename, typename>
103 friend struct internal::Assignment;
104 public:
105 using Base::isCompressed;
106 using Base::nonZeros;
107 EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
108 using Base::operator+=;
109 using Base::operator-=;
110
111 typedef MappedSparseMatrix<Scalar,Flags> Map;
112 typedef Diagonal<SparseMatrix> DiagonalReturnType;
113 typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
114 typedef typename Base::InnerIterator InnerIterator;
115 typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
116
117
118 using Base::IsRowMajor;
119 typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
120 enum {
121 Options = _Options
122 };
123
124 typedef typename Base::IndexVector IndexVector;
125 typedef typename Base::ScalarVector ScalarVector;
126 protected:
127 typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?ColMajor:RowMajor),StorageIndex> TransposedSparseMatrix;
128
129 Index m_outerSize;
130 Index m_innerSize;
131 StorageIndex* m_outerIndex;
132 StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
133 Storage m_data;
134
135 public:
136
138 inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
140 inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
141
143 inline Index innerSize() const { return m_innerSize; }
145 inline Index outerSize() const { return m_outerSize; }
146
150 inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
154 inline Scalar* valuePtr() { return m_data.valuePtr(); }
155
159 inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
163 inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
164
168 inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
172 inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
173
177 inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
181 inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
182
184 inline Storage& data() { return m_data; }
186 inline const Storage& data() const { return m_data; }
187
190 inline Scalar coeff(Index row, Index col) const
191 {
192 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
193
194 const Index outer = IsRowMajor ? row : col;
195 const Index inner = IsRowMajor ? col : row;
196 Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
197 return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
198 }
199
208 inline Scalar& coeffRef(Index row, Index col)
209 {
210 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
211
212 const Index outer = IsRowMajor ? row : col;
213 const Index inner = IsRowMajor ? col : row;
214
215 Index start = m_outerIndex[outer];
216 Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
217 eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
218 if(end<=start)
219 return insert(row,col);
220 const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
221 if((p<end) && (m_data.index(p)==inner))
222 return m_data.value(p);
223 else
224 return insert(row,col);
225 }
226
242 Scalar& insert(Index row, Index col);
243
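 // A usage sketch of the random-insertion API declared above, following the fill
 // pattern this class is designed for; the per-column estimate of 6 non-zeros is an
 // assumption of the example, not a requirement:
 //
 // \code
 // SparseMatrix<double> mat(rows, cols);       // column-major by default
 // mat.reserve(VectorXi::Constant(cols, 6));   // reserve room for ~6 entries per column
 // for each nonzero coefficient (i, j, v_ij):
 //   mat.insert(i, j) = v_ij;                  // (i,j) must not already exist, otherwise use coeffRef(i,j)
 // mat.makeCompressed();                       // optional: go back to compressed (CSC) storage
 // \endcode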
244 public:
245
253 inline void setZero()
254 {
255 m_data.clear();
256 memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
257 if(m_innerNonZeros)
258 memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
259 }
260
264 inline void reserve(Index reserveSize)
265 {
266 eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
267 m_data.reserve(reserveSize);
268 }
269
270 #ifdef EIGEN_PARSED_BY_DOXYGEN
283 template<class SizesType>
284 inline void reserve(const SizesType& reserveSizes);
285 #else
286 template<class SizesType>
287 inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
288 #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
289 typename
290 #endif
291 SizesType::value_type())
292 {
293 EIGEN_UNUSED_VARIABLE(enableif);
294 reserveInnerVectors(reserveSizes);
295 }
296 #endif // EIGEN_PARSED_BY_DOXYGEN
297 protected:
298 template<class SizesType>
299 inline void reserveInnerVectors(const SizesType& reserveSizes)
300 {
301 if(isCompressed())
302 {
303 Index totalReserveSize = 0;
304 // turn the matrix into non-compressed mode
305 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
306 if (!m_innerNonZeros) internal::throw_std_bad_alloc();
307
308 // temporarily use m_innerSizes to hold the new starting points.
309 StorageIndex* newOuterIndex = m_innerNonZeros;
310
311 StorageIndex count = 0;
312 for(Index j=0; j<m_outerSize; ++j)
313 {
314 newOuterIndex[j] = count;
315 count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
316 totalReserveSize += reserveSizes[j];
317 }
318 m_data.reserve(totalReserveSize);
319 StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
320 for(Index j=m_outerSize-1; j>=0; --j)
321 {
322 StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
323 for(Index i=innerNNZ-1; i>=0; --i)
324 {
325 m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
326 m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
327 }
328 previousOuterIndex = m_outerIndex[j];
329 m_outerIndex[j] = newOuterIndex[j];
330 m_innerNonZeros[j] = innerNNZ;
331 }
332 if(m_outerSize>0)
333 m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
334
335 m_data.resize(m_outerIndex[m_outerSize]);
336 }
337 else
338 {
339 StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
340 if (!newOuterIndex) internal::throw_std_bad_alloc();
341
342 StorageIndex count = 0;
343 for(Index j=0; j<m_outerSize; ++j)
344 {
345 newOuterIndex[j] = count;
346 StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
347 StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
348 count += toReserve + m_innerNonZeros[j];
349 }
350 newOuterIndex[m_outerSize] = count;
351
352 m_data.resize(count);
353 for(Index j=m_outerSize-1; j>=0; --j)
354 {
355 Index offset = newOuterIndex[j] - m_outerIndex[j];
356 if(offset>0)
357 {
358 StorageIndex innerNNZ = m_innerNonZeros[j];
359 for(Index i=innerNNZ-1; i>=0; --i)
360 {
361 m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
362 m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
363 }
364 }
365 }
366
367 std::swap(m_outerIndex, newOuterIndex);
368 std::free(newOuterIndex);
369 }
370
371 }
372 public:
373
374 //--- low level purely coherent filling ---
375
386 inline Scalar& insertBack(Index row, Index col)
387 {
388 return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
389 }
390
393 inline Scalar& insertBackByOuterInner(Index outer, Index inner)
394 {
395 eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
396 eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
397 Index p = m_outerIndex[outer+1];
398 ++m_outerIndex[outer+1];
399 m_data.append(Scalar(0), inner);
400 return m_data.value(p);
401 }
402
405 inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
406 {
407 Index p = m_outerIndex[outer+1];
408 ++m_outerIndex[outer+1];
409 m_data.append(Scalar(0), inner);
410 return m_data.value(p);
411 }
412
415 inline void startVec(Index outer)
416 {
417 eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
418 eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
419 m_outerIndex[outer+1] = m_outerIndex[outer];
420 }
421
425 inline void finalize()
426 {
427 if(isCompressed())
428 {
429 StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
430 Index i = m_outerSize;
431 // find the last filled column
432 while (i>=0 && m_outerIndex[i]==0)
433 --i;
434 ++i;
435 while (i<=m_outerSize)
436 {
437 m_outerIndex[i] = size;
438 ++i;
439 }
440 }
441 }
442
443 //---
444
445 template<typename InputIterators>
446 void setFromTriplets(const InputIterators& begin, const InputIterators& end);
447
448 template<typename InputIterators,typename DupFunctor>
449 void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
450
451 void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
452
453 template<typename DupFunctor>
454 void collapseDuplicates(DupFunctor dup_func = DupFunctor());
455
456 //---
457
460 Scalar& insertByOuterInner(Index j, Index i)
461 {
462 return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
463 }
464
467 void makeCompressed()
468 {
469 if(isCompressed())
470 return;
471
472 eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
473
474 Index oldStart = m_outerIndex[1];
475 m_outerIndex[1] = m_innerNonZeros[0];
476 for(Index j=1; j<m_outerSize; ++j)
477 {
478 Index nextOldStart = m_outerIndex[j+1];
479 Index offset = oldStart - m_outerIndex[j];
480 if(offset>0)
481 {
482 for(Index k=0; k<m_innerNonZeros[j]; ++k)
483 {
484 m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
485 m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
486 }
487 }
488 m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
489 oldStart = nextOldStart;
490 }
491 std::free(m_innerNonZeros);
492 m_innerNonZeros = 0;
493 m_data.resize(m_outerIndex[m_outerSize]);
494 m_data.squeeze();
495 }
496
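 // An illustrative sketch of how the two storage modes relate, assuming a square
 // matrix A of some size n:
 //
 // \code
 // SparseMatrix<double> A(n, n);
 // A.insert(0, 0) = 1.0;   // insertion switches A to the uncompressed mode: isCompressed() == false
 // A.makeCompressed();     // frees m_innerNonZeros and packs the data:      isCompressed() == true
 // A.uncompress();         // re-creates m_innerNonZeros without moving any data
 // \endcode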
498 void uncompress()
499 {
500 if(m_innerNonZeros != 0)
501 return;
502 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
503 for (Index i = 0; i < m_outerSize; i++)
504 {
505 m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
506 }
507 }
508
510 void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
511 {
512 prune(default_prunning_func(reference,epsilon));
513 }
514
522 template<typename KeepFunc>
523 void prune(const KeepFunc& keep = KeepFunc())
524 {
525 // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
526 makeCompressed();
527
528 StorageIndex k = 0;
529 for(Index j=0; j<m_outerSize; ++j)
530 {
531 Index previousStart = m_outerIndex[j];
532 m_outerIndex[j] = k;
533 Index end = m_outerIndex[j+1];
534 for(Index i=previousStart; i<end; ++i)
535 {
536 if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
537 {
538 m_data.value(k) = m_data.value(i);
539 m_data.index(k) = m_data.index(i);
540 ++k;
541 }
542 }
543 }
544 m_outerIndex[m_outerSize] = k;
545 m_data.resize(k,0);
546 }
547
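 // A sketch of both prune() overloads for an existing SparseMatrix<double> A; the
 // functor form assumes a C++11 compiler and receives (row, col, value) for each stored entry:
 //
 // \code
 // A.prune(1.0, 1e-12);                          // drop entries with magnitude below 1.0 * 1e-12
 // A.prune([](Index row, Index, const double&)
 //         { return row % 2 == 0; });            // keep only the entries lying in even rows
 // \endcode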
556 void conservativeResize(Index rows, Index cols)
557 {
558 // No change
559 if (this->rows() == rows && this->cols() == cols) return;
560
561 // If one dimension is null, then there is nothing to be preserved
562 if(rows==0 || cols==0) return resize(rows,cols);
563
564 Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
565 Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
566 StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
567
568 // Deals with inner non zeros
569 if (m_innerNonZeros)
570 {
571 // Resize m_innerNonZeros
572 StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
573 if (!newInnerNonZeros) internal::throw_std_bad_alloc();
574 m_innerNonZeros = newInnerNonZeros;
575
576 for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
577 m_innerNonZeros[i] = 0;
578 }
579 else if (innerChange < 0)
580 {
581 // Inner size decreased: allocate a new m_innerNonZeros
582 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
583 if (!m_innerNonZeros) internal::throw_std_bad_alloc();
584 for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
585 m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
586 for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
587 m_innerNonZeros[i] = 0;
588 }
589
590 // Change the m_innerNonZeros in case of a decrease of inner size
591 if (m_innerNonZeros && innerChange < 0)
592 {
593 for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
594 {
595 StorageIndex &n = m_innerNonZeros[i];
596 StorageIndex start = m_outerIndex[i];
597 while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
598 }
599 }
600
601 m_innerSize = newInnerSize;
602
603 // Re-allocate outer index structure if necessary
604 if (outerChange == 0)
605 return;
606
607 StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
608 if (!newOuterIndex) internal::throw_std_bad_alloc();
609 m_outerIndex = newOuterIndex;
610 if (outerChange > 0)
611 {
612 StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
613 for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
614 m_outerIndex[i] = lastIdx;
615 }
616 m_outerSize += outerChange;
617 }
618
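 // Contrasting conservativeResize() above with resize() defined just below, as an
 // illustrative sketch:
 //
 // \code
 // SparseMatrix<double> A(4, 4);
 // A.coeffRef(1, 1) = 2.0;       // one stored entry
 // A.conservativeResize(5, 5);   // grow to 5x5, the (1,1) entry is kept:  nonZeros() == 1
 // A.resize(5, 5);               // same sizes, but the matrix is emptied: nonZeros() == 0
 // \endcode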
626 void resize(Index rows, Index cols)
627 {
628 const Index outerSize = IsRowMajor ? rows : cols;
629 m_innerSize = IsRowMajor ? cols : rows;
630 m_data.clear();
631 if (m_outerSize != outerSize || m_outerSize==0)
632 {
633 std::free(m_outerIndex);
634 m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
635 if (!m_outerIndex) internal::throw_std_bad_alloc();
636
637 m_outerSize = outerSize;
638 }
639 if(m_innerNonZeros)
640 {
641 std::free(m_innerNonZeros);
642 m_innerNonZeros = 0;
643 }
644 memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
645 }
646
649 void resizeNonZeros(Index size)
650 {
651 m_data.resize(size);
652 }
653
655 const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
656
661 DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
662
664 inline SparseMatrix()
665 : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
666 {
667 check_template_parameters();
668 resize(0, 0);
669 }
670
672 inline SparseMatrix(Index rows, Index cols)
673 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
674 {
675 check_template_parameters();
676 resize(rows, cols);
677 }
678
680 template<typename OtherDerived>
681 inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
682 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
683 {
684 EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
685 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
686 check_template_parameters();
687 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
688 if (needToTranspose)
689 *this = other.derived();
690 else
691 {
692 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
693 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
694 #endif
695 internal::call_assignment_no_alias(*this, other.derived());
696 }
697 }
698
700 template<typename OtherDerived, unsigned int UpLo>
701 inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
702 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
703 {
704 check_template_parameters();
705 Base::operator=(other);
706 }
707
709 inline SparseMatrix(const SparseMatrix& other)
710 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
711 {
712 check_template_parameters();
713 *this = other.derived();
714 }
715
717 template<typename OtherDerived>
718 SparseMatrix(const ReturnByValue<OtherDerived>& other)
719 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
720 {
721 check_template_parameters();
722 initAssignment(other);
723 other.evalTo(*this);
724 }
725
727 template<typename OtherDerived>
728 explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
729 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
730 {
731 check_template_parameters();
732 *this = other.derived();
733 }
734
737 inline void swap(SparseMatrix& other)
738 {
739 //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
740 std::swap(m_outerIndex, other.m_outerIndex);
741 std::swap(m_innerSize, other.m_innerSize);
742 std::swap(m_outerSize, other.m_outerSize);
743 std::swap(m_innerNonZeros, other.m_innerNonZeros);
744 m_data.swap(other.m_data);
745 }
746
749 inline void setIdentity()
750 {
751 eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
752 this->m_data.resize(rows());
753 Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
754 Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
755 Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
756 std::free(m_innerNonZeros);
757 m_innerNonZeros = 0;
758 }
759 inline SparseMatrix& operator=(const SparseMatrix& other)
760 {
761 if (other.isRValue())
762 {
763 swap(other.const_cast_derived());
764 }
765 else if(this!=&other)
766 {
767 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
768 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
769 #endif
770 initAssignment(other);
771 if(other.isCompressed())
772 {
773 internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
774 m_data = other.m_data;
775 }
776 else
777 {
778 Base::operator=(other);
779 }
780 }
781 return *this;
782 }
783
784#ifndef EIGEN_PARSED_BY_DOXYGEN
785 template<typename OtherDerived>
786 inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
787 { return Base::operator=(other.derived()); }
788
789 template<typename Lhs, typename Rhs>
790 inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
791#endif // EIGEN_PARSED_BY_DOXYGEN
792
793 template<typename OtherDerived>
794 EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
795
796 friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
797 {
798 EIGEN_DBG_SPARSE(
799 s << "Nonzero entries:\n";
800 if(m.isCompressed())
801 {
802 for (Index i=0; i<m.nonZeros(); ++i)
803 s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
804 }
805 else
806 {
807 for (Index i=0; i<m.outerSize(); ++i)
808 {
809 Index p = m.m_outerIndex[i];
810 Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
811 Index k=p;
812 for (; k<pe; ++k) {
813 s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
814 }
815 for (; k<m.m_outerIndex[i+1]; ++k) {
816 s << "(_,_) ";
817 }
818 }
819 }
820 s << std::endl;
821 s << std::endl;
822 s << "Outer pointers:\n";
823 for (Index i=0; i<m.outerSize(); ++i) {
824 s << m.m_outerIndex[i] << " ";
825 }
826 s << " $" << std::endl;
827 if(!m.isCompressed())
828 {
829 s << "Inner non zeros:\n";
830 for (Index i=0; i<m.outerSize(); ++i) {
831 s << m.m_innerNonZeros[i] << " ";
832 }
833 s << " $" << std::endl;
834 }
835 s << std::endl;
836 );
837 s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
838 return s;
839 }
840
842 inline ~SparseMatrix()
843 {
844 std::free(m_outerIndex);
845 std::free(m_innerNonZeros);
846 }
847
849 Scalar sum() const;
850
851# ifdef EIGEN_SPARSEMATRIX_PLUGIN
852# include EIGEN_SPARSEMATRIX_PLUGIN
853# endif
854
855protected:
856
857 template<typename Other>
858 void initAssignment(const Other& other)
859 {
860 resize(other.rows(), other.cols());
861 if(m_innerNonZeros)
862 {
863 std::free(m_innerNonZeros);
864 m_innerNonZeros = 0;
865 }
866 }
867
870 EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
871
874 class SingletonVector
875 {
876 StorageIndex m_index;
877 StorageIndex m_value;
878 public:
879 typedef StorageIndex value_type;
880 SingletonVector(Index i, Index v)
881 : m_index(convert_index(i)), m_value(convert_index(v))
882 {}
883
884 StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
885 };
886
889 EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
890
891public:
894 EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
895 {
896 const Index outer = IsRowMajor ? row : col;
897 const Index inner = IsRowMajor ? col : row;
898
899 eigen_assert(!isCompressed());
900 eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
901
902 Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
903 m_data.index(p) = convert_index(inner);
904 return (m_data.value(p) = Scalar(0));
905 }
906protected:
907 struct IndexPosPair {
908 IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
909 Index i;
910 Index p;
911 };
912
926 template<typename DiagXpr, typename Func>
927 void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
928 {
929 Index n = diagXpr.size();
930
931 const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
932 if(overwrite)
933 {
934 if((this->rows()!=n) || (this->cols()!=n))
935 this->resize(n, n);
936 }
937
938 if(m_data.size()==0 || overwrite)
939 {
940 typedef Array<StorageIndex,Dynamic,1> ArrayXI;
941 this->makeCompressed();
942 this->resizeNonZeros(n);
943 Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
944 Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
945 Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
946 values.setZero();
947 internal::call_assignment_no_alias(values, diagXpr, assignFunc);
948 }
949 else
950 {
951 bool isComp = isCompressed();
952 internal::evaluator<DiagXpr> diaEval(diagXpr);
953 std::vector<IndexPosPair> newEntries;
954
955 // 1 - try in-place update and record insertion failures
956 for(Index i = 0; i<n; ++i)
957 {
958 internal::LowerBoundIndex lb = this->lower_bound(i,i);
959 Index p = lb.value;
960 if(lb.found)
961 {
962 // the coeff already exists
963 assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
964 }
965 else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
966 {
967 // non compressed mode with local room for inserting one element
968 m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
969 m_innerNonZeros[i]++;
970 m_data.value(p) = Scalar(0);
971 m_data.index(p) = StorageIndex(i);
972 assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
973 }
974 else
975 {
976 // defer insertion
977 newEntries.push_back(IndexPosPair(i,p));
978 }
979 }
980 // 2 - insert deferred entries
981 Index n_entries = Index(newEntries.size());
982 if(n_entries>0)
983 {
984 Storage newData(m_data.size()+n_entries);
985 Index prev_p = 0;
986 Index prev_i = 0;
987 for(Index k=0; k<n_entries;++k)
988 {
989 Index i = newEntries[k].i;
990 Index p = newEntries[k].p;
991 internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
992 internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
993 for(Index j=prev_i;j<i;++j)
994 m_outerIndex[j+1] += k;
995 if(!isComp)
996 m_innerNonZeros[i]++;
997 prev_p = p;
998 prev_i = i;
999 newData.value(p+k) = Scalar(0);
1000 newData.index(p+k) = StorageIndex(i);
1001 assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
1002 }
1003 {
1004 internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
1005 internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
1006 for(Index j=prev_i+1;j<=m_outerSize;++j)
1007 m_outerIndex[j] += n_entries;
1008 }
1009 m_data.swap(newData);
1010 }
1011 }
1012 }
1013
1014private:
1015 static void check_template_parameters()
1016 {
1017 EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
1018 EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
1019 }
1020
1021 struct default_prunning_func {
1022 default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1023 inline bool operator() (const Index&, const Index&, const Scalar& value) const
1024 {
1025 return !internal::isMuchSmallerThan(value, reference, epsilon);
1026 }
1027 Scalar reference;
1028 RealScalar epsilon;
1029 };
1030};
1031
1032namespace internal {
1033
1034template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1035void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
1036{
1037 enum { IsRowMajor = SparseMatrixType::IsRowMajor };
1038 typedef typename SparseMatrixType::Scalar Scalar;
1039 typedef typename SparseMatrixType::StorageIndex StorageIndex;
1040 SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
1041
1042 if(begin!=end)
1043 {
1044 // pass 1: count the nnz per inner-vector
1045 typename SparseMatrixType::IndexVector wi(trMat.outerSize());
1046 wi.setZero();
1047 for(InputIterator it(begin); it!=end; ++it)
1048 {
1049 eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
1050 wi(IsRowMajor ? it->col() : it->row())++;
1051 }
1052
1053 // pass 2: insert all the elements into trMat
1054 trMat.reserve(wi);
1055 for(InputIterator it(begin); it!=end; ++it)
1056 trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
1057
1058 // pass 3:
1059 trMat.collapseDuplicates(dup_func);
1060 }
1061
1062 // pass 4: transposed copy -> implicit sorting
1063 mat = trMat;
1064}
1065
1066}
1067
1068
1106template<typename Scalar, int _Options, typename _StorageIndex>
1107template<typename InputIterators>
1108void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
1109{
1110 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
1111}
1112
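// The canonical way to fill a sparse matrix through the method defined above; the
// reserve() estimate and the loop body are assumptions of the sketch:
//
// \code
// typedef Eigen::Triplet<double> T;
// std::vector<T> tripletList;
// tripletList.reserve(estimation_of_entries);
// for(...)
//   tripletList.push_back(T(i, j, v_ij));
// SparseMatrix<double> mat(rows, cols);
// mat.setFromTriplets(tripletList.begin(), tripletList.end());   // duplicates are summed
// \endcode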
1122template<typename Scalar, int _Options, typename _StorageIndex>
1123template<typename InputIterators,typename DupFunctor>
1124void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1125{
1126 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
1127}
1128
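// A sketch of the functor-based overload: instead of summing duplicates, keep the
// last value seen for each (i,j); the lambda assumes a C++11 compiler:
//
// \code
// mat.setFromTriplets(tripletList.begin(), tripletList.end(),
//                     [](const double&, const double& b) { return b; });
// \endcode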
1130template<typename Scalar, int _Options, typename _StorageIndex>
1131template<typename DupFunctor>
1132void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
1133{
1134 eigen_assert(!isCompressed());
1135 // TODO, in practice we should be able to use m_innerNonZeros for that task
1136 IndexVector wi(innerSize());
1137 wi.fill(-1);
1138 StorageIndex count = 0;
1139 // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1140 for(Index j=0; j<outerSize(); ++j)
1141 {
1142 StorageIndex start = count;
1143 Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1144 for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1145 {
1146 Index i = m_data.index(k);
1147 if(wi(i)>=start)
1148 {
1149 // we already meet this entry => accumulate it
1150 m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1151 }
1152 else
1153 {
1154 m_data.value(count) = m_data.value(k);
1155 m_data.index(count) = m_data.index(k);
1156 wi(i) = count;
1157 ++count;
1158 }
1159 }
1160 m_outerIndex[j] = start;
1161 }
1162 m_outerIndex[m_outerSize] = count;
1163
1164 // turn the matrix into compressed form
1165 std::free(m_innerNonZeros);
1166 m_innerNonZeros = 0;
1167 m_data.resize(m_outerIndex[m_outerSize]);
1168}
1169
1170template<typename Scalar, int _Options, typename _StorageIndex>
1171template<typename OtherDerived>
1172EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
1173{
1174 EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1175 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1176
1177 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1178 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1179 #endif
1180
1181 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1182 if (needToTranspose)
1183 {
1184 #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1185 EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1186 #endif
1187 // two passes algorithm:
1188 // 1 - compute the number of coeffs per dest inner vector
1189 // 2 - do the actual copy/eval
1190 // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1191 typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1192 typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1193 typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1194 OtherCopy otherCopy(other.derived());
1195 OtherCopyEval otherCopyEval(otherCopy);
1196
1197 SparseMatrix dest(other.rows(),other.cols());
1198 Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1199
1200 // pass 1
1201 // FIXME the above copy could be merged with that pass
1202 for (Index j=0; j<otherCopy.outerSize(); ++j)
1203 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1204 ++dest.m_outerIndex[it.index()];
1205
1206 // prefix sum
1207 StorageIndex count = 0;
1208 IndexVector positions(dest.outerSize());
1209 for (Index j=0; j<dest.outerSize(); ++j)
1210 {
1211 StorageIndex tmp = dest.m_outerIndex[j];
1212 dest.m_outerIndex[j] = count;
1213 positions[j] = count;
1214 count += tmp;
1215 }
1216 dest.m_outerIndex[dest.outerSize()] = count;
1217 // alloc
1218 dest.m_data.resize(count);
1219 // pass 2
1220 for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1221 {
1222 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1223 {
1224 Index pos = positions[it.index()]++;
1225 dest.m_data.index(pos) = j;
1226 dest.m_data.value(pos) = it.value();
1227 }
1228 }
1229 this->swap(dest);
1230 return *this;
1231 }
1232 else
1233 {
1234 if(other.isRValue())
1235 {
1236 initAssignment(other.derived());
1237 }
1238 // there is no special optimization
1239 return Base::operator=(other.derived());
1240 }
1241}
1242
1243template<typename _Scalar, int _Options, typename _StorageIndex>
1244typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
1245{
1246 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1247
1248 const Index outer = IsRowMajor ? row : col;
1249 const Index inner = IsRowMajor ? col : row;
1250
1251 if(isCompressed())
1252 {
1253 if(nonZeros()==0)
1254 {
1255 // reserve space if not already done
1256 if(m_data.allocatedSize()==0)
1257 m_data.reserve(2*m_innerSize);
1258
1259 // turn the matrix into non-compressed mode
1260 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1261 if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1262
1263 memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1264
1265 // pack all inner-vectors to the end of the pre-allocated space
1266 // and allocate the entire free-space to the first inner-vector
1267 StorageIndex end = convert_index(m_data.allocatedSize());
1268 for(Index j=1; j<=m_outerSize; ++j)
1269 m_outerIndex[j] = end;
1270 }
1271 else
1272 {
1273 // turn the matrix into non-compressed mode
1274 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1275 if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1276 for(Index j=0; j<m_outerSize; ++j)
1277 m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1278 }
1279 }
1280
1281 // check whether we can do a fast "push back" insertion
1282 Index data_end = m_data.allocatedSize();
1283
1284 // First case: we are filling a new inner vector which is packed at the end.
1285 // We assume that all remaining inner-vectors are also empty and packed to the end.
1286 if(m_outerIndex[outer]==data_end)
1287 {
1288 eigen_internal_assert(m_innerNonZeros[outer]==0);
1289
1290 // pack previous empty inner-vectors to end of the used-space
1291 // and allocate the entire free-space to the current inner-vector.
1292 StorageIndex p = convert_index(m_data.size());
1293 Index j = outer;
1294 while(j>=0 && m_innerNonZeros[j]==0)
1295 m_outerIndex[j--] = p;
1296
1297 // push back the new element
1298 ++m_innerNonZeros[outer];
1299 m_data.append(Scalar(0), inner);
1300
1301 // check for reallocation
1302 if(data_end != m_data.allocatedSize())
1303 {
1304 // m_data has been reallocated
1305 // -> move remaining inner-vectors back to the end of the free-space
1306 // so that the entire free-space is allocated to the current inner-vector.
1307 eigen_internal_assert(data_end < m_data.allocatedSize());
1308 StorageIndex new_end = convert_index(m_data.allocatedSize());
1309 for(Index k=outer+1; k<=m_outerSize; ++k)
1310 if(m_outerIndex[k]==data_end)
1311 m_outerIndex[k] = new_end;
1312 }
1313 return m_data.value(p);
1314 }
1315
1316 // Second case: the next inner-vector is packed to the end
1317 // and the current inner-vector end match the used-space.
1318 if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1319 {
1320 eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1321
1322 // add space for the new element
1323 ++m_innerNonZeros[outer];
1324 m_data.resize(m_data.size()+1);
1325
1326 // check for reallocation
1327 if(data_end != m_data.allocatedSize())
1328 {
1329 // m_data has been reallocated
1330 // -> move remaining inner-vectors back to the end of the free-space
1331 // so that the entire free-space is allocated to the current inner-vector.
1332 eigen_internal_assert(data_end < m_data.allocatedSize());
1333 StorageIndex new_end = convert_index(m_data.allocatedSize());
1334 for(Index k=outer+1; k<=m_outerSize; ++k)
1335 if(m_outerIndex[k]==data_end)
1336 m_outerIndex[k] = new_end;
1337 }
1338
1339 // and insert it at the right position (sorted insertion)
1340 Index startId = m_outerIndex[outer];
1341 Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1342 while ( (p > startId) && (m_data.index(p-1) > inner) )
1343 {
1344 m_data.index(p) = m_data.index(p-1);
1345 m_data.value(p) = m_data.value(p-1);
1346 --p;
1347 }
1348
1349 m_data.index(p) = convert_index(inner);
1350 return (m_data.value(p) = Scalar(0));
1351 }
1352
1353 if(m_data.size() != m_data.allocatedSize())
1354 {
1355 // make sure the matrix is compatible to random un-compressed insertion:
1356 m_data.resize(m_data.allocatedSize());
1357 this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1358 }
1359
1360 return insertUncompressed(row,col);
1361}
1362
1363template<typename _Scalar, int _Options, typename _StorageIndex>
1364EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
1365{
1366 eigen_assert(!isCompressed());
1367
1368 const Index outer = IsRowMajor ? row : col;
1369 const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1370
1371 Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1372 StorageIndex innerNNZ = m_innerNonZeros[outer];
1373 if(innerNNZ>=room)
1374 {
1375 // this inner vector is full, we need to reallocate the whole buffer :(
1376 reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1377 }
1378
1379 Index startId = m_outerIndex[outer];
1380 Index p = startId + m_innerNonZeros[outer];
1381 while ( (p > startId) && (m_data.index(p-1) > inner) )
1382 {
1383 m_data.index(p) = m_data.index(p-1);
1384 m_data.value(p) = m_data.value(p-1);
1385 --p;
1386 }
1387 eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1388
1389 m_innerNonZeros[outer]++;
1390
1391 m_data.index(p) = inner;
1392 return (m_data.value(p) = Scalar(0));
1393}
1394
1395template<typename _Scalar, int _Options, typename _StorageIndex>
1396EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
1397{
1398 eigen_assert(isCompressed());
1399
1400 const Index outer = IsRowMajor ? row : col;
1401 const Index inner = IsRowMajor ? col : row;
1402
1403 Index previousOuter = outer;
1404 if (m_outerIndex[outer+1]==0)
1405 {
1406 // we start a new inner vector
1407 while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1408 {
1409 m_outerIndex[previousOuter] = convert_index(m_data.size());
1410 --previousOuter;
1411 }
1412 m_outerIndex[outer+1] = m_outerIndex[outer];
1413 }
1414
1415 // here we have to handle the tricky case where the outerIndex array
1416 // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
1417 // the 2nd inner vector...
1418 bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1419 && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
1420
1421 std::size_t startId = m_outerIndex[outer];
1422 // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
1423 std::size_t p = m_outerIndex[outer+1];
1424 ++m_outerIndex[outer+1];
1425
1426 double reallocRatio = 1;
1427 if (m_data.allocatedSize()<=m_data.size())
1428 {
1429 // if there is no preallocated memory, let's reserve a minimum of 32 elements
1430 if (m_data.size()==0)
1431 {
1432 m_data.reserve(32);
1433 }
1434 else
1435 {
1436 // we need to reallocate the data, to reduce multiple reallocations
1437 // we use a smart resize algorithm based on the current filling ratio
1438 // in addition, we use double to avoid integers overflows
1439 double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1440 reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1441 // furthermore we bound the realloc ratio to:
1442 // 1) reduce multiple minor realloc when the matrix is almost filled
1443 // 2) avoid to allocate too much memory when the matrix is almost empty
1444 reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1445 }
1446 }
1447 m_data.resize(m_data.size()+1,reallocRatio);
1448
1449 if (!isLastVec)
1450 {
1451 if (previousOuter==-1)
1452 {
1453 // oops wrong guess.
1454 // let's correct the outer offsets
1455 for (Index k=0; k<=(outer+1); ++k)
1456 m_outerIndex[k] = 0;
1457 Index k=outer+1;
1458 while(m_outerIndex[k]==0)
1459 m_outerIndex[k++] = 1;
1460 while (k<=m_outerSize && m_outerIndex[k]!=0)
1461 m_outerIndex[k++]++;
1462 p = 0;
1463 --k;
1464 k = m_outerIndex[k]-1;
1465 while (k>0)
1466 {
1467 m_data.index(k) = m_data.index(k-1);
1468 m_data.value(k) = m_data.value(k-1);
1469 k--;
1470 }
1471 }
1472 else
1473 {
1474 // we are not inserting into the last inner vec
1475 // update outer indices:
1476 Index j = outer+2;
1477 while (j<=m_outerSize && m_outerIndex[j]!=0)
1478 m_outerIndex[j++]++;
1479 --j;
1480 // shift data of last vecs:
1481 Index k = m_outerIndex[j]-1;
1482 while (k>=Index(p))
1483 {
1484 m_data.index(k) = m_data.index(k-1);
1485 m_data.value(k) = m_data.value(k-1);
1486 k--;
1487 }
1488 }
1489 }
1490
1491 while ( (p > startId) && (m_data.index(p-1) > inner) )
1492 {
1493 m_data.index(p) = m_data.index(p-1);
1494 m_data.value(p) = m_data.value(p-1);
1495 --p;
1496 }
1497
1498 m_data.index(p) = inner;
1499 return (m_data.value(p) = Scalar(0));
1500}
1501
1502namespace internal {
1503
1504template<typename _Scalar, int _Options, typename _StorageIndex>
1505struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
1506 : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
1507{
1508 typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
1509 typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
1510 evaluator() : Base() {}
1511 explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1512};
1513
1514}
1515
1516} // end namespace Eigen
1517
1518#endif // EIGEN_SPARSEMATRIX_H
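// A self-contained sketch of how this header is typically consumed through the public
// Eigen/Sparse module, including traversal with InnerIterator; the 3x3 size and the
// inserted values are arbitrary example data:
//
// \code
// #include <Eigen/Sparse>
// #include <iostream>
//
// int main()
// {
//   Eigen::SparseMatrix<double> A(3, 3);   // column-major by default
//   A.insert(0, 0) = 1.0;
//   A.insert(2, 1) = 2.0;
//   A.makeCompressed();
//
//   // iterate over the stored non-zeros of each column
//   for (int k = 0; k < A.outerSize(); ++k)
//     for (Eigen::SparseMatrix<double>::InnerIterator it(A, k); it; ++it)
//       std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
//   return 0;
// }
// \endcode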