#ifndef EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H

namespace Eigen {

/** \class TensorGeneratorOp
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor generator class.
  */
namespace internal {
template<typename Generator, typename XprType>
struct traits<TensorGeneratorOp<Generator, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template<typename Generator, typename XprType>
struct eval<TensorGeneratorOp<Generator, XprType>, Eigen::Dense>
{
  typedef const TensorGeneratorOp<Generator, XprType>& type;
};
template<typename Generator, typename XprType>
struct nested<TensorGeneratorOp<Generator, XprType>, 1, typename eval<TensorGeneratorOp<Generator, XprType> >::type>
{
  typedef TensorGeneratorOp<Generator, XprType> type;
};

}  // end namespace internal
template<typename Generator, typename XprType>
class TensorGeneratorOp : public TensorBase<TensorGeneratorOp<Generator, XprType>, ReadOnlyAccessors>
{
  public:
    typedef typename Eigen::internal::traits<TensorGeneratorOp>::Scalar Scalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorGeneratorOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorGeneratorOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorGeneratorOp>::Index Index;
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    TensorGeneratorOp(const XprType& expr, const Generator& generator)
        : m_xpr(expr), m_generator(generator) {}

    EIGEN_DEVICE_FUNC
    const Generator& generator() const { return m_generator; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

  protected:
    typename XprType::Nested m_xpr;
    const Generator m_generator;
};
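// Usage sketch (illustrative, not part of the original header): a generator is
// any functor callable with an array of per-dimension coordinates, typically
// passed through TensorBase::generate(), which wraps it in a TensorGeneratorOp.
// The `CoordSumGenerator` functor and `input` tensor below are hypothetical
// names used only for illustration.
//
//   struct CoordSumGenerator {
//     float operator()(const Eigen::array<Eigen::Index, 2>& coords) const {
//       // Each element's value is the sum of its coordinates.
//       return static_cast<float>(coords[0] + coords[1]);
//     }
//   };
//
//   Eigen::Tensor<float, 2> input(3, 4);
//   Eigen::Tensor<float, 2> result = input.generate(CoordSumGenerator());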
// Eval as rvalue
template<typename Generator, typename ArgType, typename Device>
struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
{
  typedef TensorGeneratorOp<Generator, ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
  static const int NumDims = internal::array_size<Dimensions>::value;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    IsAligned         = false,
    PacketAccess      = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess       = true,
    PreferBlockAccess = true,
    Layout            = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess       = false,
    RawAccess         = false
  };

  typedef internal::TensorIntDivisor<Index> IndexDivisor;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<CoeffReturnType, NumDims,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_device(device), m_generator(op.generator())
  {
    TensorEvaluator<ArgType, Device> argImpl(op.expression(), device);
    m_dimensions = argImpl.dimensions();

    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_strides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
        if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
      }
    } else {
      m_strides[NumDims - 1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
        if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
      }
    }
  }
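  // Note on the stride arrays initialized above (illustrative example, not from
  // the original source): for column-major dimensions {d0, d1, d2} the strides
  // are {1, d0, d0 * d1}; for row-major they are {d1 * d2, d2, 1}.
  // m_fast_strides caches TensorIntDivisor objects so that extract_coordinates()
  // can avoid full-cost integer division on each coefficient access.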
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    return true;
  }
  EIGEN_STRONG_INLINE void cleanup() {
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    array<Index, NumDims> coords;
    extract_coordinates(index, coords);
    return m_generator(coords);
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    const int packetSize = PacketType<CoeffReturnType, Device>::size;
    EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index + packetSize - 1 < dimensions().TotalSize());

    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
    for (int i = 0; i < packetSize; ++i) {
      values[i] = coeff(index + i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    const size_t target_size = m_device.firstLevelCacheSize();
    return internal::TensorBlockResourceRequirements::skewed<Scalar>(
        target_size);
  }

  struct BlockIteratorState {
    Index stride;
    Index span;
    Index size;
    Index count;
  };
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool = false) const {
    static const bool is_col_major =
        static_cast<int>(Layout) == static_cast<int>(ColMajor);

    // Compute spatial coordinates for the first block element.
    array<Index, NumDims> coords;
    extract_coordinates(desc.offset(), coords);
    array<Index, NumDims> initial_coords = coords;

    // Offset into the output block buffer.
    Index offset = 0;

    // Initialize the output block iterator state. Dimensions in this array are
    // always stored in inner-most to outer-most order (column-major layout).
    array<BlockIteratorState, NumDims> it;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = is_col_major ? i : NumDims - 1 - i;
      it[i].size = desc.dimension(dim);
      it[i].stride = i == 0 ? 1 : (it[i - 1].size * it[i - 1].stride);
      it[i].span = it[i].stride * (it[i].size - 1);
      it[i].count = 0;
    }
    eigen_assert(it[0].stride == 1);

    // Prepare storage for the materialized generator result.
    const typename TensorBlock::Storage block_storage =
        TensorBlock::prepareStorage(desc, scratch);

    CoeffReturnType* block_buffer = block_storage.data();

    static const int packet_size = PacketType<CoeffReturnType, Device>::size;

    static const int inner_dim = is_col_major ? 0 : NumDims - 1;
    const Index inner_dim_size = it[0].size;
    const Index inner_dim_vectorized = inner_dim_size - packet_size;

    while (it[NumDims - 1].count < it[NumDims - 1].size) {
      Index i = 0;
      // Generate data for the vectorizable part of the inner-most dimension.
      for (; i <= inner_dim_vectorized; i += packet_size) {
        for (Index j = 0; j < packet_size; ++j) {
          array<Index, NumDims> j_coords = coords;  // Break loop dependence.
          j_coords[inner_dim] += j;
          *(block_buffer + offset + i + j) = m_generator(j_coords);
        }
        coords[inner_dim] += packet_size;
      }

      // Finalize the non-vectorized remainder of the inner-most dimension.
      for (; i < inner_dim_size; ++i) {
        *(block_buffer + offset + i) = m_generator(coords);
        coords[inner_dim]++;
      }
      coords[inner_dim] = initial_coords[inner_dim];

      // For a 1d tensor there is only the inner-most dimension to generate.
      if (NumDims == 1) break;

      // Update the buffer offset and coordinates for the next inner-most run.
      for (i = 1; i < NumDims; ++i) {
        if (++it[i].count < it[i].size) {
          offset += it[i].stride;
          coords[is_col_major ? i : NumDims - 1 - i]++;
          break;
        }
        if (i != NumDims - 1) it[i].count = 0;
        coords[is_col_major ? i : NumDims - 1 - i] =
            initial_coords[is_col_major ? i : NumDims - 1 - i];
        offset -= it[i].span;
      }
    }

    return block_storage.AsTensorMaterializedBlock();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool) const {
    // The cost of the generator functor itself is not accounted for here.
    return TensorOpCost(0, 0, TensorOpCost::AddCost<Scalar>() +
                                  TensorOpCost::MulCost<Scalar>());
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // Binds placeholder accessors to a command group handler for SYCL.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler&) const {}
#endif
 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void extract_coordinates(Index index, array<Index, NumDims>& coords) const {
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fast_strides[i];
        index -= idx * m_strides[i];
        coords[i] = idx;
      }
      coords[0] = index;
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_fast_strides[i];
        index -= idx * m_strides[i];
        coords[i] = idx;
      }
      coords[NumDims - 1] = index;
    }
  }
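  // Worked example (illustrative, not from the original source): for a
  // column-major tensor with dimensions {3, 4}, the strides are {1, 3}.
  // Linear index 7 decomposes as 7 / 3 = 2 (outer coordinate), with remainder
  // 7 - 2 * 3 = 1 (inner coordinate), giving coords = {1, 2}.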
  const Device EIGEN_DEVICE_REF m_device;
  Dimensions m_dimensions;
  array<Index, NumDims> m_strides;
  array<IndexDivisor, NumDims> m_fast_strides;
  Generator m_generator;
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H