129 #ifndef VIGRA_MULTI_ARRAY_CHUNKED_HXX
130 #define VIGRA_MULTI_ARRAY_CHUNKED_HXX
135 #include "multi_fwd.hxx"
136 #include "multi_handle.hxx"
137 #include "multi_array.hxx"
138 #include "memory.hxx"
139 #include "metaprogramming.hxx"
140 #include "threading.hxx"
141 #include "compression.hxx"
151 # include "windows.h"
156 # include <sys/stat.h>
157 # include <sys/mman.h>
162 #ifdef VIGRA_CHECK_BOUNDS
163 #define VIGRA_ASSERT_INSIDE(diff) \
164 vigra_precondition(this->isInside(diff), "Index out of bounds")
166 #define VIGRA_ASSERT_INSIDE(diff)
172 #define VIGRA_NO_SPARSE_FILE
178 void winErrorToException(std::string message =
"")
181 DWORD dw = GetLastError();
184 FORMAT_MESSAGE_ALLOCATE_BUFFER |
185 FORMAT_MESSAGE_FROM_SYSTEM |
186 FORMAT_MESSAGE_IGNORE_INSERTS,
189 MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
193 message += (
char*)lpMsgBuf;
196 throw std::runtime_error(message);
200 std::string winTempFileName(std::string path =
"")
204 TCHAR default_path[MAX_PATH];
205 if(!GetTempPath(MAX_PATH, default_path))
206 winErrorToException(
"winTempFileName(): ");
210 TCHAR name[MAX_PATH];
211 if(!GetTempFileName(path.c_str(), TEXT(
"vigra"), 0, name))
212 winErrorToException(
"winTempFileName(): ");
214 return std::string(name);
218 std::size_t winClusterSize()
221 ::GetSystemInfo(&info);
222 return info.dwAllocationGranularity;
230 std::size_t mmap_alignment = winClusterSize();
232 std::size_t mmap_alignment = sysconf(_SC_PAGE_SIZE);
237 template <
unsigned int N,
class T>
238 class IteratorChunkHandle;
242 template <
unsigned int N>
245 template <
class T,
int M>
246 static void chunkIndex(TinyVector<T, M>
const & p,
247 TinyVector<T, M>
const & bits,
248 TinyVector<T, M> & index)
250 typedef std::size_t UI;
251 ChunkIndexing<N-1>::chunkIndex(p, bits, index);
252 index[N-1] = (UI)p[N-1] >> bits[N-1];
255 template <
class T,
int M>
256 static std::size_t chunkOffset(TinyVector<T, M>
const & p,
257 TinyVector<T, M>
const & bits,
258 TinyVector<T, M>
const & strides)
260 typedef std::size_t UI;
261 return ChunkIndexing<N-1>::chunkOffset(p, bits, strides) +
262 ((UI)p[N-1] >> bits[N-1]) * strides[N-1];
265 template <
class T,
int M>
266 static std::size_t offsetInChunk(TinyVector<T, M>
const & p,
267 TinyVector<T, M>
const & mask,
268 TinyVector<T, M>
const & strides)
270 typedef std::size_t UI;
271 return ChunkIndexing<N-1>::offsetInChunk(p, mask, strides) +
272 ((UI)p[N-1] & (UI)mask[N-1]) * strides[N-1];
277 struct ChunkIndexing<1>
279 template <
class T,
int M>
280 static void chunkIndex(TinyVector<T, M>
const & p,
281 TinyVector<T, M>
const & bits,
282 TinyVector<T, M> & index)
284 typedef std::size_t UI;
285 index[0] = (UI)p[0] >> bits[0];
288 template <
class T,
int M>
289 static std::size_t chunkOffset(TinyVector<T, M>
const & p,
290 TinyVector<T, M>
const & bits,
291 TinyVector<T, M>
const & strides)
293 typedef std::size_t UI;
294 return ((UI)p[0] >> bits[0]) * strides[0];
297 template <
class T,
int M>
298 static std::size_t offsetInChunk(TinyVector<T, M>
const & p,
299 TinyVector<T, M>
const & mask,
300 TinyVector<T, M>
const & strides)
302 typedef std::size_t UI;
303 return ((UI)p[0] & (UI)mask[0]) * strides[0];
307 template <
class T,
int M>
308 inline TinyVector<T, M>
309 computeChunkArrayShape(TinyVector<T, M> shape,
310 TinyVector<T, M>
const & bits,
311 TinyVector<T, M>
const & mask)
313 for(
int k=0; k<M; ++k)
314 shape[k] = (shape[k] + mask[k]) >> bits[k];
318 template <
class T,
int M>
320 defaultCacheSize(TinyVector<T, M>
const & shape)
323 for(
int k=0; k<M-1; ++k)
324 for(
int j=k+1; j<M; ++j)
325 res = std::max(res, shape[k]*shape[j]);
331 template <
unsigned int N,
class T>
335 typedef typename MultiArrayShape<N>::type shape_type;
336 typedef T value_type;
344 ChunkBase(shape_type
const & strides, pointer p = 0)
349 typename MultiArrayShape<N>::type strides_;
353 template <
unsigned int N,
class T>
354 class SharedChunkHandle
357 typedef typename MultiArrayShape<N>::type shape_type;
359 static const long chunk_asleep = -2;
360 static const long chunk_uninitialized = -3;
361 static const long chunk_locked = -4;
362 static const long chunk_failed = -5;
368 chunk_state_ = chunk_uninitialized;
371 SharedChunkHandle(SharedChunkHandle
const & rhs)
372 : pointer_(rhs.pointer_)
375 chunk_state_ = chunk_uninitialized;
378 shape_type
const & strides()
const
380 return pointer_->strides_;
383 ChunkBase<N, T> * pointer_;
384 mutable threading::atomic_long chunk_state_;
387 SharedChunkHandle & operator=(SharedChunkHandle
const & rhs);
390 template <
unsigned int N,
class T>
391 class ChunkedArrayBase
394 enum ActualDimension{ actual_dimension = (N == 0) ? 1 : N };
395 typedef typename MultiArrayShape<N>::type shape_type;
396 typedef T value_type;
397 typedef value_type * pointer;
398 typedef value_type & reference;
399 typedef ChunkBase<N, T> Chunk;
406 ChunkedArrayBase(shape_type
const & shape, shape_type
const & chunk_shape)
408 , chunk_shape_(
prod(chunk_shape) > 0 ? chunk_shape : detail::ChunkShape<N, T>::defaultShape())
411 virtual ~ChunkedArrayBase()
414 virtual void unrefChunk(IteratorChunkHandle<N, T> * h)
const = 0;
416 virtual pointer chunkForIterator(shape_type
const & point,
417 shape_type & strides, shape_type & upper_bound,
418 IteratorChunkHandle<N, T> * h) = 0;
420 virtual pointer chunkForIterator(shape_type
const & point,
421 shape_type & strides, shape_type & upper_bound,
422 IteratorChunkHandle<N, T> * h)
const = 0;
424 virtual std::string backend()
const = 0;
426 virtual shape_type chunkArrayShape()
const = 0;
428 virtual bool isReadOnly()
const
438 shape_type
const & shape()
const
448 shape_type
const & chunkShape()
const
455 return chunk_shape_[d];
458 bool isInside(shape_type
const & p)
const
460 for(
int d=0; d<N; ++d)
461 if(p[d] < 0 || p[d] >= shape_[d])
466 shape_type shape_, chunk_shape_;
469 template <
unsigned int N,
class T>
// Type-erased base for the proxy objects that unref a view's chunks when the
// last copy of the view dies; the virtual destructor makes deletion through
// a base pointer run the derived cleanup.
struct ChunkUnrefProxyBase
{
    virtual ~ChunkUnrefProxyBase() {}
};
477 template <
unsigned int N,
class T_MaybeConst>
478 class MultiArrayView<N, T_MaybeConst, ChunkedArrayTag>
479 :
public ChunkedArrayBase<N, typename UnqualifiedType<T_MaybeConst>::type>
482 enum ActualDimension { actual_dimension = (N==0) ? 1 : N };
483 typedef typename UnqualifiedType<T_MaybeConst>::type T;
484 typedef T value_type;
485 typedef T_MaybeConst & reference;
486 typedef const value_type &const_reference;
487 typedef T_MaybeConst * pointer;
488 typedef const value_type *const_pointer;
490 typedef difference_type key_type;
491 typedef difference_type size_type;
492 typedef difference_type shape_type;
494 typedef ChunkIterator<actual_dimension, T_MaybeConst> chunk_iterator;
495 typedef ChunkIterator<actual_dimension, T const> chunk_const_iterator;
496 typedef StridedScanOrderIterator<actual_dimension, ChunkedMemory<T_MaybeConst>, T_MaybeConst&, T_MaybeConst*> iterator;
497 typedef StridedScanOrderIterator<actual_dimension, ChunkedMemory<T const>, T
const &, T
const *> const_iterator;
498 typedef MultiArrayView<N, T_MaybeConst, ChunkedArrayTag> view_type;
499 typedef MultiArrayView<N, T const, ChunkedArrayTag> const_view_type;
500 typedef ChunkedArrayTag StrideTag;
501 typedef ChunkBase<N, T> Chunk;
503 typedef MultiArray<N, Chunk> ChunkHolder;
506 :
public ChunkUnrefProxyBase
508 UnrefProxy(
int size, ChunkedArray<N, T> * array)
516 array_->unrefChunks(chunks_);
519 ArrayVector<SharedChunkHandle<N, T> *> chunks_;
520 ChunkedArray<N, T> * array_;
523 virtual shape_type chunkArrayShape()
const
525 return chunks_.shape();
528 shape_type chunkStart(shape_type
const & global_start)
const
530 shape_type chunk_start(SkipInitialization);
531 detail::ChunkIndexing<N>::chunkIndex(global_start, bits_, chunk_start);
535 shape_type chunkStop(shape_type global_stop)
const
537 global_stop -= shape_type(1);
538 shape_type chunk_stop(SkipInitialization);
539 detail::ChunkIndexing<N>::chunkIndex(global_stop, bits_, chunk_stop);
540 chunk_stop += shape_type(1);
544 virtual void unrefChunk(IteratorChunkHandle<N, T> *)
const {}
546 virtual T* chunkForIterator(shape_type
const & point,
547 shape_type & strides, shape_type & upper_bound,
548 IteratorChunkHandle<N, T> * h)
550 return const_cast<MultiArrayView
const *
>(
this)->chunkForIterator(point, strides, upper_bound, h);
553 virtual T* chunkForIterator(shape_type
const & point,
554 shape_type & strides, shape_type & upper_bound,
555 IteratorChunkHandle<N, T> * h)
const
557 shape_type global_point = point + h->offset_;
559 if(!this->isInside(global_point))
561 upper_bound = point + this->chunk_shape_;
565 global_point += offset_;
566 shape_type coffset = offset_ + h->offset_;
568 shape_type chunkIndex = chunkStart(global_point);
569 Chunk
const * chunk = &chunks_[chunkIndex];
570 strides = chunk->strides_;
571 upper_bound = (chunkIndex + shape_type(1)) * this->chunk_shape_ - coffset;
572 std::size_t offset = detail::ChunkIndexing<N>::offsetInChunk(global_point, mask_, strides);
573 return const_cast<T*
>(chunk->pointer_ + offset);
576 virtual std::string backend()
const
578 return "MultiArrayView<ChunkedArrayTag>";
582 : ChunkedArrayBase<N, T>()
585 MultiArrayView(shape_type
const & shape, shape_type
const & chunk_shape)
586 : ChunkedArrayBase<N, T>(shape, chunk_shape)
589 MultiArrayView & operator=(MultiArrayView
const & rhs)
595 ChunkedArrayBase<N, T>::operator=(rhs);
596 chunks_ = rhs.chunks_;
597 offset_ = rhs.offset_;
604 vigra_precondition(this->shape() == rhs.shape(),
605 "MultiArrayView::operator=(): shape mismatch.");
606 iterator i = begin(), ie = end();
607 const_iterator j = rhs.begin();
608 for(; i != ie; ++i, ++j)
615 #define VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(op) \
616 template<class U, class C1> \
617 MultiArrayView & operator op(MultiArrayView<N, U, C1> const & rhs) \
619 vigra_precondition(this->shape() == rhs.shape(), \
620 "MultiArrayView::operator" #op "(): shape mismatch."); \
621 iterator i = begin(), ie = end(); \
622 typename MultiArrayView<N, U, C1>::const_iterator j = rhs.begin(); \
623 for(; i != ie; ++i, ++j) \
624 *i op detail::RequiresExplicitCast<value_type>::cast(*j); \
628 MultiArrayView & operator op(value_type const & v) \
632 iterator i = begin(), ie = end(); \
633 for(; i != ie; ++i) \
639 VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(=)
640 VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(+=)
641 VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(-=)
642 VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(*=)
643 VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(/=)
645 #undef VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN
694 reference operator[](shape_type point)
696 VIGRA_ASSERT_INSIDE(point);
698 Chunk * chunk = chunks_.data() +
699 detail::ChunkIndexing<N>::chunkOffset(point, bits_, chunks_.stride());
700 return *(chunk->pointer_ +
701 detail::ChunkIndexing<N>::offsetInChunk(point, mask_, chunk->strides_));
704 const_reference operator[](shape_type
const & point)
const
706 return const_cast<MultiArrayView *
>(
this)->
operator[](point);
710 MultiArrayView <N-M, T, ChunkedArrayTag>
711 operator[](
const TinyVector<MultiArrayIndex, M> &d)
const
716 reference operator[](difference_type_1 d)
718 return operator[](scanOrderIndexToCoordinate(d));
721 const_reference operator[](difference_type_1 d)
const
723 return operator[](scanOrderIndexToCoordinate(d));
726 difference_type scanOrderIndexToCoordinate(difference_type_1 d)
const
728 difference_type coord(SkipInitialization);
729 detail::ScanOrderToCoordinate<actual_dimension>::exec(d, this->shape_, coord);
735 difference_type_1 coordinateToScanOrderIndex(
const difference_type &d)
const
737 return detail::CoordinateToScanOrder<actual_dimension>::exec(this->shape_, d);
825 MultiArrayView & init(
const U & init)
827 return operator=(init);
830 template <
class U,
class CN>
831 void copy(
const MultiArrayView <N, U, CN>& rhs)
836 template <
class T2,
class C2>
837 void swapData(MultiArrayView <N, T2, C2> rhs)
841 vigra_precondition(this->shape() == rhs.shape(),
842 "MultiArrayView::swapData(): shape mismatch.");
843 iterator i = begin(), ie = end();
844 typename MultiArrayView<N, T2, C2>::iterator j = rhs.begin();
845 for(; i != ie; ++i, ++j)
849 bool isUnstrided(
unsigned int dimension = N-1)
const
851 if(chunks_.size() > 1)
853 difference_type s = vigra::detail::defaultStride<actual_dimension>(this->shape());
854 for(
unsigned int k = 0; k <= dimension; ++k)
855 if(chunks_.data()->strides_[k] != s[k])
860 MultiArrayView<N-1, value_type, ChunkedArrayTag>
863 MultiArrayView<N-1, value_type, ChunkedArrayTag> res(this->shape_.dropIndex(m), this->chunk_shape_.dropIndex(m));
864 res.offset_ = offset_.dropIndex(m);
865 res.bits_ = bits_.dropIndex(m);
866 res.mask_ = mask_.dropIndex(m);
867 res.chunks_.reshape(chunks_.shape().dropIndex(m));
870 typedef std::size_t UI;
871 UI start = offset_[m] + d;
872 UI chunk_start = start >> bits_[m];
873 UI startInChunk = start - chunk_start * this->chunk_shape_[m];
875 MultiArrayView<N-1, Chunk> view(chunks_.bindAt(m, chunk_start));
876 MultiCoordinateIterator<N-1> i(view.shape()),
877 end(i.getEndIterator());
880 res.chunks_[*i].pointer_ = view[*i].pointer_ + startInChunk*view[*i].strides_[m];
881 res.chunks_[*i].strides_ = view[*i].strides_.dropIndex(m);
887 template <
unsigned int M>
888 MultiArrayView <N-1, value_type, ChunkedArrayTag>
889 bind (difference_type_1 d)
const
894 MultiArrayView <N-1, value_type, ChunkedArrayTag>
895 bindOuter (difference_type_1 d)
const
897 return bindAt(N-1, d);
900 template <
int M,
class Index>
901 MultiArrayView <N-M, value_type, ChunkedArrayTag>
902 bindOuter(
const TinyVector <Index, M> &d)
const
904 return bindAt(N-1, d[M-1]).bindOuter(d.dropIndex(M-1));
907 template <
class Index>
908 MultiArrayView <N-1, value_type, ChunkedArrayTag>
909 bindOuter(
const TinyVector <Index, 1> &d)
const
911 return bindAt(N-1, d[0]);
914 MultiArrayView <N-1, value_type, ChunkedArrayTag>
915 bindInner (difference_type_1 d)
const
920 template <
int M,
class Index>
921 MultiArrayView <N-M, value_type, ChunkedArrayTag>
922 bindInner(
const TinyVector <Index, M> &d)
const
924 return bindAt(0, d[0]).bindInner(d.dropIndex(0));
927 template <
class Index>
928 MultiArrayView <N-1, value_type, ChunkedArrayTag>
929 bindInner(
const TinyVector <Index, 1> &d)
const
931 return bindAt(0, d[0]);
960 checkSubarrayBounds(shape_type
const & start, shape_type
const & stop,
961 std::string message)
const
963 message +=
": subarray out of bounds.";
970 MultiArrayView<N, value_type, ChunkedArrayTag>
971 subarray(shape_type start, shape_type stop)
973 checkSubarrayBounds(start, stop,
"MultiArrayView<N-1, T, ChunkedArrayTag>::subarray()");
976 shape_type chunk_start(chunkStart(start));
978 MultiArrayView<N, value_type, ChunkedArrayTag> view(stop-start, this->chunk_shape_);
979 view.chunks_ = chunks_.subarray(chunk_start, chunkStop(stop));
980 view.offset_ = start - chunk_start * this->chunk_shape_;
983 view.unref_ = unref_;
1002 MultiArrayView <N, value_type, ChunkedArrayTag>
1008 MultiArrayView <N, value_type, ChunkedArrayTag>
1009 transpose(
const difference_type &permutation)
const
1011 MultiArrayView<N, value_type, ChunkedArrayTag>
1012 view(vigra::transpose(this->shape_, permutation), vigra::transpose(this->chunk_shape_, permutation));
1013 view.chunks_ = chunks_.transpose(permutation);
1017 view.unref_ = unref_;
1019 iend = view.chunks_.end();
1020 for(; i != iend; ++i)
1021 i->strides_ = vigra::transpose(i->strides_, permutation);
1055 template <
class U,
class C1>
1056 bool operator==(MultiArrayView<N, U, C1>
const & rhs)
const
1058 if(this->shape() != rhs.shape())
1060 const_iterator i = begin(), ie = end();
1061 typename MultiArrayView<N, U, C1>::const_iterator j = rhs.begin();
1062 for(; i != ie; ++i, ++j)
1068 template <
class U,
class C1>
1069 bool operator!=(MultiArrayView<N, U, C1>
const & rhs)
const
1164 bool hasData ()
const
1166 return chunks_.hasData();
1171 return createCoupledIterator(*
this);
1176 return begin().getEndIterator();
1179 const_iterator cbegin()
const
1181 return createCoupledIterator(const_cast<MultiArrayView const &>(*
this));
1184 const_iterator cend()
const
1186 return cbegin().getEndIterator();
1189 const_iterator begin()
const
1191 return createCoupledIterator(*
this);
1194 const_iterator end()
const
1196 return begin().getEndIterator();
1199 chunk_iterator chunk_begin(shape_type
const & start, shape_type
const & stop)
1201 checkSubarrayBounds(start, stop,
"MultiArrayView<N-1, T, ChunkedArrayTag>::chunk_begin()");
1202 return chunk_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
1205 chunk_iterator chunk_end(shape_type
const & start, shape_type
const & stop)
1207 return chunk_begin(start, stop).getEndIterator();
1210 chunk_const_iterator chunk_begin(shape_type
const & start, shape_type
const & stop)
const
1212 checkSubarrayBounds(start, stop,
"MultiArrayView<N-1, T, ChunkedArrayTag>::chunk_begin()");
1213 return chunk_const_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
1216 chunk_const_iterator chunk_end(shape_type
const & start, shape_type
const & stop)
const
1218 return chunk_begin(start, stop).getEndIterator();
1221 chunk_const_iterator chunk_cbegin(shape_type
const & start, shape_type
const & stop)
const
1223 checkSubarrayBounds(start, stop,
"MultiArrayView<N-1, T, ChunkedArrayTag>::chunk_cbegin()");
1224 return chunk_const_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
1227 chunk_const_iterator chunk_cend(shape_type
const & start, shape_type
const & stop)
const
1229 return chunk_cbegin(start, stop).getEndIterator();
1237 MultiArray<N, Chunk> chunks_;
1238 shape_type offset_, bits_, mask_;
1239 VIGRA_SHARED_PTR<ChunkUnrefProxyBase> unref_;
1242 template <
unsigned int N,
class T>
1243 typename MultiArrayView<N, T, ChunkedArrayTag>::iterator
1244 createCoupledIterator(MultiArrayView<N, T, ChunkedArrayTag> & m)
1246 typedef typename MultiArrayView<N, T, ChunkedArrayTag>::iterator IteratorType;
1247 typedef typename IteratorType::handle_type P1;
1248 typedef typename P1::base_type P0;
1250 return IteratorType(P1(m,
1254 template <
unsigned int N,
class T>
1255 typename MultiArrayView<N, T, ChunkedArrayTag>::const_iterator
1256 createCoupledIterator(MultiArrayView<N, T, ChunkedArrayTag>
const & m)
1258 typedef typename MultiArrayView<N, T, ChunkedArrayTag>::const_iterator IteratorType;
1259 typedef typename IteratorType::handle_type P1;
1260 typedef typename P1::base_type P0;
1262 return IteratorType(P1(m,
1282 , compression_method(DEFAULT_COMPRESSION)
1321 compression_method = v;
1332 CompressionMethod compression_method;
1525 template <
unsigned int N,
class T>
1527 :
public ChunkedArrayBase<N, T>
1603 typedef ChunkedArrayBase<N, T> base_type;
1604 typedef typename MultiArrayShape<N>::type shape_type;
1605 typedef typename shape_type::value_type difference_type_1;
1606 typedef T value_type;
1607 typedef value_type * pointer;
1608 typedef value_type
const * const_pointer;
1609 typedef value_type & reference;
1610 typedef value_type
const & const_reference;
1611 typedef ChunkIterator<N, T> chunk_iterator;
1612 typedef ChunkIterator<N, T const> chunk_const_iterator;
1613 typedef StridedScanOrderIterator<N, ChunkedMemory<T>, reference, pointer> iterator;
1614 typedef StridedScanOrderIterator<N, ChunkedMemory<T const>, const_reference, const_pointer> const_iterator;
1615 typedef SharedChunkHandle<N, T> Handle;
1616 typedef ChunkBase<N, T> Chunk;
1617 typedef MultiArrayView<N, T, ChunkedArrayTag> view_type;
1618 typedef MultiArrayView<N, T const, ChunkedArrayTag> const_view_type;
1619 typedef std::queue<Handle*> CacheType;
1621 static const long chunk_asleep = Handle::chunk_asleep;
1622 static const long chunk_uninitialized = Handle::chunk_uninitialized;
1623 static const long chunk_locked = Handle::chunk_locked;
1624 static const long chunk_failed = Handle::chunk_failed;
1627 explicit ChunkedArray(shape_type
const & shape,
1628 shape_type
const & chunk_shape = shape_type(),
1629 ChunkedArrayOptions
const & options = ChunkedArrayOptions())
1630 : ChunkedArrayBase<N, T>(shape, chunk_shape)
1631 , bits_(initBitMask(this->chunk_shape_))
1632 , mask_(this->chunk_shape_ -shape_type(1))
1633 , cache_max_size_(options.cache_max)
1634 , chunk_lock_(new threading::mutex())
1635 , fill_value_(T(options.fill_value))
1636 , fill_scalar_(options.fill_value)
1637 , handle_array_(detail::computeChunkArrayShape(shape, bits_, mask_))
1639 , overhead_bytes_(handle_array_.size()*sizeof(Handle))
1641 fill_value_chunk_.pointer_ = &fill_value_;
1642 fill_value_handle_.pointer_ = &fill_value_chunk_;
1643 fill_value_handle_.chunk_state_.store(1);
1647 static shape_type initBitMask(shape_type
const & chunk_shape)
1650 for(
unsigned int k=0; k<N; ++k)
1654 "ChunkedArray: chunk_shape elements must be powers of 2.");
1660 virtual ~ChunkedArray()
1669 return cache_.size();
1686 return overhead_bytes_;
1693 return handle_array_.shape();
1696 virtual std::size_t dataBytes(Chunk * c)
const = 0;
1702 return prod(this->chunk_shape_)*
sizeof(T);
1707 virtual std::size_t overheadBytesPerChunk()
const = 0;
1713 shape_type chunk_start(SkipInitialization);
1714 detail::ChunkIndexing<N>::chunkIndex(global_start, bits_, chunk_start);
1727 global_stop -= shape_type(1);
1728 shape_type chunk_stop(SkipInitialization);
1729 detail::ChunkIndexing<N>::chunkIndex(global_stop, bits_, chunk_stop);
1730 chunk_stop += shape_type(1);
1741 return min(this->chunk_shape_,
1742 this->shape_ - chunk_index*this->chunk_shape_);
1745 using base_type::chunkShape;
1753 shape_type
const & chunkShape()
const;
1757 shape_type
const & shape()
const;
1765 bool isInside(shape_type
const & p)
const;
1769 std::string backend()
const;
1774 checkSubarrayBounds(shape_type
const & start, shape_type
const & stop,
1775 std::string message)
const
1777 message +=
": subarray out of bounds.";
1778 vigra_precondition(
allLessEqual(shape_type(), start) &&
1786 template <
class U,
class C1>
1789 if(this->shape() != rhs.shape())
1791 const_iterator i = begin(), ie = end();
1793 for(; i != ie; ++i, ++j)
1801 template <
class U,
class C1>
1808 virtual pointer loadChunk(Chunk ** chunk, shape_type
const & chunk_index) = 0;
1813 virtual bool unloadHandle(Handle * handle,
bool destroy =
false)
1815 if(handle == &fill_value_handle_)
1817 return unloadChunk(handle->pointer_, destroy);
1820 virtual bool unloadChunk(Chunk * chunk,
bool destroy =
false) = 0;
1822 Handle * lookupHandle(shape_type
const & index)
1824 return &handle_array_[index];
1829 virtual void unrefChunk(IteratorChunkHandle<N, T> * h)
const
1831 unrefChunk(h->chunk_);
1836 void unrefChunk(Handle * chunk)
const
1840 long rc = chunk->chunk_state_.fetch_sub(1);
1841 #ifdef VIGRA_CHECK_BOUNDS
1842 vigra_invariant(rc >= 0,
1843 "ChunkedArray::unrefChunk(): chunk refcount got negative!");
1849 void unrefChunks(ArrayVector<Handle*>
const & chunks)
1851 for(
unsigned int k=0; k<chunks.size(); ++k)
1852 unrefChunk(chunks[k]);
1854 if(cacheMaxSize() > 0)
1856 threading::lock_guard<threading::mutex> guard(*chunk_lock_);
1857 cleanCache(cache_.size());
1863 long acquireRef(Handle * handle)
const
1871 long rc = handle->chunk_state_.load(threading::memory_order_acquire);
1876 if(handle->chunk_state_.compare_exchange_weak(rc, rc+1, threading::memory_order_seq_cst))
1883 if(rc == chunk_failed)
1885 vigra_precondition(
false,
1886 "ChunkedArray::acquireRef() attempt to access failed chunk.");
1888 else if(rc == chunk_locked)
1891 threading::this_thread::yield();
1892 rc = handle->chunk_state_.load(threading::memory_order_acquire);
1894 else if(handle->chunk_state_.compare_exchange_weak(rc, chunk_locked, threading::memory_order_seq_cst))
1903 getChunk(Handle * handle,
bool isConst,
bool insertInCache, shape_type
const & chunk_index)
const
1905 ChunkedArray *
self =
const_cast<ChunkedArray *
>(
this);
1907 long rc = acquireRef(handle);
1909 return handle->pointer_->pointer_;
1911 threading::lock_guard<threading::mutex> guard(*chunk_lock_);
1914 T * p =
self->loadChunk(&handle->pointer_, chunk_index);
1915 Chunk * chunk = handle->pointer_;
1916 if(!isConst && rc == chunk_uninitialized)
1917 std::fill(p, p +
prod(chunkShape(chunk_index)), this->fill_value_);
1919 self->data_bytes_ += dataBytes(chunk);
1921 if(cacheMaxSize() > 0 && insertInCache)
1924 self->cache_.push(handle);
1928 self->cleanCache(2);
1930 handle->chunk_state_.store(1, threading::memory_order_release);
1935 handle->chunk_state_.store(chunk_failed);
1942 chunkForIteratorImpl(shape_type
const & point,
1943 shape_type & strides, shape_type & upper_bound,
1944 IteratorChunkHandle<N, T> * h,
1947 ChunkedArray *
self =
const_cast<ChunkedArray *
>(
this);
1949 unrefChunk(h->chunk_);
1952 shape_type global_point = point + h->offset_;
1954 if(!this->isInside(global_point))
1956 upper_bound = point + this->chunk_shape_;
1960 shape_type chunkIndex(chunkStart(global_point));
1962 bool insertInCache =
true;
1963 Handle * handle =
self->lookupHandle(chunkIndex);
1964 if(isConst && handle->chunk_state_.load() == chunk_uninitialized)
1966 handle = &
self->fill_value_handle_;
1967 insertInCache =
false;
1970 pointer p = getChunk(handle, isConst, insertInCache, chunkIndex);
1971 strides = handle->strides();
1972 upper_bound = (chunkIndex + shape_type(1)) * this->chunk_shape_ - h->offset_;
1973 std::size_t offset = detail::ChunkIndexing<N>::offsetInChunk(global_point, mask_, strides);
1980 virtual pointer chunkForIterator(shape_type
const & point,
1981 shape_type & strides, shape_type & upper_bound,
1982 IteratorChunkHandle<N, T> * h)
1984 return chunkForIteratorImpl(point, strides, upper_bound, h,
false);
1987 virtual pointer chunkForIterator(shape_type
const & point,
1988 shape_type & strides, shape_type & upper_bound,
1989 IteratorChunkHandle<N, T> * h)
const
1991 return chunkForIteratorImpl(point, strides, upper_bound, h,
true);
1996 long releaseChunk(Handle * handle,
bool destroy =
false)
1999 bool mayUnload = handle->chunk_state_.compare_exchange_strong(rc, chunk_locked);
2000 if(!mayUnload && destroy)
2003 mayUnload = handle->chunk_state_.compare_exchange_strong(rc, chunk_locked);
2010 vigra_invariant(handle != &fill_value_handle_,
2011 "ChunkedArray::releaseChunk(): attempt to release fill_value_handle_.");
2012 Chunk * chunk = handle->pointer_;
2013 this->data_bytes_ -= dataBytes(chunk);
2014 int didDestroy = unloadChunk(chunk, destroy);
2015 this->data_bytes_ += dataBytes(chunk);
2017 handle->chunk_state_.store(chunk_uninitialized);
2019 handle->chunk_state_.store(chunk_asleep);
2023 handle->chunk_state_.store(chunk_failed);
2031 void cleanCache(
int how_many = -1)
2034 how_many = cache_.size();
2035 for(; cache_.size() > cacheMaxSize() && how_many > 0; --how_many)
2037 Handle * handle = cache_.front();
2039 long rc = releaseChunk(handle);
2041 cache_.push(handle);
2053 void releaseChunks(shape_type
const & start, shape_type
const & stop,
bool destroy =
false)
2055 checkSubarrayBounds(start, stop,
"ChunkedArray::releaseChunks()");
2058 end(i.getEndIterator());
2059 for(; i != end; ++i)
2061 shape_type chunkOffset = *i * this->chunk_shape_;
2063 !
allLessEqual(min(chunkOffset+this->chunk_shape_, this->shape()), stop))
2069 Handle * handle = this->lookupHandle(*i);
2070 threading::lock_guard<threading::mutex> guard(*chunk_lock_);
2071 releaseChunk(handle, destroy);
2075 threading::lock_guard<threading::mutex> guard(*chunk_lock_);
2076 int cache_size = cache_.size();
2077 for(
int k=0; k < cache_size; ++k)
2079 Handle * handle = cache_.front();
2081 if(handle->chunk_state_.load() >= 0)
2082 cache_.push(handle);
2092 template <
class U,
class Str
ide>
2097 shape_type stop = start + subarray.shape();
2099 checkSubarrayBounds(start, stop,
"ChunkedArray::checkoutSubarray()");
2101 chunk_const_iterator i = chunk_cbegin(start, stop);
2102 for(; i.isValid(); ++i)
2104 subarray.subarray(i.chunkStart()-start, i.chunkStop()-start) = *i;
2114 template <
class U,
class Str
ide>
2119 shape_type stop = start + subarray.shape();
2121 vigra_precondition(!this->isReadOnly(),
2122 "ChunkedArray::commitSubarray(): array is read-only.");
2123 checkSubarrayBounds(start, stop,
"ChunkedArray::commitSubarray()");
2125 chunk_iterator i = chunk_begin(start, stop);
2126 for(; i.isValid(); ++i)
2128 *i = subarray.subarray(i.chunkStart()-start, i.chunkStop()-start);
2133 template <
class View>
2134 void subarrayImpl(shape_type
const & start, shape_type
const & stop,
2138 vigra_precondition(isConst || !this->isReadOnly(),
2139 "ChunkedArray::subarray(): array is read-only.");
2140 checkSubarrayBounds(start, stop,
"ChunkedArray::subarray()");
2141 shape_type chunk_start(chunkStart(start)), chunk_stop(chunkStop(stop));
2143 view.shape_ = stop-start;
2144 view.chunk_shape_ = this->chunk_shape_;
2145 view.chunks_.reshape(chunk_stop-chunk_start);
2146 view.offset_ = start - chunk_start * this->chunk_shape_;
2150 typedef typename View::UnrefProxy Unref;
2152 Unref * unref =
new Unref(view.chunks_.size(),
self);
2153 view.unref_ = VIGRA_SHARED_PTR<Unref>(unref);
2156 end(i.getEndIterator());
2157 for(; i != end; ++i)
2159 Handle * handle =
self->lookupHandle(*i);
2161 if(isConst && handle->chunk_state_.load() == chunk_uninitialized)
2162 handle = &self->fill_value_handle_;
2166 pointer p = getChunk(handle, isConst,
true, *i);
2168 ChunkBase<N, T> * mini_chunk = &view.chunks_[*i - chunk_start];
2169 mini_chunk->pointer_ = p;
2170 mini_chunk->strides_ = handle->strides();
2171 unref->chunks_[i.scanOrderIndex()] = handle;
2182 subarray(shape_type
const & start, shape_type
const & stop)
2185 subarrayImpl(start, stop, view,
false);
2196 subarray(shape_type
const & start, shape_type
const & stop)
const
2198 const_view_type view;
2199 subarrayImpl(start, stop, view,
true);
2212 const_view_type view;
2213 subarrayImpl(start, stop, view,
true);
2223 value_type
getItem(shape_type
const & point)
const
2225 vigra_precondition(this->isInside(point),
2226 "ChunkedArray::getItem(): index out of bounds.");
2229 shape_type chunk_index(chunkStart(point));
2230 Handle * handle =
self->lookupHandle(chunk_index);
2231 if(handle->chunk_state_.load() == chunk_uninitialized)
2233 pointer p =
self->getChunk(handle,
true,
false, chunk_index);
2234 value_type res = *(p +
2235 detail::ChunkIndexing<N>::offsetInChunk(point, mask_, handle->strides()));
2236 self->unrefChunk(handle);
2246 void setItem(shape_type
const & point, value_type
const & v)
2248 vigra_precondition(!this->isReadOnly(),
2249 "ChunkedArray::setItem(): array is read-only.");
2250 vigra_precondition(this->isInside(point),
2251 "ChunkedArray::setItem(): index out of bounds.");
2253 shape_type chunk_index(chunkStart(point));
2254 Handle * handle = lookupHandle(chunk_index);
2255 pointer p = getChunk(handle,
false,
false, chunk_index);
2256 *(p + detail::ChunkIndexing<N>::offsetInChunk(point, mask_, handle->strides())) = v;
2269 shape_type start, stop(this->shape());
2271 stop[dim] = index+1;
2272 return subarray(start, stop).bindAt(dim, 0);
2281 template <
unsigned int M>
2283 bind (difference_type_1 index)
const
2285 return bindAt(M, index);
2297 return bindAt(N-1, index);
2305 template <
int M,
class Index>
2309 return bindAt(N-1, d[M-1]).bindOuter(d.dropIndex(M-1));
2313 template <
class Index>
2317 return bindAt(N-1, d[0]);
2326 MultiArrayView <N-1, T, ChunkedArrayTag>
2329 return bindAt(0, index);
2337 template <
int M,
class Index>
2341 return bindAt(0, d[0]).bindInner(d.dropIndex(0));
2345 template <
class Index>
2349 return bindAt(0, d[0]);
2361 if(cache_max_size_ < 0)
2362 const_cast<int &
>(cache_max_size_) = detail::defaultCacheSize(this->chunkArrayShape());
2363 return cache_max_size_;
2373 cache_max_size_ = c;
2374 if(c < cache_.size())
2376 threading::lock_guard<threading::mutex> guard(*chunk_lock_);
2385 return createCoupledIterator(*
this);
2393 return begin().getEndIterator();
2401 return createCoupledIterator(const_cast<ChunkedArray const &>(*
this));
2409 return cbegin().getEndIterator();
2417 return createCoupledIterator(*
this);
2425 return begin().getEndIterator();
2430 chunk_iterator
chunk_begin(shape_type
const & start, shape_type
const & stop)
2432 checkSubarrayBounds(start, stop,
"ChunkedArray::chunk_begin()");
2433 return chunk_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
2439 chunk_iterator
chunk_end(shape_type
const & start, shape_type
const & stop)
2441 return chunk_begin(start, stop).getEndIterator();
2447 chunk_const_iterator
chunk_begin(shape_type
const & start, shape_type
const & stop)
const
2449 checkSubarrayBounds(start, stop,
"ChunkedArray::chunk_begin()");
2450 return chunk_const_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
2456 chunk_const_iterator
chunk_end(shape_type
const & start, shape_type
const & stop)
const
2458 return chunk_begin(start, stop).getEndIterator();
2464 chunk_const_iterator
chunk_cbegin(shape_type
const & start, shape_type
const & stop)
const
2466 checkSubarrayBounds(start, stop,
"ChunkedArray::chunk_cbegin()");
2467 return chunk_const_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
2473 chunk_const_iterator
chunk_cend(shape_type
const & start, shape_type
const & stop)
const
2475 return chunk_cbegin(start, stop).getEndIterator();
2478 shape_type bits_, mask_;
2479 int cache_max_size_;
2480 VIGRA_SHARED_PTR<threading::mutex> chunk_lock_;
2482 Chunk fill_value_chunk_;
2483 Handle fill_value_handle_;
2484 value_type fill_value_;
2485 double fill_scalar_;
2487 std::size_t data_bytes_, overhead_bytes_;
2492 template <
unsigned int N,
class T>
2493 typename ChunkedArray<N, T>::iterator
2497 typedef typename IteratorType::handle_type P1;
2498 typedef typename P1::base_type P0;
2500 return IteratorType(P1(m,
2504 template <
unsigned int N,
class T>
2505 typename ChunkedArray<N, T>::const_iterator
2506 createCoupledIterator(ChunkedArray<N, T>
const & m)
2508 typedef typename ChunkedArray<N, T>::const_iterator IteratorType;
2509 typedef typename IteratorType::handle_type P1;
2510 typedef typename P1::base_type P0;
2512 return IteratorType(P1(m,
2521 template <
unsigned int N,
class T,
class Alloc = std::allocator<T> >
2543 typedef typename ChunkedArray<N, T>::Chunk Chunk;
2545 static shape_type computeChunkShape(shape_type s)
2547 for(
int k=0; k<N; ++k)
2552 using Storage::subarray;
2553 using Storage::bindOuter;
2554 using Storage::bindInner;
2555 using Storage::bind;
2556 using Storage::bindAt;
2557 using Storage::isInside;
2558 using Storage::shape;
2559 using Storage::size;
2560 using Storage::begin;
2563 #ifndef DOXYGEN // doxygen doesn't understand this
2564 using Storage::operator==;
2565 using Storage::operator!=;
2573 Alloc
const & alloc = Alloc())
2574 :
ChunkedArray<N, T>(shape, computeChunkShape(shape), options.cacheMax(0)),
2575 Storage(shape, this->fill_value_, alloc),
2576 upper_bound_(shape),
2577 chunk_(detail::defaultStride(shape), this->data())
2579 this->handle_array_[0].pointer_ = &chunk_;
2580 this->handle_array_[0].chunk_state_.store(1);
2581 this->data_bytes_ = size()*
sizeof(T);
2582 this->overhead_bytes_ = overheadBytesPerChunk();
2588 upper_bound_(rhs.upper_bound_),
2589 chunk_(detail::defaultStride(shape), this->data())
2591 this->handle_array_[0].pointer_ = &chunk_;
2592 this->handle_array_[0].chunk_state_.store(1);
2595 ChunkedArrayFull & operator=(ChunkedArrayFull
const & rhs)
2599 ChunkedArray<N, T>::operator=(rhs);
2600 Storage::operator=(rhs);
2601 upper_bound_ = rhs.upper_bound_;
2611 return shape_type(1);
2614 virtual pointer loadChunk(ChunkBase<N, T> **, shape_type
const &)
2616 return this->data();
2619 virtual bool unloadChunk(ChunkBase<N, T> *,
bool )
2624 virtual std::size_t dataBytes(Chunk * c)
const
2626 return prod(this->shape());
2631 return sizeof(Chunk) +
sizeof(SharedChunkHandle<N, T>);
2634 virtual pointer chunkForIterator(shape_type
const & point,
2635 shape_type & strides, shape_type & upper_bound,
2636 IteratorChunkHandle<N, T> * h)
const
2638 shape_type global_point = point + h->offset_;
2640 if(!this->isInside(global_point))
2642 upper_bound = point + this->chunk_shape_;
2646 strides = this->stride();
2647 upper_bound = upper_bound_;
2648 return const_cast<pointer
>(&Storage::operator[](global_point));
2651 virtual pointer chunkForIterator(shape_type
const & point,
2652 shape_type & strides, shape_type & upper_bound,
2653 IteratorChunkHandle<N, T> * h)
2655 shape_type global_point = point + h->offset_;
2657 if(!this->isInside(global_point))
2659 upper_bound = point + this->chunk_shape_;
2663 strides = this->stride();
2664 upper_bound = upper_bound_;
2665 return &Storage::operator[](global_point);
2668 virtual std::string backend()
const
2670 return "ChunkedArrayFull";
2673 shape_type upper_bound_;
2686 template <
unsigned int N,
class T,
class Alloc = std::allocator<T> >
2693 :
public ChunkBase<N, T>
2697 typedef T value_type;
2698 typedef value_type * pointer;
2699 typedef value_type & reference;
2701 Chunk(shape_type
const & shape, Alloc
const & alloc = Alloc())
2702 : ChunkBase<N, T>(detail::defaultStride(shape))
2703 , size_(
prod(shape))
2714 if(this->pointer_ == 0)
2715 this->pointer_ = detail::alloc_initialize_n<T>(size_, T(), alloc_);
2716 return this->pointer_;
2721 detail::destroy_dealloc_n(this->pointer_, size_, alloc_);
2729 Chunk & operator=(Chunk
const &);
2734 typedef T value_type;
2735 typedef value_type * pointer;
2736 typedef value_type & reference;
2742 shape_type
const & chunk_shape=shape_type(),
2744 Alloc
const & alloc = Alloc())
2745 :
ChunkedArray<N, T>(shape, chunk_shape, options.cacheMax(0))
2751 typename ChunkStorage::iterator i = this->handle_array_.
begin(),
2752 end = this->handle_array_.end();
2753 for(; i != end; ++i)
2756 delete static_cast<Chunk*
>(i->pointer_);
2761 virtual pointer loadChunk(ChunkBase<N, T> ** p, shape_type
const & index)
2765 *p =
new Chunk(this->chunkShape(index));
2766 this->overhead_bytes_ +=
sizeof(Chunk);
2768 return static_cast<Chunk *
>(*p)->allocate();
2771 virtual bool unloadChunk(ChunkBase<N, T> * chunk,
bool destroy)
2774 static_cast<Chunk *
>(chunk)->deallocate();
2778 virtual std::string backend()
const
2780 return "ChunkedArrayLazy";
2783 virtual std::size_t dataBytes(ChunkBase<N,T> * c)
const
2785 return c->pointer_ == 0
2787 :
static_cast<Chunk*
>(c)->size_*
sizeof(T);
2792 return sizeof(Chunk) +
sizeof(SharedChunkHandle<N, T>);
2808 template <
unsigned int N,
class T,
class Alloc = std::allocator<T> >
2815 :
public ChunkBase<N, T>
2819 typedef T value_type;
2820 typedef value_type * pointer;
2821 typedef value_type & reference;
2823 Chunk(shape_type
const & shape)
2824 : ChunkBase<N, T>(detail::defaultStride(shape))
2826 , size_(
prod(shape))
2836 if(this->pointer_ == 0)
2837 this->pointer_ = detail::alloc_initialize_n<T>(size_, T(), alloc_);
2838 return this->pointer_;
2843 detail::destroy_dealloc_n(this->pointer_, size_, alloc_);
2845 compressed_.clear();
2848 void compress(CompressionMethod method)
2850 if(this->pointer_ != 0)
2852 vigra_invariant(compressed_.size() == 0,
2853 "ChunkedArrayCompressed::Chunk::compress(): compressed and uncompressed pointer are both non-zero.");
2855 ::vigra::compress((
char const *)this->pointer_, size_*
sizeof(T), compressed_, method);
2858 detail::destroy_dealloc_n(this->pointer_, size_, alloc_);
2865 if(this->pointer_ == 0)
2867 if(compressed_.size())
2869 this->pointer_ = alloc_.allocate((
typename Alloc::size_type)size_);
2871 ::vigra::uncompress(compressed_.data(), compressed_.size(),
2872 (
char*)this->pointer_, size_*
sizeof(T), method);
2873 compressed_.clear();
2877 this->pointer_ = allocate();
2882 vigra_invariant(compressed_.size() == 0,
2883 "ChunkedArrayCompressed::Chunk::uncompress(): compressed and uncompressed pointer are both non-zero.");
2885 return this->pointer_;
2893 Chunk & operator=(Chunk
const &);
2898 typedef T value_type;
2899 typedef value_type * pointer;
2900 typedef value_type & reference;
2915 shape_type
const & chunk_shape=shape_type(),
2918 compression_method_(options.compression_method)
2920 if(compression_method_ == DEFAULT_COMPRESSION)
2921 compression_method_ = LZ4;
2926 typename ChunkStorage::iterator i = this->handle_array_.
begin(),
2927 end = this->handle_array_.end();
2928 for(; i != end; ++i)
2931 delete static_cast<Chunk*
>(i->pointer_);
2936 virtual pointer loadChunk(ChunkBase<N, T> ** p, shape_type
const & index)
2940 *p =
new Chunk(this->chunkShape(index));
2941 this->overhead_bytes_ +=
sizeof(Chunk);
2943 return static_cast<Chunk *
>(*p)->uncompress(compression_method_);
2946 virtual bool unloadChunk(ChunkBase<N, T> * chunk,
bool destroy)
2949 static_cast<Chunk *
>(chunk)->deallocate();
2951 static_cast<Chunk *
>(chunk)->
compress(compression_method_);
2955 virtual std::string backend()
const
2957 switch(compression_method_)
2960 return "ChunkedArrayCompressed<ZLIB>";
2962 return "ChunkedArrayCompressed<ZLIB_NONE>";
2964 return "ChunkedArrayCompressed<ZLIB_FAST>";
2966 return "ChunkedArrayCompressed<ZLIB_BEST>";
2968 return "ChunkedArrayCompressed<LZ4>";
2974 virtual std::size_t dataBytes(ChunkBase<N,T> * c)
const
2976 return c->pointer_ == 0
2977 ?
static_cast<Chunk*
>(c)->compressed_.size()
2978 :
static_cast<Chunk*
>(c)->size_*
sizeof(T);
2983 return sizeof(Chunk) +
sizeof(SharedChunkHandle<N, T>);
2986 CompressionMethod compression_method_;
3007 template <
unsigned int N,
class T>
3025 typedef HANDLE FileHandle;
3027 typedef int FileHandle;
3031 :
public ChunkBase<N, T>
3035 typedef T value_type;
3036 typedef value_type * pointer;
3037 typedef value_type & reference;
3039 Chunk(shape_type
const & shape,
3040 std::size_t offset,
size_t alloc_size,
3042 : ChunkBase<N, T>(detail::defaultStride(shape))
3044 , alloc_size_(alloc_size)
3055 if(this->pointer_ == 0)
3058 static const std::size_t bits =
sizeof(DWORD)*8,
3059 mask = (std::size_t(1) << bits) - 1;
3060 this->pointer_ = (pointer)MapViewOfFile(file_, FILE_MAP_ALL_ACCESS,
3061 std::size_t(offset_) >> bits, offset_ & mask, alloc_size_);
3062 if(this->pointer_ == 0)
3063 winErrorToException(
"ChunkedArrayChunk::map(): ");
3065 this->pointer_ = (pointer)mmap(0, alloc_size_, PROT_READ | PROT_WRITE, MAP_SHARED,
3067 if(this->pointer_ == 0)
3068 throw std::runtime_error(
"ChunkedArrayChunk::map(): mmap() failed.");
3071 return this->pointer_;
3076 if(this->pointer_ != 0)
3079 ::UnmapViewOfFile(this->pointer_);
3081 munmap(this->pointer_, alloc_size_);
3087 std::size_t offset_, alloc_size_;
3091 Chunk & operator=(Chunk
const &);
3097 typedef T value_type;
3098 typedef value_type * pointer;
3099 typedef value_type & reference;
3101 static std::size_t computeAllocSize(shape_type
const & shape)
3103 std::size_t size =
prod(shape)*
sizeof(T);
3104 std::size_t mask = mmap_alignment - 1;
3105 return (size + mask) & ~mask;
3115 shape_type
const & chunk_shape=shape_type(),
3117 std::string
const & path =
"")
3119 #ifndef VIGRA_NO_SPARSE_FILE
3120 , offset_array_(this->chunkArrayShape())
3125 #ifdef VIGRA_NO_SPARSE_FILE
3126 file_capacity_ = 4*
prod(this->chunk_shape_)*
sizeof(T);
3130 end = offset_array_.end();
3131 std::size_t size = 0;
3132 for(; i != end; ++i)
3135 size += computeAllocSize(this->chunkShape(i.point()));
3137 file_capacity_ = size;
3138 this->overhead_bytes_ += offset_array_.size()*
sizeof(std::size_t);
3144 file_ = ::CreateFile(winTempFileName(path).c_str(), GENERIC_READ | GENERIC_WRITE,
3145 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_DELETE_ON_CLOSE, NULL);
3146 if (file_ == INVALID_HANDLE_VALUE)
3147 winErrorToException(
"ChunkedArrayTmpFile(): ");
3151 if(!::DeviceIoControl(file_, FSCTL_SET_SPARSE, NULL, 0, NULL, 0, &dwTemp, NULL))
3152 winErrorToException(
"ChunkedArrayTmpFile(): ");
3158 static const std::size_t bits =
sizeof(LONG)*8, mask = (std::size_t(1) << bits) - 1;
3159 mappedFile_ = CreateFileMapping(file_, NULL, PAGE_READWRITE,
3160 file_capacity_ >> bits, file_capacity_ & mask, NULL);
3162 winErrorToException(
"ChunkedArrayTmpFile(): ");
3164 mappedFile_ = file_ = fileno(tmpfile());
3166 throw std::runtime_error(
"ChunkedArrayTmpFile(): unable to open file.");
3167 lseek(file_, file_capacity_-1, SEEK_SET);
3168 if(write(file_,
"0", 1) == -1)
3169 throw std::runtime_error(
"ChunkedArrayTmpFile(): unable to resize file.");
3175 typename ChunkStorage::iterator i = this->handle_array_.
begin(),
3176 end = this->handle_array_.end();
3177 for(; i != end; ++i)
3180 delete static_cast<Chunk*
>(i->pointer_);
3184 ::CloseHandle(mappedFile_);
3185 ::CloseHandle(file_);
3191 virtual pointer loadChunk(ChunkBase<N, T> ** p, shape_type
const & index)
3195 shape_type shape = this->chunkShape(index);
3196 std::size_t chunk_size = computeAllocSize(shape);
3197 #ifdef VIGRA_NO_SPARSE_FILE
3198 std::size_t offset = file_size_;
3199 if(offset + chunk_size > file_capacity_)
3201 file_capacity_ = max<std::size_t>(offset+chunk_size, file_capacity_ * 120 / 100);
3202 if(lseek(file_, file_capacity_-1, SEEK_SET) == -1)
3203 throw std::runtime_error(
"ChunkedArrayTmpFile(): unable to reset file size.");
3204 if(write(file_,
"0", 1) == -1)
3205 throw std::runtime_error(
"ChunkedArrayTmpFile(): unable to resize file.");
3207 file_size_ += chunk_size;
3209 std::size_t offset = offset_array_[index];
3211 *p =
new Chunk(shape, offset, chunk_size, mappedFile_);
3212 this->overhead_bytes_ +=
sizeof(Chunk);
3214 return static_cast<Chunk*
>(*p)->map();
3217 virtual bool unloadChunk(ChunkBase<N, T> * chunk,
bool )
3219 static_cast<Chunk *
>(chunk)->unmap();
3223 virtual std::string backend()
const
3225 return "ChunkedArrayTmpFile";
3228 virtual std::size_t dataBytes(ChunkBase<N,T> * c)
const
3230 return c->pointer_ == 0
3232 :
static_cast<Chunk*
>(c)->alloc_size_;
3237 #ifdef VIGRA_NO_SPARSE_FILE
3238 return sizeof(Chunk) +
sizeof(SharedChunkHandle<N, T>);
3240 return sizeof(Chunk) +
sizeof(SharedChunkHandle<N, T>) +
sizeof(std::size_t);
3244 #ifndef VIGRA_NO_SPARSE_FILE
3245 OffsetStorage offset_array_;
3247 FileHandle file_, mappedFile_;
3248 std::size_t file_size_, file_capacity_;
3251 template<
unsigned int N,
class U>
3253 :
public MultiCoordinateIterator<N>
3254 ,
private MultiArrayView<N, typename UnqualifiedType<U>::type>
3257 typedef typename UnqualifiedType<U>::type T;
3258 typedef MultiCoordinateIterator<N> base_type;
3259 typedef MultiArrayView<N, T> base_type2;
3261 typedef typename base_type::shape_type shape_type;
3262 typedef typename base_type::difference_type difference_type;
3263 typedef ChunkIterator iterator;
3264 typedef std::random_access_iterator_tag iterator_category;
3266 typedef MultiArrayView<N, T> value_type;
3267 typedef MultiArrayView<N, T> & reference;
3268 typedef MultiArrayView<N, T>
const & const_reference;
3269 typedef MultiArrayView<N, T> * pointer;
3270 typedef MultiArrayView<N, T>
const * const_pointer;
3272 typedef typename IfBool<UnqualifiedType<U>::isConst,
3273 ChunkedArrayBase<N, T>
const,
3274 ChunkedArrayBase<N, T> >::type array_type;
3275 typedef IteratorChunkHandle<N, T> Chunk;
3283 ChunkIterator(array_type * array,
3284 shape_type
const & start, shape_type
const & end,
3285 shape_type
const & chunk_start, shape_type
const & chunk_end,
3286 shape_type
const & chunk_shape)
3287 : base_type(chunk_start, chunk_end)
3289 , chunk_(chunk_start * chunk_shape)
3290 , start_(start - chunk_.offset_)
3291 , stop_(end - chunk_.offset_)
3292 , chunk_shape_(chunk_shape)
3297 ChunkIterator(ChunkIterator
const & rhs)
3300 , array_(rhs.array_)
3301 , chunk_(rhs.chunk_)
3302 , start_(rhs.start_)
3304 , chunk_shape_(rhs.chunk_shape_)
3309 ChunkIterator & operator=(ChunkIterator
const & rhs)
3313 base_type::operator=(rhs);
3314 array_ = rhs.array_;
3315 chunk_ = rhs.chunk_;
3316 start_ = rhs.start_;
3318 chunk_shape_ = rhs.chunk_shape_;
3324 reference operator*()
3329 const_reference operator*()
const
3334 pointer operator->()
3339 const_pointer operator->()
const
3346 return *(ChunkIterator(*
this) += i);
3349 value_type operator[](
const shape_type &coordOffset)
const
3351 return *(ChunkIterator(*
this) += coordOffset);
3358 shape_type array_point = max(start_, this->point()*chunk_shape_),
3359 upper_bound(SkipInitialization);
3360 this->m_ptr = array_->chunkForIterator(array_point, this->m_stride, upper_bound, &chunk_);
3361 this->m_shape = min(upper_bound, stop_) - array_point;
3365 shape_type chunkStart()
const
3367 return max(start_, this->point()*chunk_shape_) + chunk_.offset_;
3370 shape_type chunkStop()
const
3372 return chunkStart() + this->m_shape;
3375 ChunkIterator & operator++()
3377 base_type::operator++();
3382 ChunkIterator operator++(
int)
3384 ChunkIterator res(*
this);
3396 ChunkIterator &
operator+=(
const shape_type &coordOffset)
3403 ChunkIterator & operator--()
3405 base_type::operator--();
3410 ChunkIterator operator--(
int)
3412 ChunkIterator res(*
this);
3422 ChunkIterator &
operator-=(
const shape_type &coordOffset)
3427 ChunkIterator getEndIterator()
const
3429 ChunkIterator res(*
this);
3430 static_cast<base_type &
>(res) = base_type::getEndIterator();
3437 return ChunkIterator(*
this) += d;
3442 return ChunkIterator(*
this) -= d;
3445 ChunkIterator
operator+(
const shape_type &coordOffset)
const
3447 return ChunkIterator(*
this) += coordOffset;
3450 ChunkIterator
operator-(
const shape_type &coordOffset)
const
3452 return ChunkIterator(*
this) -= coordOffset;
3460 #ifndef DOXYGEN // doxygen doesn't understand this
3461 using base_type::operator==;
3462 using base_type::operator!=;
3464 using base_type::shape;
3466 array_type * array_;
3468 shape_type start_, stop_, chunk_shape_, array_point_;
3475 #undef VIGRA_ASSERT_INSIDE
std::size_t cacheMaxSize() const
Get the number of chunks the cache will hold.
Definition: multi_array_chunked.hxx:2359
chunk_iterator chunk_end(shape_type const &start, shape_type const &stop)
Create the end iterator for iteration over all chunks intersected by the given ROI.
Definition: multi_array_chunked.hxx:2439
Sequential iterator for MultiArrayView.
Definition: multi_fwd.hxx:161
MultiArrayView< N-1, T, ChunkedArrayTag > bindOuter(difference_type_1 index) const
Create a lower dimensional view to the chunked array.
Definition: multi_array_chunked.hxx:2295
std::size_t dataBytes() const
Bytes of main memory occupied by the array's data.
Definition: multi_array_chunked.hxx:1677
ChunkedArrayTmpFile(shape_type const &shape, shape_type const &chunk_shape=shape_type(), ChunkedArrayOptions const &options=ChunkedArrayOptions(), std::string const &path="")
Construct with given 'shape', 'chunk_shape' and 'options'.
Definition: multi_array_chunked.hxx:3114
const_iterator end() const
Create the end iterator for read-only scan-order iteration over the entire chunked array...
Definition: multi_array_chunked.hxx:2423
void commitSubarray(shape_type const &start, MultiArrayView< N, U, Stride > const &subarray)
Copy an ordinary MultiArrayView into an ROI of the chunked array.
Definition: multi_array_chunked.hxx:2116
Option object for ChunkedArray construction.
Definition: multi_array_chunked.hxx:1274
view_type::pointer pointer
Definition: multi_array.hxx:2450
void transpose(const MultiArrayView< 2, T, C1 > &v, MultiArrayView< 2, T, C2 > &r)
Definition: matrix.hxx:963
void releaseChunks(shape_type const &start, shape_type const &stop, bool destroy=false)
Definition: multi_array_chunked.hxx:2053
ChunkedArrayCompressed(shape_type const &shape, shape_type const &chunk_shape=shape_type(), ChunkedArrayOptions const &options=ChunkedArrayOptions())
Construct with given 'shape', 'chunk_shape' and 'options'.
Definition: multi_array_chunked.hxx:2914
Diff2D operator-(Diff2D const &a, Diff2D const &b)
Definition: diff2d.hxx:711
Definition: multi_array_chunked.hxx:2809
const_iterator begin() const
Create a read-only scan-order iterator for the entire chunked array.
Definition: multi_array_chunked.hxx:2415
chunk_iterator chunk_begin(shape_type const &start, shape_type const &stop)
Create an iterator over all chunks intersected by the given ROI.
Definition: multi_array_chunked.hxx:2430
MultiArrayView< N-1, T, ChunkedArrayTag > bind(difference_type_1 index) const
Create a lower dimensional view to the chunked array.
Definition: multi_array_chunked.hxx:2283
ChunkedArrayFull(shape_type const &shape, ChunkedArrayOptions const &options=ChunkedArrayOptions(), Alloc const &alloc=Alloc())
Construct with given 'shape' and 'options', using the allocator 'alloc' to manage the memory...
Definition: multi_array_chunked.hxx:2571
void linearSequence(Iterator first, Iterator last, Value start, Value step)
Fill an array with a sequence of numbers.
Definition: algorithm.hxx:208
view_type::iterator iterator
Definition: multi_array.hxx:2496
iterator begin()
Create a scan-order iterator for the entire chunked array.
Definition: multi_array_chunked.hxx:2383
shape_type chunkStart(shape_type const &global_start) const
Find the chunk that contains array element 'global_start'.
Definition: multi_array_chunked.hxx:1711
const_iterator cbegin() const
Create a read-only scan-order iterator for the entire chunked array.
Definition: multi_array_chunked.hxx:2399
void compress(char const *source, std::size_t size, ArrayVector< char > &dest, CompressionMethod method)
Diff2D operator+(Diff2D const &a, Diff2D const &b)
Definition: diff2d.hxx:739
ChunkedArrayLazy(shape_type const &shape, shape_type const &chunk_shape=shape_type(), ChunkedArrayOptions const &options=ChunkedArrayOptions(), Alloc const &alloc=Alloc())
Construct with given 'shape', 'chunk_shape' and 'options', using the allocator 'alloc' to manage the ...
Definition: multi_array_chunked.hxx:2741
Interface and base class for chunked arrays.
Definition: multi_array_chunked.hxx:470
std::ptrdiff_t MultiArrayIndex
Definition: multi_fwd.hxx:60
value_type getItem(shape_type const &point) const
Read the array element at index 'point'.
Definition: multi_array_chunked.hxx:2223
bool allLess(TinyVectorBase< V1, SIZE, D1, D2 > const &l, TinyVectorBase< V2, SIZE, D3, D4 > const &r)
pointwise less-than
Definition: tinyvector.hxx:1375
Definition: multi_array_chunked.hxx:2522
view_type::difference_type difference_type
Definition: multi_array.hxx:2470
int cacheSize() const
Number of chunks currently fitting into the cache.
Definition: multi_array_chunked.hxx:1667
FFTWComplex< R > & operator-=(FFTWComplex< R > &a, const FFTWComplex< R > &b)
subtract-assignment
Definition: fftw3.hxx:867
MultiArrayView< N-M, T, ChunkedArrayTag > bindOuter(const TinyVector< Index, M > &d) const
Create a lower dimensional view to the chunked array.
Definition: multi_array_chunked.hxx:2307
MultiArrayView< N-M, T, ChunkedArrayTag > bindInner(const TinyVector< Index, M > &d) const
Create a lower dimensional view to the chunked array.
Definition: multi_array_chunked.hxx:2339
ChunkedArrayOptions & compression(CompressionMethod v)
Compress inactive chunks with the given method.
Definition: multi_array_chunked.hxx:1319
chunk_const_iterator chunk_cbegin(shape_type const &start, shape_type const &stop) const
Create a read-only iterator over all chunks intersected by the given ROI.
Definition: multi_array_chunked.hxx:2464
void setCacheMaxSize(std::size_t c)
Set the number of chunks the cache will hold.
Definition: multi_array_chunked.hxx:2371
const_view_type const_subarray(shape_type const &start, shape_type const &stop) const
Create a read-only view to the specified ROI.
Definition: multi_array_chunked.hxx:2210
std::size_t overheadBytes() const
Bytes of main memory needed to manage the chunked storage.
Definition: multi_array_chunked.hxx:1684
FFTWComplex< R > & operator+=(FFTWComplex< R > &a, const FFTWComplex< R > &b)
add-assignment
Definition: fftw3.hxx:859
view_type::reference reference
Definition: multi_array.hxx:2458
Int32 log2i(UInt32 x)
Compute the base-2 logarithm of an integer.
Definition: mathutil.hxx:344
NumericTraits< V >::Promote prod(TinyVectorBase< V, SIZE, D1, D2 > const &l)
product of the vector's elements
Definition: tinyvector.hxx:2097
bool operator!=(FFTWComplex< R > const &a, const FFTWComplex< R > &b)
not equal
Definition: fftw3.hxx:841
bool operator!=(MultiArrayView< N, U, C1 > const &rhs) const
Check if two arrays differ in at least one element.
Definition: multi_array_chunked.hxx:1802
iterator end()
Create the end iterator for scan-order iteration over the entire chunked array.
Definition: multi_array_chunked.hxx:2391
ChunkedArrayOptions & fillValue(double v)
Element value for read-only access of uninitialized chunks.
Definition: multi_array_chunked.hxx:1289
void checkoutSubarray(shape_type const &start, MultiArrayView< N, U, Stride > &subarray) const
Copy an ROI of the chunked array into an ordinary MultiArrayView.
Definition: multi_array_chunked.hxx:2094
bool operator==(FFTWComplex< R > const &a, const FFTWComplex< R > &b)
equal
Definition: fftw3.hxx:825
shape_type chunkStop(shape_type global_stop) const
Find the chunk that is beyond array element 'global_stop'.
Definition: multi_array_chunked.hxx:1725
virtual std::size_t overheadBytesPerChunk() const
Bytes of main memory needed to manage a single chunk.
Definition: multi_array_chunked.hxx:3235
TinyVector< MultiArrayIndex, N > type
Definition: multi_shape.hxx:250
view_type::size_type size_type
Definition: multi_array.hxx:2466
const_view_type subarray(shape_type const &start, shape_type const &stop) const
Create a read-only view to the specified ROI.
Definition: multi_array_chunked.hxx:2196
std::size_t dataBytesPerChunk() const
Number of data bytes in an uncompressed chunk.
Definition: multi_array_chunked.hxx:1700
ChunkedArrayOptions & cacheMax(int v)
Maximum number of chunks in the cache.
Definition: multi_array_chunked.hxx:1304
view_type::const_pointer const_pointer
Definition: multi_array.hxx:2454
view_type::value_type value_type
Definition: multi_array.hxx:2446
void setItem(shape_type const &point, value_type const &v)
Write the array element at index 'point'.
Definition: multi_array_chunked.hxx:2246
shape_type chunkShape(shape_type const &chunk_index) const
Find the shape of the chunk indexed by 'chunk_index'.
Definition: multi_array_chunked.hxx:1739
Definition: multi_array_chunked.hxx:2687
Class for fixed size vectors.This class contains an array of size SIZE of the specified VALUETYPE...
Definition: accessor.hxx:940
MultiArrayView< N-1, T, ChunkedArrayTag > bindInner(difference_type_1 index) const
Create a lower dimensional view to the chunked array.
Definition: multi_array_chunked.hxx:2327
Definition: metaprogramming.hxx:119
chunk_const_iterator chunk_cend(shape_type const &start, shape_type const &stop) const
Create the end iterator for read-only iteration over all chunks intersected by the given ROI...
Definition: multi_array_chunked.hxx:2473
chunk_const_iterator chunk_begin(shape_type const &start, shape_type const &stop) const
Create a read-only iterator over all chunks intersected by the given ROI.
Definition: multi_array_chunked.hxx:2447
view_type::difference_type_1 difference_type_1
Definition: multi_array.hxx:2474
virtual shape_type chunkArrayShape() const
Number of chunks along each coordinate direction.
Definition: multi_array_chunked.hxx:1691
virtual std::size_t overheadBytesPerChunk() const
Bytes of main memory needed to manage a single chunk.
Definition: multi_array_chunked.hxx:2629
detail::SelectIntegerType< 32, detail::UnsignedIntTypes >::type UInt32
32-bit unsigned int
Definition: sized_int.hxx:183
bool operator==(MultiArrayView< N, U, C1 > const &rhs) const
Check if two arrays are elementwise equal.
Definition: multi_array_chunked.hxx:1787
chunk_const_iterator chunk_end(shape_type const &start, shape_type const &stop) const
Create the end iterator for read-only iteration over all chunks intersected by the given ROI...
Definition: multi_array_chunked.hxx:2456
Definition: multi_array_chunked.hxx:3008
Base class for, and view to, vigra::MultiArray.
Definition: multi_array.hxx:652
void uncompress(char const *source, std::size_t srcSize, char *dest, std::size_t destSize, CompressionMethod method)
virtual std::size_t overheadBytesPerChunk() const
Bytes of main memory needed to manage a single chunk.
Definition: multi_array_chunked.hxx:2790
bool allLessEqual(TinyVectorBase< V1, SIZE, D1, D2 > const &l, TinyVectorBase< V2, SIZE, D3, D4 > const &r)
pointwise less-equal
Definition: tinyvector.hxx:1399
view_type subarray(shape_type const &start, shape_type const &stop)
Create a view to the specified ROI.
Definition: multi_array_chunked.hxx:2182
UInt32 ceilPower2(UInt32 x)
Round up to the nearest power of 2.
Definition: mathutil.hxx:278
virtual std::size_t overheadBytesPerChunk() const
Bytes of main memory needed to manage a single chunk.
Definition: multi_array_chunked.hxx:2981
TinyVector< V, SIZE > transpose(TinyVector< V, SIZE > const &t, TinyVector< T, SIZE > const &permutation)
transposed copy
Definition: tinyvector.hxx:2251
const_iterator cend() const
Create the end iterator for read-only scan-order iteration over the entire chunked array...
Definition: multi_array_chunked.hxx:2407
MultiArrayView< N-1, T, ChunkedArrayTag > bindAt(MultiArrayIndex dim, MultiArrayIndex index) const
Create a lower dimensional view to the chunked array.
Definition: multi_array_chunked.hxx:2267
Iterate over a virtual array where each element contains its coordinate.
Definition: multi_fwd.hxx:157
difference_type key_type
Definition: multi_array.hxx:691
virtual shape_type chunkArrayShape() const
Number of chunks along each coordinate direction.
Definition: multi_array_chunked.hxx:2609
view_type::const_reference const_reference
Definition: multi_array.hxx:2462
ChunkedArrayOptions()
Initialize options with defaults.
Definition: multi_array_chunked.hxx:1279