Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
concurrent_vector.cpp
1/*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17#if (_MSC_VER)
18 //MSVC 10 "deprecated" the application of some std:: algorithms to raw pointers as unsafe.
19 //The reason is that the destination is not checked for bounds/having enough space.
20 #define _SCL_SECURE_NO_WARNINGS
21#endif
22
23#include "tbb/concurrent_vector.h"
24#include "tbb/cache_aligned_allocator.h"
25#include "tbb/tbb_exception.h"
26#include "tbb_misc.h"
27#include "itt_notify.h"
28
29#include <cstring>
30#include <memory> //for uninitialized_fill_n
31
32#if defined(_MSC_VER) && defined(_Wp64)
33 // Workaround for overzealous compiler warnings in /Wp64 mode
34 #pragma warning (disable: 4267)
35#endif
36
37namespace tbb {
38
39namespace internal {
40class concurrent_vector_base_v3::helper :no_copy {
41public:
43 static const size_type page_size = 4096;
44
45 inline static bool incompact_predicate(size_type size) { // assert size != 0, see source/test/test_vector_layout.cpp
46 return size < page_size || ((size-1)%page_size < page_size/2 && size < page_size * 128); // for more details
47 }
48
49 inline static size_type find_segment_end(const concurrent_vector_base_v3 &v) {
50 segment_t *s = v.my_segment;
51 segment_index_t u = s==v.my_storage? pointers_per_short_table : pointers_per_long_table;
52 segment_index_t k = 0;
53 while( k < u && (s[k].load<relaxed>()==segment_allocated() ))
54 ++k;
55 return k;
56 }
57
58 // TODO: optimize accesses to my_first_block
59 //! assign first segment size. k is the index of the last segment to be allocated, not a count of segments
60 inline static void assign_first_segment_if_necessary(concurrent_vector_base_v3 &v, segment_index_t k) {
61 if( !v.my_first_block ) {
62 /* There was a suggestion to set first segment according to incompact_predicate:
63 while( k && !helper::incompact_predicate(segment_size( k ) * element_size) )
64 --k; // while previous vector size is compact, decrement
65 // reasons to not do it:
66 // * constructor(n) is not ready to accept fragmented segments
67 // * backward compatibility due to that constructor
68 // * current version gives additional guarantee and faster init.
69 // * two calls to reserve() will give the same effect.
70 */
71 v.my_first_block.compare_and_swap(k+1, 0); // store number of segments
72 }
73 }
74
75 inline static void *allocate_segment( concurrent_vector_base_v3 &v, size_type n ) {
76 void *ptr = v.vector_allocator_ptr(v, n);
77 if(!ptr) throw_exception(eid_bad_alloc); // check for bad allocation, throw exception
78 return ptr;
79 }
80
81 //! Publish segment so other threads can see it.
82 template<typename argument_type>
83 inline static void publish_segment( segment_t& s, argument_type rhs ) {
84 // see also itt_store_pointer_with_release_v3()
85 ITT_NOTIFY( sync_releasing, &s );
86 s.store<release>(rhs);
87 }
88
89 static size_type enable_segment(concurrent_vector_base_v3 &v, size_type k, size_type element_size, bool mark_as_not_used_on_failure = false);
90
91 // TODO: rename as get_segments_table() and return segment pointer
92 inline static void extend_table_if_necessary(concurrent_vector_base_v3 &v, size_type k, size_type start ) {
93 if(k >= pointers_per_short_table && v.my_segment == v.my_storage)
94 extend_segment_table(v, start );
95 }
96
97 static void extend_segment_table(concurrent_vector_base_v3 &v, size_type start );
98
99 struct segment_not_used_predicate: no_assign {
100 segment_t &s;
101 segment_not_used_predicate(segment_t &segment) : s(segment) {}
102 bool operator()() const { return s.load<relaxed>() == segment_not_used ();}
103 };
104 inline static segment_t& acquire_segment(concurrent_vector_base_v3 &v, size_type index, size_type element_size, bool owner) {
105 segment_t &s = v.my_segment[index]; // TODO: pass v.my_segment as argument
106 if( s.load<acquire>() == segment_not_used() ) { // do not check for segment_allocation_failed state
107 if( owner ) {
108 enable_segment( v, index, element_size );
109 } else {
110 ITT_NOTIFY(sync_prepare, &s);
111 spin_wait_while(segment_not_used_predicate(s));
112 ITT_NOTIFY(sync_acquired, &s);
113 }
114 } else {
115 ITT_NOTIFY(sync_acquired, &s);
116 }
117 enforce_segment_allocated(s.load<relaxed>()); //it's hard to recover correctly after segment_allocation_failed state
118 return s;
119 }
120
122 segment_t *table;// TODO: review all segment_index_t as just short type
123 size_type first_block, k, sz, start, finish, element_size;
124 helper(segment_t *segments, size_type fb, size_type esize, size_type index, size_type s, size_type f) throw()
125 : table(segments), first_block(fb), k(index), sz(0), start(s), finish(f), element_size(esize) {}
126 inline void first_segment() throw() {
127 __TBB_ASSERT( start <= finish, NULL );
128 __TBB_ASSERT( first_block || !finish, NULL );
129 if( k < first_block ) k = 0; // process solid segment at a time
130 size_type base = segment_base( k );
131 __TBB_ASSERT( base <= start, NULL );
132 finish -= base; start -= base; // rebase as offsets from segment k
133 sz = k ? base : segment_size( first_block ); // sz==base for k>0
134 }
135 inline void next_segment() throw() {
136 finish -= sz; start = 0; // offsets from next segment
137 if( !k ) k = first_block;
138 else { ++k; sz = segment_size( k ); }
139 }
140 template<typename F>
141 inline size_type apply(const F &func) {
142 first_segment();
143 while( sz < finish ) { // work for more than one segment
144 //TODO: remove extra load() of table[k] inside func
145 func( table[k], table[k].load<relaxed>().pointer<char>() + element_size*start, sz - start );
146 next_segment();
147 }
148 func( table[k], table[k].load<relaxed>().pointer<char>() + element_size*start, finish - start );
149 return k;
150 }
151 inline segment_value_t get_segment_value(size_type index, bool wait) {
152 segment_t &s = table[index];
153 if( wait && (s.load<acquire>() == segment_not_used()) ) {
154 ITT_NOTIFY(sync_prepare, &s);
155 spin_wait_while_eq( s, segment_not_used() );
156 ITT_NOTIFY(sync_acquired, &s);
157 }
158 return s.load<relaxed>();
159 }
160 ~helper() {
161 if( sz >= finish ) return; // the work is done correctly
162 cleanup();
163 }
164
165 //! Out-of-line code to assist the destructor in infrequent cases.
166 void cleanup();
167
168 //! TODO: turn into lambda functions when available
169 struct init_body {
170 internal_array_op2 func;
171 const void *arg;
172 init_body(internal_array_op2 init, const void *src) : func(init), arg(src) {}
173 void operator()(segment_t &, void *begin, size_type n) const {
174 func( begin, arg, n );
175 }
176 };
177 struct safe_init_body {
178 internal_array_op2 func;
179 const void *arg;
180 safe_init_body(internal_array_op2 init, const void *src) : func(init), arg(src) {}
181 void operator()(segment_t &s, void *begin, size_type n) const {
182 enforce_segment_allocated(s.load<relaxed>());
183 func( begin, arg, n );
184 }
185 };
186 struct destroy_body {
187 internal_array_op1 func;
188 destroy_body(internal_array_op1 destroy) : func(destroy) {}
189 void operator()(segment_t &s, void *begin, size_type n) const {
190 if(s.load<relaxed>() == segment_allocated())
191 func( begin, n );
192 }
193 };
194}; // class helper
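// The publish_segment()/acquire_segment() pair above follows a release/acquire publication
// protocol: the owning thread fills a segment and then stores its pointer with release
// semantics, while readers spin until the pointer appears and load it with acquire semantics.
// Below is a minimal standalone sketch of that protocol, for illustration only: it uses
// std::atomic in place of tbb::atomic, leaves out the ITT hooks, and the example_* names
// are stand-ins, not TBB identifiers.
#if 0   // illustrative sketch, not compiled as part of this file
#include <atomic>

static std::atomic<void*> example_slot{nullptr};

void example_publish(void* segment) {                       // cf. publish_segment()
    example_slot.store(segment, std::memory_order_release);
}

void* example_acquire() {                                   // cf. acquire_segment()
    void* p;
    while( (p = example_slot.load(std::memory_order_acquire)) == nullptr )
        ;                                                   // cf. spin_wait_while(segment_not_used_predicate(s))
    return p;
}
#endif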
195
196void concurrent_vector_base_v3::helper::extend_segment_table(concurrent_vector_base_v3 &v, concurrent_vector_base_v3::size_type start) {
197 if( start > segment_size(pointers_per_short_table) ) start = segment_size(pointers_per_short_table);
198 // If other threads are trying to set pointers in the short segment, wait for them to finish their
199 // assignments before we copy the short segment to the long segment. Note: grow_to_at_least depends on it
200 for( segment_index_t i = 0; segment_base(i) < start && v.my_segment == v.my_storage; i++ ){
201 if(v.my_storage[i].load<relaxed>() == segment_not_used()) {
202 ITT_NOTIFY(sync_prepare, &v.my_storage[i]);
203 atomic_backoff backoff(true);
204 while( v.my_segment == v.my_storage && (v.my_storage[i].load<relaxed>() == segment_not_used()) )
205 backoff.pause();
206 ITT_NOTIFY(sync_acquired, &v.my_storage[i]);
207 }
208 }
209 if( v.my_segment != v.my_storage ) return;
210
211 segment_t* new_segment_table = (segment_t*)NFS_Allocate( pointers_per_long_table, sizeof(segment_t), NULL );
212 __TBB_ASSERT(new_segment_table, "NFS_Allocate should throw an exception if it cannot allocate the requested storage, not return a zero pointer" );
213 std::uninitialized_fill_n(new_segment_table,size_t(pointers_per_long_table),segment_t()); //init newly allocated table
214 //TODO: replace with static assert
215 __TBB_STATIC_ASSERT(pointers_per_long_table >= pointers_per_short_table, "size of the big table should be no less than that of the small one, as we copy values to it" );
216 std::copy(v.my_storage, v.my_storage+pointers_per_short_table, new_segment_table);//copy values from old table, here operator= of segment_t is used
217 if( v.my_segment.compare_and_swap( new_segment_table, v.my_storage ) != v.my_storage )
218 NFS_Free( new_segment_table );
219 // else TODO: add ITT_NOTIFY signals for v.my_segment?
220}
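// extend_segment_table() above uses an "allocate privately, publish with a single CAS, free the
// copy that lost the race" idiom. A minimal standalone sketch of the same idiom follows, for
// illustration only: std::atomic and new/delete stand in for tbb::atomic and NFS_Allocate/NFS_Free,
// and g_table/grow_table are illustrative names, not TBB identifiers.
#if 0   // illustrative sketch, not compiled as part of this file
#include <algorithm>
#include <atomic>
#include <cstddef>

static std::atomic<void**> g_table{nullptr};

void** grow_table(void** old_table, std::size_t old_n, std::size_t new_n) {
    void** bigger = new void*[new_n]();                          // cf. NFS_Allocate + uninitialized_fill_n
    std::copy(old_table, old_table + old_n, bigger);             // cf. copying the short table
    void** expected = old_table;
    if( !g_table.compare_exchange_strong(expected, bigger) ) {   // cf. my_segment.compare_and_swap
        delete[] bigger;                                         // another thread already published its copy
        return expected;                                         // use the winner's table
    }
    return bigger;
}
#endif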
221
222concurrent_vector_base_v3::size_type concurrent_vector_base_v3::helper::enable_segment( concurrent_vector_base_v3 &v, concurrent_vector_base_v3::size_type k, concurrent_vector_base_v3::size_type element_size,
223 bool mark_as_not_used_on_failure ) {
224
225 struct segment_scope_guard : no_copy{
226 segment_t* my_segment_ptr;
227 bool my_mark_as_not_used;
228 segment_scope_guard(segment_t& segment, bool mark_as_not_used) : my_segment_ptr(&segment), my_mark_as_not_used(mark_as_not_used){}
229 void dismiss(){ my_segment_ptr = 0;}
230 ~segment_scope_guard(){
231 if (my_segment_ptr){
232 if (!my_mark_as_not_used){
233 publish_segment(*my_segment_ptr, segment_allocation_failed());
234 }else{
235 publish_segment(*my_segment_ptr, segment_not_used());
236 }
237 }
238 }
239 };
240
241 segment_t* s = v.my_segment; // TODO: optimize out as argument? Optimize accesses to my_first_block
242 __TBB_ASSERT(s[k].load<relaxed>() != segment_allocated(), "concurrent operation during growth?");
243
244 size_type size_of_enabled_segment = segment_size(k);
245 size_type size_to_allocate = size_of_enabled_segment;
246 if( !k ) {
247 assign_first_segment_if_necessary(v, default_initial_segments-1);
248 size_of_enabled_segment = 2 ;
249 size_to_allocate = segment_size(v.my_first_block);
250
251 } else {
252 spin_wait_while_eq( v.my_first_block, segment_index_t(0) );
253 }
254
255 if( k && (k < v.my_first_block)){ //no need to allocate anything
256 // s[0].array is changed only once ( 0 -> !0 ) and points to uninitialized memory
257 segment_value_t array0 = s[0].load<acquire>();
258 if(array0 == segment_not_used()){
259 // sync_prepare called only if there is a wait
260 ITT_NOTIFY(sync_prepare, &s[0]);
261 spin_wait_while_eq( s[0], segment_not_used() );
262 array0 = s[0].load<acquire>();
263 }
264 ITT_NOTIFY(sync_acquired, &s[0]);
265
266 segment_scope_guard k_segment_guard(s[k], false);
267 enforce_segment_allocated(array0); // initial segment should be allocated
268 k_segment_guard.dismiss();
269
270 publish_segment( s[k],
271 static_cast<void*>(array0.pointer<char>() + segment_base(k)*element_size )
272 );
273 } else {
274 segment_scope_guard k_segment_guard(s[k], mark_as_not_used_on_failure);
275 publish_segment(s[k], allocate_segment(v, size_to_allocate));
276 k_segment_guard.dismiss();
277 }
278 return size_of_enabled_segment;
279}
280
281void concurrent_vector_base_v3::helper::cleanup() {
282 if( !sz ) { // allocation failed, restore the table
283 segment_index_t k_start = k, k_end = segment_index_of(finish-1);
284 if( segment_base( k_start ) < start )
285 get_segment_value(k_start++, true); // wait
286 if( k_start < first_block ) {
287 segment_value_t segment0 = get_segment_value(0, start>0); // wait if necessary
288 if((segment0 != segment_not_used()) && !k_start ) ++k_start;
289 if(segment0 != segment_allocated())
290 for(; k_start < first_block && k_start <= k_end; ++k_start )
291 publish_segment(table[k_start], segment_allocation_failed());
292 else for(; k_start < first_block && k_start <= k_end; ++k_start )
293 publish_segment(table[k_start], static_cast<void*>(
294 (segment0.pointer<char>()) + segment_base(k_start)*element_size) );
295 }
296 for(; k_start <= k_end; ++k_start ) // not in first block
297 if(table[k_start].load<acquire>() == segment_not_used())
298 publish_segment(table[k_start], segment_allocation_failed());
299 // fill allocated items
300 first_segment();
301 goto recover;
302 }
303 while( sz <= finish ) { // there is still work for at least one segment
304 next_segment();
305recover:
306 segment_value_t array = table[k].load<relaxed>();
307 if(array == segment_allocated())
308 std::memset( (array.pointer<char>()) + element_size*start, 0, ((sz<finish?sz:finish) - start)*element_size );
309 else __TBB_ASSERT( array == segment_allocation_failed(), NULL );
310 }
311}
312
313concurrent_vector_base_v3::~concurrent_vector_base_v3() {
314 segment_t* s = my_segment;
315 if( s != my_storage ) {
316#if TBB_USE_ASSERT
317 //to please assert in segment_t destructor
318 std::fill_n(my_storage,size_t(pointers_per_short_table),segment_t());
319#endif /* TBB_USE_ASSERT */
320#if TBB_USE_DEBUG
321 for( segment_index_t i = 0; i < pointers_per_long_table; i++)
322 __TBB_ASSERT( my_segment[i].load<relaxed>() != segment_allocated(), "Segment should have been freed. Please recompile with new TBB before using exceptions.");
323#endif
325 NFS_Free( s );
326 }
327}
328
329concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_capacity() const {
330 return segment_base( helper::find_segment_end(*this) );
331}
332
333void concurrent_vector_base_v3::internal_throw_exception(size_type t) const {
335 __TBB_ASSERT(t < sizeof(ids) / sizeof(exception_id), NULL);
336 throw_exception(ids[t]);
337}
338
339void concurrent_vector_base_v3::internal_reserve( size_type n, size_type element_size, size_type max_size ) {
340 if( n>max_size )
341 throw_exception(eid_reservation_length_error);
342 __TBB_ASSERT( n, NULL );
343 helper::assign_first_segment_if_necessary(*this, segment_index_of(n-1));
344 segment_index_t k = helper::find_segment_end(*this);
345
346 for( ; segment_base(k)<n; ++k ) {
348 if(my_segment[k].load<relaxed>() != segment_allocated())
349 helper::enable_segment(*this, k, element_size, true ); //in case of failure mark segments as not used
350 }
351}
352
353//TODO: Looks like atomic loads can be done relaxed here, as the only place this method is called from
354//is the constructor, which does not require synchronization (for more details see comment in the
355// concurrent_vector_base constructor).
356void concurrent_vector_base_v3::internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy ) {
357 size_type n = src.my_early_size;
359 if( n ) {
361 size_type b;
362 for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) {
363 if( (src.my_segment.load<acquire>() == src.my_storage && k >= pointers_per_short_table)
364 || (src.my_segment[k].load<relaxed>() != segment_allocated())) {
365 my_early_size = b; break;
366 }
368 size_type m = helper::enable_segment(*this, k, element_size);
369 if( m > n-b ) m = n-b;
370 my_early_size = b+m;
371 copy( my_segment[k].load<relaxed>().pointer<void>(), src.my_segment[k].load<relaxed>().pointer<void>(), m );
372 }
373 }
374}
375
376void concurrent_vector_base_v3::internal_assign( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy ) {
377 size_type n = src.my_early_size;
378 while( my_early_size>n ) { // TODO: improve
379 segment_index_t k = segment_index_of( my_early_size-1 );
380 size_type b = segment_base(k);
381 size_type new_end = b>=n ? b : n;
382 __TBB_ASSERT( my_early_size>new_end, NULL );
383 enforce_segment_allocated(my_segment[k].load<relaxed>()); //if vector was broken before
384 // destructors are supposed to not throw any exceptions
385 destroy( my_segment[k].load<relaxed>().pointer<char>() + element_size*(new_end-b), my_early_size-new_end );
386 my_early_size = new_end;
387 }
388 size_type dst_initialized_size = my_early_size;
389 my_early_size = n;
391 size_type b;
392 for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) {
393 if( (src.my_segment.load<acquire>() == src.my_storage && k >= pointers_per_short_table)
394 || src.my_segment[k].load<relaxed>() != segment_allocated() ) { // if source is damaged
395 my_early_size = b; break; // TODO: it may cause undestructed items
396 }
398 if( my_segment[k].load<relaxed>() == segment_not_used())
399 helper::enable_segment(*this, k, element_size);
400 else
401 enforce_segment_allocated(my_segment[k].load<relaxed>());
402 size_type m = k? segment_size(k) : 2;
403 if( m > n-b ) m = n-b;
404 size_type a = 0;
405 if( dst_initialized_size>b ) {
406 a = dst_initialized_size-b;
407 if( a>m ) a = m;
408 assign( my_segment[k].load<relaxed>().pointer<void>(), src.my_segment[k].load<relaxed>().pointer<void>(), a );
409 m -= a;
410 a *= element_size;
411 }
412 if( m>0 )
413 copy( my_segment[k].load<relaxed>().pointer<char>() + a, src.my_segment[k].load<relaxed>().pointer<char>() + a, m );
414 }
415 __TBB_ASSERT( src.my_early_size==n, "detected use of concurrent_vector::operator= with right side that was concurrently modified" );
416}
417
418void *concurrent_vector_base_v3::internal_push_back( size_type element_size, size_type& index ) {
419 __TBB_ASSERT( sizeof(my_early_size)==sizeof(uintptr_t), NULL );
420 size_type tmp = my_early_size.fetch_and_increment<acquire>();
421 index = tmp;
422 segment_index_t k_old = segment_index_of( tmp );
423 size_type base = segment_base(k_old);
424 helper::extend_table_if_necessary(*this, k_old, tmp);
425 segment_t& s = helper::acquire_segment(*this, k_old, element_size, base==tmp);
426 size_type j_begin = tmp-base;
427 return (void*)(s.load<relaxed>().pointer<char>() + element_size*j_begin);
428}
429
430void concurrent_vector_base_v3::internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src ) {
431 internal_grow_to_at_least_with_result( new_size, element_size, init, src );
432}
433
434concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src ) {
435 size_type e = my_early_size;
436 while( e<new_size ) {
437 size_type f = my_early_size.compare_and_swap(new_size,e);
438 if( f==e ) {
439 internal_grow( e, new_size, element_size, init, src );
440 break;
441 }
442 e = f;
443 }
444 // Check/wait for segment allocation to complete
445 segment_index_t i, k_old = segment_index_of( new_size-1 );
446 if( k_old >= pointers_per_short_table && my_segment == my_storage ) {
447 helper::extend_segment_table(*this, my_early_size );
448 }
449 for( i = 0; i <= k_old; ++i ) {
450 segment_t &s = my_segment[i];
451 if(s.load<relaxed>() == segment_not_used()) {
452 ITT_NOTIFY(sync_prepare, &s);
453 atomic_backoff backoff(true);
454 while( my_segment[i].load<acquire>() == segment_not_used() ) // my_segment may change concurrently
455 backoff.pause();
456 ITT_NOTIFY(sync_acquired, &s);
457 }
458 enforce_segment_allocated(my_segment[i].load<relaxed>());
459 }
460#if TBB_USE_DEBUG
461 size_type capacity = internal_capacity();
462 __TBB_ASSERT( capacity >= new_size, NULL);
463#endif
464 return e;
465}
466
467concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src ) {
468 size_type result = my_early_size.fetch_and_add(delta);
469 internal_grow( result, result+delta, element_size, init, src );
470 return result;
471}
472
473void concurrent_vector_base_v3::internal_grow( const size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src ) {
474 __TBB_ASSERT( start<finish, "start must be less than finish" );
475 segment_index_t k_start = segment_index_of(start), k_end = segment_index_of(finish-1);
476 helper::assign_first_segment_if_necessary(*this, k_end);
477 helper::extend_table_if_necessary(*this, k_end, start);
478 helper range(my_segment, my_first_block, element_size, k_start, start, finish);
479 for(; k_end > k_start && k_end >= range.first_block; --k_end ) // allocate segments in reverse order
480 helper::acquire_segment(*this, k_end, element_size, true/*for k_end>k_start*/);
481 for(; k_start <= k_end; ++k_start ) // but allocate first block in straight order
482 helper::acquire_segment(*this, k_start, element_size, segment_base( k_start ) >= start );
483 range.apply( helper::init_body(init, src) );
484}
485
486void concurrent_vector_base_v3::internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
487 internal_array_op1 destroy, internal_array_op2 init ) {
488 size_type j = my_early_size;
489 if( n > j ) { // construct items
490 internal_reserve(n, element_size, max_size);
491 my_early_size = n;
492 helper for_each(my_segment, my_first_block, element_size, segment_index_of(j), j, n);
493 for_each.apply( helper::safe_init_body(init, src) );
494 } else {
495 my_early_size = n;
496 helper for_each(my_segment, my_first_block, element_size, segment_index_of(n), n, j);
497 for_each.apply( helper::destroy_body(destroy) );
498 }
499}
500
501concurrent_vector_base_v3::segment_index_t concurrent_vector_base_v3::internal_clear( internal_array_op1 destroy ) {
502 __TBB_ASSERT( my_segment, NULL );
503 size_type j = my_early_size;
504 my_early_size = 0;
505 helper for_each(my_segment, my_first_block, 0, 0, 0, j); // element_size is safe to be zero if 'start' is zero
506 j = for_each.apply( helper::destroy_body(destroy) );
507 size_type i = helper::find_segment_end(*this);
508 return j < i? i : j+1;
509}
510
511void *concurrent_vector_base_v3::internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy )
512{
513 const size_type my_size = my_early_size;
514 const segment_index_t k_end = helper::find_segment_end(*this); // allocated segments
515 const segment_index_t k_stop = my_size? segment_index_of(my_size-1) + 1 : 0; // number of segments to store existing items: 0=>0; 1,2=>1; 3,4=>2; [5-8]=>3;..
516 const segment_index_t first_block = my_first_block; // number of merged segments, getting values from atomics
517
518 segment_index_t k = first_block;
519 if(k_stop < first_block)
520 k = k_stop;
521 else
522 while (k < k_stop && helper::incompact_predicate(segment_size( k ) * element_size) ) k++;
523 if(k_stop == k_end && k == first_block)
524 return NULL;
525
526 segment_t *const segment_table = my_segment;
527 internal_segments_table &old = *static_cast<internal_segments_table*>( table );
528 //this call is left here for sake of backward compatibility, and as a placeholder for table initialization
529 std::fill_n(old.table,sizeof(old.table)/sizeof(old.table[0]),segment_t());
530 old.first_block=0;
531
532 if ( k != first_block && k ) // first segment optimization
533 {
534 // exception can occur here
535 void *seg = helper::allocate_segment(*this, segment_size(k));
536 old.table[0].store<relaxed>(seg);
537 old.first_block = k; // fill info for freeing new segment if exception occurs
538 // copy items to the new segment
539 size_type my_segment_size = segment_size( first_block );
540 for (segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size) {
541 __TBB_ASSERT( segment_table[i].load<relaxed>() == segment_allocated(), NULL);
542 void *s = static_cast<void*>(
543 static_cast<char*>(seg) + segment_base(i)*element_size );
544 //TODO: refactor to use std::min
545 if(j + my_segment_size >= my_size) my_segment_size = my_size - j;
546 __TBB_TRY { // exception can occur here
547 copy( s, segment_table[i].load<relaxed>().pointer<void>(), my_segment_size );
548 } __TBB_CATCH(...) { // destroy all the already copied items
549 helper for_each(&old.table[0], old.first_block, element_size,
550 0, 0, segment_base(i)+ my_segment_size);
551 for_each.apply( helper::destroy_body(destroy) );
553 }
554 my_segment_size = i? segment_size( ++i ) : segment_size( i = first_block );
555 }
556 // commit the changes
557 std::copy(segment_table,segment_table + k,old.table);
558 for (segment_index_t i = 0; i < k; i++) {
559 segment_table[i].store<relaxed>(static_cast<void*>(
560 static_cast<char*>(seg) + segment_base(i)*element_size ));
561 }
562 old.first_block = first_block; my_first_block = k; // now, first_block != my_first_block
563 // destroy original copies
564 my_segment_size = segment_size( first_block ); // old.first_block actually
565 for (segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size) {
566 if(j + my_segment_size >= my_size) my_segment_size = my_size - j;
567 // destructors are supposed to not throw any exceptions
568 destroy( old.table[i].load<relaxed>().pointer<void>(), my_segment_size );
569 my_segment_size = i? segment_size( ++i ) : segment_size( i = first_block );
570 }
571 }
572 // free unnecessary segments allocated by reserve() call
573 if ( k_stop < k_end ) {
574 old.first_block = first_block;
575 std::copy(segment_table+k_stop, segment_table+k_end, old.table+k_stop );
576 std::fill_n(segment_table+k_stop, (k_end-k_stop), segment_t());
577 if( !k ) my_first_block = 0;
578 }
579 return table;
580}
581
582void concurrent_vector_base_v3::internal_swap(concurrent_vector_base_v3& v)
583{
584 size_type my_sz = my_early_size.load<acquire>();
585 size_type v_sz = v.my_early_size.load<relaxed>();
586 if(!my_sz && !v_sz) return;
587
588 bool my_was_short = (my_segment.load<relaxed>() == my_storage);
589 bool v_was_short = (v.my_segment.load<relaxed>() == v.my_storage);
590
591 //In C++11, this would be: swap(my_storage, v.my_storage);
592 for (int i=0; i < pointers_per_short_table; ++i){
593 swap(my_storage[i], v.my_storage[i]);
594 }
595 tbb::internal::swap<relaxed>(my_first_block, v.my_first_block);
596 tbb::internal::swap<relaxed>(my_segment, v.my_segment);
597 if (my_was_short){
598 v.my_segment.store<relaxed>(v.my_storage);
599 }
600 if(v_was_short){
601 my_segment.store<relaxed>(my_storage);
602 }
603
604 my_early_size.store<relaxed>(v_sz);
605 v.my_early_size.store<release>(my_sz);
606}
607
608} // namespace internal
609
610} // tbb
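
The helper logic above relies on the power-of-two segment layout declared in tbb/concurrent_vector.h (segment_index_of, segment_base, segment_size): segment k >= 1 holds 2^k elements starting at index 2^k, while segment 0 holds the first two elements, which is why enable_segment() and internal_assign() special-case k == 0 with a size of 2. The following is a minimal standalone sketch of that mapping; the sketch_* names are stand-ins for illustration, not TBB's own code.

#include <cassert>
#include <cstddef>

// Stand-ins for concurrent_vector_base_v3::segment_index_of/segment_base/segment_size.
std::size_t sketch_segment_index_of(std::size_t index) {
    std::size_t k = 0;
    for( std::size_t v = index | 1; v > 1; v >>= 1 ) ++k;   // floor(log2(index|1))
    return k;
}
std::size_t sketch_segment_base(std::size_t k) { return (std::size_t(1) << k) & ~std::size_t(1); }
std::size_t sketch_segment_size(std::size_t k) { return std::size_t(1) << k; } // segment 0 really holds 2 elements

int main() {
    assert( sketch_segment_index_of(0) == 0 && sketch_segment_index_of(1) == 0 ); // indices 0..1 -> segment 0
    assert( sketch_segment_index_of(2) == 1 && sketch_segment_index_of(3) == 1 ); // indices 2..3 -> segment 1
    assert( sketch_segment_index_of(4) == 2 && sketch_segment_index_of(7) == 2 ); // indices 4..7 -> segment 2
    assert( sketch_segment_base(3) == 8 && sketch_segment_size(3) == 8 );         // segment 3 covers 8..15
    return 0;
}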
#define __TBB_CATCH(e)
Definition: tbb_stddef.h:284
#define __TBB_TRY
Definition: tbb_stddef.h:283
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
#define __TBB_STATIC_ASSERT(condition, msg)
Definition: tbb_stddef.h:553
#define __TBB_RETHROW()
Definition: tbb_stddef.h:286
#define ITT_NOTIFY(name, obj)
Definition: itt_notify.h:112
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
The graph class.
@ release
Release.
Definition: atomic.h:59
@ relaxed
No ordering.
Definition: atomic.h:61
@ acquire
Acquire.
Definition: atomic.h:57
void throw_exception(exception_id eid)
Versionless convenience wrapper for throw_exception_v4()
@ eid_reservation_length_error
Definition: tbb_exception.h:83
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
Definition: tbb_machine.h:391
void spin_wait_while(predicate_type condition)
Definition: tbb_machine.h:405
Base class of concurrent vector implementation.
void *__TBB_EXPORTED_METHOD internal_compact(size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy)
void __TBB_EXPORTED_METHOD internal_throw_exception(size_type) const
Obsolete.
static segment_index_t segment_index_of(size_type index)
atomic< size_type > my_early_size
Requested size of vector.
void *(* vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t)
allocator function pointer
void __TBB_EXPORTED_METHOD internal_assign(const concurrent_vector_base_v3 &src, size_type element_size, internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy)
friend void swap(segment_t &, segment_t &) __TBB_NOEXCEPT(true)
void *__TBB_EXPORTED_METHOD internal_push_back(size_type element_size, size_type &index)
void(__TBB_EXPORTED_FUNC * internal_array_op2)(void *dst, const void *src, size_type n)
An operation on n-element destination array and n-element source array.
size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result(size_type new_size, size_type element_size, internal_array_op2 init, const void *src)
void __TBB_EXPORTED_METHOD internal_resize(size_type n, size_type element_size, size_type max_size, const void *src, internal_array_op1 destroy, internal_array_op2 init)
void __TBB_EXPORTED_METHOD internal_swap(concurrent_vector_base_v3 &v)
void internal_grow(size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src)
static size_type segment_size(segment_index_t k)
size_type __TBB_EXPORTED_METHOD internal_grow_by(size_type delta, size_type element_size, internal_array_op2 init, const void *src)
static segment_index_t segment_base(segment_index_t k)
segment_index_t __TBB_EXPORTED_METHOD internal_clear(internal_array_op1 destroy)
@ pointers_per_short_table
Number of slots for segment pointers inside the class.
atomic< segment_t * > my_segment
Pointer to the segments table.
__TBB_EXPORTED_METHOD ~concurrent_vector_base_v3()
void(__TBB_EXPORTED_FUNC * internal_array_op1)(void *begin, size_type n)
An operation on an n-element array starting at begin.
friend void enforce_segment_allocated(segment_value_t const &s, internal::exception_id exception=eid_bad_last_alloc)
atomic< size_type > my_first_block
count of segments in the first block
segment_t my_storage[pointers_per_short_table]
embedded storage of segment pointers
size_type __TBB_EXPORTED_METHOD internal_capacity() const
void __TBB_EXPORTED_METHOD internal_reserve(size_type n, size_type element_size, size_type max_size)
void __TBB_EXPORTED_METHOD internal_grow_to_at_least(size_type new_size, size_type element_size, internal_array_op2 init, const void *src)
Deprecated entry point for backwards compatibility to TBB 2.1.
void __TBB_EXPORTED_METHOD internal_copy(const concurrent_vector_base_v3 &src, size_type element_size, internal_array_op2 copy)
Class that implements exponential backoff.
Definition: tbb_machine.h:345
void pause()
Pause for a while.
Definition: tbb_machine.h:360
Base class for types that should not be assigned.
Definition: tbb_stddef.h:322
Base class for types that should not be copied or assigned.
Definition: tbb_stddef.h:330
void cleanup()
Out-of-line code to assist the destructor in infrequent cases.
helper(segment_t *segments, size_type fb, size_type esize, size_type index, size_type s, size_type f)
static void * allocate_segment(concurrent_vector_base_v3 &v, size_type n)
static void extend_table_if_necessary(concurrent_vector_base_v3 &v, size_type k, size_type start)
static void assign_first_segment_if_necessary(concurrent_vector_base_v3 &v, segment_index_t k)
assign first segment size. k is the index of the last segment to be allocated, not a count of segments
static void publish_segment(segment_t &s, argument_type rhs)
Publish segment so other threads can see it.
static size_type enable_segment(concurrent_vector_base_v3 &v, size_type k, size_type element_size, bool mark_as_not_used_on_failure=false)
static segment_t & acquire_segment(concurrent_vector_base_v3 &v, size_type index, size_type element_size, bool owner)
static const size_type page_size
memory page size
static size_type find_segment_end(const concurrent_vector_base_v3 &v)
static void extend_segment_table(concurrent_vector_base_v3 &v, size_type start)
segment_value_t get_segment_value(size_type index, bool wait)
TODO: turn into lambda functions when available.
void operator()(segment_t &, void *begin, size_type n) const
void operator()(segment_t &s, void *begin, size_type n) const
void operator()(segment_t &s, void *begin, size_type n) const
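
For context, the internal_* entry points documented above back the public tbb::concurrent_vector<T> template declared in tbb/concurrent_vector.h; push_back and the grow_* calls funnel into internal_push_back and the internal_grow_* routines defined in this file. A small usage sketch, for illustration only:

#include "tbb/concurrent_vector.h"
#include "tbb/parallel_for.h"
#include <cstdio>

int main() {
    tbb::concurrent_vector<int> v;
    // Concurrent growth: each push_back claims an index and acquires the segment that owns it.
    tbb::parallel_for(0, 1000, [&](int i) { v.push_back(i); });
    // grow_to_at_least default-constructs elements up to the requested size.
    v.grow_to_at_least(2000);
    std::printf("size=%zu capacity=%zu\n", v.size(), v.capacity());
    return 0;
}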
