Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
arena.h
1/*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17#ifndef _TBB_arena_H
18#define _TBB_arena_H
19
20#include "tbb/tbb_stddef.h"
21#include "tbb/atomic.h"
22
23#include "tbb/tbb_machine.h"
24
25#include "scheduler_common.h"
26#include "intrusive_list.h"
27#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
29#else
30#include "task_stream.h"
31#endif
32#include "../rml/include/rml_tbb.h"
33#include "mailbox.h"
34#include "observer_proxy.h"
35#include "market.h"
36#include "governor.h"
37#include "concurrent_monitor.h"
38
39#if __TBB_PREVIEW_RESUMABLE_TASKS
40#include "tbb/spin_mutex.h"
41#endif
42
43namespace tbb {
44
45class task_group_context;
46class allocate_root_with_context_proxy;
47
48namespace internal {
49
50#if __TBB_NUMA_SUPPORT
51class numa_binding_observer;
52#endif /*__TBB_NUMA_SUPPORT*/
53
54#if __TBB_PREVIEW_RESUMABLE_TASKS
56class arena_co_cache {
58 generic_scheduler** my_co_scheduler_cache; // circular buffer of cached coroutine schedulers
60 unsigned my_head; // index of the next slot to be filled by push()
62 unsigned my_max_index; // last valid index (capacity - 1)
64 tbb::spin_mutex my_co_cache_mutex; // protects the buffer and my_head
65
66 unsigned next_index() {
67 return ( my_head == my_max_index ) ? 0 : my_head + 1;
68 }
69
70 unsigned prev_index() {
71 return ( my_head == 0 ) ? my_max_index : my_head - 1;
72 }
73
74 bool internal_empty() {
75 return my_co_scheduler_cache[prev_index()] == NULL;
76 }
77
78 void internal_scheduler_cleanup(generic_scheduler* to_cleanup) {
79 to_cleanup->my_arena_slot = NULL;
80 // Needed by cleanup_worker function, as well as arena slot clearing
81 governor::assume_scheduler(to_cleanup);
82 generic_scheduler::cleanup_worker(to_cleanup, true);
83 }
84
85public:
86 void init(unsigned cache_capacity) {
87 size_t alloc_size = cache_capacity * sizeof(generic_scheduler*);
88 my_co_scheduler_cache = (generic_scheduler**)NFS_Allocate(1, alloc_size, NULL);
89 memset( my_co_scheduler_cache, 0, alloc_size );
90 my_head = 0;
91 my_max_index = cache_capacity - 1;
92 }
93
94 void cleanup() {
95 generic_scheduler* current = governor::local_scheduler_if_initialized();
96 while (generic_scheduler* to_cleanup = pop()) {
97 internal_scheduler_cleanup(to_cleanup);
98 }
99 governor::assume_scheduler(current);
100 NFS_Free(my_co_scheduler_cache);
101 }
102
105 void push(generic_scheduler* s) {
106 generic_scheduler* to_cleanup = NULL;
107 {
108 tbb::spin_mutex::scoped_lock lock(my_co_cache_mutex);
109 // Check if we are replacing an existing buffer entry
110 if (my_co_scheduler_cache[my_head] != NULL) {
111 to_cleanup = my_co_scheduler_cache[my_head];
112 }
113 // Store the cached value
114 my_co_scheduler_cache[my_head] = s;
115 // Move head index to the next slot
116 my_head = next_index();
117 }
118 // Cleanup replaced buffer if any
119 if (to_cleanup) {
120 generic_scheduler* current = governor::local_scheduler_if_initialized();
121 internal_scheduler_cleanup(to_cleanup);
122 governor::assume_scheduler(current);
123 }
124 }
125
127 generic_scheduler* pop() {
128 tbb::spin_mutex::scoped_lock lock(my_co_cache_mutex);
129 // No cached coroutine
130 if (internal_empty()) return NULL;
131 // Move head index to the currently available value
132 my_head = prev_index();
133 // Retrieve the value from the buffer
134 generic_scheduler* to_return = my_co_scheduler_cache[my_head];
135 // Clear the vacated entry
136 my_co_scheduler_cache[my_head] = NULL;
137 return to_return;
138 }
139};
140#endif // __TBB_PREVIEW_RESUMABLE_TASKS
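arena_co_cache above is a fixed-capacity circular buffer guarded by a spin mutex: push() stores at my_head, advances the head, and hands back any entry it overwrote so the caller can clean it up, while pop() steps the head backwards and returns NULL when the slot behind the head is empty. A minimal standalone sketch of the same index arithmetic (illustrative only; ring_cache and plain int* values are hypothetical stand-ins for the scheduler pointers):

#include <vector>

// Illustrative ring buffer mirroring arena_co_cache's next_index()/prev_index() logic.
class ring_cache {
    std::vector<int*> slots;   // stands in for generic_scheduler** my_co_scheduler_cache
    unsigned head = 0;         // my_head
    unsigned max_index;        // my_max_index == capacity - 1

    unsigned next_index() const { return head == max_index ? 0 : head + 1; }
    unsigned prev_index() const { return head == 0 ? max_index : head - 1; }

public:
    explicit ring_cache(unsigned capacity) : slots(capacity, nullptr), max_index(capacity - 1) {}

    // Returns the evicted entry (if any) so the caller can clean it up, as arena_co_cache::push does.
    int* push(int* value) {
        int* evicted = slots[head];
        slots[head] = value;
        head = next_index();
        return evicted;
    }

    // Returns nullptr when the slot behind the head is empty (cf. internal_empty()).
    int* pop() {
        if (slots[prev_index()] == nullptr) return nullptr;
        head = prev_index();
        int* result = slots[head];
        slots[head] = nullptr;
        return result;
    }
};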
141
143//! The structure of an arena, except the array of slots.
145struct arena_base : padded<intrusive_list_node> {
147 unsigned my_num_workers_allotted; // heavy use in stealing loop
148
150
153 atomic<unsigned> my_references; // heavy use in stealing loop
154
155#if __TBB_TASK_PRIORITY
157 volatile intptr_t my_top_priority; // heavy use in stealing loop
158#endif /* __TBB_TASK_PRIORITY */
159
161 atomic<unsigned> my_limit; // heavy use in stealing loop
162
164//! Task pool for the tasks scheduled via task::enqueue() method.
169#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
171#else
172 task_stream<num_priority_levels> my_task_stream;
173#endif
174
175#if __TBB_PREVIEW_CRITICAL_TASKS
177
180 // used on the hot path of the task dispatch loop
181 task_stream<1, back_nonnull_accessor> my_critical_task_stream;
182#endif
183
184//! The number of workers requested by the master thread owning the arena.
185 unsigned my_max_num_workers;
186
187//! The number of workers that are currently requested from the resource manager.
188 int my_num_workers_requested;
189
191//! Current task pool state and estimate of available tasks amount.
195 tbb::atomic<uintptr_t> my_pool_state;
196
197#if __TBB_ARENA_OBSERVER
199 observer_list my_observers;
200#endif
201
202#if __TBB_NUMA_SUPPORT
204 numa_binding_observer* my_numa_binding_observer;
205#endif /*__TBB_NUMA_SUPPORT*/
206
207#if __TBB_TASK_PRIORITY
209 intptr_t my_bottom_priority;
210
212
214 uintptr_t my_reload_epoch;
215
217 task* my_orphaned_tasks;
218
220 tbb::atomic<uintptr_t> my_abandonment_epoch;
221
223
226 tbb::atomic<intptr_t> my_skipped_fifo_priority;
227#endif /* __TBB_TASK_PRIORITY */
228
229 // Below are rarely modified members
230
231//! The market that owns this arena.
232 market* my_market;
233
234//! ABA prevention marker.
235 uintptr_t my_aba_epoch;
236
237#if !__TBB_FP_CONTEXT
238//! FPU control settings of arena's master thread captured at the moment of arena instantiation.
239 cpu_ctl_env my_cpu_ctl_env;
240#endif
241
242#if __TBB_TASK_GROUP_CONTEXT
244
246 task_group_context* my_default_ctx;
247#endif /* __TBB_TASK_GROUP_CONTEXT */
248
250 unsigned my_num_slots;
251
252//! The number of reserved slots (can be occupied only by masters).
253 unsigned my_num_reserved_slots;
254
255#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
256 // arena needs an extra worker despite the arena limit
257 bool my_local_concurrency_mode;
258 // arena needs an extra worker despite a global limit
259 bool my_global_concurrency_mode;
260#endif /* __TBB_ENQUEUE_ENFORCED_CONCURRENCY */
261
262//! Waiting object for master threads that cannot join the arena.
263 concurrent_monitor my_exit_monitors;
264
265#if __TBB_PREVIEW_RESUMABLE_TASKS
267 arena_co_cache my_co_cache;
268#endif
269
270#if TBB_USE_ASSERT
272 uintptr_t my_guard;
273#endif /* TBB_USE_ASSERT */
274}; // struct arena_base
275
276class arena: public padded<arena_base>
277{
280public:
281 typedef padded<arena_base> base_type;
282
283//! Types of work advertised by advertise_new_work()
284 enum new_work_type {
285 work_spawned,
286 wakeup,
287 work_enqueued
288 };
289
291 arena ( market&, unsigned max_num_workers, unsigned num_reserved_slots );
292
294 static arena& allocate_arena( market&, unsigned num_slots, unsigned num_reserved_slots );
295
296 static unsigned num_arena_slots ( unsigned num_slots ) {
297 return max(2u, num_slots);
298 }
299
300 static int allocation_size ( unsigned num_slots ) {
301 return sizeof(base_type) + num_slots * (sizeof(mail_outbox) + sizeof(arena_slot));
302 }
303
304//! Get reference to mailbox corresponding to given affinity_id.
305 mail_outbox& mailbox( affinity_id id ) {
306 __TBB_ASSERT( 0<id, "affinity id must be positive integer" );
307 __TBB_ASSERT( id <= my_num_slots, "affinity id out of bounds" );
308
309 return ((mail_outbox*)this)[-(int)id];
310 }
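allocation_size() reserves room for one mail_outbox and one arena_slot per slot in addition to the padded arena_base, and mailbox() reaches its mail_outbox by negative indexing from the arena's own address. Read together, they imply a block layout in which the mailboxes sit immediately before the arena object and the slots after it. The following standalone sketch models that layout under stated assumptions (Outbox, Slot, ArenaHeader and the model_* helpers are hypothetical stand-ins, not types from this header):

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-ins for mail_outbox, arena_slot and the padded arena object.
struct Outbox      { char payload[64];  };
struct Slot        { char payload[128]; };
struct ArenaHeader { unsigned num_slots; char payload[252]; };

std::size_t model_allocation_size(unsigned num_slots) {
    // Mirrors arena::allocation_size(): header plus one mailbox and one slot per arena slot.
    return sizeof(ArenaHeader) + num_slots * (sizeof(Outbox) + sizeof(Slot));
}

ArenaHeader* model_construct(void* raw, unsigned num_slots) {
    // Assumed layout: mailboxes occupy the front of the block, the arena object starts
    // right after them, and the slots follow the arena; mailbox id (1-based) is then
    // reachable by negative indexing from the arena pointer, as arena::mailbox() does.
    ArenaHeader* a = reinterpret_cast<ArenaHeader*>(static_cast<Outbox*>(raw) + num_slots);
    a->num_slots = num_slots;
    return a;
}

Outbox& model_mailbox(ArenaHeader* a, unsigned id) {
    assert(0 < id && id <= a->num_slots);
    return reinterpret_cast<Outbox*>(a)[-static_cast<int>(id)];
}

int main() {
    unsigned n = 4;
    std::vector<char> storage(model_allocation_size(n));
    ArenaHeader* a = model_construct(storage.data(), n);
    // mailbox 1 ends exactly where the arena object begins.
    Outbox* first = &model_mailbox(a, 1);
    assert(reinterpret_cast<char*>(first) + sizeof(Outbox) == reinterpret_cast<char*>(a));
    return 0;
}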
311
313 void free_arena ();
314
315 typedef uintptr_t pool_state_t;
316
317//! No tasks to steal since last snapshot was taken.
318 static const pool_state_t SNAPSHOT_EMPTY = 0;
319
320//! At least one task has been offered for stealing since the last snapshot started.
321 static const pool_state_t SNAPSHOT_FULL = pool_state_t(-1);
322
323//! The number of least significant bits for external references.
324 static const unsigned ref_external_bits = 12; // up to 4095 external and 1M workers
325
327 static const unsigned ref_external = 1;
328 static const unsigned ref_worker = 1<<ref_external_bits;
329
331 static bool is_busy_or_empty( pool_state_t s ) { return s < SNAPSHOT_FULL; }
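is_busy_or_empty() works with a single comparison because of how my_pool_state is encoded: SNAPSHOT_EMPTY is 0, a snapshot in progress is marked by any intermediate "busy" value, and SNAPSHOT_FULL is assumed here to be the all-ones value (the largest possible pool_state_t), so everything except FULL satisfies s < SNAPSHOT_FULL. A small sanity sketch under that assumption:

#include <cassert>
#include <cstdint>

// Hypothetical stand-ins mirroring the constants above.
typedef uintptr_t pool_state_t;
const pool_state_t EMPTY = 0;                 // SNAPSHOT_EMPTY
const pool_state_t FULL  = pool_state_t(-1);  // SNAPSHOT_FULL: the largest possible value
const pool_state_t BUSY  = 42;                // any other value marks an in-progress snapshot

inline bool busy_or_empty(pool_state_t s) { return s < FULL; }

int main() {
    assert(busy_or_empty(EMPTY));   // empty pool: someone may need to advertise new work
    assert(busy_or_empty(BUSY));    // snapshot in progress: also not "full"
    assert(!busy_or_empty(FULL));   // full: workers are already aware of available work
    return 0;
}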
332
334 unsigned num_workers_active() const {
335 return my_references >> ref_external_bits;
336 }
337
339 bool is_recall_requested() const {
340 return num_workers_active() > my_num_workers_allotted;
341 }
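The single atomic my_references packs both kinds of references: the low ref_external_bits bits count external (master) references and the bits above them count workers. That is why num_workers_active() is a plain right shift and why on_thread_leaving() below subtracts either ref_external or ref_worker. A worked example of the packing (illustrative only):

#include <cassert>

int main() {
    const unsigned ref_external_bits = 12;                       // low bits: external references
    const unsigned ref_external      = 1;                        // +1 external reference
    const unsigned ref_worker        = 1u << ref_external_bits;  // +1 worker reference

    unsigned refs = 0;
    refs += ref_external;        // a master joins the arena
    refs += 3 * ref_worker;      // three workers join

    assert((refs >> ref_external_bits) == 3);   // num_workers_active()
    assert((refs & (ref_worker - 1)) == 1);     // external references remaining

    refs -= ref_worker;          // one worker leaves (on_thread_leaving<ref_worker>)
    assert((refs >> ref_external_bits) == 2);
    return 0;
}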
342
344 template<arena::new_work_type work_type> void advertise_new_work();
345
347
348 bool is_out_of_work();
349
351 void enqueue_task( task&, intptr_t, FastRandom & );
352
354 void process( generic_scheduler& );
355
357 template<unsigned ref_param>
358 inline void on_thread_leaving ( );
359
360#if __TBB_STATISTICS
362 void dump_arena_statistics ();
363#endif /* __TBB_STATISTICS */
364
365#if __TBB_TASK_PRIORITY
367
368 inline bool may_have_tasks ( generic_scheduler*, bool& tasks_present, bool& dequeuing_possible );
369
371 void orphan_offloaded_tasks ( generic_scheduler& s );
372#endif /* __TBB_TASK_PRIORITY */
373
374#if __TBB_COUNT_TASK_NODES
376 intptr_t workers_task_node_count();
377#endif
378
380 bool has_enqueued_tasks();
381
382 static const size_t out_of_arena = ~size_t(0);
383//! Tries to occupy a slot in the arena. On success, returns the slot index.
384 template <bool as_worker>
385 size_t occupy_free_slot( generic_scheduler& s );
386//! Tries to occupy a slot in the specified range.
387 size_t occupy_free_slot_in_range( generic_scheduler& s, size_t lower, size_t upper );
388
389//! The array of slots in the arena.
390 arena_slot my_slots[1];
391}; // class arena
392
393template<unsigned ref_param>
394inline void arena::on_thread_leaving ( ) {
395 //
396 // Implementation of arena destruction synchronization logic contained various
397 // bugs/flaws at the different stages of its evolution, so below is a detailed
398 // description of the issues taken into consideration in the framework of the
399 // current design.
400 //
401 // In case of using fire-and-forget tasks (scheduled via task::enqueue()), the
402 // master thread is allowed to leave its arena before all its work is executed,
403 // and the market may temporarily revoke all workers from this arena. Since revoked
404 // workers never attempt to reset arena state to EMPTY and cancel its request
405 // to RML for threads, the arena object is destroyed only when both the last
406 // thread is leaving it and arena's state is EMPTY (that is its master thread
407 // left and it does not contain any work).
408 // Thus resetting the arena to the EMPTY state (as earlier TBB versions did) should not
409 // be done here (or anywhere else in the master thread, for that matter); doing so
410 // can result either in arena's premature destruction (at least without
411 // additional costly checks in workers) or in unnecessary arena state changes
412 // (and ensuing workers migration).
413 //
414 // A worker that checks for work presence and transitions arena to the EMPTY
415 // state (in snapshot taking procedure arena::is_out_of_work()) updates
416 // arena::my_pool_state first and only then arena::my_num_workers_requested.
417 // So the check for work absence must be done against the latter field.
418 //
419 // In the time window between decrementing the active threads count and checking
420 // whether there is an outstanding request for workers, a new worker thread may arrive,
421 // finish the remaining work, set the arena state to empty, and leave, decrementing its
422 // refcount and destroying the arena. Then the current thread would destroy the arena
423 // a second time. To preclude this, a local copy of the outstanding request
424 // value can be stored before decrementing the active threads count.
425 //
426 // But this technique may cause two other problems. When the stored request is
427 // zero, it is possible that the arena still has threads, and they can generate new
428 // tasks and thus re-establish a non-zero request. Then all the threads can be
429 // revoked (as described above), leaving this thread the last one and causing
430 // it to destroy a non-empty arena.
431 //
432 // The other problem takes place when the stored request is non-zero. Another
433 // thread may complete the work, set the arena state to empty, and leave without
434 // destroying the arena before this thread decrements the refcount. This thread
435 // cannot destroy the arena either. Thus the arena may be "orphaned".
436 //
437 // In both cases we cannot dereference arena pointer after the refcount is
438 // decremented, as our arena may already be destroyed.
439 //
440 // If this is the master thread, the market is kept alive by the reference this
441 // thread holds to it. In the case of workers, the market's liveness is ensured by
442 // the RML connection rundown protocol, according to which the client (i.e. the
443 // market) lives until the RML server notifies it about connection termination,
444 // and this notification is fired only after all workers return into RML.
445 //
446 // Thus, if we decremented the refcount to zero, we ask the market to check the
447 // arena state (including whether it is still alive) under its lock.
448 //
449 uintptr_t aba_epoch = my_aba_epoch;
450 market* m = my_market;
451 __TBB_ASSERT(my_references >= ref_param, "broken arena reference counter");
452#if __TBB_STATISTICS_EARLY_DUMP
453 // While still holding a reference to the arena, compute how many external references are left.
454 // If just one, dump statistics.
455 if ( modulo_power_of_two(my_references,ref_worker)==ref_param ) // may only be true with ref_external
456 GATHER_STATISTIC( dump_arena_statistics() );
457#endif
458#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
459 // When there are no workers, someone must free the arena, since
460 // without workers no one calls is_out_of_work().
461 // Skip workerless arenas because they have no demand for workers.
462 // TODO: consider stricter conditions for the cleanup,
463 // because this can create demand for workers,
464 // while the arena may already be empty (and so ready for destruction).
465 // TODO: fix the race: the soft limit may change while we are checking it.
466 if( ref_param==ref_external && my_num_slots != my_num_reserved_slots
467 && 0 == m->my_num_workers_soft_limit && !my_global_concurrency_mode ) {
468 bool is_out = false;
469 for (int i=0; i<num_priority_levels; i++) {
470 is_out = is_out_of_work();
471 if (is_out)
472 break;
473 }
474 // We expect that in the worst case it is enough to have num_priority_levels-1
475 // calls to restore priorities and one more is_out_of_work() call to confirm
476 // that no work was found. But since market::set_active_num_workers() can be called
477 // concurrently, we cannot guarantee that the last is_out_of_work() returns true.
478 }
479#endif
480 if ( (my_references -= ref_param ) == 0 )
481 m->try_destroy_arena( this, aba_epoch );
482}
483
484template<arena::new_work_type work_type> void arena::advertise_new_work() {
485 if( work_type == work_enqueued ) {
486#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
487 if ( as_atomic(my_market->my_num_workers_soft_limit) == 0 && as_atomic(my_global_concurrency_mode) == false )
488 my_market->enable_mandatory_concurrency(this);
489
490 if ( my_max_num_workers == 0 && my_num_reserved_slots == 1 ) {
491 __TBB_ASSERT(!my_local_concurrency_mode, NULL);
492 my_local_concurrency_mode = true;
493 my_pool_state = SNAPSHOT_FULL;
494 my_max_num_workers = 1;
495 my_market->adjust_demand(*this, my_max_num_workers);
496 return;
497 }
498#endif /* __TBB_ENQUEUE_ENFORCED_CONCURRENCY */
499 // Local memory fence here and below is required to avoid missed wakeups; see the comment below.
500 // Starvation resistant tasks require concurrency, so missed wakeups are unacceptable.
501 atomic_fence();
502 }
503 else if( work_type == wakeup ) {
504 __TBB_ASSERT(my_max_num_workers!=0, "Unexpected worker wakeup request");
505 atomic_fence();
506 }
507 // Double-check idiom that, in case of spawning, is deliberately sloppy about memory fences.
508 // Technically, to avoid missed wakeups, there should be a full memory fence between the point we
509 // released the task pool (i.e. spawned task) and read the arena's state. However, adding such a
510 // fence might hurt overall performance more than it helps, because the fence would be executed
511 // on every task pool release, even when stealing does not occur. Since TBB allows parallelism,
512 // but never promises parallelism, the missed wakeup is not a correctness problem.
513 pool_state_t snapshot = my_pool_state;
514 if( is_busy_or_empty(snapshot) ) {
515 // Attempt to mark as full. The compare_and_swap below is a little unusual because the
516 // result is compared to a value that can be different from the comparand argument.
517 if( my_pool_state.compare_and_swap( SNAPSHOT_FULL, snapshot )==SNAPSHOT_EMPTY ) {
518 if( snapshot!=SNAPSHOT_EMPTY ) {
519 // This thread read "busy" into snapshot, and then another thread transitioned
520 // my_pool_state to "empty" in the meantime, which caused the compare_and_swap above
521 // to fail. Attempt to transition my_pool_state from "empty" to "full".
522 if( my_pool_state.compare_and_swap( SNAPSHOT_FULL, SNAPSHOT_EMPTY )!=SNAPSHOT_EMPTY ) {
523 // Some other thread transitioned my_pool_state from "empty", and hence became
524 // responsible for waking up workers.
525 return;
526 }
527 }
528 // This thread transitioned pool from empty to full state, and thus is responsible for
529 // telling the market that there is work to do.
530#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
531 if( work_type == work_spawned ) {
532 if( my_local_concurrency_mode ) {
533 __TBB_ASSERT(my_max_num_workers==1, "");
534 __TBB_ASSERT(!governor::local_scheduler()->is_worker(), "");
535 // There was deliberate oversubscription on 1 core for the sake of starvation-resistant tasks.
536 // Now a single active thread (must be the master) supposedly starts a new parallel region
537 // with relaxed sequential semantics, and oversubscription should be avoided.
538 // Demand for workers has been decreased to 0 during SNAPSHOT_EMPTY, so just keep it.
539 my_max_num_workers = 0;
540 my_local_concurrency_mode = false;
541 return;
542 }
543 if ( as_atomic(my_global_concurrency_mode) == true )
544 my_market->mandatory_concurrency_disable( this );
545 }
546#endif /* __TBB_ENQUEUE_ENFORCED_CONCURRENCY */
547 // TODO: investigate adjusting of arena's demand by a single worker.
548 my_market->adjust_demand( *this, my_max_num_workers );
549 }
550 }
551}
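The work_type template parameter selects the notification flavor at compile time: work_enqueued may additionally arrange mandatory concurrency for starvation-resistant tasks and always issues a full fence before reading the pool state, wakeup only re-awakens workers (also fenced), and work_spawned deliberately skips the fence and tolerates a missed wakeup to keep the spawn fast path cheap. A minimal model of that compile-time dispatch (hypothetical names; the real call sites live in the scheduler sources):

#include <cstdio>

// Minimal model of the dispatch on work_type (hypothetical stand-ins;
// the real enumerators are arena::work_spawned, arena::wakeup and arena::work_enqueued).
enum new_work_type { work_spawned, wakeup, work_enqueued };

template<new_work_type work_type>
void advertise_new_work_model() {
    if (work_type == work_enqueued)
        std::puts("enqueued: may enable mandatory concurrency; full fence before reading pool state");
    else if (work_type == wakeup)
        std::puts("wakeup: full fence before reading pool state");
    else // work_spawned
        std::puts("spawned: deliberately skips the fence; a missed wakeup is tolerated");
}

int main() {
    advertise_new_work_model<work_spawned>();
    advertise_new_work_model<work_enqueued>();
    advertise_new_work_model<wakeup>();
    return 0;
}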
552
553} // namespace internal
554} // namespace tbb
555
556#endif /* _TBB_arena_H */
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition tbb_stddef.h:165
#define GATHER_STATISTIC(x)
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
The graph class.
void atomic_fence()
Sequentially consistent full memory fence.
unsigned short affinity_id
An id as used for specifying affinity.
Definition task.h:139
T max(const T &val1, const T &val2)
Utility template function returning greater of the two values.
Definition tbb_misc.h:119
atomic< T > & as_atomic(T &t)
Definition atomic.h:572
static const intptr_t num_priority_levels
argument_integer_type modulo_power_of_two(argument_integer_type arg, divisor_integer_type divisor)
A function to compute arg modulo divisor where divisor is a power of 2.
Definition tbb_stddef.h:382
A lock that occupies a single byte.
Definition spin_mutex.h:39
Represents acquisition of a mutex.
Definition spin_mutex.h:53
Used to form groups of tasks.
Definition task.h:358
Base class for user-defined tasks.
Definition task.h:615
Pads type T to fill out to a multiple of cache line size.
Definition tbb_stddef.h:261
The structure of an arena, except the array of slots.
Definition arena.h:145
atomic< unsigned > my_limit
The maximal number of currently busy slots.
Definition arena.h:161
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
Definition arena.h:253
uintptr_t my_aba_epoch
ABA prevention marker.
Definition arena.h:235
concurrent_monitor my_exit_monitors
Waiting object for master threads that cannot join the arena.
Definition arena.h:263
task_stream< num_priority_levels > my_task_stream
Task pool for the tasks scheduled via task::enqueue() method.
Definition arena.h:172
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
Definition arena.h:195
unsigned my_num_workers_allotted
The number of workers that have been marked out by the resource manager to service the arena.
Definition arena.h:147
unsigned my_num_slots
The number of slots in the arena.
Definition arena.h:250
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
Definition arena.h:185
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
Definition arena.h:188
market * my_market
The market that owns this arena.
Definition arena.h:232
cpu_ctl_env my_cpu_ctl_env
FPU control settings of arena's master thread captured at the moment of arena instantiation.
Definition arena.h:239
atomic< unsigned > my_references
Reference counter for the arena.
Definition arena.h:153
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
Definition arena.h:318
size_t occupy_free_slot_in_range(generic_scheduler &s, size_t lower, size_t upper)
Tries to occupy a slot in the specified range.
Definition arena.cpp:115
static const unsigned ref_external
Reference increment values for externals and workers.
Definition arena.h:327
unsigned num_workers_active() const
The number of workers active in the arena.
Definition arena.h:334
new_work_type
Types of work advertised by advertise_new_work()
Definition arena.h:284
size_t occupy_free_slot(generic_scheduler &s)
Tries to occupy a slot in the arena. On success, returns the slot index; if no slot is available,...
Definition arena.cpp:130
void free_arena()
Completes arena shutdown, destructs and deallocates it.
Definition arena.cpp:296
static unsigned num_arena_slots(unsigned num_slots)
Definition arena.h:296
void enqueue_task(task &, intptr_t, FastRandom &)
enqueue a task into starvation-resistance queue
Definition arena.cpp:597
static bool is_busy_or_empty(pool_state_t s)
No tasks to steal or snapshot is being taken.
Definition arena.h:331
arena_slot my_slots[1]
Definition arena.h:390
static const unsigned ref_external_bits
The number of least significant bits for external references.
Definition arena.h:324
static const pool_state_t SNAPSHOT_FULL
At least one task has been offered for stealing since the last snapshot started.
Definition arena.h:321
bool is_recall_requested() const
Check if the recall is requested by the market.
Definition arena.h:339
void restore_priority_if_need()
If enqueued tasks found, restore arena priority and task presence status.
Definition arena.cpp:434
static int allocation_size(unsigned num_slots)
Definition arena.h:300
bool is_out_of_work()
Check if there is job anywhere in arena.
Definition arena.cpp:454
static const size_t out_of_arena
Definition arena.h:382
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
Definition arena.h:305
uintptr_t pool_state_t
Definition arena.h:315
padded< arena_base > base_type
Definition arena.h:281
bool has_enqueued_tasks()
Check for the presence of enqueued tasks at all priority levels.
Definition arena.cpp:426
void advertise_new_work()
If necessary, raise a flag that there is new job in arena.
Definition arena.h:484
static arena & allocate_arena(market &, unsigned num_slots, unsigned num_reserved_slots)
Allocate an instance of arena.
Definition arena.cpp:285
void on_thread_leaving()
Notification that worker or master leaves its arena.
Definition arena.h:394
static const unsigned ref_worker
Definition arena.h:328
void process(generic_scheduler &)
Registers the worker with the arena and enters TBB scheduler dispatch loop.
Definition arena.cpp:146
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
Definition governor.h:129
Class representing where mail is put.
Definition mailbox.h:99
void try_destroy_arena(arena *, uintptr_t aba_epoch)
Removes the arena from the market's list.
Definition market.cpp:333
unsigned my_num_workers_soft_limit
Current application-imposed limit on the number of workers (see set_active_num_workers())
Definition market.h:78
Work stealing task scheduler.
Definition scheduler.h:140
The container for "fairness-oriented" aka "enqueued" tasks.
Definition task_stream.h:69
A fast random number generator.
Definition tbb_misc.h:135
