#if __TBB_STATISTICS_STDOUT
// ...
#endif

// In generic_scheduler::attach_arena: bind this scheduler to a slot of arena a.
#if __TBB_TASK_GROUP_CONTEXT
    // ...
#endif
#if __TBB_TASK_PRIORITY
    // A master uses the arena's priority fields as its reference point;
    // workers are redistributed between arenas by the market instead.
    my_ref_top_priority = &a->my_top_priority;
    my_ref_reload_epoch = &a->my_reload_epoch;
    // ...
    my_local_reload_epoch = *my_ref_reload_epoch;
#endif /* __TBB_TASK_PRIORITY */
// Tries to install scheduler s into an empty slot with a single compare-and-swap.
static bool occupy_slot( generic_scheduler*& slot, generic_scheduler& s ) {
    return !slot && as_atomic( slot ).compare_and_swap( &s, NULL ) == NULL;
}
size_t arena::occupy_free_slot_in_range( generic_scheduler& s, size_t lower, size_t upper ) {
    if ( lower >= upper ) return out_of_arena;
    // Start the search from the slot this scheduler occupied last time,
    // or from a random slot inside the range.
    size_t index = s.my_arena_index;
    if ( index < lower || index >= upper )
        index = s.my_random.get() % (upper - lower) + lower;
    // Scan [index, upper), then wrap around to [lower, index).
    for ( size_t i = index; i < upper; ++i )
        if ( occupy_slot(my_slots[i].my_scheduler, s) ) return i;
    for ( size_t i = lower; i < index; ++i )
        if ( occupy_slot(my_slots[i].my_scheduler, s) ) return i;
    return out_of_arena;
}
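The two loops above implement a randomized circular probe: the scan starts at a random point inside [lower, upper) and wraps around, so threads joining concurrently spread across the slot range instead of all contending on the first free slot. A minimal self-contained sketch of the same scan shape, with std::atomic standing in for the slot array and all names hypothetical:

    #include <atomic>
    #include <cstddef>

    // Claim any free slot in [lower, upper), probing from `start` and wrapping
    // around; returns the claimed index, or `upper` when every slot is taken.
    static std::size_t claim_slot( std::atomic<void*>* slots, std::size_t lower,
                                   std::size_t upper, std::size_t start, void* me ) {
        for ( std::size_t i = start; i < upper; ++i ) {
            void* expected = nullptr;
            if ( slots[i].compare_exchange_strong( expected, me ) ) return i;
        }
        for ( std::size_t i = lower; i < start; ++i ) { // wrap around
            void* expected = nullptr;
            if ( slots[i].compare_exchange_strong( expected, me ) ) return i;
        }
        return upper; // nothing free, analogous to out_of_arena
    }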
// Tries to occupy a slot in the arena. On success, returns the slot index;
// otherwise returns out_of_arena.
template <bool as_worker>
size_t arena::occupy_free_slot( generic_scheduler& s ) {
    // Masters try the reserved slots first; workers may not use them.
    size_t index = as_worker ? out_of_arena : occupy_free_slot_in_range( s, 0, my_num_reserved_slots );
    if ( index == out_of_arena ) {
        // All threads compete for the non-reserved slots.
        index = occupy_free_slot_in_range( s, my_num_reserved_slots, my_num_slots );
        if ( index == out_of_arena )
            return out_of_arena;
    }
    // Extend the search limit to cover the newly occupied slot.
    atomic_update( my_limit, (unsigned)(index + 1), std::less<unsigned>() );
    return index;
}
// arena::process: registers the worker with the arena and runs the scheduler
// dispatch loop until the arena no longer needs this worker.
void arena::process( generic_scheduler& s ) {
    // ...
    size_t index = occupy_free_slot</*as_worker*/true>( s );
    if ( index == out_of_arena )
        goto quit;
    __TBB_ASSERT( index >= my_num_reserved_slots, "Workers cannot occupy reserved slots" );
    // ...
#if !__TBB_FP_CONTEXT
    my_cpu_ctl_env.set_env();
#endif
#if __TBB_ARENA_OBSERVER
    __TBB_ASSERT( !s.my_last_local_observer, "There cannot be notified local observers when entering arena" );
    my_observers.notify_entry_observers( s.my_last_local_observer, /*worker=*/true );
#endif
    for ( ;; ) {
        // ...
        __TBB_ASSERT( s.is_quiescent_local_task_pool_reset(),
                      "Worker cannot leave arena while its task pool is not reset" );
        // ...
        // Leave the dispatch loop when the arena has more active workers
        // than it is currently allotted.
        if ( num_workers_active() > my_num_workers_allotted
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
             || recall_by_mandatory_request()
#endif
           )
            break;
    }
#if __TBB_ARENA_OBSERVER
    my_observers.notify_exit_observers( s.my_last_local_observer, /*worker=*/true );
    s.my_last_local_observer = NULL;
#endif
#if __TBB_TASK_PRIORITY
    if ( s.my_offloaded_tasks )
        orphan_offloaded_tasks( s );
#endif
#if __TBB_STATISTICS
    ++s.my_counters.arena_roundtrips;
    *my_slots[index].my_counters += s.my_counters;
    s.my_counters.reset();
#endif
    // ...
quit:
    on_thread_leaving<ref_worker>();
}
arena::arena ( market& m, unsigned num_slots, unsigned num_reserved_slots ) {
    __TBB_ASSERT( !my_guard, "improperly allocated arena?" );
    // ...
#if __TBB_TASK_PRIORITY
    __TBB_ASSERT( !my_reload_epoch && !my_orphaned_tasks && !my_skipped_fifo_priority,
                  "New arena object is not zeroed" );
#endif
    // ...
    my_num_slots = num_arena_slots(num_slots);
    my_num_reserved_slots = num_reserved_slots;
    my_max_num_workers = num_slots - num_reserved_slots;
    my_references = ref_external; // accounts for the master
#if __TBB_TASK_PRIORITY
    my_bottom_priority = my_top_priority = normalized_normal_priority;
#endif
    // ...
#if __TBB_ARENA_OBSERVER
    my_observers.my_arena = this;
#endif
    __TBB_ASSERT ( my_max_num_workers <= my_num_slots, NULL );
    // Construct the slots and their mailboxes; register them with the tools.
    for ( unsigned i = 0; i < my_num_slots; ++i ) {
        __TBB_ASSERT( !my_slots[i].my_scheduler && !my_slots[i].task_pool, NULL );
        // ...
        ITT_SYNC_CREATE(my_slots + i, SyncType_Scheduler, SyncObj_WorkerTaskPool);
        mailbox(i+1).construct();
        // ...
        my_slots[i].hint_for_pop = i;
#if __TBB_PREVIEW_CRITICAL_TASKS
        my_slots[i].hint_for_critical = i;
#endif
#if __TBB_STATISTICS
        my_slots[i].my_counters = new ( NFS_Allocate(1, sizeof(statistics_counters), NULL) ) statistics_counters;
#endif
    }
    my_task_stream.initialize(my_num_slots);
    ITT_SYNC_CREATE(&my_task_stream, SyncType_Scheduler, SyncObj_TaskStream);
#if __TBB_PREVIEW_CRITICAL_TASKS
    my_critical_task_stream.initialize(my_num_slots);
    ITT_SYNC_CREATE(&my_critical_task_stream, SyncType_Scheduler, SyncObj_CriticalTaskStream);
#endif
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    my_concurrency_mode = cm_normal;
#endif
#if !__TBB_FP_CONTEXT
    my_cpu_ctl_env.get_env();
#endif
}
arena& arena::allocate_arena( market& m, unsigned num_slots, unsigned num_reserved_slots ) {
    size_t n = allocation_size(num_arena_slots(num_slots));
    unsigned char* storage = (unsigned char*)NFS_Allocate( 1, n, NULL );
    // Zero the whole block so that all slots and mailboxes start out empty.
    memset( storage, 0, n );
    // The mailboxes come first; the arena object is constructed right after them.
    return *new( storage + num_arena_slots(num_slots) * sizeof(mail_outbox) )
        arena(m, num_slots, num_reserved_slots);
}
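allocate_arena carves a single cache-aligned block into the mailbox array followed by the arena object, so the arena can reach mailbox i by stepping backwards from its own address (see mailbox(i+1) in the constructor above). A rough sketch of that layout idea, using plain operator new and hypothetical Outbox/Arena types rather than the real NFS allocator:

    #include <cstddef>
    #include <cstring>
    #include <new>

    struct Outbox { void* queue_head; };   // stand-in for one mail_outbox
    struct Arena  { unsigned num_slots; }; // stand-in for the arena header

    // Layout: [ Outbox x n ][ Arena ][ ...slots... ]
    Arena* make_arena( std::size_t n ) {
        std::size_t bytes = n * sizeof(Outbox) + sizeof(Arena);
        unsigned char* storage = static_cast<unsigned char*>(::operator new(bytes));
        std::memset( storage, 0, bytes ); // all mailboxes/slots start empty
        return ::new( storage + n * sizeof(Outbox) ) Arena;
    }

    // 1-based backwards indexing, mirroring arena::mailbox(affinity_id).
    Outbox* mailbox_of( Arena* a, std::size_t id ) {
        return reinterpret_cast<Outbox*>(a) - id;
    }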
void arena::free_arena () {
    __TBB_ASSERT( !my_references, "There are threads in the dying arena" );
    __TBB_ASSERT( !my_num_workers_requested && !my_num_workers_allotted, "Dying arena requests workers" );
    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, "Inconsistent state of a dying arena" );
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    __TBB_ASSERT( my_concurrency_mode != cm_enforced_global, NULL );
#endif
#if !__TBB_STATISTICS_EARLY_DUMP
    // ...
#endif
    intptr_t drained = 0;
    for ( unsigned i = 0; i < my_num_slots; ++i ) {
        __TBB_ASSERT( !my_slots[i].my_scheduler, "arena slot is not empty" );
        // ...
        my_slots[i].free_task_pool();
#if __TBB_STATISTICS
        NFS_Free( my_slots[i].my_counters );
#endif
        drained += mailbox(i+1).drain();
    }
    __TBB_ASSERT( my_task_stream.drain()==0, "Not all enqueued tasks were executed");
#if __TBB_PREVIEW_CRITICAL_TASKS
    __TBB_ASSERT( my_critical_task_stream.drain()==0, "Not all critical tasks were executed");
#endif
#if __TBB_COUNT_TASK_NODES
    my_market->update_task_node_count( -drained );
#endif
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    __TBB_ASSERT( my_default_ctx, "Master thread never entered the arena?" );
    my_default_ctx->~task_group_context();
    // ...
#endif
#if __TBB_ARENA_OBSERVER
    if ( !my_observers.empty() )
        my_observers.clear();
#endif
    void* storage = &mailbox(my_num_slots);
    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, NULL );
    this->~arena();
#if TBB_USE_ASSERT > 1
    memset( storage, 0, allocation_size(my_num_slots) );
#endif
    NFS_Free( storage );
}
#if __TBB_STATISTICS
void arena::dump_arena_statistics () {
    statistics_counters total;
    for ( unsigned i = 0; i < my_num_slots; ++i ) {
#if __TBB_STATISTICS_EARLY_DUMP
        // Fold the live scheduler's counters into its slot before dumping.
        generic_scheduler* s = my_slots[i].my_scheduler;
        if ( s )
            *my_slots[i].my_counters += s->my_counters;
#endif
        if ( i != 0 ) { // the master's slot 0 is reported separately below
            total += *my_slots[i].my_counters;
            dump_statistics( *my_slots[i].my_counters, i );
        }
    }
    dump_statistics( *my_slots[0].my_counters, 0 );
#if __TBB_STATISTICS_STDOUT
#if !__TBB_STATISTICS_TOTALS_ONLY
    printf( "----------------------------------------------\n" );
#endif
    dump_statistics( total, workers_counters_total );
    total += *my_slots[0].my_counters;
    dump_statistics( total, arena_counters_total );
#if !__TBB_STATISTICS_TOTALS_ONLY
    printf( "==============================================\n" );
#endif
#endif /* __TBB_STATISTICS_STDOUT */
}
#endif /* __TBB_STATISTICS */
#if __TBB_TASK_PRIORITY
// Checks whether the given scheduler may still have tasks that this arena
// could execute at its current priority level.
inline bool arena::may_have_tasks ( generic_scheduler* s, bool& tasks_present, bool& dequeuing_possible ) {
    // ...
    if ( s->my_pool_reshuffling_pending ) {
        // The primary task pool is nonempty and may contain tasks at the
        // current priority level.
        tasks_present = true;
        return true;
    }
    if ( s->my_offloaded_tasks ) {
        tasks_present = true;
        if ( s->my_local_reload_epoch < *s->my_ref_reload_epoch ) {
            // The offload area may contain tasks at the current priority level.
            return true;
        }
    }
    return false;
}

// Moves the scheduler's offloaded tasks onto the arena's orphaned task list.
void arena::orphan_offloaded_tasks ( generic_scheduler& s ) {
    __TBB_ASSERT( s.my_offloaded_tasks, NULL );
    ++my_abandonment_epoch;
    __TBB_ASSERT( s.my_offloaded_task_list_tail_link && !*s.my_offloaded_task_list_tail_link, NULL );
    task* orphans;
    do {
        orphans = const_cast<task*>(my_orphaned_tasks);
        *s.my_offloaded_task_list_tail_link = orphans;
    } while ( as_atomic(my_orphaned_tasks).compare_and_swap(s.my_offloaded_tasks, orphans) != orphans );
    s.my_offloaded_tasks = NULL;
#if TBB_USE_ASSERT
    s.my_offloaded_task_list_tail_link = NULL;
#endif
}
#endif /* __TBB_TASK_PRIORITY */
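The do/while above splices an entire offloaded-task list onto the shared my_orphaned_tasks list with one CAS: the local list's tail is pointed at the current head, then the head is swung to the local list's front, retrying if the head moved in between. The same push-a-chain pattern in minimal form, with a hypothetical Node type and std::atomic in place of tbb::atomic:

    #include <atomic>

    struct Node { Node* next; };

    // Prepend an already-linked chain [first..last] onto an atomic list head.
    void splice( std::atomic<Node*>& head, Node* first, Node* last ) {
        Node* old_head = head.load();
        do {
            last->next = old_head; // our tail points at the current head
        } while ( !head.compare_exchange_weak( old_head, first ) );
        // on failure, old_head is refreshed and the tail gets re-linked
    }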
// Checks for the presence of enqueued tasks at any priority level.
bool arena::has_enqueued_tasks() {
    for ( int p = 0; p < num_priority_levels; ++p )
        if ( !my_task_stream.empty(p) )
            return true;
    return false;
}

// If enqueued tasks are found, restores arena priority and task presence
// status: the arena may have been switched to the empty state while an
// enqueue was in flight.
void arena::restore_priority_if_need() {
    if ( has_enqueued_tasks() ) {
        advertise_new_work<work_enqueued>();
#if __TBB_TASK_PRIORITY
        // ...
        for ( int p = 0; p < num_priority_levels; ++p )
            if ( !my_task_stream.empty(p) ) {
                if ( p < my_bottom_priority || p > my_top_priority )
                    my_market->update_arena_priority( *this, p );
            }
#endif
    }
}
// In arena::is_out_of_work, the SNAPSHOT_FULL case takes a snapshot of all
// task sources and, if every one of them is empty, switches the arena off.
case SNAPSHOT_FULL: {
    // Use a unique id for "busy" to avoid ABA problems.
    const pool_state_t busy = pool_state_t(&busy);
    // Request permission to take the snapshot. This is not a lock: the state
    // can be reset to FULL at any moment by a thread that publishes new work.
    if( my_pool_state.compare_and_swap( busy, SNAPSHOT_FULL )==SNAPSHOT_FULL ) {
        size_t n = my_limit;
#if __TBB_TASK_PRIORITY
        // Local copies of volatile parameters; a change invalidates the attempt.
        intptr_t top_priority = my_top_priority;
        uintptr_t reload_epoch = __TBB_load_with_acquire( my_reload_epoch );
#endif
        // Check whether all primary task pools are empty.
        size_t k;
        for( k=0; k<n; ++k ) {
            // ...
            if( my_pool_state!=busy )
                return false; // the work was published
        }
        bool work_absent = k == n;
#if __TBB_PREVIEW_CRITICAL_TASKS
        bool no_critical_tasks = my_critical_task_stream.empty(0);
        work_absent &= no_critical_tasks;
#endif
#if __TBB_TASK_PRIORITY
        // tasks_present covers all priority levels; work_absent only the current one.
        bool tasks_present = !work_absent || my_orphaned_tasks;
        bool dequeuing_possible = false;
        if ( work_absent ) {
            // Recent priority changes may have brought tasks to this level.
            uintptr_t abandonment_epoch = my_abandonment_epoch;
            // The lock guarantees the master scheduler's lifetime while examined.
            the_context_state_propagation_mutex.lock();
            work_absent = !may_have_tasks( my_slots[0].my_scheduler, tasks_present, dequeuing_possible );
            the_context_state_propagation_mutex.unlock();
            // Subject to data races: the worker in slot k may leave or migrate
            // to another arena while being examined.
            for( k = 1; work_absent && k < n; ++k ) {
                if( my_pool_state!=busy )
                    return false; // the work was published
                work_absent = !may_have_tasks( my_slots[k].my_scheduler, tasks_present, dequeuing_possible );
            }
            // Preclude switching the arena off prematurely because of such races.
            work_absent = work_absent
                          // ...
                          && abandonment_epoch == my_abandonment_epoch;
        }
#endif /* __TBB_TASK_PRIORITY */
        // Test and test-and-set.
        if( my_pool_state==busy ) {
#if __TBB_TASK_PRIORITY
            bool no_fifo_tasks = my_task_stream.empty(top_priority);
            work_absent = work_absent && (!dequeuing_possible || no_fifo_tasks)
                          && top_priority == my_top_priority && reload_epoch == my_reload_epoch;
#else
            bool no_fifo_tasks = my_task_stream.empty(0);
            work_absent = work_absent && no_fifo_tasks;
#endif
            if( work_absent ) {
#if __TBB_TASK_PRIORITY
                if ( top_priority > my_bottom_priority ) {
                    if ( my_market->lower_arena_priority(*this, top_priority - 1, reload_epoch)
                         && !my_task_stream.empty(top_priority) )
                        atomic_update( my_skipped_fifo_priority, top_priority, std::less<intptr_t>());
                }
                else if ( !tasks_present && !my_orphaned_tasks && no_fifo_tasks ) {
#endif /* __TBB_TASK_PRIORITY */
                    // Save the demand value before switching to SNAPSHOT_EMPTY
                    // to avoid a race with advertise_new_work.
                    int current_demand = (int)my_max_num_workers;
                    if( my_pool_state.compare_and_swap( SNAPSHOT_EMPTY, busy )==busy ) {
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
                        if( my_concurrency_mode==cm_enforced_global ) {
                            my_market->mandatory_concurrency_disable( this );
                        } else
#endif
                        {
                            my_market->adjust_demand( *this, -current_demand );
                        }
                        restore_priority_if_need();
                        return true;
                    }
                    return false;
#if __TBB_TASK_PRIORITY
                }
#endif
            }
            // Undo the SNAPSHOT_FULL-->busy transition unless another thread did.
            my_pool_state.compare_and_swap( SNAPSHOT_FULL, busy );
        }
    }
    return false;
}
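The structure above is a test and test-and-set over my_pool_state: FULL transitions to a busy token (the address of a local variable, unique per attempt, so a stale CAS from an earlier snapshot cannot succeed), the scan runs while any spawner may flip the state back to FULL, and only an undisturbed busy state may be switched to EMPTY. A skeleton of that state machine under the same assumptions, with scan_found_work a hypothetical callback:

    #include <atomic>
    #include <cstdint>

    typedef std::uintptr_t pool_state_t;
    static const pool_state_t SNAPSHOT_EMPTY = 0;
    static const pool_state_t SNAPSHOT_FULL  = pool_state_t(-1);

    bool is_out_of_work_sketch( std::atomic<pool_state_t>& pool_state,
                                bool (*scan_found_work)() ) {
        pool_state_t snapshot = pool_state.load();
        if ( snapshot == SNAPSHOT_EMPTY ) return true;
        if ( snapshot != SNAPSHOT_FULL ) return false;     // someone else is scanning
        const pool_state_t busy = pool_state_t(&snapshot); // unique per attempt
        pool_state_t expected = SNAPSHOT_FULL;
        if ( !pool_state.compare_exchange_strong( expected, busy ) )
            return false;                                  // lost the race
        bool work_absent = !scan_found_work();
        expected = busy;
        if ( work_absent && pool_state.compare_exchange_strong( expected, SNAPSHOT_EMPTY ) )
            return true;  // this thread switched the arena off
        // Undo FULL->busy unless a spawner already republished work.
        expected = busy;
        pool_state.compare_exchange_strong( expected, SNAPSHOT_FULL );
        return false;
    }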
#if __TBB_COUNT_TASK_NODES
intptr_t arena::workers_task_node_count() {
    intptr_t result = 0;
    for ( unsigned i = 1; i < my_num_slots; ++i ) {
        generic_scheduler* s = my_slots[i].my_scheduler;
        if ( s )
            result += s->my_task_node_count;
    }
    return result;
}
#endif /* __TBB_COUNT_TASK_NODES */
void arena::enqueue_task( task& t, intptr_t prio, FastRandom &random )
{
#if __TBB_RECYCLE_TO_ENQUEUE
    // ...
#endif
#if TBB_USE_ASSERT
    // ...
    __TBB_ASSERT( ref_count!=0, "attempt to enqueue task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
    __TBB_ASSERT( ref_count>0, "attempt to enqueue task whose parent has a ref_count<0" );
    // ...
#endif /* TBB_USE_ASSERT */
#if __TBB_PREVIEW_CRITICAL_TASKS
    if( prio == internal::priority_critical || internal::is_critical( t ) ) {
        // Critical tasks bypass the FIFO streams below.
        // ...
#if __TBB_TASK_ISOLATION
        // ...
#endif
        my_critical_task_stream.push( &t, 0, internal::random_lane_selector(random) );
        // ...
        advertise_new_work<work_spawned>();
        return;
    }
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */
#if __TBB_TASK_PRIORITY
    intptr_t p = prio ? normalize_priority(priority_t(prio)) : normalized_normal_priority;
    assert_priority_valid(p);
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    my_task_stream.push( &t, p, internal::random_lane_selector(random) );
#else
    my_task_stream.push( &t, p, random );
#endif
    if ( p != my_top_priority )
        my_market->update_arena_priority( *this, p );
#else /* !__TBB_TASK_PRIORITY */
    __TBB_ASSERT_EX(prio == 0, "the library is not configured to respect the task priority");
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    my_task_stream.push( &t, 0, internal::random_lane_selector(random) );
#else
    my_task_stream.push( &t, 0, random );
#endif
#endif /* !__TBB_TASK_PRIORITY */
    advertise_new_work<work_enqueued>();
#if __TBB_TASK_PRIORITY
    if ( p != my_top_priority )
        my_market->update_arena_priority( *this, p );
#endif
}
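For reference, this is the path taken by the public enqueue API. A user-side example that ends up in arena::enqueue_task (the priority overload is only available when the library is built with __TBB_TASK_PRIORITY):

    #include <tbb/task.h>
    #include <cstdio>

    struct hello_task : tbb::task {
        tbb::task* execute() __TBB_override {
            std::printf( "ran from the FIFO task stream\n" );
            return NULL;
        }
    };

    void enqueue_example() {
        // allocate_root + enqueue: the task goes to the arena's
        // starvation-resistant queue, not the caller's local deque.
        tbb::task::enqueue( *new( tbb::task::allocate_root() ) hello_task,
                            tbb::priority_high );
    }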
generic_scheduler::nested_arena_context::nested_arena_context( generic_scheduler *s, arena* a, size_t slot_index, bool type, bool same )
    : my_scheduler(*s), my_orig_ctx(NULL), same_arena(same) {
    if ( same_arena ) {
        // Only the properties and the innermost task need saving.
        my_orig_state.my_properties = my_scheduler.my_properties;
        my_orig_state.my_innermost_running_task = my_scheduler.my_innermost_running_task;
        mimic_outermost_level(a, type);
    } else {
        // Save the whole scheduler state and re-attach to the new arena.
        my_orig_state = *s;
        mimic_outermost_level(a, type);
        s->nested_arena_entry(a, slot_index);
    }
}

generic_scheduler::nested_arena_context::~nested_arena_context() {
#if __TBB_TASK_GROUP_CONTEXT
    my_scheduler.my_dummy_task->prefix().context = my_orig_ctx; // restore the dummy task's context
#endif
    if ( same_arena ) {
        my_scheduler.my_properties = my_orig_state.my_properties;
        my_scheduler.my_innermost_running_task = my_orig_state.my_innermost_running_task;
    } else {
        my_scheduler.nested_arena_exit();
        static_cast<scheduler_state&>(my_scheduler) = my_orig_state; // restore the saved state
#if __TBB_TASK_PRIORITY
        my_scheduler.my_local_reload_epoch = *my_orig_state.my_ref_reload_epoch;
#endif
    }
}
void generic_scheduler::nested_arena_entry( arena* a, size_t slot_index ) {
    // ...
#if __TBB_PREVIEW_CRITICAL_TASKS
    // ...
#endif
#if __TBB_TASK_GROUP_CONTEXT
    // ...
#endif
#if __TBB_TASK_PRIORITY
    if ( my_offloaded_tasks )
        my_arena->orphan_offloaded_tasks( *this );
    my_offloaded_tasks = NULL;
#endif
    // ...
#if __TBB_ARENA_OBSERVER
    my_last_local_observer = 0;
    my_arena->my_observers.notify_entry_observers( my_last_local_observer, /*worker=*/false );
#endif
}

void generic_scheduler::nested_arena_exit() {
#if __TBB_ARENA_OBSERVER
    my_arena->my_observers.notify_exit_observers( my_last_local_observer, /*worker=*/false );
#endif
#if __TBB_TASK_PRIORITY
    if ( my_offloaded_tasks )
        my_arena->orphan_offloaded_tasks( *this );
#endif
    // ...
}
namespace interface7 {

// task_arena_base::internal_initialize: lazily creates the underlying arena.
    // ...
    if( my_max_concurrency < 1 )
        my_max_concurrency = (int)governor::default_num_threads();
    __TBB_ASSERT( my_master_slots <= (unsigned)my_max_concurrency,
                  "Number of slots reserved for master should not exceed arena concurrency");
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    // Allocate the default context for the task_arena.
    // ...
#if __TBB_FP_CONTEXT
    new_arena->my_default_ctx->capture_fp_settings();
#endif
#endif
    // Threads might race to initialize my_arena; the winner publishes its
    // arena and default context.
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    new_arena->my_default_ctx->my_version_and_traits |= my_version_and_traits & exact_exception_flag;
    as_atomic(my_context) = new_arena->my_default_ctx;
#endif
#if __TBB_TASK_GROUP_CONTEXT
    // ...
#endif

// In task_arena_base::internal_attach: adopt the arena of the calling thread.
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    my_context = my_arena->my_default_ctx;
    my_version_and_traits |= my_context->my_version_and_traits & exact_exception_flag;
#endif
// In task_arena_base::internal_enqueue:
#if __TBB_TASK_GROUP_CONTEXT
    // ...
    __TBB_ASSERT( !my_arena->my_default_ctx->is_group_execution_cancelled(),
                  "The task will not be executed because default task_group_context of task_arena is cancelled. Has previously enqueued task thrown an exception?");
#endif
// Mimics outermost-level task dispatch while executing a delegated functor.
struct outermost_context : internal::no_copy {
    task* t;              // the delegated task being executed
    generic_scheduler& s; // scheduler of the executing thread
    // ...
    task_group_context* orig_ctx;
    outermost_context( task* _t, generic_scheduler& _s ) : t(_t), s(_s) {
        // ...
#if __TBB_TASK_GROUP_CONTEXT
        // Save the task's context and substitute the arena's default one.
        orig_ctx = t->prefix().context;
        t->prefix().context = s.my_arena->my_default_ctx;
#endif
        // ...
    }
    ~outermost_context() {
#if __TBB_TASK_GROUP_CONTEXT
        // Restore the original context for possible subsequent re-execution.
        t->prefix().context = orig_ctx;
#endif
        // ...
    }
};
// delegated_task: wraps a delegate to be run in the target arena, with a
// monitor to wake the waiting master when it completes.
delegated_task( internal::delegate_base &d, concurrent_monitor &s, task *t )
    : my_delegate(d), my_monitor(s), my_root(t) {}
// Predicate for finding this request in the monitor's wait set.
bool operator()( uintptr_t ctx ) const { return (void*)ctx == (void*)&my_delegate; }

// In task_arena_base::internal_execute: if the calling thread cannot occupy
// an arena slot, a flow-graph functor is detected via RTTI and enqueued
// directly to avoid a deadlock on the graph's aggregator.
#if __TBB_USE_OPTIONAL_RTTI
    // ...
    internal::delegated_function< graph_funct, void >* deleg_funct =
        dynamic_cast< internal::delegated_function< graph_funct, void>* >(&d);
    if ( deleg_funct ) {
        internal_enqueue( *new( task::allocate_root(__TBB_CONTEXT_ARG1(*my_context)) )
                          internal::function_task< internal::strip< graph_funct >::type >
                          (internal::forward< graph_funct >(deleg_funct->my_func)), 0 );
        return;
    }
#endif /* __TBB_USE_OPTIONAL_RTTI */
// Exceptions captured from the delegated functor are rethrown on the calling thread.
#if __TBB_TASK_GROUP_CONTEXT
    // ...
#endif
#if TBB_USE_EXCEPTIONS
    // ...
    TbbRethrowException( pe );
    // ...
#endif
#if __TBB_USE_OPTIONAL_RTTI
    // ...
#endif
#if TBB_USE_EXCEPTIONS
    // ...
    // Rethrow the exact exception only if the arena was configured for it.
    if ( my_version_and_traits & exact_exception_flag )
        throw;
    // ...
#endif
#if __TBB_TASK_ISOLATION
// Restores the previous isolation value on scope exit.
class isolation_guard : internal::no_copy {
    isolation_tag& guarded;
    isolation_tag previous_value;
public:
    isolation_guard( isolation_tag &isolation ) : guarded( isolation ), previous_value( isolation ) {}
    ~isolation_guard() {
        guarded = previous_value;
    }
};

void __TBB_EXPORTED_FUNC isolate_within_arena( delegate_base& d, intptr_t reserved ) {
    // ...
    generic_scheduler* s = governor::local_scheduler_weak();
    __TBB_ASSERT( s, "this_task_arena::isolate() needs an initialized scheduler" );
    // Temporarily change the isolation of the innermost running task; the
    // guard restores the previous isolation on exit.
    isolation_tag& current_isolation = s->my_innermost_running_task->prefix().isolation;
    isolation_guard guard( current_isolation );
    // ...
}
#endif /* __TBB_TASK_ISOLATION */
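isolate_within_arena is the backend of the public this_task_arena::isolate. A usage example (available in recent TBB versions): work stealing inside the isolated lambda is confined to tasks spawned within it, so a thread blocked there cannot pick up unrelated outer-level work:

    #include <tbb/parallel_for.h>
    #include <tbb/task_arena.h>

    void isolate_example() {
        tbb::parallel_for( 0, 100, []( int ) {
            tbb::this_task_arena::isolate( [] {
                // While waiting here, this thread only steals tasks that
                // belong to this nested region.
                tbb::parallel_for( 0, 10, []( int ) { /* nested work */ } );
            } );
        } );
    }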
market * my_market
The market I am in.
bool outermost
Indicates that the scheduler is on the outermost dispatch level.
scheduler_properties my_properties
T1 atomic_update(tbb::atomic< T1 > &dst, T2 newValue, Pred compare)
Atomically replaces value of dst with newValue if they satisfy condition of compare predicate...
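A minimal sketch of that predicate-guarded update loop, using std::atomic and compare_exchange rather than tbb::atomic::compare_and_swap (semantics as described above; the helper name is hypothetical):

    #include <atomic>
    #include <functional>

    template <typename T, typename Pred>
    T atomic_update_sketch( std::atomic<T>& dst, T new_value, Pred compare ) {
        T old_value = dst.load();
        // Keep trying while the new value still wins under the predicate and
        // other threads keep changing dst underneath us.
        while ( compare( old_value, new_value ) &&
                !dst.compare_exchange_weak( old_value, new_value ) ) {
            // old_value was refreshed by the failed CAS; re-test and retry.
        }
        return old_value;
    }

    // Mirrors the arena code: grow my_limit monotonically.
    //   atomic_update_sketch( my_limit, index + 1, std::less<unsigned>() );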
concurrent_monitor my_exit_monitors
Waiting object for master threads that cannot join the arena.
static const intptr_t num_priority_levels
static void one_time_init()
void notify(const P &predicate)
Notify waiting threads of the event that satisfies the given predicate.
static int __TBB_EXPORTED_FUNC internal_current_slot()
virtual void local_wait_for_all(task &parent, task *child)=0
T __TBB_load_relaxed(const volatile T &location)
static const int priority_critical
void set_is_idle(bool value)
Indicate whether thread that reads this mailbox is idle.
static const unsigned ref_external
Reference increment values for externals and workers.
internal::arena * my_arena
NULL if not currently initialized.
task * my_dummy_task
Fake root task created by slave threads.
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
A functor that spawns a task.
void restore_priority_if_need()
If enqueued tasks are found, restores arena priority and task presence status.
void __TBB_EXPORTED_METHOD internal_wait() const
intptr_t reference_count
A reference count.
void __TBB_EXPORTED_METHOD internal_execute(delegate_base &) const
task object is freshly allocated or recycled.
void make_critical(task &t)
size_t occupy_free_slot_in_range(generic_scheduler &s, size_t lower, size_t upper)
Tries to occupy a slot in the specified range.
#define ITT_SYNC_CREATE(obj, type, name)
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
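A minimal equivalent using std::atomic (the real helper adds exponential backoff before yielding; the name is hypothetical):

    #include <atomic>
    #include <thread>

    template <typename T, typename U>
    void spin_wait_while_eq_sketch( const std::atomic<T>& location, U value ) {
        // Spin WHILE the variable equals the given value.
        while ( location.load( std::memory_order_acquire ) == value )
            std::this_thread::yield();
    }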
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
arena(market &, unsigned max_num_workers, unsigned num_reserved_slots)
Constructor.
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
task **__TBB_atomic task_pool
bool worker_outermost_level() const
True if the scheduler is on the outermost dispatch level in a worker thread.
task_group_context * my_orig_ctx
#define GATHER_STATISTIC(x)
static arena & allocate_arena(market &, unsigned num_slots, unsigned num_reserved_slots)
Allocate an instance of arena.
bool master_outermost_level() const
True if the scheduler is on the outermost dispatch level in a master thread.
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
static generic_scheduler * local_scheduler_weak()
void __TBB_EXPORTED_METHOD internal_attach()
internal::tbb_exception_ptr exception_container_type
static unsigned num_arena_slots(unsigned num_slots)
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
void __TBB_EXPORTED_FUNC isolate_within_arena(delegate_base &d, intptr_t reserved=0)
bool outermost_level() const
True if the scheduler is on the outermost dispatch level.
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
void attach_arena(arena *, size_t index, bool is_master)
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
unsigned num_workers_active()
The number of workers active in the arena.
bool type
Indicates that a scheduler acts as a master or a worker.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
unsigned short affinity_id
An id as used for specifying affinity.
void process(generic_scheduler &)
Registers the worker with the arena and enters TBB scheduler dispatch loop.
generic_scheduler & my_scheduler
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
bool operator()(uintptr_t ctx) const
void __TBB_EXPORTED_METHOD internal_initialize()
atomic< T > & as_atomic(T &t)
void notify_one()
Notify one thread about the event.
market * my_market
The market that owns this arena.
void free_arena()
Completes arena shutdown, destructs and deallocates it.
void detach()
Detach inbox from its outbox.
static int __TBB_EXPORTED_FUNC internal_max_concurrency(const task_arena *)
static const size_t out_of_arena
bool is_idle_state(bool value) const
Indicate whether thread that reads this mailbox is idle.
bool is_worker() const
True if running on a worker thread, false otherwise.
void set_ctx(__TBB_CONTEXT_ARG1(task_group_context *))
void prepare_wait(thread_context &thr, uintptr_t ctx=0)
prepare wait by inserting 'thr' into the wait queue
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
virtual task * receive_or_steal_task(__TBB_ISOLATION_ARG(__TBB_atomic reference_count &completion_ref_count, isolation_tag isolation))=0
Try getting a task from other threads (via mailbox, stealing, FIFO queue, orphans adoption)...
binary_semaphore & my_signal
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.
task * execute() __TBB_override
Should be overridden by derived classes.
state_type state() const
Current execution state.
void mimic_outermost_level(arena *a, bool type)
#define __TBB_CONTEXT_ARG(arg1, context)
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
void nested_arena_entry(arena *, size_t)
static generic_scheduler * local_scheduler_if_initialized()
concurrent_monitor & my_monitor
void __TBB_store_with_release(volatile T &location, V value)
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
Used to form groups of tasks.
A fast random number generator.
Bit-field representing properties of a scheduler.
bool is_out_of_work()
Check if there is job anywhere in arena.
task * execute() __TBB_override
Should be overridden by derived classes.
#define __TBB_ASSERT_EX(predicate, comment)
"Extended" version is useful to suppress warnings if a variable is only used with an assert ...
internal::delegate_base & my_delegate
void enqueue_task(task &, intptr_t, FastRandom &)
enqueue a task into starvation-resistance queue
task is in ready pool, or is going to be put there, or was just taken off.
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
T __TBB_load_with_acquire(const volatile T &location)
#define __TBB_CONTEXT_ARG1(context)
bool commit_wait(thread_context &thr)
Commit wait if event count has not changed; otherwise, cancel wait.
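prepare_wait, commit_wait, and cancel_wait form a two-phase sleep that closes the lost-wakeup window: a thread registers itself first, re-checks its condition, and only then commits the wait (or cancels it if the condition already holds). A condition-variable analogue of that caller protocol, offered as a hedged sketch rather than the real concurrent_monitor internals:

    #include <condition_variable>
    #include <mutex>

    struct monitor_sketch {
        std::mutex m;
        std::condition_variable cv;
        unsigned epoch; // bumped on every notify, like the monitor's event count

        monitor_sketch() : epoch(0) {}

        template <typename Pred>
        void wait_until( Pred done ) {
            std::unique_lock<std::mutex> lk(m); // "prepare_wait": register first
            unsigned my_epoch = epoch;
            if ( done() ) return;               // condition met: "cancel_wait"
            // "commit_wait": sleep only if no event arrived since registration.
            cv.wait( lk, [&]{ return epoch != my_epoch || done(); } );
        }

        void notify_one_event() {               // like notify_one()
            { std::lock_guard<std::mutex> lk(m); ++epoch; }
            cv.notify_one();
        }
    };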
#define ITT_NOTIFY(name, obj)
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
void __TBB_EXPORTED_METHOD internal_enqueue(task &, intptr_t) const
bool is_critical(task &t)
nested_arena_context(generic_scheduler *s, arena *a, size_t slot_index, bool type, bool same)
Base class for types that should not be copied or assigned.
int ref_count() const
The internal reference count.
delegated_task(internal::delegate_base &d, concurrent_monitor &s, task *t)
Set if ref_count might be changed by another thread. Used for debugging.
intptr_t isolation_tag
A tag for task isolation.
Base class for user-defined tasks.
Work stealing task scheduler.
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
void adjust_demand(arena &, int delta)
Request that arena's need in workers should be adjusted.
uintptr_t my_arenas_aba_epoch
ABA prevention marker to assign to newly created arenas.
#define __TBB_ENQUEUE_ENFORCED_CONCURRENCY
bool has_enqueued_tasks()
Check for the presence of enqueued tasks at all priority levels.
#define __TBB_ISOLATION_ARG(arg1, isolation)
const isolation_tag no_isolation
static bool occupy_slot(generic_scheduler *&slot, generic_scheduler &s)
atomic< unsigned > my_references
Reference counter for the arena.
Class representing where mail is put.
scheduler_state my_orig_state
binary_semaphore for concurrent monitor
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
static unsigned default_num_threads()
Smart holder for the empty task class with automatic destruction.
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
size_t __TBB_EXPORTED_FUNC NFS_GetLineSize()
Cache/sector line size.
bool is_quiescent_local_task_pool_reset() const
unsigned short get()
Get a random number.
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
void on_thread_leaving()
Notification that worker or master leaves its arena.
wait_task(binary_semaphore &sema)
int my_max_concurrency
Concurrency level for deferred initialization.
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
void __TBB_EXPORTED_METHOD internal_terminate()
void attach_mailbox(affinity_id id)
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
size_t occupy_free_slot(generic_scheduler &s)
Tries to occupy a slot in the arena. On success, returns the slot index; if no slot is available...
void cancel_wait(thread_context &thr)
Cancel the wait. Removes the thread from the wait queue if not removed yet.
unsigned my_num_slots
The number of slots in the arena.