#ifndef _TBB_scheduler_H
#define _TBB_scheduler_H

#include "../rml/include/rml_tbb.h"

#if __TBB_SURVIVE_THREAD_SWITCH
// ...
template<typename SchedulerTraits> class custom_scheduler;
// ...
#define EmptyTaskPool  ((task**)0)
#define LockedTaskPool ((task**)~(intptr_t)0)
// ...
#if __TBB_PREVIEW_CRITICAL_TASKS
    bool has_taken_critical_task : 1;
// ...
#if __TBB_SCHEDULER_OBSERVER
    observer_proxy* my_last_global_observer;
// ...
#if __TBB_ARENA_OBSERVER
    observer_proxy* my_last_local_observer;
// ...
#if __TBB_TASK_PRIORITY
// ...
    volatile intptr_t *my_ref_top_priority;
// ...
    volatile uintptr_t *my_ref_reload_epoch;
// ...
#if __TBB_PREVIEW_CRITICAL_TASKS
    return (t.prefix().extra_state & 0x7)>=0x1;
#else
    return (t.prefix().extra_state & 0x0F)>=0x1;
// ...
    uintptr_t my_rsb_stealing_threshold;
// ...
#if __TBB_HOARD_NONLOCAL_TASKS
    task* my_nonlocal_free_list;
// ...
#if __TBB_COUNT_TASK_NODES
    intptr_t my_task_node_count;
#if __TBB_TASK_ISOLATION
// ...
#if __TBB_PREVIEW_CRITICAL_TASKS
// ...
    bool handled_as_critical( task& t );
// ...
#if TBB_USE_ASSERT > 1
// ...
    template<free_task_hint h>
// ...
#if __TBB_COUNT_TASK_NODES
    intptr_t get_task_node_count( bool count_arena_workers = false );
// ...
#if __TBB_TASK_GROUP_CONTEXT
// ...
    uintptr_t my_context_state_propagation_epoch;
// ...
#if __TBB_TASK_PRIORITY
    inline intptr_t effective_reference_priority () const;
// ...
    task* my_offloaded_tasks;
// ...
    task** my_offloaded_task_list_tail_link;
// ...
    uintptr_t my_local_reload_epoch;
// ...
    volatile bool my_pool_reshuffling_pending;
// ...
    inline void offload_task ( task& t, intptr_t task_priority );
// ...
    void cleanup_local_context_list ();
// ...
    template <typename T>
// ...
        __TBB_ASSERT(is_alive(ctx), "referenced task_group_context was destroyed");
        static const char *msg = "task_group_context is invalid";
// ...
#if __TBB_TASK_PRIORITY
// ...
#if TBB_USE_ASSERT > 1
// ...
    ::rml::server::execution_resource_t master_exec_resource;
// ...
#if __TBB_TASK_GROUP_CONTEXT
// ...
#if __TBB_SURVIVE_THREAD_SWITCH
// ...
    cilk_state_t my_cilk_state;
// ...
    mutable statistics_counters my_counters;
#if __TBB_TASK_GROUP_CONTEXT
// ...
    p.extra_state = 0xFF;
// ...
#if __TBB_COUNT_TASK_NODES
    --my_task_node_count;
// ...
#if __TBB_COUNT_TASK_NODES
inline intptr_t generic_scheduler::get_task_node_count( bool count_arena_workers ) {
    return my_task_node_count + (count_arena_workers? my_arena->workers_task_node_count(): 0);
// ...
    __TBB_ASSERT ( new_tail <= my_arena_slot->my_task_pool_size, "task deque end was overwritten" );
// ...
                 "Task pool must be locked when calling commit_relocated_tasks()" );
template<free_task_hint hint>
// ...
#if __TBB_HOARD_NONLOCAL_TASKS
// ...
    } else if( !(h&local_task) && p.origin && uintptr_t(p.origin) < uintptr_t(4096) ) {
// ...
#if __TBB_HOARD_NONLOCAL_TASKS
// ...
        p.next = my_nonlocal_free_list;
        my_nonlocal_free_list = &t;
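The hint parameter above is a compile-time constant, so the compiler can drop the branches a caller has already ruled out, and foreign small tasks are hoarded on my_nonlocal_free_list instead of being returned immediately. A minimal sketch of that pattern, assuming illustrative names (MiniTask, MiniScheduler, FreeHint are not TBB types) and simplified bookkeeping:

#include <cstdint>
#include <cstdlib>

// Illustrative stand-ins; the real scheduler works with tbb::task and its task_prefix.
struct MiniTask {
    MiniTask* next = nullptr;   // intrusive free-list link (plays the role of prefix().next)
    void*     origin = nullptr; // scheduler that allocated this task
};

// Bit-flag hints, analogous in spirit to free_task_hint (no_hint/local_task/small_task).
enum FreeHint : int {
    no_hint    = 0,
    local_task = 1, // known to be allocated by this scheduler
    small_task = 2  // known to fit the free-list size class
};

struct MiniScheduler {
    MiniTask* free_list = nullptr;          // local small-task cache
    MiniTask* nonlocal_free_list = nullptr; // tasks allocated elsewhere, hoarded locally

    // The hint is a template parameter, so unreachable branches vanish at compile time.
    template<int h>
    void free_task(MiniTask& t) {
        if ((h & local_task) && (h & small_task)) {
            t.next = free_list;             // fast path: push onto the local free list
            free_list = &t;
        } else if (!(h & local_task) && t.origin) {
            t.next = nonlocal_free_list;    // cache a foreign task instead of returning it now
            nonlocal_free_list = &t;
        } else {
            std::free(&t);                  // assume heap-allocated: give it back to the allocator
        }
    }
};

With the hint known at compile time, a call such as s.free_task<local_task | small_task>(t) reduces to the two stores of the fast path.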
#if __TBB_TASK_PRIORITY
inline intptr_t generic_scheduler::effective_reference_priority () const {
// ...
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
            && my_arena->my_concurrency_mode!=arena_base::cm_enforced_global
#endif
        ) ? *my_ref_top_priority : my_arena->my_top_priority;
// ...
inline void generic_scheduler::offload_task ( task& t, intptr_t ) {
// ...
    __TBB_ASSERT( my_offloaded_task_list_tail_link && !*my_offloaded_task_list_tail_link, NULL );
// ...
    t.prefix().next_offloaded = my_offloaded_tasks;
    my_offloaded_tasks = &t;
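Offloading pushes the task onto an intrusive singly linked list headed by my_offloaded_tasks, while my_offloaded_task_list_tail_link remembers where the list currently ends. A hedged sketch of that head-push-plus-tail-link idea (OffTask and OffloadList are illustrative; TBB maintains its tail link somewhat differently):

#include <cassert>

struct OffTask {
    OffTask* next_offloaded = nullptr; // intrusive link, like task_prefix::next_offloaded
};

struct OffloadList {
    OffTask*  head = nullptr;
    // Points at the link field of the last element (or at 'head' when empty),
    // so the list can be terminated or spliced in O(1) later.
    OffTask** tail_link = &head;

    void push(OffTask& t) {
        assert(tail_link && *tail_link == nullptr); // mirrors the __TBB_ASSERT above
        t.next_offloaded = head;                    // push at the head, as offload_task() does
        if (head == nullptr)
            tail_link = &t.next_offloaded;          // the first element becomes the tail
        head = &t;
    }
};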
#if __TBB_PREVIEW_CRITICAL_TASKS
class critical_task_count_guard : internal::no_copy {
// ...
    critical_task_count_guard(scheduler_properties& properties, task& t)
        : my_properties(properties),
          my_original_critical_task_state(properties.has_taken_critical_task) {
// ...
    ~critical_task_count_guard() {
        my_properties.has_taken_critical_task = my_original_critical_task_state;
// ...
    scheduler_properties& my_properties;
    bool my_original_critical_task_state;
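critical_task_count_guard is a plain RAII save/restore guard: it records has_taken_critical_task on construction and writes the old value back in its destructor. The same pattern in isolation (a sketch, not TBB code):

// Generic save/restore guard: remember a flag on entry, put it back on scope
// exit, even if an exception unwinds the stack.
struct flag_restore_guard {
    bool& flag;
    bool  saved;
    explicit flag_restore_guard(bool& f) : flag(f), saved(f) {}
    ~flag_restore_guard() { flag = saved; }
    flag_restore_guard(const flag_restore_guard&) = delete;            // non-copyable,
    flag_restore_guard& operator=(const flag_restore_guard&) = delete; // like internal::no_copy
};

// Usage: toggle has_taken_critical_task only for the duration of one dispatch.
void run_critical_section(bool& has_taken_critical_task) {
    flag_restore_guard guard(has_taken_critical_task);
    has_taken_critical_task = true;
    // ... execute the critical task ...
}   // previous value restored here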
#if __TBB_FP_CONTEXT || __TBB_TASK_GROUP_CONTEXT
// ...
template <bool report_tasks>
class context_guard_helper {
#if __TBB_TASK_GROUP_CONTEXT
    const task_group_context *curr_ctx;
// ...
    cpu_ctl_env guard_cpu_ctl_env;
    cpu_ctl_env curr_cpu_ctl_env;
// ...
    context_guard_helper()
#if __TBB_TASK_GROUP_CONTEXT
// ...
        guard_cpu_ctl_env.get_env();
        curr_cpu_ctl_env = guard_cpu_ctl_env;
// ...
    ~context_guard_helper() {
// ...
        if ( curr_cpu_ctl_env != guard_cpu_ctl_env )
            guard_cpu_ctl_env.set_env();
// ...
#if __TBB_TASK_GROUP_CONTEXT
        if (report_tasks && curr_ctx)
// ...
    void set_ctx( const task_group_context *ctx ) {
        generic_scheduler::assert_context_valid(ctx);
// ...
        const cpu_ctl_env &ctl = *punned_cast<cpu_ctl_env*>(&ctx->my_cpu_ctl_env);
// ...
#if __TBB_TASK_GROUP_CONTEXT
        if(ctx != curr_ctx) {
// ...
        if ( ctl != curr_cpu_ctl_env ) {
            curr_cpu_ctl_env = ctl;
            curr_cpu_ctl_env.set_env();
// ...
#if __TBB_TASK_GROUP_CONTEXT
// ...
        if ( curr_cpu_ctl_env != guard_cpu_ctl_env ) {
            guard_cpu_ctl_env.set_env();
            curr_cpu_ctl_env = guard_cpu_ctl_env;
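context_guard_helper captures the floating-point control state (guard_cpu_ctl_env) on entry and restores it whenever the current context's settings differ, so a task cannot leak FP rounding or exception settings to its caller. A standalone sketch of the same capture/restore shape using the standard <cfenv> interface (TBB's cpu_ctl_env manipulates the x87/MXCSR control words directly instead):

#include <cfenv>

// RAII guard over the floating-point environment.
class fp_env_guard {
    std::fenv_t saved_env;
public:
    fp_env_guard()  { std::fegetenv(&saved_env); }   // like guard_cpu_ctl_env.get_env()
    ~fp_env_guard() { std::fesetenv(&saved_env); }   // like guard_cpu_ctl_env.set_env()
    fp_env_guard(const fp_env_guard&) = delete;
    fp_env_guard& operator=(const fp_env_guard&) = delete;
};

void run_task_with_its_own_fp_settings() {
    fp_env_guard guard;                 // capture the caller's FP settings
    std::fesetround(FE_TOWARDZERO);     // the task may change rounding, flags, etc.
    // ... execute the task ...
}                                       // caller's FP environment is restored here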
void deallocate_task(task &t)
Return task object to the memory allocator.
__TBB_atomic size_t head
Index of the first ready task in the deque.
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
void release_task_pool() const
Unlocks the local task pool.
static task * plugged_return_list()
Special value used to mark my_return_list as not taking any more entries.
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
generic_scheduler(market &)
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
T __TBB_load_relaxed(const volatile T &location)
#define __TBB_store_release
void leave_task_pool()
Leave the task pool.
virtual void local_wait_for_all(task &parent, task *child)=0
task * my_dummy_task
Fake root task created by slave threads.
void spawn_root_and_wait(task &first, task *&next) __TBB_override
For internal use only.
void __TBB_store_relaxed(volatile T &location, V value)
__TBB_atomic intptr_t my_small_task_count
Number of small tasks that have been allocated by this scheduler.
unsigned char
Reserved bits.
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
void acquire_task_pool() const
Locks the local task pool.
#define __TBB_ISOLATION_EXPR(isolation)
void reset_task_pool_and_leave()
Resets head and tail indices to 0, and leaves task pool.
intptr_t reference_count
A reference count.
size_t prepare_task_pool(size_t n)
Makes sure that the task pool can accommodate at least n more elements.
task ** lock_task_pool(arena_slot *victim_arena_slot) const
Locks victim's task pool, and returns pointer to it. The pointer can be NULL.
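Locking works by swapping the LockedTaskPool sentinel (defined near the top of the listing) into the slot's task_pool pointer; EmptyTaskPool means there is nothing to lock. A minimal sketch of that pointer-as-lock scheme, assuming std::atomic rather than TBB's machine primitives:

#include <atomic>
#include <cstdint>
#include <thread>

struct task;  // opaque for the sketch

// Sentinels with the same meaning as EmptyTaskPool / LockedTaskPool above.
static task** const empty_pool  = nullptr;
static task** const locked_pool = reinterpret_cast<task**>(~std::intptr_t(0));

// A slot whose task_pool pointer doubles as a lock word: swapping in the
// locked_pool sentinel takes the lock, writing the old pointer back releases it.
struct slot {
    std::atomic<task**> task_pool{empty_pool};
};

// Returns the locked pool, or nullptr if the victim's pool was empty
// (in which case nothing was locked), mirroring lock_task_pool()'s contract.
task** lock_pool(slot& victim) {
    for (;;) {
        task** observed = victim.task_pool.load(std::memory_order_relaxed);
        if (observed == empty_pool)
            return nullptr;                      // nothing to steal, do not lock
        if (observed != locked_pool &&
            victim.task_pool.compare_exchange_weak(observed, locked_pool,
                                                   std::memory_order_acquire))
            return observed;                     // we own the pool now
        std::this_thread::yield();               // someone else holds it; back off
    }
}

void unlock_pool(slot& victim, task** pool) {
    victim.task_pool.store(pool, std::memory_order_release);
}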
free_task_hint
Optimization hint to free_task that enables it to omit unnecessary tests and code.
Bitwise-OR of local_task and small_task.
void set_ctx(__TBB_CONTEXT_ARG1(task_group_context *))
void nested_arena_entry(arena *, size_t)
void local_spawn(task *first, task *&next)
intptr_t my_priority
Priority level of the task group (in normalized representation)
bool can_steal()
Returns true if stealing is allowed.
bool cleanup_master(bool blocking_terminate)
Perform necessary cleanup when a master thread stops using TBB.
Memory prefix to a task object.
bool outermost_level() const
True if the scheduler is on the outermost dispatch level.
#define GATHER_STATISTIC(x)
unsigned num_workers_active()
The number of workers active in the arena.
void poison_pointer(T *__TBB_atomic &)
bool is_task_pool_published() const
const size_t task_prefix_reservation_size
Number of bytes reserved for a task prefix.
task **__TBB_atomic task_pool
state_type state() const
Current execution state.
auto first(Container &c) -> decltype(begin(c))
scheduler_properties my_properties
affinity_id my_affinity_id
The mailbox id assigned to this scheduler.
void free_nonlocal_small_task(task &t)
Free a small task t that was allocated by a different scheduler.
void attach_mailbox(affinity_id id)
task * my_free_list
Free list of small tasks that can be reused.
Used to form groups of tasks.
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
task object is freshly allocated or recycled.
A fast random number generator.
Bit-field representing properties of a scheduler.
void * __TBB_get_bsp()
Retrieves the current RSE backing store pointer. IA64 specific.
task * prepare_for_spawning(task *t)
Checks if t is affinitized to another thread, and if so, bundles it as proxy.
Disable caching for a small task.
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
uintptr_t my_stealing_threshold
Position in the call stack specifying its maximal filling when stealing is still allowed.
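The heuristic behind this threshold is to compare the address of a local variable (the current stack position) against a precomputed limit, so that deep call stacks stop stealing before they risk overflow. A hedged sketch, assuming a downward-growing stack; can_steal_here and the threshold formula are illustrative, not TBB's exact code:

#include <cstdint>

// Approximate "how deep are we in the stack" by taking the address of a local
// variable and comparing it against a precomputed threshold (the details of
// init_stack_info() are omitted here).
inline bool can_steal_here(std::uintptr_t stealing_threshold) {
    int anchor;  // its address marks the current stack position
    return reinterpret_cast<std::uintptr_t>(&anchor) > stealing_threshold;
}

// The threshold itself would be derived from the stack base and size, e.g.
//   stealing_threshold = stack_base - stack_size / 2;
// which leaves roughly half of the stack as headroom for executing stolen tasks.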
static const size_t null_arena_index
bool is_local_task_pool_quiescent() const
unsigned short affinity_id
An id as used for specifying affinity.
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
static const kind_type dying
task * steal_task_from(__TBB_ISOLATION_ARG(arena_slot &victim_arena_slot, isolation_tag isolation))
Steal task from another scheduler's ready pool.
unsigned my_num_slots
The number of slots in the arena.
void publish_task_pool()
Used by workers to enter the task pool.
static const size_t quick_task_size
If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd.
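In other words, small tasks are recycled through an intrusive free list and only larger allocations go through the heap allocator. A self-contained sketch of that size-class scheme (small_object_cache and quick_size are illustrative names, not TBB's allocator):

#include <cstdlib>
#include <new>

struct free_node { free_node* next; };

struct small_object_cache {
    static const std::size_t quick_size = 256;  // stand-in for quick_task_size
    free_node* free_list = nullptr;

    void* allocate(std::size_t bytes) {
        if (bytes <= quick_size) {
            if (free_list) {
                free_node* n = free_list;       // reuse a cached block
                free_list = n->next;
                return n;
            }
            bytes = quick_size;                 // all small blocks share one size class
        }
        void* p = std::malloc(bytes);
        if (!p) throw std::bad_alloc();
        return p;
    }

    void deallocate(void* p, std::size_t bytes) {
        if (bytes <= quick_size) {              // small: keep it for reuse
            free_node* n = static_cast<free_node*>(p);
            n->next = free_list;
            free_list = n;
        } else {
            std::free(p);                       // large: return it to the heap
        }
    }
};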
task * get_mailbox_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempt to get a task from the mailbox.
Class representing source of mail.
Task is known to be a small task.
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
#define ITT_TASK_BEGIN(type, name, id)
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
void attach(mail_outbox &putter)
Attach inbox to a corresponding outbox.
task object is on free list, or is going to be put there, or was just taken off.
bool my_auto_initialized
True if *this was created by automatic TBB initialization.
Task is known to have been allocated by this scheduler.
task * my_return_list
List of small tasks that have been returned to this scheduler by other schedulers.
unsigned my_num_workers_allotted
The number of workers that have been marked out by the resource manager to service the arena.
static generic_scheduler * create_worker(market &m, size_t index)
Initialize a scheduler for a worker thread.
bool type
Indicates that a scheduler acts as a master or a worker.
A lock that occupies a single byte.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
void unlock_task_pool(arena_slot *victim_arena_slot, task **victim_task_pool) const
Unlocks victim's task pool.
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
bool is_quiescent_local_task_pool_reset() const
void spawn(task &first, task *&next) __TBB_override
For internal use only.
task * get_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get a task from the local pool.
static bool is_version_3_task(task &t)
bool outermost
Indicates that a scheduler is on the outermost level.
void free_scheduler()
Destroy and deallocate this scheduler object.
static const size_t min_task_pool_size
#define __TBB_CONTEXT_ARG(arg1, context)
Base class for user-defined tasks.
void local_spawn_root_and_wait(task *first, task *&next)
__TBB_atomic size_t tail
Index of the element following the last ready task in the deque.
Work stealing task scheduler.
void __TBB_store_with_release(volatile T &location, V value)
void attach_arena(arena *, size_t index, bool is_master)
void init_stack_info()
Sets up the data necessary for the stealing limiting heuristics.
bool is_quiescent_local_task_pool_empty() const
#define __TBB_CONTEXT_ARG1(context)
long my_ref_count
Reference count for scheduler.
#define ITT_NOTIFY(name, obj)
internal::context_list_node_t my_node
Used to form the thread specific list of contexts without additional memory allocation.
void assert_task_pool_valid() const
void commit_spawned_tasks(size_t new_tail)
Makes newly spawned tasks visible to thieves.
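Visibility is achieved by ordering: the task pointers are written into the deque first, and only then is tail advanced with a release store (cf. __TBB_store_with_release in this index), so a thief that reads tail with acquire semantics also observes the tasks. A sketch of that publication protocol with std::atomic (local_pool is illustrative; bounds handling is omitted):

#include <atomic>
#include <cstddef>

struct task;

struct local_pool {
    task* deque[64];
    std::atomic<std::size_t> tail{0};

    void commit_spawned(task* t) {
        std::size_t old_tail = tail.load(std::memory_order_relaxed);
        deque[old_tail] = t;                                   // plain write of the slot
        tail.store(old_tail + 1, std::memory_order_release);   // then publish it
    }
};

// A thief pairs this with: std::size_t n = pool.tail.load(std::memory_order_acquire);
// after which deque[0..n) is guaranteed to hold fully initialized task pointers.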
bool is_critical(task &t)
virtual task * receive_or_steal_task(__TBB_ISOLATION_ARG(__TBB_atomic reference_count &completion_ref_count, isolation_tag isolation))=0
Try getting a task from other threads (via mailbox, stealing, FIFO queue, orphans adoption).
Data structure to be inherited by the types that can form intrusive lists.
market * my_market
The market I am in.
intptr_t isolation_tag
A tag for task isolation.
A scheduler with a customized evaluation loop.
static generic_scheduler * create_master(arena *a)
Initialize a scheduler for a master thread.
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
bool worker_outermost_level() const
True if the scheduler is on the outermost dispatch level in a worker thread.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
unsigned max_threads_in_arena()
Returns the concurrency limit of the current arena.
void enqueue(task &, void *reserved) __TBB_override
For internal use only.
internal::generic_scheduler * my_owner
Scheduler instance that registered this context in its thread specific list.
#define __TBB_ISOLATION_ARG(arg1, isolation)
static bool is_proxy(const task &t)
True if t is a task_proxy.
bool master_outermost_level() const
True if the scheduler is on the outermost dispatch level in a master thread.
task_group_context * my_parent
Pointer to the context of the parent cancellation group. NULL for isolated contexts.
task is in ready pool, or is going to be put there, or was just taken off.
bool is_worker() const
True if running on a worker thread, false otherwise.
task * steal_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempts to steal a task from a randomly chosen thread/scheduler.
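Victim selection uses the scheduler's random generator (my_random) to pick an arena slot other than its own and then probes that slot's pool. A minimal sketch of the selection step (steal_once and its callbacks are illustrative, not TBB's API):

#include <cstddef>

template <typename Random, typename TryStealFromSlot>
auto steal_once(std::size_t my_slot, std::size_t num_slots,
                Random& random, TryStealFromSlot try_steal_from)
    -> decltype(try_steal_from(std::size_t())) {
    if (num_slots < 2)
        return nullptr;                // nobody else to steal from
    std::size_t victim = random() % (num_slots - 1);
    if (victim >= my_slot)
        ++victim;                      // remap so we never pick our own slot
    return try_steal_from(victim);     // returns a task pointer, or nullptr on failure
}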
task is running, and will be destroyed after method execute() completes.
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
void commit_relocated_tasks(size_t new_tail)
Makes relocated tasks visible to thieves and releases the local task pool.
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
void free_task(task &t)
Put task on free list.