31 class task_group_context;
35 #if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3) 36 #define __TBB_TASK_BASE_ACCESS public 38 #define __TBB_TASK_BASE_ACCESS private 58 namespace interface5 {
71 static void spawn(
task& t );
105 virtual void spawn_root_and_wait(
task& first,
task*& next ) = 0;
112 virtual void enqueue(
task& t,
void* reserved ) = 0;
122 #if __TBB_TASK_ISOLATION 128 #if __TBB_TASK_GROUP_CONTEXT 163 #if __TBB_PREVIEW_CRITICAL_TASKS 189 friend class internal::scheduler;
190 friend class internal::allocate_root_proxy;
191 friend class internal::allocate_child_proxy;
192 friend class internal::allocate_continuation_proxy;
193 friend class internal::allocate_additional_child_of_proxy;
194 #if __TBB_PREVIEW_CRITICAL_TASKS 199 #if __TBB_TASK_ISOLATION 200 isolation_tag isolation;
203 intptr_t reserved_space_for_task_isolation_tag;
206 #if __TBB_TASK_GROUP_CONTEXT 222 #if __TBB_TASK_PRIORITY 230 #if __TBB_TASK_PRIORITY 280 #if __TBB_TASK_GROUP_CONTEXT 282 #if __TBB_TASK_PRIORITY 285 #if __TBB_PREVIEW_CRITICAL_TASKS 299 #if TBB_USE_CAPTURED_EXCEPTION 334 friend class internal::generic_scheduler;
338 #if TBB_USE_CAPTURED_EXCEPTION 346 version_mask = 0xFFFF,
347 traits_mask = 0xFFFFul << traits_offset
357 exact_exception = 0x0001ul << traits_offset,
359 fp_settings = 0x0002ul << traits_offset,
361 concurrent_wait = 0x0004ul << traits_offset,
362 #if TBB_USE_CAPTURED_EXCEPTION 365 default_traits = exact_exception
371 may_have_children = 1,
399 - 2 *
sizeof(uintptr_t)-
sizeof(
void*) -
sizeof(internal::context_list_node_t)
402 -
sizeof(internal::cpu_ctl_env_space)
431 #if __TBB_TASK_PRIORITY 432 intptr_t my_priority;
442 #if __TBB_TASK_PRIORITY 479 uintptr_t t = default_traits )
480 : my_kind(relation_with_parent)
481 , my_version_and_traits(3 | t)
482 , my_name(internal::CUSTOM_CTX)
490 , my_version_and_traits(3 | default_traits)
543 #if __TBB_TASK_PRIORITY 552 uintptr_t
traits()
const {
return my_version_and_traits & traits_mask; }
561 friend class internal::allocate_root_with_context_proxy;
569 template <
typename T>
573 void bind_to ( internal::generic_scheduler *local_sched );
576 void register_with ( internal::generic_scheduler *local_sched );
599 task() {prefix().extra_state=1;}
606 virtual task* execute() = 0;
622 #if __TBB_RECYCLE_TO_ENQUEUE 634 return internal::allocate_root_proxy();
637 #if __TBB_TASK_GROUP_CONTEXT 638 static internal::allocate_root_with_context_proxy allocate_root(
task_group_context& ctx ) {
640 return internal::allocate_root_with_context_proxy(ctx);
647 return *
reinterpret_cast<internal::allocate_continuation_proxy*
>(
this);
652 return *
reinterpret_cast<internal::allocate_child_proxy*
>(
this);
656 using task_base::allocate_additional_child_of;
658 #if __TBB_DEPRECATED_TASK_INTERFACE 666 using task_base::destroy;
682 __TBB_ASSERT( prefix().state==executing,
"execute not running?" );
683 prefix().state = allocated;
690 __TBB_ASSERT( prefix().state==executing,
"execute not running?" );
691 prefix().state = recycle;
696 internal::task_prefix&
p = prefix();
697 __TBB_ASSERT( prefix().state==executing||prefix().state==allocated,
"execute not running, or already recycled" );
698 __TBB_ASSERT( prefix().ref_count==0,
"no child tasks allowed when recycled as a child" );
703 p.parent = &new_parent;
704 #if __TBB_TASK_GROUP_CONTEXT 712 __TBB_ASSERT( prefix().state==executing,
"execute not running, or already recycled" );
713 __TBB_ASSERT( prefix().ref_count==0,
"no child tasks allowed when recycled for reexecution" );
714 prefix().state = reexecute;
717 #if __TBB_RECYCLE_TO_ENQUEUE 720 void recycle_to_enqueue() {
721 __TBB_ASSERT( prefix().state==executing,
"execute not running, or already recycled" );
722 prefix().state = to_enqueue;
732 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT 733 internal_set_ref_count(count);
735 prefix().ref_count =
count;
750 __TBB_ASSERT( k>=0,
"task's reference count underflowed" );
759 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT 760 return int(internal_decrement_ref_count());
767 using task_base::spawn;
771 prefix().owner->wait_for_all( *
this, &child );
779 root.
prefix().owner->spawn_root_and_wait( root, root.
prefix().next );
785 static void spawn_root_and_wait(
task_list& root_list );
790 prefix().owner->wait_for_all( *
this, NULL );
794 #if __TBB_TASK_PRIORITY 807 t.
prefix().owner->enqueue( t, NULL );
810 #if __TBB_TASK_PRIORITY 813 #if __TBB_PREVIEW_CRITICAL_TASKS 819 t.
prefix().owner->enqueue( t, (
void*)p );
839 #if __TBB_TASK_GROUP_CONTEXT 840 __TBB_ASSERT(!p || prefix().context == p->
prefix().context,
"The tasks must be in the same context");
845 #if __TBB_TASK_GROUP_CONTEXT 856 return (prefix().extra_state & 0x80)!=0;
870 __TBB_ASSERT( ref_count_==
int(ref_count_),
"integer overflow error");
872 return int(prefix().ref_count);
890 affinity_id
affinity()
const {
return prefix().affinity;}
899 #if __TBB_TASK_GROUP_CONTEXT 918 bool is_cancelled ()
const {
return prefix().context->is_group_execution_cancelled(); }
920 bool is_cancelled ()
const {
return false; }
923 #if __TBB_TASK_PRIORITY 924 void set_group_priority (
priority_t p ) { prefix().context->set_priority(p); }
935 friend class internal::scheduler;
936 friend class internal::allocate_root_proxy;
937 #if __TBB_TASK_GROUP_CONTEXT 938 friend class internal::allocate_root_with_context_proxy;
940 friend class internal::allocate_continuation_proxy;
941 friend class internal::allocate_child_proxy;
942 friend class internal::allocate_additional_child_of_proxy;
947 return reinterpret_cast<internal::task_prefix*
>(
const_cast<task*
>(
this))[-1];
949 #if __TBB_PREVIEW_CRITICAL_TASKS 955 #if __TBB_PREVIEW_CRITICAL_TASKS 974 #if __TBB_ALLOW_MUTABLE_FUNCTORS 985 #if __TBB_CPP11_RVALUE_REF_PRESENT 1013 task.
prefix().next = NULL;
1015 next_ptr = &task.
prefix().next;
1031 __TBB_ASSERT( !empty(),
"attempt to pop item from empty task_list" );
1032 task* result =
first;
1033 first = result->
prefix().next;
1034 if( !first ) next_ptr = &
first;
1051 t->prefix().owner->spawn( *t, *list.
next_ptr );
1058 t->prefix().owner->spawn_root_and_wait( *t, *root_list.
next_ptr );
1073 #if __TBB_TASK_GROUP_CONTEXT 1075 return &
p.allocate(bytes);
1079 p.free( *static_cast<tbb::task*>(task) );
1084 return &
p.allocate(bytes);
1088 p.free( *static_cast<tbb::task*>(task) );
1092 return &
p.allocate(bytes);
1096 p.free( *static_cast<tbb::task*>(task) );
1100 return &
p.allocate(bytes);
1104 p.free( *static_cast<tbb::task*>(task) );
function_task(const F &f)
internal::context_list_node_t my_node
Used to form the thread specific list of contexts without additional memory allocation.
static task &__TBB_EXPORTED_FUNC allocate(size_t size)
void set_ref_count(int count)
Set reference count.
task * execute() __TBB_override
Should be overridden by derived classes.
static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of(task &t)
Like allocate_child, except that task's parent becomes "t", not this.
allocate_root_with_context_proxy(task_group_context &ctx)
int depth
Obsolete. Used to be scheduling depth before TBB 2.2.
void recycle_as_continuation()
Change this to be a continuation of its former self.
void set_parent(task *p)
sets parent task pointer to specified value
static const int priority_critical
task * next_offloaded
Pointer to the next offloaded lower priority task.
internal::allocate_continuation_proxy & allocate_continuation()
Returns proxy for overloaded new that allocates a continuation task of *this.
task_group_context * context()
This method is deprecated and will be removed in the future.
tbb::task * next
"next" field for list of task
static void spawn(task &t)
Schedule task for execution when a worker becomes available.
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
static const int priority_stride_v4
#define __TBB_TASK_PRIORITY
Class delimiting the scope of task scheduler activity.
int add_ref_count(int count)
Atomically adds to reference count and returns its new value.
intptr_t reference_count
A reference count.
internal::affinity_id affinity_id
An id as used for specifying affinity.
task()
Default constructor.
task object is freshly allocated or recycled.
Base class for methods that became static in TBB 3.0.
#define __TBB_EXPORTED_METHOD
void make_critical(task &t)
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
uintptr_t _my_kind_aligner
#define __TBB_FetchAndDecrementWrelease(P)
priority_t group_priority() const
Retrieves current priority of the task group this task belongs to.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t count
#define __TBB_EXPORTED_FUNC
allocate_additional_child_of_proxy(task &parent_)
internal::tbb_exception_ptr exception_container_type
auto first(Container &c) -> decltype(begin(c))
void const char const char int ITT_FORMAT __itt_group_sync x void const char * name
task is running, and will be destroyed after method execute() completes.
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
void set_affinity(affinity_id id)
Set affinity for this task.
int decrement_ref_count()
Atomically decrement reference count and returns its new value.
task_group_context * my_parent
Pointer to the context of the parent cancellation group. NULL for isolated contexts.
void push_back(task &task)
Push task onto back of list.
internal::allocate_child_proxy & allocate_child()
Returns proxy for overloaded new that allocates a child task of *this.
unsigned short affinity_id
An id as used for specifying affinity.
task * self
No longer used, but retained for binary layout compatibility. Always NULL.
bool cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups...
bool is_cancelled() const
Returns true if the context has received cancellation request.
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
void recycle_to_reexecute()
Schedule this for reexecution after current execute() returns.
task_group_context * group()
Pointer to the task group descriptor.
void clear()
Clear the list.
affinity_id affinity() const
Current affinity of this task.
internal::string_index my_name
Description of the algorithm for scheduler-based instrumentation.
scheduler * owner
Obsolete. The scheduler that owns the task.
internal::generic_scheduler * my_owner
Scheduler instance that registered this context in its thread specific list.
void recycle_as_safe_continuation()
Recommended to use, safe variant of recycle_as_continuation.
#define __TBB_FetchAndIncrementWacquire(P)
static void spawn_root_and_wait(task &root)
Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
bool is_stolen_task() const
True if task was stolen from the task pool of another thread.
tbb::task & task()
The task corresponding to this task_prefix.
Memory prefix to a task object.
internal::cpu_ctl_env_space my_cpu_ctl_env
Space for platform-specific FPU settings.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
task * execute() __TBB_override
Should be overridden by derived classes.
void move(tbb_thread &t1, tbb_thread &t2)
state_type state() const
Current execution state.
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
task that does nothing. Useful for synchronization.
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
static void __TBB_EXPORTED_FUNC free(task &)
unsigned char state
A task::state_type, stored as a byte for compactness.
Used to form groups of tasks.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
static void enqueue(task &t)
Enqueue task for starvation-resistant execution.
task_group_context(internal::string_index name)
task is in ready pool, or is going to be put there, or was just taken off.
task_list()
Construct empty list.
void __TBB_EXPORTED_METHOD free(task &) const
bool empty() const
True if list is empty; false otherwise.
task object is on free list, or is going to be put there, or was just taken off.
context_list_node_t * my_prev
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task * task
task_group_context & my_context
tbb::task * parent
The task whose reference count includes me.
bool is_critical(task &t)
Base class for types that should not be copied or assigned.
int ref_count() const
The internal reference count.
#define __TBB_TASK_BASE_ACCESS
void spawn_and_wait_for_all(task &child)
Similar to spawn followed by wait_for_all, but more efficient.
intptr_t isolation_tag
A tag for task isolation.
Base class for user-defined tasks.
void call_itt_notify(notify_type, void *)
Work stealing task scheduler.
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
void wait_for_all()
Wait for reference count to become one, and set reference count to zero.
version_traits_word_layout
const isolation_tag no_isolation
Exception container that preserves the exact copy of the original exception.
Interface to be implemented by all exceptions TBB recognizes and propagates across the threads...
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
Base class for types that should not be assigned.
void const char const char int ITT_FORMAT __itt_group_sync p
state_type
Enumeration of task states that the scheduler considers.
struct ___itt_caller * __itt_caller
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id id
task_group_context(kind_type relation_with_parent=bound, uintptr_t t=default_traits)
Default & binding constructor.
~task_list()
Destroys the list, but does not destroy the task objects.
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
version_tag_v3 version_tag
uintptr_t traits() const
Returns the context's trait.
virtual ~task()
Destructor.
void recycle_as_child_of(task &new_parent)
Change this to be a child of new_parent.
scheduler * origin
The scheduler that allocated the task, or NULL if the task is big.
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
__itt_caller itt_caller
Used to set and maintain stack stitching point for Intel Performance Tools.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
void increment_ref_count()
Atomically increment reference count.