Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
task.h
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_task_H
18 #define __TBB_task_H
19 
20 #include "tbb_stddef.h"
21 #include "tbb_machine.h"
22 #include "tbb_profiling.h"
23 #include <climits>
24 
25 typedef struct ___itt_caller *__itt_caller;
26 
27 namespace tbb {
28 
29 class task;
30 class task_list;
31 class task_group_context;
32 
33 // MSVC does not allow taking the address of a member that was defined
34 // privately in task_base and made public in class task via a using declaration.
35 #if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
36 #define __TBB_TASK_BASE_ACCESS public
37 #else
38 #define __TBB_TASK_BASE_ACCESS private
39 #endif
40 
41 namespace internal { //< @cond INTERNAL
42 
45  task* self;
47  public:
48  explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {
50  }
51  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
52  void __TBB_EXPORTED_METHOD free( task& ) const;
53  };
54 
55  struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; };
56 } //< namespace internal @endcond
57 
58 namespace interface5 {
59  namespace internal {
61 
68  friend class tbb::task;
69 
71  static void spawn( task& t );
72 
74  static void spawn( task_list& list );
75 
77 
81  }
82 
84 
88  static void __TBB_EXPORTED_FUNC destroy( task& victim );
89  };
90  } // internal
91 } // interface5
92 
94 namespace internal {
95 
96  class scheduler: no_copy {
97  public:
99  virtual void spawn( task& first, task*& next ) = 0;
100 
102  virtual void wait_for_all( task& parent, task* child ) = 0;
103 
105  virtual void spawn_root_and_wait( task& first, task*& next ) = 0;
106 
108  // Have to have it just to shut up overzealous compilation warnings
109  virtual ~scheduler() = 0;
110 
112  virtual void enqueue( task& t, void* reserved ) = 0;
113  };
114 
116 
117  typedef intptr_t reference_count;
118 
120  typedef unsigned short affinity_id;
121 
122 #if __TBB_TASK_ISOLATION
123  typedef intptr_t isolation_tag;
125  const isolation_tag no_isolation = 0;
126 #endif /* __TBB_TASK_ISOLATION */
127 
128 #if __TBB_TASK_GROUP_CONTEXT
129  class generic_scheduler;
130 
133  *my_next;
134  };
135 
138  public:
140  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
141  void __TBB_EXPORTED_METHOD free( task& ) const;
142  };
143 #endif /* __TBB_TASK_GROUP_CONTEXT */
144 
146  public:
147  static task& __TBB_EXPORTED_FUNC allocate( size_t size );
148  static void __TBB_EXPORTED_FUNC free( task& );
149  };
150 
152  public:
153  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
154  void __TBB_EXPORTED_METHOD free( task& ) const;
155  };
156 
158  public:
159  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
160  void __TBB_EXPORTED_METHOD free( task& ) const;
161  };
162 
163 #if __TBB_PREVIEW_CRITICAL_TASKS
164  // TODO: move to class methods when critical task API becomes public
165  void make_critical( task& t );
166  bool is_critical( task& t );
167 #endif
168 
170 
184  class task_prefix {
185  private:
186  friend class tbb::task;
188  friend class tbb::task_list;
189  friend class internal::scheduler;
190  friend class internal::allocate_root_proxy;
191  friend class internal::allocate_child_proxy;
192  friend class internal::allocate_continuation_proxy;
193  friend class internal::allocate_additional_child_of_proxy;
194 #if __TBB_PREVIEW_CRITICAL_TASKS
195  friend void make_critical( task& );
196  friend bool is_critical( task& );
197 #endif
198 
199 #if __TBB_TASK_ISOLATION
200  isolation_tag isolation;
202 #else
203  intptr_t reserved_space_for_task_isolation_tag;
204 #endif /* __TBB_TASK_ISOLATION */
205 
206 #if __TBB_TASK_GROUP_CONTEXT
207 
212 #endif /* __TBB_TASK_GROUP_CONTEXT */
213 
215 
221 
222 #if __TBB_TASK_PRIORITY
223  union {
224 #endif /* __TBB_TASK_PRIORITY */
225 
229 
230 #if __TBB_TASK_PRIORITY
231 
234  };
235 #endif /* __TBB_TASK_PRIORITY */
236 
238 
242 
244 
248  __TBB_atomic reference_count ref_count;
249 
251 
253  int depth;
254 
256 
257  unsigned char state;
258 
260 
266  unsigned char extra_state;
267 
268  affinity_id affinity;
269 
272 
274  tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
275  };
276 
277 } // namespace internal
279 
280 #if __TBB_TASK_GROUP_CONTEXT
281 
282 #if __TBB_TASK_PRIORITY
283 namespace internal {
284  static const int priority_stride_v4 = INT_MAX / 4;
285 #if __TBB_PREVIEW_CRITICAL_TASKS
286  // TODO: move into priority_t enum when critical tasks become public feature
287  static const int priority_critical = priority_stride_v4 * 3 + priority_stride_v4 / 3 * 2;
288 #endif
289 }
290 
294  priority_high = priority_normal + internal::priority_stride_v4
295 };
296 
297 #endif /* __TBB_TASK_PRIORITY */
298 
299 #if TBB_USE_CAPTURED_EXCEPTION
300  class tbb_exception;
301 #else
302  namespace internal {
303  class tbb_exception_ptr;
304  }
305 #endif /* !TBB_USE_CAPTURED_EXCEPTION */
306 
307 class task_scheduler_init;
308 namespace interface7 { class task_arena; }
310 
312 
332 class task_group_context : internal::no_copy {
333 private:
334  friend class internal::generic_scheduler;
335  friend class task_scheduler_init;
336  friend class task_arena;
337 
338 #if TBB_USE_CAPTURED_EXCEPTION
340 #else
341  typedef internal::tbb_exception_ptr exception_container_type;
342 #endif
343 
345  traits_offset = 16,
346  version_mask = 0xFFFF,
347  traits_mask = 0xFFFFul << traits_offset
348  };
349 
350 public:
351  enum kind_type {
353  bound
354  };
355 
356  enum traits_type {
357  exact_exception = 0x0001ul << traits_offset,
358 #if __TBB_FP_CONTEXT
359  fp_settings = 0x0002ul << traits_offset,
360 #endif
361  concurrent_wait = 0x0004ul << traits_offset,
362 #if TBB_USE_CAPTURED_EXCEPTION
363  default_traits = 0
364 #else
365  default_traits = exact_exception
366 #endif /* !TBB_USE_CAPTURED_EXCEPTION */
367  };
368 
369 private:
370  enum state {
371  may_have_children = 1,
372  // the following enumerations must be the last, new 2^x values must go above
373  next_state_value, low_unused_state_bit = (next_state_value-1)*2
374  };
375 
376  union {
378  // TODO: describe asynchronous use, and whether any memory semantics are needed
380  uintptr_t _my_kind_aligner;
381  };
382 
385 
387 
389  internal::context_list_node_t my_node;
390 
393 
395 
398  char _leading_padding[internal::NFS_MaxLineSize
399  - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
400  - sizeof(__itt_caller)
401 #if __TBB_FP_CONTEXT
402  - sizeof(internal::cpu_ctl_env_space)
403 #endif
404  ];
405 
406 #if __TBB_FP_CONTEXT
407 
410  internal::cpu_ctl_env_space my_cpu_ctl_env;
411 #endif
412 
415 
417 
421 
423  exception_container_type *my_exception;
424 
426  internal::generic_scheduler *my_owner;
427 
429  uintptr_t my_state;
430 
431 #if __TBB_TASK_PRIORITY
432  intptr_t my_priority;
434 #endif /* __TBB_TASK_PRIORITY */
435 
438 
440 
441  char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)
442 #if __TBB_TASK_PRIORITY
443  - sizeof(intptr_t)
444 #endif /* __TBB_TASK_PRIORITY */
445  - sizeof(internal::string_index)
446  ];
447 
448 public:
450 
478  task_group_context ( kind_type relation_with_parent = bound,
479  uintptr_t t = default_traits )
480  : my_kind(relation_with_parent)
481  , my_version_and_traits(3 | t)
482  , my_name(internal::CUSTOM_CTX)
483  {
484  init();
485  }
486 
487  // Custom constructor for instrumentation of tbb algorithm
489  : my_kind(bound)
490  , my_version_and_traits(3 | default_traits)
491  , my_name(name)
492  {
493  init();
494  }
495 
496  // Do not introduce standalone unbind method since it will break state propagation assumptions
498 
500 
507  void __TBB_EXPORTED_METHOD reset ();
508 
510 
517  bool __TBB_EXPORTED_METHOD cancel_group_execution ();
518 
520  bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;
521 
523 
529  void __TBB_EXPORTED_METHOD register_pending_exception ();
530 
531 #if __TBB_FP_CONTEXT
532 
540  void __TBB_EXPORTED_METHOD capture_fp_settings ();
541 #endif
542 
543 #if __TBB_TASK_PRIORITY
544  void set_priority ( priority_t );
546 
548  priority_t priority () const;
549 #endif /* __TBB_TASK_PRIORITY */
550 
552  uintptr_t traits() const { return my_version_and_traits & traits_mask; }
553 
554 protected:
556 
557  void __TBB_EXPORTED_METHOD init ();
558 
559 private:
560  friend class task;
561  friend class internal::allocate_root_with_context_proxy;
562 
563  static const kind_type binding_required = bound;
564  static const kind_type binding_completed = kind_type(bound+1);
565  static const kind_type detached = kind_type(binding_completed+1);
566  static const kind_type dying = kind_type(detached+1);
567 
569  template <typename T>
570  void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );
571 
573  void bind_to ( internal::generic_scheduler *local_sched );
574 
576  void register_with ( internal::generic_scheduler *local_sched );
577 
578 #if __TBB_FP_CONTEXT
579  // TODO: Consider adding #else stub in order to omit #if sections in other code
581  void copy_fp_settings( const task_group_context &src );
582 #endif /* __TBB_FP_CONTEXT */
583 }; // class task_group_context
584 
585 #endif /* __TBB_TASK_GROUP_CONTEXT */
586 
588 
590 
592  void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );
593 
595  internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();
596 
597 protected:
599  task() {prefix().extra_state=1;}
600 
601 public:
603  virtual ~task() {}
604 
606  virtual task* execute() = 0;
607 
609  enum state_type {
621  recycle
622 #if __TBB_RECYCLE_TO_ENQUEUE
623  ,to_enqueue
625 #endif
626  };
627 
628  //------------------------------------------------------------------------
629  // Allocating tasks
630  //------------------------------------------------------------------------
631 
633  static internal::allocate_root_proxy allocate_root() {
634  return internal::allocate_root_proxy();
635  }
636 
637 #if __TBB_TASK_GROUP_CONTEXT
638  static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
640  return internal::allocate_root_with_context_proxy(ctx);
641  }
642 #endif /* __TBB_TASK_GROUP_CONTEXT */
643 
645 
646  internal::allocate_continuation_proxy& allocate_continuation() {
647  return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
648  }
649 
651  internal::allocate_child_proxy& allocate_child() {
652  return *reinterpret_cast<internal::allocate_child_proxy*>(this);
653  }
654 
656  using task_base::allocate_additional_child_of;
657 
658 #if __TBB_DEPRECATED_TASK_INTERFACE
659 
664  void __TBB_EXPORTED_METHOD destroy( task& t );
665 #else /* !__TBB_DEPRECATED_TASK_INTERFACE */
666  using task_base::destroy;
668 #endif /* !__TBB_DEPRECATED_TASK_INTERFACE */
669 
670  //------------------------------------------------------------------------
671  // Recycling of tasks
672  //------------------------------------------------------------------------
673 
675 
682  __TBB_ASSERT( prefix().state==executing, "execute not running?" );
683  prefix().state = allocated;
684  }
685 
687 
690  __TBB_ASSERT( prefix().state==executing, "execute not running?" );
691  prefix().state = recycle;
692  }
693 
695  void recycle_as_child_of( task& new_parent ) {
696  internal::task_prefix& p = prefix();
697  __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
698  __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
699  __TBB_ASSERT( p.parent==NULL, "parent must be null" );
700  __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
701  __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
702  p.state = allocated;
703  p.parent = &new_parent;
704 #if __TBB_TASK_GROUP_CONTEXT
705  p.context = new_parent.prefix().context;
706 #endif /* __TBB_TASK_GROUP_CONTEXT */
707  }
708 
710 
712  __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
713  __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
714  prefix().state = reexecute;
715  }
716 
717 #if __TBB_RECYCLE_TO_ENQUEUE
718 
720  void recycle_to_enqueue() {
721  __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
722  prefix().state = to_enqueue;
723  }
724 #endif /* __TBB_RECYCLE_TO_ENQUEUE */
725 
726  //------------------------------------------------------------------------
727  // Spawning and blocking
728  //------------------------------------------------------------------------
729 
731  void set_ref_count( int count ) {
732 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
733  internal_set_ref_count(count);
734 #else
735  prefix().ref_count = count;
736 #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
737  }
738 
740 
742  __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
743  }
744 
746 
747  int add_ref_count( int count ) {
748  internal::call_itt_notify( internal::releasing, &prefix().ref_count );
749  internal::reference_count k = count+__TBB_FetchAndAddW( &prefix().ref_count, count );
750  __TBB_ASSERT( k>=0, "task's reference count underflowed" );
751  if( k==0 )
752  internal::call_itt_notify( internal::acquired, &prefix().ref_count );
753  return int(k);
754  }
755 
757 
759 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
760  return int(internal_decrement_ref_count());
761 #else
762  return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
763 #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
764  }
765 
767  using task_base::spawn;
768 
770  void spawn_and_wait_for_all( task& child ) {
771  prefix().owner->wait_for_all( *this, &child );
772  }
773 
775  void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );
776 
778  static void spawn_root_and_wait( task& root ) {
779  root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
780  }
781 
783 
785  static void spawn_root_and_wait( task_list& root_list );
786 
788 
789  void wait_for_all() {
790  prefix().owner->wait_for_all( *this, NULL );
791  }
792 
794 #if __TBB_TASK_PRIORITY
795 
805 #endif /* __TBB_TASK_PRIORITY */
806  static void enqueue( task& t ) {
807  t.prefix().owner->enqueue( t, NULL );
808  }
809 
810 #if __TBB_TASK_PRIORITY
811  static void enqueue( task& t, priority_t p ) {
813 #if __TBB_PREVIEW_CRITICAL_TASKS
815  || p == internal::priority_critical, "Invalid priority level value");
816 #else
817  __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
818 #endif
819  t.prefix().owner->enqueue( t, (void*)p );
820  }
821 #endif /* __TBB_TASK_PRIORITY */
822 
825  inline static void enqueue( task& t, task_arena& arena
827  , priority_t p = priority_t(0)
828 #endif
829  );
830 
832  static task& __TBB_EXPORTED_FUNC self();
833 
835  task* parent() const {return prefix().parent;}
836 
838  void set_parent(task* p) {
839 #if __TBB_TASK_GROUP_CONTEXT
840  __TBB_ASSERT(!p || prefix().context == p->prefix().context, "The tasks must be in the same context");
841 #endif
842  prefix().parent = p;
843  }
844 
845 #if __TBB_TASK_GROUP_CONTEXT
846 
848  task_group_context* context() {return prefix().context;}
849 
851  task_group_context* group () { return prefix().context; }
852 #endif /* __TBB_TASK_GROUP_CONTEXT */
853 
855  bool is_stolen_task() const {
856  return (prefix().extra_state & 0x80)!=0;
857  }
858 
859  //------------------------------------------------------------------------
860  // Debugging
861  //------------------------------------------------------------------------
862 
864  state_type state() const {return state_type(prefix().state);}
865 
867  int ref_count() const {
868 #if TBB_USE_ASSERT
869  internal::reference_count ref_count_ = prefix().ref_count;
870  __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
871 #endif
872  return int(prefix().ref_count);
873  }
874 
876  bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;
877 
878  //------------------------------------------------------------------------
879  // Affinity
880  //------------------------------------------------------------------------
881 
883 
885 
887  void set_affinity( affinity_id id ) {prefix().affinity = id;}
888 
890  affinity_id affinity() const {return prefix().affinity;}
891 
893 
897  virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );
898 
899 #if __TBB_TASK_GROUP_CONTEXT
900 
911  void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );
912 
914 
915  bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }
916 
918  bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
919 #else
920  bool is_cancelled () const { return false; }
921 #endif /* __TBB_TASK_GROUP_CONTEXT */
922 
923 #if __TBB_TASK_PRIORITY
924  void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); }
926 
928  priority_t group_priority () const { return prefix().context->priority(); }
929 
930 #endif /* __TBB_TASK_PRIORITY */
931 
932 private:
934  friend class task_list;
935  friend class internal::scheduler;
936  friend class internal::allocate_root_proxy;
937 #if __TBB_TASK_GROUP_CONTEXT
938  friend class internal::allocate_root_with_context_proxy;
939 #endif /* __TBB_TASK_GROUP_CONTEXT */
940  friend class internal::allocate_continuation_proxy;
941  friend class internal::allocate_child_proxy;
942  friend class internal::allocate_additional_child_of_proxy;
943 
945 
946  internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
947  return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
948  }
949 #if __TBB_PREVIEW_CRITICAL_TASKS
950  friend void internal::make_critical( task& );
951  friend bool internal::is_critical( task& );
952 #endif
953 }; // class task
954 
955 #if __TBB_PREVIEW_CRITICAL_TASKS
956 namespace internal {
957 inline void make_critical( task& t ) { t.prefix().extra_state |= 0x8; }
958 inline bool is_critical( task& t ) { return bool((t.prefix().extra_state & 0x8) != 0); }
959 } // namespace internal
960 #endif /* __TBB_PREVIEW_CRITICAL_TASKS */
961 
963 
964 class empty_task: public task {
966  return NULL;
967  }
968 };
969 
971 namespace internal {
972  template<typename F>
973  class function_task : public task {
974 #if __TBB_ALLOW_MUTABLE_FUNCTORS
975  F my_func;
976 #else
977  const F my_func;
978 #endif
980  my_func();
981  return NULL;
982  }
983  public:
984  function_task( const F& f ) : my_func(f) {}
985 #if __TBB_CPP11_RVALUE_REF_PRESENT
986  function_task( F&& f ) : my_func( std::move(f) ) {}
987 #endif
988  };
989 } // namespace internal
991 
993 
995 class task_list: internal::no_copy {
996 private:
999  friend class task;
1001 public:
1003  task_list() : first(NULL), next_ptr(&first) {}
1004 
1007 
1009  bool empty() const {return !first;}
1010 
1012  void push_back( task& task ) {
1013  task.prefix().next = NULL;
1014  *next_ptr = &task;
1015  next_ptr = &task.prefix().next;
1016  }
1017 #if __TBB_TODO
1018  // TODO: add this method and implement&document the local execution ordering. See more in generic_scheduler::local_spawn
1020  void push_front( task& task ) {
1021  if( empty() ) {
1022  push_back(task);
1023  } else {
1024  task.prefix().next = first;
1025  first = &task;
1026  }
1027  }
1028 #endif
1029  task& pop_front() {
1031  __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
1032  task* result = first;
1033  first = result->prefix().next;
1034  if( !first ) next_ptr = &first;
1035  return *result;
1036  }
1037 
1039  void clear() {
1040  first=NULL;
1041  next_ptr=&first;
1042  }
1043 };
1044 
1046  t.prefix().owner->spawn( t, t.prefix().next );
1047 }
1048 
1050  if( task* t = list.first ) {
1051  t->prefix().owner->spawn( *t, *list.next_ptr );
1052  list.clear();
1053  }
1054 }
1055 
1056 inline void task::spawn_root_and_wait( task_list& root_list ) {
1057  if( task* t = root_list.first ) {
1058  t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
1059  root_list.clear();
1060  }
1061 }
1062 
1063 } // namespace tbb
1064 
1065 inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
1067 }
1068 
1069 inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
1070  tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
1071 }
1072 
1073 #if __TBB_TASK_GROUP_CONTEXT
1074 inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
1075  return &p.allocate(bytes);
1076 }
1077 
1078 inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
1079  p.free( *static_cast<tbb::task*>(task) );
1080 }
1081 #endif /* __TBB_TASK_GROUP_CONTEXT */
1082 
1083 inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
1084  return &p.allocate(bytes);
1085 }
1086 
1087 inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
1088  p.free( *static_cast<tbb::task*>(task) );
1089 }
1090 
1091 inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
1092  return &p.allocate(bytes);
1093 }
1094 
1095 inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
1096  p.free( *static_cast<tbb::task*>(task) );
1097 }
1098 
1099 inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
1100  return &p.allocate(bytes);
1101 }
1102 
1103 inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
1104  p.free( *static_cast<tbb::task*>(task) );
1105 }
1106 
1107 #endif /* __TBB_task_H */
function_task(const F &f)
Definition: task.h:984
internal::context_list_node_t my_node
Used to form the thread specific list of contexts without additional memory allocation.
Definition: task.h:389
static task &__TBB_EXPORTED_FUNC allocate(size_t size)
Definition: task.cpp:35
void set_ref_count(int count)
Set reference count.
Definition: task.h:731
task * execute() __TBB_override
Should be overridden by derived classes.
Definition: task.h:979
static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of(task &t)
Like allocate_child, except that the task's parent becomes "t", not this.
Definition: task.h:79
allocate_root_with_context_proxy(task_group_context &ctx)
Definition: task.h:139
int depth
Obsolete. Used to be scheduling depth before TBB 2.2.
Definition: task.h:253
affinity_id affinity
Definition: task.h:268
void recycle_as_continuation()
Change this to be a continuation of its former self.
Definition: task.h:681
void set_parent(task *p)
sets parent task pointer to specified value
Definition: task.h:838
static const int priority_critical
Definition: task.h:287
task * next_offloaded
Pointer to the next offloaded lower priority task.
Definition: task.h:233
internal::allocate_continuation_proxy & allocate_continuation()
Returns proxy for overloaded new that allocates a continuation task of *this.
Definition: task.h:646
#define __TBB_override
Definition: tbb_stddef.h:240
task_group_context * context()
This method is deprecated and will be removed in the future.
Definition: task.h:848
tbb::task * next
"next" field for list of task
Definition: task.h:271
static void spawn(task &t)
Schedule task for execution when a worker becomes available.
Definition: task.h:1045
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
Definition: task.h:946
static const int priority_stride_v4
Definition: task.h:284
#define __TBB_TASK_PRIORITY
Definition: tbb_config.h:575
Class delimiting the scope of task scheduler activity.
int add_ref_count(int count)
Atomically adds to reference count and returns its new value.
Definition: task.h:747
intptr_t reference_count
A reference count.
Definition: task.h:117
internal::affinity_id affinity_id
An id as used for specifying affinity.
Definition: task.h:884
task()
Default constructor.
Definition: task.h:599
task object is freshly allocated or recycled.
Definition: task.h:617
Base class for methods that became static in TBB 3.0.
Definition: task.h:66
#define __TBB_EXPORTED_METHOD
Definition: tbb_stddef.h:98
void make_critical(task &t)
Definition: task.h:957
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
Definition: task.h:633
uintptr_t _my_kind_aligner
Definition: task.h:380
#define __TBB_FetchAndDecrementWrelease(P)
Definition: tbb_machine.h:314
priority_t group_priority() const
Retrieves current priority of the task group this task belongs to.
Definition: task.h:928
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t count
#define __TBB_EXPORTED_FUNC
internal::tbb_exception_ptr exception_container_type
Definition: task.h:341
auto first(Container &c) -> decltype(begin(c))
The graph class.
void const char const char int ITT_FORMAT __itt_group_sync x void const char * name
task is running, and will be destroyed after method execute() completes.
Definition: task.h:611
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
Definition: task.h:423
void set_affinity(affinity_id id)
Set affinity for this task.
Definition: task.h:887
int decrement_ref_count()
Atomically decrement reference count and returns its new value.
Definition: task.h:758
task_group_context * my_parent
Pointer to the context of the parent cancellation group. NULL for isolated contexts.
Definition: task.h:384
void push_back(task &task)
Push task onto back of list.
Definition: task.h:1012
internal::allocate_child_proxy & allocate_child()
Returns proxy for overloaded new that allocates a child task of *this.
Definition: task.h:651
unsigned short affinity_id
An id as used for specifying affinity.
Definition: task.h:120
task * self
No longer used, but retained for binary layout compatibility. Always NULL.
Definition: task.h:45
bool cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
Definition: task.h:915
bool is_cancelled() const
Returns true if the context has received cancellation request.
Definition: task.h:918
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
Definition: tbb_stddef.h:377
void recycle_to_reexecute()
Schedule this for reexecution after current execute() returns.
Definition: task.h:711
task_group_context * group()
Pointer to the task group descriptor.
Definition: task.h:851
void clear()
Clear the list.
Definition: task.h:1039
affinity_id affinity() const
Current affinity of this task.
Definition: task.h:890
internal::string_index my_name
Description of the algorithm for scheduler-based instrumentation.
Definition: task.h:437
scheduler * owner
Obsolete. The scheduler that owns the task.
Definition: task.h:228
internal::generic_scheduler * my_owner
Scheduler instance that registered this context in its thread specific list.
Definition: task.h:426
void recycle_as_safe_continuation()
Recommended for use; the safe variant of recycle_as_continuation.
Definition: task.h:689
#define __TBB_FetchAndIncrementWacquire(P)
Definition: tbb_machine.h:313
static void spawn_root_and_wait(task &root)
Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
Definition: task.h:778
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
Definition: task.h:379
bool is_stolen_task() const
True if task was stolen from the task pool of another thread.
Definition: task.h:855
tbb::task & task()
The task corresponding to this task_prefix.
Definition: task.h:274
Memory prefix to a task object.
Definition: task.h:184
priority_t
Definition: task.h:291
task ** next_ptr
Definition: task.h:998
internal::cpu_ctl_env_space my_cpu_ctl_env
Space for platform-specific FPU settings.
Definition: task.h:410
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
Definition: task.h:414
task * execute() __TBB_override
Should be overridden by derived classes.
Definition: task.h:965
void move(tbb_thread &t1, tbb_thread &t2)
Definition: tbb_thread.h:305
state_type state() const
Current execution state.
Definition: task.h:864
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
Definition: task.h:248
task that does nothing. Useful for synchronization.
Definition: task.h:964
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
Definition: task.h:266
static void __TBB_EXPORTED_FUNC free(task &)
Definition: task.cpp:47
unsigned char state
A task::state_type, stored as a byte for compactness.
Definition: task.h:257
Used to form groups of tasks.
Definition: task.h:332
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
Definition: task.h:420
static void enqueue(task &t)
Enqueue task for starvation-resistant execution.
Definition: task.h:806
task_group_context(internal::string_index name)
Definition: task.h:488
task is in ready pool, or is going to be put there, or was just taken off.
Definition: task.h:615
STL namespace.
task_list()
Construct empty list.
Definition: task.h:1003
void __TBB_EXPORTED_METHOD free(task &) const
Definition: task.cpp:134
bool empty() const
True if list is empty; false otherwise.
Definition: task.h:1009
task object is on free list, or is going to be put there, or was just taken off.
Definition: task.h:619
context_list_node_t * my_prev
Definition: task.h:132
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task * task
tbb::task * parent
The task whose reference count includes me.
Definition: task.h:241
bool is_critical(task &t)
Definition: task.h:958
Base class for types that should not be copied or assigned.
Definition: tbb_stddef.h:331
int ref_count() const
The internal reference count.
Definition: task.h:867
#define __TBB_TASK_BASE_ACCESS
Definition: task.h:38
task * first
Definition: task.h:997
A list of children.
Definition: task.h:995
void spawn_and_wait_for_all(task &child)
Similar to spawn followed by wait_for_all, but more efficient.
Definition: task.h:770
intptr_t isolation_tag
A tag for task isolation.
Definition: task.h:124
Base class for user-defined tasks.
Definition: task.h:589
void call_itt_notify(notify_type, void *)
Work stealing task scheduler.
Definition: scheduler.h:120
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
Definition: task.h:835
void wait_for_all()
Wait for reference count to become one, and set reference count to zero.
Definition: task.h:789
const isolation_tag no_isolation
Definition: task.h:125
Exception container that preserves the exact copy of the original exception.
#define __TBB_atomic
Definition: tbb_stddef.h:237
Interface to be implemented by all exceptions TBB recognizes and propagates across the threads...
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
Base class for types that should not be assigned.
Definition: tbb_stddef.h:320
void const char const char int ITT_FORMAT __itt_group_sync p
state_type
Enumeration of task states that the scheduler considers.
Definition: task.h:609
struct ___itt_caller * __itt_caller
Definition: task.h:25
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id id
task_group_context(kind_type relation_with_parent=bound, uintptr_t t=default_traits)
Default & binding constructor.
Definition: task.h:478
~task_list()
Destroys the list, but does not destroy the task objects.
Definition: task.h:1006
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition: task.cpp:128
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
Definition: tbb_stddef.h:216
task to be rescheduled.
Definition: task.h:613
version_tag_v3 version_tag
Definition: tbb_stddef.h:386
uintptr_t traits() const
Returns the context&#39;s trait.
Definition: task.h:552
virtual ~task()
Destructor.
Definition: task.h:603
void recycle_as_child_of(task &new_parent)
Change this to be a child of new_parent.
Definition: task.h:695
scheduler * origin
The scheduler that allocated the task, or NULL if the task is big.
Definition: task.h:220
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
Definition: task.h:211
__itt_caller itt_caller
Used to set and maintain stack stitching point for Intel Performance Tools.
Definition: task.h:392
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
Definition: task.h:429
void increment_ref_count()
Atomically increment reference count.
Definition: task.h:741

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.