Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
scheduler.h
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef _TBB_scheduler_H
18 #define _TBB_scheduler_H
19 
20 #include "scheduler_common.h"
21 #include "tbb/spin_mutex.h"
22 #include "mailbox.h"
23 #include "tbb_misc.h" // for FastRandom
24 #include "itt_notify.h"
25 #include "../rml/include/rml_tbb.h"
26 
27 #include "intrusive_list.h"
28 
29 #if __TBB_SURVIVE_THREAD_SWITCH
30 #include "cilk-tbb-interop.h"
31 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
32 
33 namespace tbb {
34 namespace internal {
35 
36 template<typename SchedulerTraits> class custom_scheduler;
37 
38 //------------------------------------------------------------------------
39 // generic_scheduler
40 //------------------------------------------------------------------------
41 
42 #define EmptyTaskPool ((task**)0)
43 #define LockedTaskPool ((task**)~(intptr_t)0)
44 
45 //! Bit-field representing properties of a scheduler.
46 struct scheduler_properties {
47  static const bool worker = false;
48  static const bool master = true;
49  //! Indicates that a scheduler acts as a master or a worker.
50  bool type : 1;
52 
53  bool outermost : 1;
54 #if __TBB_PREVIEW_CRITICAL_TASKS
55  bool has_taken_critical_task : 1;
57 
59  unsigned char : 5;
60 #else
61  unsigned char : 6;
63 #endif /* __TBB_PREVIEW_CRITICAL_TASKS */
64 };
65 
66 struct scheduler_state {
67  //! Index of the arena slot the scheduler occupies now, or occupied last time.
68  size_t my_arena_index; // TODO: make it unsigned and pair with my_affinity_id to fit into cache line
69 
70  //! Pointer to the slot in the arena we own at the moment.
71  arena_slot* my_arena_slot;
72 
73  //! The arena that I own (if master) or am servicing at the moment (if worker).
74  arena* my_arena;
75 
76  //! Innermost task whose task::execute() is running. A dummy task on the outermost level.
77  task* my_innermost_running_task;
78 
79 
81 
83 
89  affinity_id my_affinity_id;
90 
91  scheduler_properties my_properties;
92 
93 #if __TBB_SCHEDULER_OBSERVER
94  observer_proxy* my_last_global_observer;
96 #endif
97 
98 #if __TBB_ARENA_OBSERVER
99  observer_proxy* my_last_local_observer;
101 #endif
102 #if __TBB_TASK_PRIORITY
103 
106  volatile intptr_t *my_ref_top_priority;
107 
109  volatile uintptr_t *my_ref_reload_epoch;
110 #endif /* __TBB_TASK_PRIORITY */
111 };
112 
114 
115 //! Work stealing task scheduler.
120 class generic_scheduler: public scheduler
121  , public ::rml::job
122  , public intrusive_list_node
123  , public scheduler_state {
124 public: // almost every class in TBB uses generic_scheduler
125 
127  static const size_t quick_task_size = 256-task_prefix_reservation_size;
128 
129  static bool is_version_3_task( task& t ) {
130 #if __TBB_PREVIEW_CRITICAL_TASKS
131  return (t.prefix().extra_state & 0x7)>=0x1;
132 #else
133  return (t.prefix().extra_state & 0x0F)>=0x1;
134 #endif
135  }
136 
137  //! Position in the call stack specifying its maximal filling when stealing is still allowed.
138  uintptr_t my_stealing_threshold;
139 #if __TBB_ipf
140  uintptr_t my_rsb_stealing_threshold;
142 #endif
143 
144  static const size_t null_arena_index = ~size_t(0);
145 
146  inline bool is_task_pool_published () const;
147 
148  inline bool is_local_task_pool_quiescent () const;
149 
150  inline bool is_quiescent_local_task_pool_empty () const;
151 
152  inline bool is_quiescent_local_task_pool_reset () const;
153 
154  //! The market I am in.
155  market* my_market;
156 
157  //! Random number generator used for picking a random victim from which to steal.
158  FastRandom my_random;
159 
160  //! Free list of small tasks that can be reused.
161  task* my_free_list;
162 
163 #if __TBB_HOARD_NONLOCAL_TASKS
164  task* my_nonlocal_free_list;
166 #endif
167 
168  //! Fake root task created by slave threads.
169  task* my_dummy_task;
170 
172 
173  long my_ref_count;
174 
175  inline void attach_mailbox( affinity_id id );
176 
177  /* A couple of bools can be located here because space is otherwise just padding after my_affinity_id. */
178 
179  //! True if *this was created by automatic TBB initialization.
180  bool my_auto_initialized;
181 
182 #if __TBB_COUNT_TASK_NODES
183  intptr_t my_task_node_count;
185 #endif /* __TBB_COUNT_TASK_NODES */
186 
188  void init_stack_info ();
189 
190  //! Returns true if stealing is allowed.
191  bool can_steal () {
192  int anchor;
193  // TODO IDEA: Add performance warning?
194 #if __TBB_ipf
195  return my_stealing_threshold < (uintptr_t)&anchor && (uintptr_t)__TBB_get_bsp() < my_rsb_stealing_threshold;
196 #else
197  return my_stealing_threshold < (uintptr_t)&anchor;
198 #endif
199  }
200 
202 
203  void publish_task_pool();
204 
206 
207  void leave_task_pool();
208 
210 
211  inline void reset_task_pool_and_leave ();
212 
214 
215  task** lock_task_pool( arena_slot* victim_arena_slot ) const;
216 
218 
219  void unlock_task_pool( arena_slot* victim_arena_slot, task** victim_task_pool ) const;
220 
222 
224  void acquire_task_pool() const;
225 
227 
229  void release_task_pool() const;
230 
232 
233  task* prepare_for_spawning( task* t );
234 
236  inline void commit_spawned_tasks( size_t new_tail );
237 
239 
240  inline void commit_relocated_tasks( size_t new_tail );
241 
243 
246  task* get_task( __TBB_ISOLATION_EXPR( isolation_tag isolation ) );
247 
249 
254 #if __TBB_TASK_ISOLATION
255  task* get_task( size_t T, isolation_tag isolation, bool& tasks_omitted );
256 #else
257  task* get_task( size_t T );
258 #endif /* __TBB_TASK_ISOLATION */
259 
266  task* get_mailbox_task( __TBB_ISOLATION_EXPR( isolation_tag isolation ) );
267 
268  //! True if t is a task_proxy.
269  static bool is_proxy( const task& t ) {
270  return t.prefix().extra_state==es_task_proxy;
271  }
272 
274  task* steal_task( __TBB_ISOLATION_EXPR(isolation_tag isolation) );
275 
277  task* steal_task_from( __TBB_ISOLATION_ARG( arena_slot& victim_arena_slot, isolation_tag isolation ) );
278 
279 #if __TBB_PREVIEW_CRITICAL_TASKS
280  task* get_critical_task( __TBB_ISOLATION_EXPR(isolation_tag isolation) );
282 
285  bool handled_as_critical( task& t );
286 #endif
287 
290  static const size_t min_task_pool_size = 64;
291 
293 
295  size_t prepare_task_pool( size_t n );
296 
298  static generic_scheduler* create_master( arena* a );
299 
301  bool cleanup_master( bool blocking_terminate );
302 
304  static generic_scheduler* create_worker( market& m, size_t index );
305 
307  static void cleanup_worker( void* arg, bool worker );
308 
309 protected:
310  template<typename SchedulerTraits> friend class custom_scheduler;
312 
313 public:
314 #if TBB_USE_ASSERT > 1
315 
317  void assert_task_pool_valid() const;
318 #else
319  void assert_task_pool_valid() const {}
320 #endif /* TBB_USE_ASSERT <= 1 */
321 
322  void attach_arena( arena*, size_t index, bool is_master );
323  void nested_arena_entry( arena*, size_t );
324  void nested_arena_exit();
325  void wait_until_empty();
326 
327  void spawn( task& first, task*& next ) __TBB_override;
328 
329  void spawn_root_and_wait( task& first, task*& next ) __TBB_override;
330 
331  void enqueue( task&, void* reserved ) __TBB_override;
332 
333  void local_spawn( task* first, task*& next );
334  void local_spawn_root_and_wait( task* first, task*& next );
335  virtual void local_wait_for_all( task& parent, task* child ) = 0;
336 
338  void free_scheduler();
339 
341 
342  task& allocate_task( size_t number_of_bytes,
343  __TBB_CONTEXT_ARG(task* parent, task_group_context* context) );
344 
346 
347  template<free_task_hint h>
348  void free_task( task& t );
349 
351  inline void deallocate_task( task& t );
352 
354  inline bool is_worker() const;
355 
357  inline bool outermost_level() const;
358 
360 
363  inline bool master_outermost_level () const;
364 
366  inline bool worker_outermost_level () const;
367 
369  unsigned max_threads_in_arena();
370 
371 #if __TBB_COUNT_TASK_NODES
372  intptr_t get_task_node_count( bool count_arena_workers = false );
373 #endif /* __TBB_COUNT_TASK_NODES */
374 
376  static task* plugged_return_list() {return (task*)(intptr_t)(-1);}
377 
378  //! Number of small tasks that have been allocated by this scheduler.
379  __TBB_atomic intptr_t my_small_task_count;
380 
381  //! List of small tasks that have been returned to this scheduler by other schedulers.
382  // TODO IDEA: see if putting my_return_list on separate cache line improves performance
383  task* my_return_list;
384 
386 
387  virtual task* receive_or_steal_task( __TBB_ISOLATION_ARG( __TBB_atomic reference_count& completion_ref_count, isolation_tag isolation ) ) = 0;
388 
390  void free_nonlocal_small_task( task& t );
391 
392 #if __TBB_TASK_GROUP_CONTEXT
393 
399  inline task_group_context* default_context ();
400 
402  char _padding1[NFS_MaxLineSize - sizeof(context_list_node_t)];
403 
405  context_list_node_t my_context_list_head;
406 
408  // TODO: check whether it can be deadly preempted and replace by spinning/sleeping mutex
409  spin_mutex my_context_list_mutex;
410 
412 
418  uintptr_t my_context_state_propagation_epoch;
419 
421 
424  tbb::atomic<uintptr_t> my_local_ctx_list_update;
425 
426 #if __TBB_TASK_PRIORITY
427  inline intptr_t effective_reference_priority () const;
429 
430  // TODO: move into slots and fix is_out_of_work
432  task* my_offloaded_tasks;
433 
435  task** my_offloaded_task_list_tail_link;
436 
438  uintptr_t my_local_reload_epoch;
439 
441  volatile bool my_pool_reshuffling_pending;
442 
444 
445  task* reload_tasks( __TBB_ISOLATION_EXPR( isolation_tag isolation ) );
446 
447  task* reload_tasks( task*& offloaded_tasks, task**& offloaded_task_list_link, __TBB_ISOLATION_ARG( intptr_t top_priority, isolation_tag isolation ) );
448 
450 
451  task* winnow_task_pool ( __TBB_ISOLATION_EXPR( isolation_tag isolation ) );
452 
454 
455  task *get_task_and_activate_task_pool( size_t H0 , __TBB_ISOLATION_ARG( size_t T0, isolation_tag isolation ) );
456 
458  inline void offload_task ( task& t, intptr_t task_priority );
459 #endif /* __TBB_TASK_PRIORITY */
460 
462 
463  void cleanup_local_context_list ();
464 
467  template <typename T>
468  void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );
469 
470  // check consistency
471  static void assert_context_valid(const task_group_context *tgc) {
473 #if TBB_USE_ASSERT
474  __TBB_ASSERT(tgc, NULL);
475  uintptr_t ctx = tgc->my_version_and_traits;
476  __TBB_ASSERT(is_alive(ctx), "referenced task_group_context was destroyed");
477  static const char *msg = "task_group_context is invalid";
478  __TBB_ASSERT(!(ctx&~(3|(7<<task_group_context::traits_offset))), msg); // the value fits known values of versions and traits
483  __TBB_ASSERT(tgc->my_owner, msg);
484  __TBB_ASSERT(tgc->my_node.my_next && tgc->my_node.my_prev, msg);
485  }
486 #if __TBB_TASK_PRIORITY
487  assert_priority_valid(tgc->my_priority);
488 #endif
489  if(tgc->my_parent)
490 #if TBB_USE_ASSERT > 1
491  assert_context_valid(tgc->my_parent);
492 #else
493  __TBB_ASSERT(is_alive(tgc->my_parent->my_version_and_traits), msg);
494 #endif
495 #endif
496  }
497 #endif /* __TBB_TASK_GROUP_CONTEXT */
498 
499 #if _WIN32||_WIN64
500 private:
502  ::rml::server::execution_resource_t master_exec_resource;
503 public:
504 #endif /* _WIN32||_WIN64 */
505 
506 #if __TBB_TASK_GROUP_CONTEXT
507 
509  tbb::atomic<uintptr_t> my_nonlocal_ctx_list_update;
510 #endif /* __TBB_TASK_GROUP_CONTEXT */
511 
512 #if __TBB_SURVIVE_THREAD_SWITCH
513  __cilk_tbb_unwatch_thunk my_cilk_unwatch_thunk;
514 #if TBB_USE_ASSERT
515 
517  enum cilk_state_t {
518  cs_none=0xF000, // Start at nonzero value so that we can detect use of zeroed memory.
519  cs_running,
520  cs_limbo,
521  cs_freed
522  };
523  cilk_state_t my_cilk_state;
524 #endif /* TBB_USE_ASSERT */
525 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
526 
527 #if __TBB_STATISTICS
528 
531  mutable statistics_counters my_counters;
532 #endif /* __TBB_STATISTICS */
533 
534 }; // class generic_scheduler
535 
536 
537 } // namespace internal
538 } // namespace tbb
539 
540 #include "arena.h"
541 #include "governor.h"
542 
543 namespace tbb {
544 namespace internal {
545 
546 inline bool generic_scheduler::is_task_pool_published () const {
547  __TBB_ASSERT(my_arena_slot, 0);
548  return my_arena_slot->task_pool != EmptyTaskPool;
549 }
550 
551 inline bool generic_scheduler::is_local_task_pool_quiescent () const {
552  __TBB_ASSERT(my_arena_slot, 0);
553  task** tp = my_arena_slot->task_pool;
554  return tp == EmptyTaskPool || tp == LockedTaskPool;
555 }
556 
557 inline bool generic_scheduler::is_quiescent_local_task_pool_empty () const {
558  __TBB_ASSERT( is_local_task_pool_quiescent(), "Task pool is not quiescent" );
559  return __TBB_load_relaxed(my_arena_slot->head) == __TBB_load_relaxed(my_arena_slot->tail);
560 }
561 
562 inline bool generic_scheduler::is_quiescent_local_task_pool_reset () const {
563  __TBB_ASSERT( is_local_task_pool_quiescent(), "Task pool is not quiescent" );
564  return __TBB_load_relaxed(my_arena_slot->head) == 0 && __TBB_load_relaxed(my_arena_slot->tail) == 0;
565 }
566 
567 inline bool generic_scheduler::outermost_level () const {
568  return my_properties.outermost;
569 }
570 
571 inline bool generic_scheduler::master_outermost_level () const {
572  return !is_worker() && outermost_level();
573 }
574 
575 inline bool generic_scheduler::worker_outermost_level () const {
576  return is_worker() && outermost_level();
577 }
578 
579 #if __TBB_TASK_GROUP_CONTEXT
580 inline task_group_context* generic_scheduler::default_context () {
581  return my_dummy_task->prefix().context;
582 }
583 #endif /* __TBB_TASK_GROUP_CONTEXT */
584 
585 inline void generic_scheduler::attach_mailbox( affinity_id id ) {
586  __TBB_ASSERT(id>0,NULL);
587  my_inbox.attach( my_arena->mailbox(id) );
588  my_affinity_id = id;
589 }
590 
591 inline bool generic_scheduler::is_worker() const {
592  return my_properties.type == scheduler_properties::worker;
593 }
594 
595 inline unsigned generic_scheduler::max_threads_in_arena() {
596  __TBB_ASSERT(my_arena, NULL);
597  return my_arena->my_num_slots;
598 }
599 
601 inline void generic_scheduler::deallocate_task( task& t ) {
602 #if TBB_USE_ASSERT
603  task_prefix& p = t.prefix();
604  p.state = 0xFF;
605  p.extra_state = 0xFF;
606  poison_pointer(p.next);
607 #endif /* TBB_USE_ASSERT */
608  NFS_Free((char*)&t-task_prefix_reservation_size);
609 #if __TBB_COUNT_TASK_NODES
610  --my_task_node_count;
611 #endif /* __TBB_COUNT_TASK_NODES */
612 }
613 
614 #if __TBB_COUNT_TASK_NODES
615 inline intptr_t generic_scheduler::get_task_node_count( bool count_arena_workers ) {
616  return my_task_node_count + (count_arena_workers? my_arena->workers_task_node_count(): 0);
617 }
618 #endif /* __TBB_COUNT_TASK_NODES */
619 
620 inline void generic_scheduler::reset_task_pool_and_leave () {
621  __TBB_ASSERT( my_arena_slot->task_pool == LockedTaskPool, "Task pool must be locked when resetting task pool" );
622  __TBB_store_relaxed( my_arena_slot->tail, 0 );
623  __TBB_store_relaxed( my_arena_slot->head, 0 );
624  leave_task_pool();
625 }
626 
627 //TODO: move to arena_slot
628 inline void generic_scheduler::commit_spawned_tasks( size_t new_tail ) {
629  __TBB_ASSERT ( new_tail <= my_arena_slot->my_task_pool_size, "task deque end was overwritten" );
630  // emit "task was released" signal
631  ITT_NOTIFY(sync_releasing, (void*)((uintptr_t)my_arena_slot+sizeof(uintptr_t)));
632  // Release fence is necessary to make sure that previously stored task pointers
633  // are visible to thieves.
634  __TBB_store_with_release( my_arena_slot->tail, new_tail );
635 }
636 
637 inline void generic_scheduler::commit_relocated_tasks( size_t new_tail ) {
638  __TBB_ASSERT( is_local_task_pool_quiescent(),
639  "Task pool must be locked when calling commit_relocated_tasks()" );
640  __TBB_store_relaxed( my_arena_slot->head, 0 );
641  // Tail is updated last to minimize probability of a thread making arena
642  // snapshot being misguided into thinking that this task pool is empty.
643  __TBB_store_release( my_arena_slot->tail, new_tail );
644  release_task_pool();
645 }
646 
647 template<free_task_hint hint>
648 void generic_scheduler::free_task( task& t ) {
649 #if __TBB_HOARD_NONLOCAL_TASKS
650  static const int h = hint&(~local_task);
651 #else
652  static const free_task_hint h = hint;
653 #endif
654  GATHER_STATISTIC(--my_counters.active_tasks);
655  task_prefix& p = t.prefix();
656  // Verify that optimization hints are correct.
657  __TBB_ASSERT( h!=small_local_task || p.origin==this, NULL );
658  __TBB_ASSERT( !(h&small_task) || p.origin, NULL );
659  __TBB_ASSERT( !(h&local_task) || (!p.origin || uintptr_t(p.origin) > uintptr_t(4096)), "local_task means allocated");
660  poison_value(p.depth);
663  __TBB_ASSERT( 1L<<t.state() & (1L<<task::executing|1L<<task::allocated), NULL );
664  p.state = task::freed;
665  if( h==small_local_task || p.origin==this ) {
666  GATHER_STATISTIC(++my_counters.free_list_length);
667  p.next = my_free_list;
668  my_free_list = &t;
669  } else if( !(h&local_task) && p.origin && uintptr_t(p.origin) < uintptr_t(4096) ) {
670  // a special value reserved for future use, do nothing since
671  // origin is not pointing to a scheduler instance
672  } else if( !(h&local_task) && p.origin ) {
673  GATHER_STATISTIC(++my_counters.free_list_length);
674 #if __TBB_HOARD_NONLOCAL_TASKS
675  if( !(h&no_cache) ) {
676  p.next = my_nonlocal_free_list;
677  my_nonlocal_free_list = &t;
678  } else
679 #endif
680  free_nonlocal_small_task(t);
681  } else {
682  GATHER_STATISTIC(--my_counters.big_tasks);
683  deallocate_task(t);
684  }
685 }
686 
687 #if __TBB_TASK_PRIORITY
688 inline intptr_t generic_scheduler::effective_reference_priority () const {
689  // Workers on the outermost dispatch level (i.e. with empty stack) use market's
690  // priority as a reference point (to speedup discovering process level priority
691  // changes). But when there are enough workers to service (even if only partially)
692  // a lower priority arena, they should use arena's priority as a reference, lest
693  // be trapped in a futile spinning (because market's priority would prohibit
694  // executing ANY tasks in this arena).
695  return !worker_outermost_level() ||
696  (my_arena->my_num_workers_allotted < my_arena->num_workers_active()
697 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
698  && my_arena->my_concurrency_mode!=arena_base::cm_enforced_global
699 #endif
700  ) ? *my_ref_top_priority : my_arena->my_top_priority;
701 }
702 
703 inline void generic_scheduler::offload_task ( task& t, intptr_t /*priority*/ ) {
704  GATHER_STATISTIC( ++my_counters.prio_tasks_offloaded );
705  __TBB_ASSERT( !is_proxy(t), "The proxy task cannot be offloaded" );
706  __TBB_ASSERT( my_offloaded_task_list_tail_link && !*my_offloaded_task_list_tail_link, NULL );
707 #if TBB_USE_ASSERT
708  t.prefix().state = task::ready;
709 #endif /* TBB_USE_ASSERT */
710  t.prefix().next_offloaded = my_offloaded_tasks;
711  my_offloaded_tasks = &t;
712 }
713 #endif /* __TBB_TASK_PRIORITY */
714 
715 #if __TBB_PREVIEW_CRITICAL_TASKS
716 class critical_task_count_guard : internal::no_copy {
717 public:
718  critical_task_count_guard(scheduler_properties& properties, task& t)
719  : my_properties(properties),
720  my_original_critical_task_state(properties.has_taken_critical_task) {
721  my_properties.has_taken_critical_task |= internal::is_critical(t);
722  }
723  ~critical_task_count_guard() {
724  my_properties.has_taken_critical_task = my_original_critical_task_state;
725  }
726 private:
727  scheduler_properties& my_properties;
728  bool my_original_critical_task_state;
729 };
730 #endif /* __TBB_PREVIEW_CRITICAL_TASKS */
731 
732 #if __TBB_FP_CONTEXT || __TBB_TASK_GROUP_CONTEXT
733 
737 template <bool report_tasks>
738 class context_guard_helper {
739 #if __TBB_TASK_GROUP_CONTEXT
740  const task_group_context *curr_ctx;
741 #endif
742 #if __TBB_FP_CONTEXT
743  cpu_ctl_env guard_cpu_ctl_env;
744  cpu_ctl_env curr_cpu_ctl_env;
745 #endif
746 public:
747  context_guard_helper()
748 #if __TBB_TASK_GROUP_CONTEXT
749  : curr_ctx(NULL)
750 #endif
751  {
752 #if __TBB_FP_CONTEXT
753  guard_cpu_ctl_env.get_env();
754  curr_cpu_ctl_env = guard_cpu_ctl_env;
755 #endif
756  }
757  ~context_guard_helper() {
758 #if __TBB_FP_CONTEXT
759  if ( curr_cpu_ctl_env != guard_cpu_ctl_env )
760  guard_cpu_ctl_env.set_env();
761 #endif
762 #if __TBB_TASK_GROUP_CONTEXT
763  if (report_tasks && curr_ctx)
764  ITT_TASK_END;
765 #endif
766  }
767  void set_ctx( const task_group_context *ctx ) {
768  generic_scheduler::assert_context_valid(ctx);
769 #if __TBB_FP_CONTEXT
770  const cpu_ctl_env &ctl = *punned_cast<cpu_ctl_env*>(&ctx->my_cpu_ctl_env);
771 #endif
772 #if __TBB_TASK_GROUP_CONTEXT
773  if(ctx != curr_ctx) {
774 #endif
775 #if __TBB_FP_CONTEXT
776  if ( ctl != curr_cpu_ctl_env ) {
777  curr_cpu_ctl_env = ctl;
778  curr_cpu_ctl_env.set_env();
779  }
780 #endif
781 #if __TBB_TASK_GROUP_CONTEXT
782  // if task group context was active, report end of current execution frame.
783  if (report_tasks) {
784  if (curr_ctx)
785  ITT_TASK_END;
786  // reporting begin of new task group context execution frame.
787  // using address of task group context object to group tasks (parent).
788  // id of task execution frame is NULL and reserved for future use.
789  ITT_TASK_BEGIN(ctx,ctx->my_name,NULL);
790  curr_ctx = ctx;
791  }
792  }
793 #endif
794  }
795  void restore_default() {
796 #if __TBB_FP_CONTEXT
797  if ( curr_cpu_ctl_env != guard_cpu_ctl_env ) {
798  guard_cpu_ctl_env.set_env();
799  curr_cpu_ctl_env = guard_cpu_ctl_env;
800  }
801 #endif
802  }
803 };
804 #else
805 template <bool T>
806 struct context_guard_helper {
807  void set_ctx( __TBB_CONTEXT_ARG1(task_group_context*) ) {}
808  void restore_default() {}
809 };
810 #endif /* __TBB_FP_CONTEXT */
811 
812 } // namespace internal
813 } // namespace tbb
814 
815 #endif /* _TBB_scheduler_H */