17 #include "kmp_wait_release.h"
18 #include "kmp_taskdeps.h"
20 #include "ompt-specific.h"
32 #ifdef KMP_SUPPORT_GRAPH_OUTPUT
33 static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
36 static void __kmp_init_node(kmp_depnode_t *node) {
37 node->dn.successors = NULL;
40 for (
int i = 0; i < MAX_MTX_DEPS; ++i)
41 node->dn.mtx_locks[i] = NULL;
42 node->dn.mtx_num_locks = 0;
43 __kmp_init_lock(&node->dn.lock);
44 KMP_ATOMIC_ST_RLX(&node->dn.nrefs, 1);
45 #ifdef KMP_SUPPORT_GRAPH_OUTPUT
46 node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
50 static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
51 KMP_ATOMIC_INC(&node->dn.nrefs);
55 enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };
57 size_t sizes[] = { 997, 2003, 4001, 8191, 16001, 32003, 64007, 131071, 270029 };
58 const size_t MAX_GEN = 8;
60 static inline size_t __kmp_dephash_hash(kmp_intptr_t addr,
size_t hsize) {
63 return ((addr >> 6) ^ (addr >> 2)) % hsize;
66 static kmp_dephash_t *__kmp_dephash_extend(kmp_info_t *thread,
67 kmp_dephash_t *current_dephash) {
70 size_t gen = current_dephash->generation + 1;
72 return current_dephash;
73 size_t new_size = sizes[gen];
75 size_t size_to_allocate =
76 new_size *
sizeof(kmp_dephash_entry_t *) +
sizeof(kmp_dephash_t);
79 h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size_to_allocate);
81 h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size_to_allocate);
85 h->nelements = current_dephash->nelements;
86 h->buckets = (kmp_dephash_entry **)(h + 1);
91 for (
size_t i = 0; i < new_size; i++) {
96 for (
size_t i = 0; i < current_dephash->size; i++) {
97 kmp_dephash_entry_t *next, *entry;
98 for (entry = current_dephash->buckets[i]; entry; entry = next) {
99 next = entry->next_in_bucket;
102 size_t new_bucket = __kmp_dephash_hash(entry->addr, h->size);
103 entry->next_in_bucket = h->buckets[new_bucket];
104 if (entry->next_in_bucket) {
107 h->buckets[new_bucket] = entry;
113 __kmp_fast_free(thread, current_dephash);
115 __kmp_thread_free(thread, current_dephash);
121 static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
122 kmp_taskdata_t *current_task) {
127 if (current_task->td_flags.tasktype == TASK_IMPLICIT)
128 h_size = KMP_DEPHASH_MASTER_SIZE;
130 h_size = KMP_DEPHASH_OTHER_SIZE;
132 size_t size = h_size *
sizeof(kmp_dephash_entry_t *) +
sizeof(kmp_dephash_t);
135 h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
137 h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
144 h->buckets = (kmp_dephash_entry **)(h + 1);
146 for (
size_t i = 0; i < h_size; i++)
152 #define ENTRY_LAST_INS 0
153 #define ENTRY_LAST_MTXS 1
155 static kmp_dephash_entry *
156 __kmp_dephash_find(kmp_info_t *thread, kmp_dephash_t **hash, kmp_intptr_t addr) {
157 kmp_dephash_t *h = *hash;
158 if (h->nelements != 0
159 && h->nconflicts/h->size >= 1) {
160 *hash = __kmp_dephash_extend(thread, h);
163 size_t bucket = __kmp_dephash_hash(addr, h->size);
165 kmp_dephash_entry_t *entry;
166 for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
167 if (entry->addr == addr)
173 entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
174 thread,
sizeof(kmp_dephash_entry_t));
176 entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
177 thread,
sizeof(kmp_dephash_entry_t));
180 entry->last_out = NULL;
181 entry->last_ins = NULL;
182 entry->last_mtxs = NULL;
183 entry->last_flag = ENTRY_LAST_INS;
184 entry->mtx_lock = NULL;
185 entry->next_in_bucket = h->buckets[bucket];
186 h->buckets[bucket] = entry;
188 if (entry->next_in_bucket)
194 static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
195 kmp_depnode_list_t *list,
196 kmp_depnode_t *node) {
197 kmp_depnode_list_t *new_head;
200 new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
201 thread,
sizeof(kmp_depnode_list_t));
203 new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
204 thread,
sizeof(kmp_depnode_list_t));
207 new_head->node = __kmp_node_ref(node);
208 new_head->next = list;
213 static inline void __kmp_track_dependence(kmp_int32 gtid, kmp_depnode_t *source,
215 kmp_task_t *sink_task) {
216 #ifdef KMP_SUPPORT_GRAPH_OUTPUT
217 kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
220 kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);
222 __kmp_printf(
"%d(%s) -> %d(%s)\n", source->dn.id,
223 task_source->td_ident->psource, sink->dn.id,
224 task_sink->td_ident->psource);
226 #if OMPT_SUPPORT && OMPT_OPTIONAL
230 if (ompt_enabled.ompt_callback_task_dependence) {
231 kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
232 ompt_data_t *sink_data;
234 sink_data = &(KMP_TASK_TO_TASKDATA(sink_task)->ompt_task_info.task_data);
236 sink_data = &__kmp_threads[gtid]->th.ompt_thread_info.task_data;
238 ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
239 &(task_source->ompt_task_info.task_data), sink_data);
244 static inline kmp_int32
245 __kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread,
246 kmp_task_t *task, kmp_depnode_t *node,
247 kmp_depnode_list_t *plist) {
250 kmp_int32 npredecessors = 0;
252 for (kmp_depnode_list_t *p = plist; p; p = p->next) {
253 kmp_depnode_t *dep = p->node;
255 KMP_ACQUIRE_DEPNODE(gtid, dep);
257 __kmp_track_dependence(gtid, dep, node, task);
258 dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
259 KA_TRACE(40, (
"__kmp_process_deps: T#%d adding dependence from %p to "
261 gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
262 KMP_TASK_TO_TASKDATA(task)));
265 KMP_RELEASE_DEPNODE(gtid, dep);
268 return npredecessors;
271 static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid,
274 kmp_depnode_t *source,
275 kmp_depnode_t *sink) {
278 kmp_int32 npredecessors = 0;
281 KMP_ACQUIRE_DEPNODE(gtid, sink);
283 __kmp_track_dependence(gtid, sink, source, task);
284 sink->dn.successors = __kmp_add_node(thread, sink->dn.successors, source);
285 KA_TRACE(40, (
"__kmp_process_deps: T#%d adding dependence from %p to "
287 gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
288 KMP_TASK_TO_TASKDATA(task)));
291 KMP_RELEASE_DEPNODE(gtid, sink);
293 return npredecessors;
296 template <
bool filter>
297 static inline kmp_int32
298 __kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t **hash,
299 bool dep_barrier, kmp_int32 ndeps,
300 kmp_depend_info_t *dep_list, kmp_task_t *task) {
301 KA_TRACE(30, (
"__kmp_process_deps<%d>: T#%d processing %d dependencies : "
302 "dep_barrier = %d\n",
303 filter, gtid, ndeps, dep_barrier));
305 kmp_info_t *thread = __kmp_threads[gtid];
306 kmp_int32 npredecessors = 0;
307 for (kmp_int32 i = 0; i < ndeps; i++) {
308 const kmp_depend_info_t *dep = &dep_list[i];
310 if (filter && dep->base_addr == 0)
313 kmp_dephash_entry_t *info =
314 __kmp_dephash_find(thread, hash, dep->base_addr);
315 kmp_depnode_t *last_out = info->last_out;
316 kmp_depnode_list_t *last_ins = info->last_ins;
317 kmp_depnode_list_t *last_mtxs = info->last_mtxs;
319 if (dep->flags.out) {
320 if (last_ins || last_mtxs) {
321 if (info->last_flag == ENTRY_LAST_INS) {
323 __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
326 __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
328 __kmp_depnode_list_free(thread, last_ins);
329 __kmp_depnode_list_free(thread, last_mtxs);
330 info->last_ins = NULL;
331 info->last_mtxs = NULL;
334 __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
336 __kmp_node_deref(thread, last_out);
341 info->last_out = NULL;
343 info->last_out = __kmp_node_ref(node);
345 }
else if (dep->flags.in) {
349 __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
350 __kmp_node_deref(thread, last_out);
351 info->last_out = NULL;
352 if (info->last_flag == ENTRY_LAST_MTXS && last_ins) {
354 __kmp_depnode_list_free(thread, last_ins);
355 info->last_ins = NULL;
360 __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
362 info->last_flag = ENTRY_LAST_INS;
363 info->last_ins = __kmp_add_node(thread, info->last_ins, node);
365 KMP_DEBUG_ASSERT(dep->flags.mtx == 1);
369 __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
370 __kmp_node_deref(thread, last_out);
371 info->last_out = NULL;
372 if (info->last_flag == ENTRY_LAST_INS && last_mtxs) {
374 __kmp_depnode_list_free(thread, last_mtxs);
375 info->last_mtxs = NULL;
380 __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
382 info->last_flag = ENTRY_LAST_MTXS;
383 info->last_mtxs = __kmp_add_node(thread, info->last_mtxs, node);
384 if (info->mtx_lock == NULL) {
385 info->mtx_lock = (kmp_lock_t *)__kmp_allocate(
sizeof(kmp_lock_t));
386 __kmp_init_lock(info->mtx_lock);
388 KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS);
391 for (m = 0; m < MAX_MTX_DEPS; ++m) {
393 if (node->dn.mtx_locks[m] < info->mtx_lock) {
394 KMP_DEBUG_ASSERT(node->dn.mtx_locks[node->dn.mtx_num_locks] == NULL);
395 for (
int n = node->dn.mtx_num_locks; n > m; --n) {
397 KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL);
398 node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1];
400 node->dn.mtx_locks[m] = info->mtx_lock;
404 KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS);
405 node->dn.mtx_num_locks++;
408 KA_TRACE(30, (
"__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
409 gtid, npredecessors));
410 return npredecessors;
413 #define NO_DEP_BARRIER (false)
414 #define DEP_BARRIER (true)
417 static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
418 kmp_task_t *task, kmp_dephash_t **hash,
419 bool dep_barrier, kmp_int32 ndeps,
420 kmp_depend_info_t *dep_list,
421 kmp_int32 ndeps_noalias,
422 kmp_depend_info_t *noalias_dep_list) {
425 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
427 KA_TRACE(20, (
"__kmp_check_deps: T#%d checking dependencies for task %p : %d "
428 "possibly aliased dependencies, %d non-aliased dependencies : "
429 "dep_barrier=%d .\n",
430 gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));
434 for (i = 0; i < ndeps; i++) {
435 if (dep_list[i].base_addr != 0) {
436 for (
int j = i + 1; j < ndeps; j++) {
437 if (dep_list[i].base_addr == dep_list[j].base_addr) {
438 dep_list[i].flags.in |= dep_list[j].flags.in;
439 dep_list[i].flags.out |=
440 (dep_list[j].flags.out ||
441 (dep_list[i].flags.in && dep_list[j].flags.mtx) ||
442 (dep_list[i].flags.mtx && dep_list[j].flags.in));
443 dep_list[i].flags.mtx =
444 dep_list[i].flags.mtx | dep_list[j].flags.mtx &&
445 !dep_list[i].flags.out;
446 dep_list[j].base_addr = 0;
449 if (dep_list[i].flags.mtx) {
451 if (n_mtxs < MAX_MTX_DEPS && task != NULL) {
454 dep_list[i].flags.in = 1;
455 dep_list[i].flags.out = 1;
456 dep_list[i].flags.mtx = 0;
466 node->dn.npredecessors = -1;
472 npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps,
474 npredecessors += __kmp_process_deps<false>(
475 gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);
477 node->dn.task = task;
487 node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;
489 KA_TRACE(20, (
"__kmp_check_deps: T#%d found %d predecessors for task %p \n",
490 gtid, npredecessors, taskdata));
494 return npredecessors > 0 ? true :
false;
514 kmp_task_t *new_task, kmp_int32 ndeps,
515 kmp_depend_info_t *dep_list,
516 kmp_int32 ndeps_noalias,
517 kmp_depend_info_t *noalias_dep_list) {
519 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
520 KA_TRACE(10, (
"__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
521 loc_ref, new_taskdata));
522 __kmp_assert_valid_gtid(gtid);
523 kmp_info_t *thread = __kmp_threads[gtid];
524 kmp_taskdata_t *current_task = thread->th.th_current_task;
527 if (ompt_enabled.enabled) {
528 if (!current_task->ompt_task_info.frame.enter_frame.ptr)
529 current_task->ompt_task_info.frame.enter_frame.ptr =
530 OMPT_GET_FRAME_ADDRESS(0);
531 if (ompt_enabled.ompt_callback_task_create) {
532 ompt_callbacks.ompt_callback(ompt_callback_task_create)(
533 &(current_task->ompt_task_info.task_data),
534 &(current_task->ompt_task_info.frame),
535 &(new_taskdata->ompt_task_info.task_data),
536 ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
537 OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
540 new_taskdata->ompt_task_info.frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
545 if (ndeps + ndeps_noalias > 0 &&
546 ompt_enabled.ompt_callback_dependences) {
549 int ompt_ndeps = ndeps + ndeps_noalias;
550 ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
551 thread, (ndeps + ndeps_noalias) *
sizeof(ompt_dependence_t));
553 KMP_ASSERT(ompt_deps != NULL);
555 for (i = 0; i < ndeps; i++) {
556 ompt_deps[i].variable.ptr = (
void *)dep_list[i].base_addr;
557 if (dep_list[i].flags.in && dep_list[i].flags.out)
558 ompt_deps[i].dependence_type = ompt_dependence_type_inout;
559 else if (dep_list[i].flags.out)
560 ompt_deps[i].dependence_type = ompt_dependence_type_out;
561 else if (dep_list[i].flags.in)
562 ompt_deps[i].dependence_type = ompt_dependence_type_in;
563 else if (dep_list[i].flags.mtx)
564 ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
566 for (i = 0; i < ndeps_noalias; i++) {
567 ompt_deps[ndeps + i].variable.ptr = (
void *)noalias_dep_list[i].base_addr;
568 if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
569 ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
570 else if (noalias_dep_list[i].flags.out)
571 ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
572 else if (noalias_dep_list[i].flags.in)
573 ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
574 else if (noalias_dep_list[i].flags.mtx)
575 ompt_deps[ndeps + i].dependence_type =
576 ompt_dependence_type_mutexinoutset;
578 ompt_callbacks.ompt_callback(ompt_callback_dependences)(
579 &(new_taskdata->ompt_task_info.task_data), ompt_deps, ompt_ndeps);
582 KMP_OMPT_DEPS_FREE(thread, ompt_deps);
587 bool serial = current_task->td_flags.team_serial ||
588 current_task->td_flags.tasking_ser ||
589 current_task->td_flags.final;
590 kmp_task_team_t *task_team = thread->th.th_task_team;
592 !(task_team && (task_team->tt.tt_found_proxy_tasks ||
593 task_team->tt.tt_hidden_helper_task_encountered));
595 if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
597 if (current_task->td_dephash == NULL)
598 current_task->td_dephash = __kmp_dephash_create(thread, current_task);
601 kmp_depnode_t *node =
602 (kmp_depnode_t *)__kmp_fast_allocate(thread,
sizeof(kmp_depnode_t));
604 kmp_depnode_t *node =
605 (kmp_depnode_t *)__kmp_thread_malloc(thread,
sizeof(kmp_depnode_t));
608 __kmp_init_node(node);
609 new_taskdata->td_depnode = node;
611 if (__kmp_check_deps(gtid, node, new_task, ¤t_task->td_dephash,
612 NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
614 KA_TRACE(10, (
"__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
616 "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
617 gtid, loc_ref, new_taskdata));
619 if (ompt_enabled.enabled) {
620 current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
623 return TASK_CURRENT_NOT_QUEUED;
626 KA_TRACE(10, (
"__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies "
627 "for task (serialized)"
629 gtid, loc_ref, new_taskdata));
632 KA_TRACE(10, (
"__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
634 "loc=%p task=%p, transferring to __kmp_omp_task\n",
635 gtid, loc_ref, new_taskdata));
637 kmp_int32 ret = __kmp_omp_task(gtid, new_task,
true);
639 if (ompt_enabled.enabled) {
640 current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
647 void __ompt_taskwait_dep_finish(kmp_taskdata_t *current_task,
648 ompt_data_t *taskwait_task_data) {
649 if (ompt_enabled.ompt_callback_task_schedule) {
650 ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
651 &(current_task->ompt_task_info.task_data), ompt_task_switch,
653 ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
654 taskwait_task_data, ompt_task_complete,
655 &(current_task->ompt_task_info.task_data));
657 current_task->ompt_task_info.frame.enter_frame.ptr = NULL;
658 *taskwait_task_data = ompt_data_none;
674 kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
675 kmp_depend_info_t *noalias_dep_list) {
676 KA_TRACE(10, (
"__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));
678 if (ndeps == 0 && ndeps_noalias == 0) {
679 KA_TRACE(10, (
"__kmpc_omp_wait_deps(exit): T#%d has no dependencies to "
680 "wait upon : loc=%p\n",
684 __kmp_assert_valid_gtid(gtid);
685 kmp_info_t *thread = __kmp_threads[gtid];
686 kmp_taskdata_t *current_task = thread->th.th_current_task;
694 ompt_data_t *taskwait_task_data = &thread->th.ompt_thread_info.task_data;
695 KMP_ASSERT(taskwait_task_data->ptr == NULL);
696 if (ompt_enabled.enabled) {
697 if (!current_task->ompt_task_info.frame.enter_frame.ptr)
698 current_task->ompt_task_info.frame.enter_frame.ptr =
699 OMPT_GET_FRAME_ADDRESS(0);
700 if (ompt_enabled.ompt_callback_task_create) {
701 ompt_callbacks.ompt_callback(ompt_callback_task_create)(
702 &(current_task->ompt_task_info.task_data),
703 &(current_task->ompt_task_info.frame), taskwait_task_data,
704 ompt_task_explicit | ompt_task_undeferred | ompt_task_mergeable, 1,
705 OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
711 if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
714 int ompt_ndeps = ndeps + ndeps_noalias;
715 ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
716 thread, (ndeps + ndeps_noalias) *
sizeof(ompt_dependence_t));
718 KMP_ASSERT(ompt_deps != NULL);
720 for (i = 0; i < ndeps; i++) {
721 ompt_deps[i].variable.ptr = (
void *)dep_list[i].base_addr;
722 if (dep_list[i].flags.in && dep_list[i].flags.out)
723 ompt_deps[i].dependence_type = ompt_dependence_type_inout;
724 else if (dep_list[i].flags.out)
725 ompt_deps[i].dependence_type = ompt_dependence_type_out;
726 else if (dep_list[i].flags.in)
727 ompt_deps[i].dependence_type = ompt_dependence_type_in;
728 else if (dep_list[i].flags.mtx)
729 ompt_deps[ndeps + i].dependence_type =
730 ompt_dependence_type_mutexinoutset;
732 for (i = 0; i < ndeps_noalias; i++) {
733 ompt_deps[ndeps + i].variable.ptr = (
void *)noalias_dep_list[i].base_addr;
734 if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
735 ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
736 else if (noalias_dep_list[i].flags.out)
737 ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
738 else if (noalias_dep_list[i].flags.in)
739 ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
740 else if (noalias_dep_list[i].flags.mtx)
741 ompt_deps[ndeps + i].dependence_type =
742 ompt_dependence_type_mutexinoutset;
744 ompt_callbacks.ompt_callback(ompt_callback_dependences)(
745 taskwait_task_data, ompt_deps, ompt_ndeps);
748 KMP_OMPT_DEPS_FREE(thread, ompt_deps);
757 bool ignore = current_task->td_flags.team_serial ||
758 current_task->td_flags.tasking_ser ||
759 current_task->td_flags.final;
760 ignore = ignore && thread->th.th_task_team != NULL &&
761 thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
762 ignore = ignore || current_task->td_dephash == NULL;
765 KA_TRACE(10, (
"__kmpc_omp_wait_deps(exit): T#%d has no blocking "
766 "dependencies : loc=%p\n",
769 __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
774 kmp_depnode_t node = {0};
775 __kmp_init_node(&node);
777 __kmp_node_ref(&node);
779 if (!__kmp_check_deps(gtid, &node, NULL, ¤t_task->td_dephash,
780 DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
782 KA_TRACE(10, (
"__kmpc_omp_wait_deps(exit): T#%d has no blocking "
783 "dependencies : loc=%p\n",
786 __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
791 int thread_finished = FALSE;
792 kmp_flag_32<false, false> flag(
793 (std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
794 while (node.dn.npredecessors > 0) {
795 flag.execute_tasks(thread, gtid, FALSE,
796 &thread_finished USE_ITT_BUILD_ARG(NULL),
797 __kmp_task_stealing_constraint);
801 __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
803 KA_TRACE(10, (
"__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n",
kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list)
void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list)