#include "kmp_error.h"
#include "kmp_stats.h"
#include "ompt-specific.h"
char const *traits_t<int>::spec = "d";
char const *traits_t<unsigned int>::spec = "u";
char const *traits_t<long long>::spec = "lld";
char const *traits_t<unsigned long long>::spec = "llu";
char const *traits_t<long>::spec = "ld";
#if KMP_STATS_ENABLED
#define KMP_STATS_LOOP_END(stat)                                               \
  {                                                                            \
    kmp_int64 t;                                                               \
    kmp_int64 u = (kmp_int64)(*pupper);                                        \
    kmp_int64 l = (kmp_int64)(*plower);                                        \
    kmp_int64 i = (kmp_int64)incr;                                             \
    if (i == 1) {                                                              \
      t = u - l + 1;                                                           \
    } else if (i == -1) {                                                      \
      t = l - u + 1;                                                           \
    } else if (i > 0) {                                                        \
      t = (u - l) / i + 1;                                                     \
    } else {                                                                   \
      t = (l - u) / (-i) + 1;                                                  \
    }                                                                          \
    KMP_COUNT_VALUE(stat, t);                                                  \
    KMP_POP_PARTITIONED_TIMER();                                               \
  }
#else
#define KMP_STATS_LOOP_END(stat) /* Nothing */
#endif
static ident_t loc_stub = {0, KMP_IDENT_KMP, 0, 0, ";unknown;unknown;0;0;;"};
static inline void check_loc(ident_t *&loc) {
  if (loc == NULL)
    loc = &loc_stub; // may need to report location info to ittnotify
}
template <typename T>
static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
                                  kmp_int32 schedtype, kmp_int32 *plastiter,
                                  T *plower, T *pupper,
                                  typename traits_t<T>::signed_t *pstride,
                                  typename traits_t<T>::signed_t incr,
                                  typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                  ,
                                  void *codeptr
#endif
) {
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static);
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static_scheduling);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_int32 gtid = global_tid;
  kmp_uint32 tid, nth;
  UT trip_count;
  kmp_team_t *team;
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *th = __kmp_threads[gtid];
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = NULL;
  ompt_task_info_t *task_info = NULL;
  ompt_work_t ompt_work_type = ompt_work_loop;
  static kmp_int8 warn = 0;
  if (ompt_enabled.ompt_callback_work) {
    // Only fully initialize variables needed by OMPT if OMPT is enabled.
    team_info = __ompt_get_teaminfo(0, NULL);
    task_info = __ompt_get_task_info_object(0);
    // Determine workshare type from the location flags
    if (loc != NULL) {
      if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
        ompt_work_type = ompt_work_loop;
      } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
        ompt_work_type = ompt_work_sections;
      } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
        ompt_work_type = ompt_work_distribute;
      } else {
        // use the default set above; warn once about an outdated compiler
        if (KMP_COMPARE_AND_STORE_ACQ8(&warn, (kmp_int8)0, (kmp_int8)1))
          KMP_WARNING(OmptOutdatedWorkshare);
      }
      KMP_DEBUG_ASSERT(ompt_work_type);
    }
  }
#endif
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
  KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
        " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
                   *pstride, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
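  // Note on the format strings above: __kmp_str_format first expands each
  // "%%%s" placeholder using traits_t<T>::spec, producing an ordinary
  // printf-style format for KD_TRACE; e.g. with T = int (spec "d"),
  // "iter=(%%%s," becomes "iter=(%d,".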
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(global_tid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
  }
  /* special handling for zero-trip loops */
  if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
    if (plastiter != NULL)
      *plastiter = FALSE;
    /* leave pupper and plower set to entire iteration space */
    *pstride = incr; /* value should never be used */
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s "
                              "signed?<%s>, loc = %%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec, traits_t<T>::spec);
      check_loc(loc);
      KD_TRACE(100,
               (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  if (schedtype > kmp_ord_upper) {
    // we are in DISTRIBUTE construct
    schedtype += kmp_sch_upper - kmp_ord_upper; // convert to nonordered version
    tid = th->th.th_team->t.t_master_tid;
    team = th->th.th_team->t.t_parent;
  } else {
    tid = __kmp_tid_from_gtid(global_tid);
    team = th->th.th_team;
  }
  /* determine if "for" loop is an active worksharing construct */
  if (team->t.t_serialized) {
    /* serialized parallel: each thread executes the whole iteration space */
    if (plastiter != NULL)
      *plastiter = TRUE;
    /* leave pupper and plower set to entire iteration space */
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  nth = team->t.t_nproc;
  if (nth == 1) {
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  /* compute trip count */
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
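  // For example, *plower == 0, *pupper == 10, incr == 3 gives
  // trip_count == (10 - 0) / 3 + 1 == 4 (iterations 0, 3, 6, 9). The cast
  // to UT matters because upper - lower may overflow the signed type.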
#if KMP_STATS_ENABLED
  if (KMP_MASTER_GTID(gtid)) {
    KMP_COUNT_VALUE(OMP_loop_static_total_iterations, trip_count);
  }
#endif
  if (__kmp_env_consistency_check) {
    /* tripcount overflow? */
    if (trip_count == 0 && *pupper != *plower) {
      __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
                            loc);
    }
  }
  /* compute remaining parameters */
  switch (schedtype) {
  case kmp_sch_static: {
    if (trip_count < nth) {
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy ||
                       __kmp_static == kmp_sch_static_balanced);
      if (tid < trip_count) {
        *pupper = *plower = *plower + tid * incr;
      } else {
        // set bounds so non-active threads execute no iterations
        *plower = *pupper + incr;
      }
      if (plastiter != NULL)
        *plastiter = (tid == trip_count - 1);
    } else {
      if (__kmp_static == kmp_sch_static_balanced) {
        UT small_chunk = trip_count / nth;
        UT extras = trip_count % nth;
        *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
        *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          *plastiter = (tid == nth - 1);
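        // Balanced partitioning gives each thread either small_chunk or
        // small_chunk + 1 iterations; e.g. trip_count == 10, nth == 4 yields
        // small_chunk == 2, extras == 2, so threads 0..3 receive 3, 3, 2, 2
        // iterations respectively, differing by at most one.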
      } else {
        T big_chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T old_upper = *pupper;
        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
        *plower += tid * big_chunk_inc_count;
        *pupper = *plower + big_chunk_inc_count - incr;
        if (incr > 0) {
          if (*pupper < *plower)
            *pupper = traits_t<T>::max_value;
          if (plastiter != NULL)
            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
          if (*pupper > old_upper)
            *pupper = old_upper; // tracker C73258
        } else {
          if (*pupper > *plower)
            *pupper = traits_t<T>::min_value;
          if (plastiter != NULL)
            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
          if (*pupper < old_upper)
            *pupper = old_upper; // tracker C73258
        }
      }
    }
    *pstride = trip_count;
    break;
  }
  case kmp_sch_static_chunked: {
    ST span;
    if (chunk < 1)
      chunk = 1;
    span = chunk * incr;
    *pstride = span * nth;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk) % nth);
    break;
  }
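  // Chunks rotate round-robin: thread tid owns chunks tid, tid + nth,
  // tid + 2*nth, ..., advancing by *pstride between them. E.g. chunk == 2,
  // incr == 1, nth == 3, tid == 1: the first block covers iterations 2..3
  // of the original range and the next block begins 6 iterations later.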
  case kmp_sch_static_balanced_chunked: {
    T old_upper = *pupper;
    // round up to make sure the chunk is enough to cover all iterations
    UT span = (trip_count + nth - 1) / nth;
    // perform chunk adjustment
    chunk = (span + chunk - 1) & ~(chunk - 1);
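    // The mask trick rounds span up to the next multiple of chunk and relies
    // on chunk being a power of two, e.g. span == 10, chunk == 4 gives
    // (10 + 3) & ~3 == 12. This schedule serves OpenMP's
    // schedule(simd:static), where chunk is the simd width.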
    span = chunk * incr;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (incr > 0) {
      if (*pupper > old_upper)
        *pupper = old_upper;
    } else if (*pupper < old_upper)
      *pupper = old_upper;
    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
    break;
  }
  default:
    KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
    break;
  }
#if USE_ITT_BUILD
  // Report loop metadata
  if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
      __kmp_forkjoin_frames_mode == 3 && th->th.th_teams_microtask == NULL &&
      team->t.t_active_level == 1) {
    kmp_uint64 cur_chunk = chunk;
    check_loc(loc);
    // Calculate chunk in case it was not specified; it is specified for
    // kmp_sch_static_chunked
    if (schedtype == kmp_sch_static) {
      cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
    }
    // 0 - "static" schedule
    __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
  }
#endif
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
                            "upper=%%%s stride = %%%s signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), trip_count, codeptr);
  }
#endif
  KMP_STATS_LOOP_END(OMP_loop_static_iterations);
}
template <typename T>
static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 schedule, kmp_int32 *plastiter,
                                       T *plower, T *pupper, T *pupperDist,
                                       typename traits_t<T>::signed_t *pstride,
                                       typename traits_t<T>::signed_t incr,
                                       typename traits_t<T>::signed_t chunk) {
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute);
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute_scheduling);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 tid, nth, team_id, nteams;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
  KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "
        "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100,
             (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(gtid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
      // The loop is illegal: the increment has the wrong sign for the bounds
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  tid = __kmp_tid_from_gtid(gtid);
  th = __kmp_threads[gtid];
  nth = th->th.th_team_nproc;
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute global trip count
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
  *pstride = *pupper - *plower; // just in case (can be unused)
  if (trip_count <= nteams) {
    KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy ||
                     __kmp_static == kmp_sch_static_balanced);
    // only primary threads of some teams get single iteration,
    // other threads get nothing
    if (team_id < trip_count && tid == 0) {
      *pupper = *pupperDist = *plower = *plower + team_id * incr;
    } else {
      *pupperDist = *pupper;
      *plower = *pupper + incr; // compute bounds for zero-trip loop
    }
    if (plastiter != NULL)
      *plastiter = (tid == 0 && team_id == trip_count - 1);
  } else {
    // Get the team's chunk first (each team gets at most one chunk)
    if (__kmp_static == kmp_sch_static_balanced) {
      UT chunkD = trip_count / nteams;
      UT extras = trip_count % nteams;
      *plower +=
          incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
      *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
      if (plastiter != NULL)
        *plastiter = (team_id == nteams - 1);
    } else {
      T chunk_inc_count =
          (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
      T upper = *pupper;
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
      *plower += team_id * chunk_inc_count;
      *pupperDist = *plower + chunk_inc_count - incr;
      // Check/correct bounds if needed
      if (incr > 0) {
        if (*pupperDist < *plower)
          *pupperDist = traits_t<T>::max_value;
        if (plastiter != NULL)
          *plastiter = *plower <= upper && *pupperDist > upper - incr;
        if (*pupperDist > upper)
          *pupperDist = upper; // tracker C73258
        if (*plower > *pupperDist) {
          // no iterations available for the team
          *pupper = *pupperDist;
          goto end;
        }
      } else {
        if (*pupperDist > *plower)
          *pupperDist = traits_t<T>::min_value;
        if (plastiter != NULL)
          *plastiter = *plower >= upper && *pupperDist < upper - incr;
        if (*pupperDist < upper)
          *pupperDist = upper; // tracker C73258
        if (*plower < *pupperDist) {
          // no iterations available for the team
          *pupper = *pupperDist;
          goto end;
        }
      }
    }
    // Get the parallel loop chunk now (for thread)
    // compute trip count for team's chunk
    if (incr == 1) {
      trip_count = *pupperDist - *plower + 1;
    } else if (incr == -1) {
      trip_count = *plower - *pupperDist + 1;
    } else if (incr > 1) {
      // upper-lower can exceed the limit of signed type
      trip_count = (UT)(*pupperDist - *plower) / incr + 1;
    } else {
      trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
    }
    KMP_DEBUG_ASSERT(trip_count);
    switch (schedule) {
    case kmp_sch_static: {
      if (trip_count <= nth) {
        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy ||
                         __kmp_static == kmp_sch_static_balanced);
        if (tid < trip_count)
          *pupper = *plower = *plower + tid * incr;
        else
          *plower = *pupper + incr; // no iterations available
        if (plastiter != NULL)
          if (*plastiter != 0 && !(tid == trip_count - 1))
            *plastiter = 0;
      } else {
        if (__kmp_static == kmp_sch_static_balanced) {
          UT chunkL = trip_count / nth;
          UT extras = trip_count % nth;
          *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
          *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
          if (plastiter != NULL)
            if (*plastiter != 0 && !(tid == nth - 1))
              *plastiter = 0;
        } else {
          T chunk_inc_count =
              (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
          T upper = *pupperDist;
          KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
          *plower += tid * chunk_inc_count;
          *pupper = *plower + chunk_inc_count - incr;
          // Check/correct bounds if needed
          if (incr > 0) {
            if (*pupper < *plower)
              *pupper = traits_t<T>::max_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower <= upper && *pupper > upper - incr))
                *plastiter = 0;
            if (*pupper > upper)
              *pupper = upper; // tracker C73258
          } else {
            if (*pupper > *plower)
              *pupper = traits_t<T>::min_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower >= upper && *pupper < upper - incr))
                *plastiter = 0;
            if (*pupper < upper)
              *pupper = upper; // tracker C73258
          }
        }
      }
      break;
    }
    case kmp_sch_static_chunked: {
      ST span;
      if (chunk < 1)
        chunk = 1;
      span = chunk * incr;
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
      if (plastiter != NULL)
        if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
          *plastiter = 0;
      break;
    }
646 "__kmpc_dist_for_static_init: unknown loop scheduling type");
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "
        "stride=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid));
  KMP_STATS_LOOP_END(OMP_distribute_iterations);
  return;
}
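// The distribute split above is two-level: *pupperDist receives the upper
// bound of the team's share of the iteration space, while *plower/*pupper
// receive the calling thread's share of that team range. E.g. with
// trip_count == 10 and nteams == 4 under balanced scheduling, teams get
// 3, 3, 2 and 2 iterations, and each team then re-partitions its range
// among its nth threads using the same balanced/greedy rules.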
template <typename T>
static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, T *p_lb, T *p_ub,
                                   typename traits_t<T>::signed_t *p_st,
                                   typename traits_t<T>::signed_t incr,
                                   typename traits_t<T>::signed_t chunk) {
  // The routine returns the first chunk distributed to the team and
  // also checks if it is the last chunk or not
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 team_id, nteams;
  UT trip_count;
  T lower, upper;
  ST span;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
  KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmp_team_static_init enter: T#%%d liter=%%d "
                            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec,
                            traits_t<T>::spec);
    KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
  lower = *p_lb;
  upper = *p_ub;
  if (__kmp_env_consistency_check) {
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (upper < lower) : (lower < upper)) {
      // The loop is illegal: the increment has the wrong sign for the bounds
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  th = __kmp_threads[gtid];
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute trip count
  if (incr == 1) {
    trip_count = upper - lower + 1;
  } else if (incr == -1) {
    trip_count = lower - upper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(upper - lower) / incr + 1;
  } else {
    trip_count = (UT)(lower - upper) / (-incr) + 1;
  }
  if (chunk < 1)
    chunk = 1;
  span = chunk * incr;
  *p_st = span * nteams;
  *p_lb = lower + (span * team_id);
  *p_ub = *p_lb + span - incr;
  if (p_last != NULL)
    *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
  // Correct upper bound if needed
  if (incr > 0) {
    if (*p_ub < *p_lb) // overflow?
      *p_ub = traits_t<T>::max_value;
    if (*p_ub > upper)
      *p_ub = upper; // tracker C73258
  } else {
    if (*p_ub > *p_lb)
      *p_ub = traits_t<T>::min_value;
    if (*p_ub < upper)
      *p_ub = upper; // tracker C73258
  }
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff =
        __kmp_str_format("__kmp_team_static_init exit: T#%%d team%%u liter=%%d "
                         "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
                         traits_t<T>::spec, traits_t<T>::spec,
                         traits_t<ST>::spec, traits_t<ST>::spec);
    KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
}
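// __kmp_team_static_init hands back only the team's *first* chunk; chunks
// then rotate round-robin with stride *p_st == span * nteams. E.g. with
// nteams == 2, chunk == 4, incr == 1: team 0 starts at [lower, lower+3],
// team 1 at [lower+4, lower+7], and each advances by 8 iterations per step.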
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int32 *plower, kmp_int32 *pupper,
                              kmp_int32 *pstride, kmp_int32 incr,
                              kmp_int32 chunk) {
  __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint32 *plower, kmp_uint32 *pupper,
                               kmp_int32 *pstride, kmp_int32 incr,
                               kmp_int32 chunk) {
  __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int64 *plower, kmp_int64 *pupper,
                              kmp_int64 *pstride, kmp_int64 incr,
                              kmp_int64 chunk) {
  __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint64 *plower, kmp_uint64 *pupper,
                               kmp_int64 *pstride, kmp_int64 incr,
                               kmp_int64 chunk) {
  __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
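// A minimal sketch (not part of this file) of how a compiler might lower
//   #pragma omp for schedule(static)
//   for (int i = lb; i <= ub; i++) body(i);
// onto the entry points above; `loc' and `gtid' stand for compiler-generated
// values, and the trailing __kmpc_for_static_fini call lives in
// kmp_csupport.cpp:
//
//   kmp_int32 last = 0, lower = lb, upper = ub, stride = 1;
//   __kmpc_for_static_init_4(loc, gtid, kmp_sch_static, &last, &lower,
//                            &upper, &stride, /*incr=*/1, /*chunk=*/1);
//   for (kmp_int32 i = lower; i <= upper; ++i)
//     body(i);
//   __kmpc_for_static_fini(loc, gtid);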
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int32 *plower, kmp_int32 *pupper,
                                   kmp_int32 *pupperD, kmp_int32 *pstride,
                                   kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint32 *plower, kmp_uint32 *pupper,
                                    kmp_uint32 *pupperD, kmp_int32 *pstride,
                                    kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter,
                                         plower, pupper, pupperD, pstride,
                                         incr, chunk);
}
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int64 *plower, kmp_int64 *pupper,
                                   kmp_int64 *pupperD, kmp_int64 *pstride,
                                   kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint64 *plower, kmp_uint64 *pupper,
                                    kmp_uint64 *pupperD, kmp_int64 *pstride,
                                    kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter,
                                         plower, pupper, pupperD, pstride,
                                         incr, chunk);
}
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int32 *p_lb, kmp_int32 *p_ub,
                               kmp_int32 *p_st, kmp_int32 incr,
                               kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint32 *p_lb,
                                kmp_uint32 *p_ub, kmp_int32 *p_st,
                                kmp_int32 incr, kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int64 *p_lb, kmp_int64 *p_ub,
                               kmp_int64 *p_st, kmp_int64 incr,
                               kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint64 *p_lb,
                                kmp_uint64 *p_ub, kmp_int64 *p_st,
                                kmp_int64 incr, kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}