#ifndef OMPT_SPECIFIC_H
#define OMPT_SPECIFIC_H

#include "kmp.h"

#if OMPT_SUPPORT
void __ompt_team_assign_id(kmp_team_t *team, ompt_data_t ompt_pid);
void __ompt_thread_assign_wait_id(void *variable);
void __ompt_lw_taskteam_init(ompt_lw_taskteam_t *lwt, kmp_info_t *thr,
                             int gtid, ompt_data_t *ompt_pid, void *codeptr);
void __ompt_lw_taskteam_link(ompt_lw_taskteam_t *lwt, kmp_info_t *thr,
                             int on_heap, bool always = false);
void __ompt_lw_taskteam_unlink(kmp_info_t *thr);
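// A rough lifecycle sketch for the lightweight task team machinery above
// (illustrative only; the authoritative call sites are in the runtime, e.g.
// around serialized parallel regions in kmp_runtime.cpp):
//
//   ompt_lw_taskteam_t lwt;
//   __ompt_lw_taskteam_init(&lwt, thr, gtid, &ompt_parallel_data, codeptr);
//   __ompt_lw_taskteam_link(&lwt, thr, /*on_heap=*/0);
//   ... run the (serialized) parallel region ...
//   __ompt_lw_taskteam_unlink(thr);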
ompt_team_info_t *__ompt_get_teaminfo(int depth, int *size);
ompt_task_info_t *__ompt_get_task_info_object(int depth);
int __ompt_get_parallel_info_internal(int ancestor_level,
                                      ompt_data_t **parallel_data,
                                      int *team_size);
int __ompt_get_task_info_internal(int ancestor_level, int *type,
                                  ompt_data_t **task_data,
                                  ompt_frame_t **task_frame,
                                  ompt_data_t **parallel_data, int *thread_num);
ompt_data_t *__ompt_get_thread_data_internal();
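// The *_internal functions above back the corresponding OMPT entry points
// (ompt_get_parallel_info, ompt_get_task_info, ompt_get_thread_data) that
// ompt-general.cpp exposes to tools; output pointers may be NULL when the
// tool does not request that piece of information.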
ompt_sync_region_t __ompt_get_barrier_kind(enum barrier_type, kmp_info_t *);
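// Maps the runtime's internal barrier_type (bs_forkjoin_barrier,
// bs_plain_barrier, ...) to the ompt_sync_region_t kind that barrier
// callbacks report to the tool.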
#define OMPT_CUR_TASK_INFO(thr) (&(thr->th.th_current_task->ompt_task_info))
#define OMPT_CUR_TASK_DATA(thr)                                                \
  (&(thr->th.th_current_task->ompt_task_info.task_data))
#define OMPT_CUR_TEAM_INFO(thr) (&(thr->th.th_team->t.ompt_team_info))
#define OMPT_CUR_TEAM_DATA(thr)                                                \
  (&(thr->th.th_team->t.ompt_team_info.parallel_data))
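// Sketch of how a callback invocation typically uses these accessors, given
// a kmp_info_t *thr for the current thread (hedged example; `kind` and
// `return_address` stand in for whatever the call site has at hand):
//
//   if (ompt_enabled.ompt_callback_sync_region) {
//     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
//         kind, ompt_scope_begin, OMPT_CUR_TEAM_DATA(thr),
//         OMPT_CUR_TASK_DATA(thr), return_address);
//   }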
#define OMPT_HAVE_WEAK_ATTRIBUTE KMP_HAVE_WEAK_ATTRIBUTE
#define OMPT_HAVE_PSAPI KMP_HAVE_PSAPI
#define OMPT_STR_MATCH(haystack, needle) __kmp_str_match(haystack, 0, needle)
// Consumes the return address stashed for this thread: reads it and clears
// the slot so each stored codeptr_ra is reported by at most one callback.
inline void *__ompt_load_return_address(int gtid) {
  kmp_info_t *thr = __kmp_threads[gtid];
  void *return_address = thr->th.ompt_thread_info.return_address;
  thr->th.ompt_thread_info.return_address = NULL;
  return return_address;
}
#define OMPT_STORE_RETURN_ADDRESS(gtid)                                        \
  if (ompt_enabled.enabled && gtid >= 0 && __kmp_threads[gtid] &&              \
      !__kmp_threads[gtid]->th.ompt_thread_info.return_address)                \
  __kmp_threads[gtid]->th.ompt_thread_info.return_address =                    \
      __builtin_return_address(0)
#define OMPT_LOAD_RETURN_ADDRESS(gtid) __ompt_load_return_address(gtid)
#define OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid)                                  \
  ((ompt_enabled.enabled && gtid >= 0 && __kmp_threads[gtid] &&                \
    __kmp_threads[gtid]->th.ompt_thread_info.return_address)                   \
       ? __ompt_load_return_address(gtid)                                      \
       : __builtin_return_address(0))
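// The three macros above form a simple hand-off protocol: the outermost
// runtime entry point records __builtin_return_address(0) once (the
// !return_address check keeps nested runtime frames from overwriting the
// user-code address), and the callback site consumes it exactly once via
// OMPT_LOAD_RETURN_ADDRESS. An illustrative entry point (hypothetical name,
// not an actual excerpt from kmp_csupport.cpp):
//
//   void __kmpc_some_construct(ident_t *loc, kmp_int32 gtid) {
//     OMPT_STORE_RETURN_ADDRESS(gtid); // user code is the direct caller
//     ...
//     void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); // reads and clears
//   }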
inline kmp_info_t *ompt_get_thread_gtid(int gtid) {
  return (gtid >= 0) ? __kmp_thread_from_gtid(gtid) : NULL;
}
inline kmp_info_t *ompt_get_thread() {
  int gtid = __kmp_get_gtid();
  return ompt_get_thread_gtid(gtid);
}
inline void ompt_set_thread_state(kmp_info_t *thread, ompt_state_t state) {
  thread->th.ompt_thread_info.state = state;
}
inline const char *ompt_get_runtime_version() {
  return &__kmp_version_lib_ver[KMP_VERSION_MAGIC_LEN];
}
#endif // OMPT_SUPPORT
// macros providing the OMPT callbacks for reduction clause
#if OMPT_SUPPORT && OMPT_OPTIONAL
#define OMPT_REDUCTION_DECL(this_thr, gtid)                                    \
  ompt_data_t *my_task_data = OMPT_CUR_TASK_DATA(this_thr);                    \
  ompt_data_t *my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr);                \
  void *return_address = OMPT_LOAD_RETURN_ADDRESS(gtid);
#define OMPT_REDUCTION_BEGIN                                                   \
  if (ompt_enabled.enabled && ompt_enabled.ompt_callback_reduction) {          \
    ompt_callbacks.ompt_callback(ompt_callback_reduction)(                     \
        ompt_sync_region_reduction, ompt_scope_begin, my_parallel_data,        \
        my_task_data, return_address);                                         \
  }
#define OMPT_REDUCTION_END                                                     \
  if (ompt_enabled.enabled && ompt_enabled.ompt_callback_reduction) {          \
    ompt_callbacks.ompt_callback(ompt_callback_reduction)(                     \
        ompt_sync_region_reduction, ompt_scope_end, my_parallel_data,          \
        my_task_data, return_address);                                         \
  }
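// Usage sketch (illustrative; the real call sites are the reduction entry
// points in the runtime): OMPT_REDUCTION_DECL introduces the locals the
// other two macros expect, so it must appear first in the same scope.
//
//   OMPT_REDUCTION_DECL(this_thr, gtid);
//   OMPT_REDUCTION_BEGIN;
//   ... combine per-thread partial results ...
//   OMPT_REDUCTION_END;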
#else // OMPT_SUPPORT && OMPT_OPTIONAL
#define OMPT_REDUCTION_DECL(this_thr, gtid)
#define OMPT_REDUCTION_BEGIN
#define OMPT_REDUCTION_END
#endif // ! OMPT_SUPPORT && OMPT_OPTIONAL

#endif // OMPT_SPECIFIC_H