xref: /llvm-project/openmp/libompd/src/omp-icv.cpp (revision 822142ffdfbe93f213c2c6b3f2aec7fe5f0af072)
1 /*
2  * omp-icv.cpp -- OMPD Internal Control Variable handling
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 // clang-format off
13 /* clang-format expect kmp.h before omp.h which results in build break
14  * due to a few redeclarations.
15  */
16 #include "omp-debug.h"
17 // NOLINTNEXTLINE "to avoid clang tidy warning for the same reason as above."
18 #include "omp.h"
19 #include "ompd-private.h"
20 #include "TargetValue.h"
21 #define OMPD_SKIP_HWLOC 1
22 #include "kmp.h"
23 #undef OMPD_SKIP_HWLOC
24 #include <cstring>
25 
26 /* The ICVs ompd-final-var and ompd-implicit-var below are for backward
27  * compatibility with 5.0.
28  */
29 
/* X-macro listing every ICV this file handles:
 *   macro(enum-suffix, icv-name-string, scope, flag)
 * It generates the ompd_icv enum and the parallel name/scope lookup tables
 * below, keeping all three in sync. The fourth field is not used by the
 * expansions visible in this file. */
#define FOREACH_OMPD_ICV(macro)                                                     \
  macro(dyn_var, "dyn-var", ompd_scope_thread, 0)                                   \
  macro(run_sched_var, "run-sched-var", ompd_scope_task, 0)                         \
  macro(stacksize_var, "stacksize-var", ompd_scope_address_space, 0)                \
  macro(cancel_var, "cancel-var", ompd_scope_address_space, 0)                      \
  macro(max_task_priority_var, "max-task-priority-var", ompd_scope_address_space, 0)\
  macro(debug_var, "debug-var", ompd_scope_address_space, 0)                        \
  macro(nthreads_var, "nthreads-var", ompd_scope_thread, 0)                         \
  macro(display_affinity_var, "display-affinity-var", ompd_scope_address_space, 0)  \
  macro(affinity_format_var, "affinity-format-var", ompd_scope_address_space, 0)    \
  macro(default_device_var, "default-device-var", ompd_scope_thread, 0)             \
  macro(tool_var, "tool-var", ompd_scope_address_space, 0)                          \
  macro(tool_libraries_var, "tool-libraries-var", ompd_scope_address_space, 0)      \
  macro(tool_verbose_init_var, "tool-verbose-init-var", ompd_scope_address_space, 0)\
  macro(levels_var, "levels-var", ompd_scope_parallel, 1)                           \
  macro(active_levels_var, "active-levels-var", ompd_scope_parallel, 0)             \
  macro(thread_limit_var, "thread-limit-var", ompd_scope_task, 0)                   \
  macro(max_active_levels_var, "max-active-levels-var", ompd_scope_task, 0)         \
  macro(bind_var, "bind-var", ompd_scope_task, 0)                                   \
  macro(num_procs_var, "num-procs-var", ompd_scope_address_space, 0)                \
  macro(ompd_num_procs_var, "ompd-num-procs-var", ompd_scope_address_space, 0)      \
  macro(thread_num_var, "thread-num-var", ompd_scope_thread, 1)                     \
  macro(ompd_thread_num_var, "ompd-thread-num-var", ompd_scope_thread, 1)           \
  macro(final_var, "final-task-var", ompd_scope_task, 0)                            \
  macro(ompd_final_var, "ompd-final-var", ompd_scope_task, 0)                       \
  macro(ompd_final_task_var, "ompd-final-task-var", ompd_scope_task, 0)             \
  macro(implicit_var, "implicit-task-var", ompd_scope_task, 0)                      \
  macro(ompd_implicit_var, "ompd-implicit-var", ompd_scope_task, 0)                 \
  macro(ompd_implicit_task_var, "ompd-implicit-task-var", ompd_scope_task, 0)       \
  macro(team_size_var, "team-size-var", ompd_scope_parallel, 1)                     \
  macro(ompd_team_size_var, "ompd-team-size-var", ompd_scope_parallel, 1)
61 
/* Store the tool-supplied callback table; all ICV accessors below use it. */
void __ompd_init_icvs(const ompd_callbacks_t *table) { callbacks = table; }
63 
/* ICV identifiers, generated from FOREACH_OMPD_ICV so they line up with the
 * name and scope tables below (index 0 is the undefined marker). */
enum ompd_icv {
  ompd_icv_undefined_marker =
      0, // ompd_icv_undefined is already defined in ompd.h
#define ompd_icv_macro(v, n, s, d) ompd_icv_##v,
  FOREACH_OMPD_ICV(ompd_icv_macro)
#undef ompd_icv_macro
      ompd_icv_after_last_icv // sentinel: one past the last valid ICV id
};
72 
/* ICV name strings, indexed by ompd_icv; entry 0 matches the undefined
 * marker. */
static const char *ompd_icv_string_values[] = {"undefined",
#define ompd_icv_macro(v, n, s, d) n,
  FOREACH_OMPD_ICV(ompd_icv_macro)
#undef ompd_icv_macro
};
78 
/* Scope of each ICV, indexed by ompd_icv; entry 0 matches the undefined
 * marker. */
static const ompd_scope_t ompd_icv_scope_values[] = {
    ompd_scope_global, // undefined marker
#define ompd_icv_macro(v, n, s, d) s,
    FOREACH_OMPD_ICV(ompd_icv_macro)
#undef ompd_icv_macro
};
85 
86 // clang-format on
ompd_enumerate_icvs(ompd_address_space_handle_t * handle,ompd_icv_id_t current,ompd_icv_id_t * next_id,const char ** next_icv_name,ompd_scope_t * next_scope,int * more)87 ompd_rc_t ompd_enumerate_icvs(ompd_address_space_handle_t *handle,
88                               ompd_icv_id_t current, ompd_icv_id_t *next_id,
89                               const char **next_icv_name,
90                               ompd_scope_t *next_scope, int *more) {
91   if (!handle) {
92     return ompd_rc_stale_handle;
93   }
94   if (!next_id || !next_icv_name || !next_scope || !more) {
95     return ompd_rc_bad_input;
96   }
97   if (current + 1 >= ompd_icv_after_last_icv) {
98     return ompd_rc_bad_input;
99   }
100 
101   *next_id = current + 1;
102 
103   char *icv_name = NULL;
104   ompd_rc_t ret = callbacks->alloc_memory(
105       std::strlen(ompd_icv_string_values[*next_id]) + 1, (void **)&icv_name);
106   *next_icv_name = icv_name;
107   if (ret != ompd_rc_ok) {
108     return ret;
109   }
110   std::strcpy(icv_name, ompd_icv_string_values[*next_id]);
111 
112   *next_scope = ompd_icv_scope_values[*next_id];
113 
114   if ((*next_id) + 1 >= ompd_icv_after_last_icv) {
115     *more = 0;
116   } else {
117     *more = 1;
118   }
119 
120   return ompd_rc_ok;
121 }
122 
create_empty_string(const char ** empty_string_ptr)123 static ompd_rc_t create_empty_string(const char **empty_string_ptr) {
124   char *empty_str;
125   ompd_rc_t ret;
126 
127   if (!callbacks) {
128     return ompd_rc_callback_error;
129   }
130   ret = callbacks->alloc_memory(1, (void **)&empty_str);
131   if (ret != ompd_rc_ok) {
132     return ret;
133   }
134   empty_str[0] = '\0';
135   *empty_string_ptr = empty_str;
136   return ompd_rc_ok;
137 }
138 
ompd_get_dynamic(ompd_thread_handle_t * thread_handle,ompd_word_t * dyn_val)139 static ompd_rc_t ompd_get_dynamic(
140     ompd_thread_handle_t *thread_handle, /* IN: OpenMP thread handle */
141     ompd_word_t *dyn_val /* OUT: Dynamic adjustment of threads */
142 ) {
143   if (!thread_handle)
144     return ompd_rc_stale_handle;
145   if (!thread_handle->ah)
146     return ompd_rc_stale_handle;
147   ompd_address_space_context_t *context = thread_handle->ah->context;
148   if (!context)
149     return ompd_rc_stale_handle;
150   if (!callbacks) {
151     return ompd_rc_callback_error;
152   }
153 
154   int8_t dynamic;
155   ompd_rc_t ret =
156       TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
157           .cast("kmp_base_info_t")
158           .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
159           .cast("kmp_taskdata_t", 1)
160           .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
161           .cast("kmp_internal_control_t", 0)
162           .access(
163               "dynamic") /*__kmp_threads[t]->th.th_current_task->td_icvs.dynamic*/
164           .castBase()
165           .getValue(dynamic);
166   *dyn_val = dynamic;
167   return ret;
168 }
169 
170 static ompd_rc_t
ompd_get_stacksize(ompd_address_space_handle_t * addr_handle,ompd_word_t * stacksize_val)171 ompd_get_stacksize(ompd_address_space_handle_t
172                        *addr_handle, /* IN: handle for the address space */
173                    ompd_word_t *stacksize_val /* OUT: per thread stack size */
174 ) {
175   ompd_address_space_context_t *context = addr_handle->context;
176   if (!context)
177     return ompd_rc_stale_handle;
178   ompd_rc_t ret;
179   if (!callbacks) {
180     return ompd_rc_callback_error;
181   }
182 
183   size_t stacksize;
184   ret = TValue(context, "__kmp_stksize")
185             .castBase("__kmp_stksize")
186             .getValue(stacksize);
187   *stacksize_val = stacksize;
188   return ret;
189 }
190 
ompd_get_cancellation(ompd_address_space_handle_t * addr_handle,ompd_word_t * cancellation_val)191 static ompd_rc_t ompd_get_cancellation(
192     ompd_address_space_handle_t
193         *addr_handle,             /* IN: handle for the address space */
194     ompd_word_t *cancellation_val /* OUT: cancellation value */
195 ) {
196   ompd_address_space_context_t *context = addr_handle->context;
197   if (!context)
198     return ompd_rc_stale_handle;
199   if (!callbacks) {
200     return ompd_rc_callback_error;
201   }
202   ompd_rc_t ret;
203 
204   int omp_cancellation;
205   ret = TValue(context, "__kmp_omp_cancellation")
206             .castBase("__kmp_omp_cancellation")
207             .getValue(omp_cancellation);
208   *cancellation_val = omp_cancellation;
209   return ret;
210 }
211 
ompd_get_max_task_priority(ompd_address_space_handle_t * addr_handle,ompd_word_t * max_task_priority_val)212 static ompd_rc_t ompd_get_max_task_priority(
213     ompd_address_space_handle_t
214         *addr_handle,                  /* IN: handle for the address space */
215     ompd_word_t *max_task_priority_val /* OUT: max task priority value */
216 ) {
217   ompd_address_space_context_t *context = addr_handle->context;
218   if (!context)
219     return ompd_rc_stale_handle;
220   if (!callbacks) {
221     return ompd_rc_callback_error;
222   }
223   ompd_rc_t ret;
224 
225   int max_task_priority;
226   ret = TValue(context, "__kmp_max_task_priority")
227             .castBase("__kmp_max_task_priority")
228             .getValue(max_task_priority);
229   *max_task_priority_val = max_task_priority;
230   return ret;
231 }
232 
233 static ompd_rc_t
ompd_get_debug(ompd_address_space_handle_t * addr_handle,ompd_word_t * debug_val)234 ompd_get_debug(ompd_address_space_handle_t
235                    *addr_handle,      /* IN: handle for the address space */
236                ompd_word_t *debug_val /* OUT: debug value */
237 ) {
238   ompd_address_space_context_t *context = addr_handle->context;
239   if (!context)
240     return ompd_rc_stale_handle;
241   if (!callbacks) {
242     return ompd_rc_callback_error;
243   }
244   ompd_rc_t ret;
245 
246   uint64_t ompd_state_val;
247   ret = TValue(context, "ompd_state")
248             .castBase("ompd_state")
249             .getValue(ompd_state_val);
250   if (ompd_state_val > 0) {
251     *debug_val = 1;
252   } else {
253     *debug_val = 0;
254   }
255   return ret;
256 }
257 
/* Helper routine for the ompd_get_nthreads routines.
 *
 * Reads three values for the given thread in one pass:
 *   *used                  - __kmp_nested_nth.used (length of the nested
 *                            nthreads list)
 *   *current_nesting_level - t_level of the team of the thread's current task
 *   *nproc                 - the task's nproc ICV
 * Returns ompd_rc_ok only if all three reads succeed. */
static ompd_rc_t ompd_get_nthreads_aux(ompd_thread_handle_t *thread_handle,
                                       uint32_t *used,
                                       uint32_t *current_nesting_level,
                                       uint32_t *nproc) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, "__kmp_nested_nth")
                      .cast("kmp_nested_nthreads_t")
                      .access("used")
                      .castBase(ompd_type_int)
                      .getValue(*used);
  if (ret != ompd_rc_ok)
    return ret;

  // Shared sub-chain: the current task descriptor of the thread.
  TValue taskdata =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
          .cast("kmp_taskdata_t", 1);

  ret = taskdata
            .access("td_team") /*__kmp_threads[t]->th.th_current_task.td_team*/
            .cast("kmp_team_p", 1)
            .access("t") /*__kmp_threads[t]->th.th_current_task.td_team->t*/
            .cast("kmp_base_team_t", 0) /*t*/
            .access("t_level")          /*t.t_level*/
            .castBase(ompd_type_int)
            .getValue(*current_nesting_level);
  if (ret != ompd_rc_ok)
    return ret;

  ret = taskdata.cast("kmp_taskdata_t", 1)
            .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
            .cast("kmp_internal_control_t", 0)
            .access(
                "nproc") /*__kmp_threads[t]->th.th_current_task->td_icvs.nproc*/
            .castBase(ompd_type_int)
            .getValue(*nproc);
  if (ret != ompd_rc_ok)
    return ret;

  return ompd_rc_ok;
}
311 
ompd_get_nthreads(ompd_thread_handle_t * thread_handle,ompd_word_t * nthreads_var_val)312 static ompd_rc_t ompd_get_nthreads(
313     ompd_thread_handle_t *thread_handle, /* IN: handle for the thread */
314     ompd_word_t *nthreads_var_val        /* OUT: nthreads-var (of integer type)
315                                             value */
316 ) {
317   uint32_t used;
318   uint32_t nproc;
319   uint32_t current_nesting_level;
320 
321   ompd_rc_t ret;
322   ret = ompd_get_nthreads_aux(thread_handle, &used, &current_nesting_level,
323                               &nproc);
324   if (ret != ompd_rc_ok)
325     return ret;
326 
327   /*__kmp_threads[t]->th.th_current_task->td_icvs.nproc*/
328   *nthreads_var_val = nproc;
329   /* If the nthreads-var is a list with more than one element, then the value of
330      this ICV cannot be represented by an integer type. In this case,
331      ompd_rc_incomplete is returned. The tool can check the return value and
332      can choose to invoke ompd_get_icv_string_from_scope() if needed. */
333   if (current_nesting_level < used - 1) {
334     return ompd_rc_incomplete;
335   }
336   return ompd_rc_ok;
337 }
338 
ompd_get_nthreads(ompd_thread_handle_t * thread_handle,const char ** nthreads_list_string)339 static ompd_rc_t ompd_get_nthreads(
340     ompd_thread_handle_t *thread_handle, /* IN: handle for the thread */
341     const char **nthreads_list_string    /* OUT: string list of comma separated
342                                             nthreads values */
343 ) {
344   uint32_t used;
345   uint32_t nproc;
346   uint32_t current_nesting_level;
347 
348   ompd_rc_t ret;
349   ret = ompd_get_nthreads_aux(thread_handle, &used, &current_nesting_level,
350                               &nproc);
351   if (ret != ompd_rc_ok)
352     return ret;
353 
354   uint32_t num_list_elems;
355   if (used == 0 || current_nesting_level >= used) {
356     num_list_elems = 1;
357   } else {
358     num_list_elems = used - current_nesting_level;
359   }
360   size_t buffer_size = 16 /* digits per element including the comma separator */
361                            * num_list_elems +
362                        1; /* string terminator NULL */
363   char *nthreads_list_str;
364   ret = callbacks->alloc_memory(buffer_size, (void **)&nthreads_list_str);
365   if (ret != ompd_rc_ok)
366     return ret;
367 
368   /* The nthreads-var list would be:
369   [__kmp_threads[t]->th.th_current_task->td_icvs.nproc,
370    __kmp_nested_nth.nth[current_nesting_level + 1],
371    __kmp_nested_nth.nth[current_nesting_level + 2],
372     …,
373    __kmp_nested_nth.nth[used - 1]]*/
374 
375   sprintf(nthreads_list_str, "%d", nproc);
376   *nthreads_list_string = nthreads_list_str;
377   if (num_list_elems == 1) {
378     return ompd_rc_ok;
379   }
380 
381   char temp_value[16];
382   uint32_t nth_value;
383 
384   for (current_nesting_level++; /* the list element for this nesting
385                                  * level has already been accounted for
386                                    by nproc */
387        current_nesting_level < used; current_nesting_level++) {
388 
389     ret = TValue(thread_handle->ah->context, "__kmp_nested_nth")
390               .cast("kmp_nested_nthreads_t")
391               .access("nth")
392               .cast("int", 1)
393               .getArrayElement(current_nesting_level)
394               .castBase(ompd_type_int)
395               .getValue(nth_value);
396 
397     if (ret != ompd_rc_ok)
398       return ret;
399 
400     sprintf(temp_value, ",%d", nth_value);
401     strcat(nthreads_list_str, temp_value);
402   }
403 
404   return ompd_rc_ok;
405 }
406 
ompd_get_display_affinity(ompd_address_space_handle_t * addr_handle,ompd_word_t * display_affinity_val)407 static ompd_rc_t ompd_get_display_affinity(
408     ompd_address_space_handle_t
409         *addr_handle,                 /* IN: handle for the address space */
410     ompd_word_t *display_affinity_val /* OUT: display affinity value */
411 ) {
412   ompd_address_space_context_t *context = addr_handle->context;
413   if (!context)
414     return ompd_rc_stale_handle;
415   ompd_rc_t ret;
416 
417   if (!callbacks) {
418     return ompd_rc_callback_error;
419   }
420   ret = TValue(context, "__kmp_display_affinity")
421             .castBase("__kmp_display_affinity")
422             .getValue(*display_affinity_val);
423   return ret;
424 }
425 
ompd_get_affinity_format(ompd_address_space_handle_t * addr_handle,const char ** affinity_format_string)426 static ompd_rc_t ompd_get_affinity_format(
427     ompd_address_space_handle_t *addr_handle, /* IN: address space handle*/
428     const char **affinity_format_string       /* OUT: affinity format string */
429 ) {
430   ompd_address_space_context_t *context = addr_handle->context;
431   if (!context)
432     return ompd_rc_stale_handle;
433 
434   if (!callbacks) {
435     return ompd_rc_callback_error;
436   }
437   ompd_rc_t ret;
438   ret = TValue(context, "__kmp_affinity_format")
439             .cast("char", 1)
440             .getString(affinity_format_string);
441   return ret;
442 }
443 
ompd_get_tool_libraries(ompd_address_space_handle_t * addr_handle,const char ** tool_libraries_string)444 static ompd_rc_t ompd_get_tool_libraries(
445     ompd_address_space_handle_t *addr_handle, /* IN: address space handle*/
446     const char **tool_libraries_string        /* OUT: tool libraries string */
447 ) {
448   if (!tool_libraries_string)
449     return ompd_rc_bad_input;
450 
451   ompd_address_space_context_t *context = addr_handle->context;
452   if (!context)
453     return ompd_rc_stale_handle;
454 
455   if (!callbacks) {
456     return ompd_rc_callback_error;
457   }
458   ompd_rc_t ret;
459   ret = TValue(context, "__kmp_tool_libraries")
460             .cast("char", 1)
461             .getString(tool_libraries_string);
462   if (ret == ompd_rc_unsupported) {
463     ret = create_empty_string(tool_libraries_string);
464   }
465   return ret;
466 }
467 
ompd_get_default_device(ompd_thread_handle_t * thread_handle,ompd_word_t * default_device_val)468 static ompd_rc_t ompd_get_default_device(
469     ompd_thread_handle_t *thread_handle, /* IN: handle for the thread */
470     ompd_word_t *default_device_val      /* OUT: default device value */
471 ) {
472   if (!thread_handle)
473     return ompd_rc_stale_handle;
474   if (!thread_handle->ah)
475     return ompd_rc_stale_handle;
476   ompd_address_space_context_t *context = thread_handle->ah->context;
477   if (!context)
478     return ompd_rc_stale_handle;
479   if (!callbacks)
480     return ompd_rc_callback_error;
481 
482   ompd_rc_t ret =
483       TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
484           .cast("kmp_base_info_t")
485           .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
486           .cast("kmp_taskdata_t", 1)
487           .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
488           .cast("kmp_internal_control_t", 0)
489           /*__kmp_threads[t]->th.th_current_task->td_icvs.default_device*/
490           .access("default_device")
491           .castBase()
492           .getValue(*default_device_val);
493   return ret;
494 }
495 
496 static ompd_rc_t
ompd_get_tool(ompd_address_space_handle_t * addr_handle,ompd_word_t * tool_val)497 ompd_get_tool(ompd_address_space_handle_t
498                   *addr_handle,     /* IN: handle for the address space */
499               ompd_word_t *tool_val /* OUT: tool value */
500 ) {
501   ompd_address_space_context_t *context = addr_handle->context;
502   if (!context)
503     return ompd_rc_stale_handle;
504   if (!callbacks) {
505     return ompd_rc_callback_error;
506   }
507   ompd_rc_t ret;
508 
509   ret =
510       TValue(context, "__kmp_tool").castBase("__kmp_tool").getValue(*tool_val);
511   return ret;
512 }
513 
ompd_get_tool_verbose_init(ompd_address_space_handle_t * addr_handle,const char ** tool_verbose_init_string)514 static ompd_rc_t ompd_get_tool_verbose_init(
515     ompd_address_space_handle_t *addr_handle, /* IN: address space handle*/
516     const char **tool_verbose_init_string /* OUT: tool verbose init string */
517 ) {
518   ompd_address_space_context_t *context = addr_handle->context;
519   if (!context)
520     return ompd_rc_stale_handle;
521 
522   if (!callbacks) {
523     return ompd_rc_callback_error;
524   }
525   ompd_rc_t ret;
526   ret = TValue(context, "__kmp_tool_verbose_init")
527             .cast("char", 1)
528             .getString(tool_verbose_init_string);
529   if (ret == ompd_rc_unsupported) {
530     ret = create_empty_string(tool_verbose_init_string);
531   }
532   return ret;
533 }
534 
ompd_get_level(ompd_parallel_handle_t * parallel_handle,ompd_word_t * val)535 static ompd_rc_t ompd_get_level(
536     ompd_parallel_handle_t *parallel_handle, /* IN: OpenMP parallel handle */
537     ompd_word_t *val                         /* OUT: nesting level */
538 ) {
539   if (!parallel_handle->ah)
540     return ompd_rc_stale_handle;
541   ompd_address_space_context_t *context = parallel_handle->ah->context;
542   if (!context)
543     return ompd_rc_stale_handle;
544 
545   if (!callbacks) {
546     return ompd_rc_callback_error;
547   }
548 
549   uint32_t res;
550 
551   ompd_rc_t ret = TValue(context, parallel_handle->th)
552                       .cast("kmp_base_team_t", 0) /*t*/
553                       .access("t_level")          /*t.t_level*/
554                       .castBase()
555                       .getValue(res);
556   *val = res;
557   return ret;
558 }
559 
ompd_get_active_level(ompd_parallel_handle_t * parallel_handle,ompd_word_t * val)560 static ompd_rc_t ompd_get_active_level(
561     ompd_parallel_handle_t *parallel_handle, /* IN: OpenMP parallel handle */
562     ompd_word_t *val                         /* OUT: active nesting level */
563 ) {
564   if (!parallel_handle->ah)
565     return ompd_rc_stale_handle;
566   ompd_address_space_context_t *context = parallel_handle->ah->context;
567   if (!context)
568     return ompd_rc_stale_handle;
569   if (!callbacks) {
570     return ompd_rc_callback_error;
571   }
572 
573   uint32_t res;
574 
575   ompd_rc_t ret = TValue(context, parallel_handle->th)
576                       .cast("kmp_base_team_t", 0) /*t*/
577                       .access("t_active_level")   /*t.t_active_level*/
578                       .castBase()
579                       .getValue(res);
580   *val = res;
581   return ret;
582 }
583 
584 static ompd_rc_t
ompd_get_num_procs(ompd_address_space_handle_t * addr_handle,ompd_word_t * val)585 ompd_get_num_procs(ompd_address_space_handle_t
586                        *addr_handle, /* IN: handle for the address space */
587                    ompd_word_t *val  /* OUT: number of processes */
588 ) {
589   ompd_address_space_context_t *context = addr_handle->context;
590   if (!context)
591     return ompd_rc_stale_handle;
592   if (!callbacks) {
593     return ompd_rc_callback_error;
594   }
595 
596   if (!val)
597     return ompd_rc_bad_input;
598   ompd_rc_t ret;
599 
600   int nth;
601   ret = TValue(context, "__kmp_avail_proc")
602             .castBase("__kmp_avail_proc")
603             .getValue(nth);
604   *val = nth;
605   return ret;
606 }
607 
ompd_get_thread_limit(ompd_task_handle_t * task_handle,ompd_word_t * val)608 static ompd_rc_t ompd_get_thread_limit(
609     ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
610     ompd_word_t *val                 /* OUT: max number of threads */
611 ) {
612   if (!task_handle->ah)
613     return ompd_rc_stale_handle;
614   ompd_address_space_context_t *context = task_handle->ah->context;
615   if (!context)
616     return ompd_rc_stale_handle;
617   if (!callbacks) {
618     return ompd_rc_callback_error;
619   }
620 
621   ompd_rc_t ret = TValue(context, task_handle->th)
622                       .cast("kmp_taskdata_t") // td
623                       .access("td_icvs")      // td->td_icvs
624                       .cast("kmp_internal_control_t", 0)
625                       .access("thread_limit") // td->td_icvs.thread_limit
626                       .castBase()
627                       .getValue(*val);
628 
629   return ret;
630 }
631 
ompd_get_thread_num(ompd_thread_handle_t * thread_handle,ompd_word_t * val)632 static ompd_rc_t ompd_get_thread_num(
633     ompd_thread_handle_t *thread_handle, /* IN: OpenMP thread handle*/
634     ompd_word_t *val /* OUT: number of the thread within the team */
635 ) {
636   if (!thread_handle)
637     return ompd_rc_stale_handle;
638   if (!thread_handle->ah)
639     return ompd_rc_stale_handle;
640   ompd_address_space_context_t *context = thread_handle->ah->context;
641   if (!context)
642     return ompd_rc_stale_handle;
643   if (!callbacks) {
644     return ompd_rc_callback_error;
645   }
646 
647   ompd_rc_t ret =
648       TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
649           .cast("kmp_base_info_t")
650           .access("th_info") /*__kmp_threads[t]->th.th_info*/
651           .cast("kmp_desc_t")
652           .access("ds") /*__kmp_threads[t]->th.th_info.ds*/
653           .cast("kmp_desc_base_t")
654           .access("ds_tid") /*__kmp_threads[t]->th.th_info.ds.ds_tid*/
655           .castBase()
656           .getValue(*val);
657   return ret;
658 }
659 
660 static ompd_rc_t
ompd_in_final(ompd_task_handle_t * task_handle,ompd_word_t * val)661 ompd_in_final(ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
662               ompd_word_t *val                 /* OUT: max number of threads */
663 ) {
664   if (!task_handle->ah)
665     return ompd_rc_stale_handle;
666   ompd_address_space_context_t *context = task_handle->ah->context;
667   if (!context)
668     return ompd_rc_stale_handle;
669   if (!callbacks) {
670     return ompd_rc_callback_error;
671   }
672 
673   ompd_rc_t ret = TValue(context, task_handle->th)
674                       .cast("kmp_taskdata_t") // td
675                       .access("td_flags")     // td->td_flags
676                       .cast("kmp_tasking_flags_t")
677                       .check("final", val); // td->td_flags.tasktype
678 
679   return ret;
680 }
681 
ompd_get_max_active_levels(ompd_task_handle_t * task_handle,ompd_word_t * val)682 static ompd_rc_t ompd_get_max_active_levels(
683     ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
684     ompd_word_t *val                 /* OUT: max number of threads */
685 ) {
686   if (!task_handle->ah)
687     return ompd_rc_stale_handle;
688   ompd_address_space_context_t *context = task_handle->ah->context;
689   if (!context)
690     return ompd_rc_stale_handle;
691   if (!callbacks) {
692     return ompd_rc_callback_error;
693   }
694 
695   ompd_rc_t ret =
696       TValue(context, task_handle->th)
697           .cast("kmp_taskdata_t") // td
698           .access("td_icvs")      // td->td_icvs
699           .cast("kmp_internal_control_t", 0)
700           .access("max_active_levels") // td->td_icvs.max_active_levels
701           .castBase()
702           .getValue(*val);
703 
704   return ret;
705 }
706 
ompd_get_run_schedule(ompd_task_handle_t * task_handle,const char ** run_sched_string)707 static ompd_rc_t ompd_get_run_schedule(
708     ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
709     const char **run_sched_string    /* OUT: Run Schedule String
710                                         consisting of kind and modifier */
711 ) {
712   if (!task_handle->ah)
713     return ompd_rc_stale_handle;
714   ompd_address_space_context_t *context = task_handle->ah->context;
715   if (!context)
716     return ompd_rc_stale_handle;
717   if (!callbacks) {
718     return ompd_rc_callback_error;
719   }
720 
721   int kind;
722 
723   TValue sched = TValue(context, task_handle->th)
724                      .cast("kmp_taskdata_t") // td
725                      .access("td_icvs")      // td->td_icvs
726                      .cast("kmp_internal_control_t", 0)
727                      .access("sched") // td->td_icvs.sched
728                      .cast("kmp_r_sched_t", 0);
729 
730   ompd_rc_t ret = sched
731                       .access("r_sched_type") // td->td_icvs.sched.r_sched_type
732                       .castBase()
733                       .getValue(kind);
734   if (ret != ompd_rc_ok) {
735     return ret;
736   }
737   int chunk = 0;
738   ret = sched
739             .access("chunk") // td->td_icvs.sched.chunk
740             .castBase()
741             .getValue(chunk);
742   if (ret != ompd_rc_ok) {
743     return ret;
744   }
745   char *run_sched_var_string;
746   ret = callbacks->alloc_memory(100, (void **)&run_sched_var_string);
747   if (ret != ompd_rc_ok) {
748     return ret;
749   }
750   run_sched_var_string[0] = '\0';
751   if (SCHEDULE_HAS_MONOTONIC(kind)) {
752     strcpy(run_sched_var_string, "monotonic:");
753   } else if (SCHEDULE_HAS_NONMONOTONIC(kind)) {
754     strcpy(run_sched_var_string, "nonmonotonic:");
755   }
756 
757   bool static_unchunked = false;
758   switch (SCHEDULE_WITHOUT_MODIFIERS(kind)) {
759   case kmp_sch_static:
760   case kmp_sch_static_greedy:
761   case kmp_sch_static_balanced:
762     static_unchunked = true;
763     strcat(run_sched_var_string, "static");
764     break;
765   case kmp_sch_static_chunked:
766     strcat(run_sched_var_string, "static");
767     break;
768   case kmp_sch_dynamic_chunked:
769     strcat(run_sched_var_string, "dynamic");
770     break;
771   case kmp_sch_guided_chunked:
772   case kmp_sch_guided_iterative_chunked:
773   case kmp_sch_guided_analytical_chunked:
774     strcat(run_sched_var_string, "guided");
775     break;
776   case kmp_sch_auto:
777     strcat(run_sched_var_string, "auto");
778     break;
779   case kmp_sch_trapezoidal:
780     strcat(run_sched_var_string, "trapezoidal");
781     break;
782   case kmp_sch_static_steal:
783     strcat(run_sched_var_string, "static_steal");
784     break;
785   default:
786     ret = callbacks->free_memory((void *)(run_sched_var_string));
787     if (ret != ompd_rc_ok) {
788       return ret;
789     }
790     ret = create_empty_string(run_sched_string);
791     return ret;
792   }
793 
794   if (static_unchunked == true) {
795     // To be in sync with what OMPT returns.
796     // Chunk was not set. Shown with a zero value.
797     chunk = 0;
798   }
799 
800   char temp_str[16];
801   sprintf(temp_str, ",%d", chunk);
802   strcat(run_sched_var_string, temp_str);
803   *run_sched_string = run_sched_var_string;
804   return ret;
805 }
806 
807 /* Helper routine for the ompd_get_proc_bind routines */
ompd_get_proc_bind_aux(ompd_task_handle_t * task_handle,uint32_t * used,uint32_t * current_nesting_level,uint32_t * proc_bind)808 static ompd_rc_t ompd_get_proc_bind_aux(ompd_task_handle_t *task_handle,
809                                         uint32_t *used,
810                                         uint32_t *current_nesting_level,
811                                         uint32_t *proc_bind) {
812   if (!task_handle->ah)
813     return ompd_rc_stale_handle;
814   ompd_address_space_context_t *context = task_handle->ah->context;
815   if (!context)
816     return ompd_rc_stale_handle;
817   if (!callbacks) {
818     return ompd_rc_callback_error;
819   }
820 
821   ompd_rc_t ret = TValue(context, "__kmp_nested_proc_bind")
822                       .cast("kmp_nested_proc_bind_t")
823                       .access("used")
824                       .castBase(ompd_type_int)
825                       .getValue(*used);
826   if (ret != ompd_rc_ok)
827     return ret;
828 
829   TValue taskdata = TValue(context, task_handle->th) /* td */
830                         .cast("kmp_taskdata_t");
831 
832   ret = taskdata
833             .access("td_team") /* td->td_team*/
834             .cast("kmp_team_p", 1)
835             .access("t")                /* td->td_team->t*/
836             .cast("kmp_base_team_t", 0) /*t*/
837             .access("t_level")          /*t.t_level*/
838             .castBase(ompd_type_int)
839             .getValue(*current_nesting_level);
840   if (ret != ompd_rc_ok)
841     return ret;
842 
843   ret = taskdata
844             .access("td_icvs") /* td->td_icvs */
845             .cast("kmp_internal_control_t", 0)
846             .access("proc_bind") /* td->td_icvs.proc_bind */
847             .castBase()
848             .getValue(*proc_bind);
849   return ret;
850 }
851 
852 static ompd_rc_t
ompd_get_proc_bind(ompd_task_handle_t * task_handle,ompd_word_t * bind)853 ompd_get_proc_bind(ompd_task_handle_t *task_handle, /* IN: OpenMP task handle */
854                    ompd_word_t *bind /* OUT: Kind of proc-binding */
855 ) {
856   uint32_t used;
857   uint32_t proc_bind;
858   uint32_t current_nesting_level;
859 
860   ompd_rc_t ret;
861   ret = ompd_get_proc_bind_aux(task_handle, &used, &current_nesting_level,
862                                &proc_bind);
863   if (ret != ompd_rc_ok)
864     return ret;
865 
866   *bind = proc_bind;
867   /* If bind-var is a list with more than one element, then the value of
868      this ICV cannot be represented by an integer type. In this case,
869      ompd_rc_incomplete is returned. The tool can check the return value and
870      can choose to invoke ompd_get_icv_string_from_scope() if needed. */
871   if (current_nesting_level < used - 1) {
872     return ompd_rc_incomplete;
873   }
874   return ompd_rc_ok;
875 }
876 
ompd_get_proc_bind(ompd_task_handle_t * task_handle,const char ** proc_bind_list_string)877 static ompd_rc_t ompd_get_proc_bind(
878     ompd_task_handle_t *task_handle,   /* IN: OpenMP task handle */
879     const char **proc_bind_list_string /* OUT: string list of comma separated
880                                           bind-var values */
881 ) {
882   uint32_t used;
883   uint32_t proc_bind;
884   uint32_t current_nesting_level;
885 
886   ompd_rc_t ret;
887   ret = ompd_get_proc_bind_aux(task_handle, &used, &current_nesting_level,
888                                &proc_bind);
889   if (ret != ompd_rc_ok)
890     return ret;
891 
892   uint32_t num_list_elems;
893   if (used == 0 || current_nesting_level >= used) {
894     num_list_elems = 1;
895   } else {
896     num_list_elems = used - current_nesting_level;
897   }
898   size_t buffer_size = 16 /* digits per element including the comma separator */
899                            * num_list_elems +
900                        1; /* string terminator NULL */
901   char *proc_bind_list_str;
902   ret = callbacks->alloc_memory(buffer_size, (void **)&proc_bind_list_str);
903   if (ret != ompd_rc_ok)
904     return ret;
905 
906   /* The bind-var list would be:
907   [td->td_icvs.proc_bind,
908    __kmp_nested_proc_bind.bind_types[current_nesting_level + 1],
909    __kmp_nested_proc_bind.bind_types[current_nesting_level + 2],
910     …,
911    __kmp_nested_proc_bind.bind_types[used - 1]]*/
912 
913   sprintf(proc_bind_list_str, "%d", proc_bind);
914   *proc_bind_list_string = proc_bind_list_str;
915   if (num_list_elems == 1) {
916     return ompd_rc_ok;
917   }
918 
919   char temp_value[16];
920   uint32_t bind_types_value;
921 
922   for (current_nesting_level++; /* the list element for this nesting
923                                    level has already been accounted for
924                                    by proc_bind */
925        current_nesting_level < used; current_nesting_level++) {
926 
927     ret = TValue(task_handle->ah->context, "__kmp_nested_proc_bind")
928               .cast("kmp_nested_proc_bind_t")
929               .access("bind_types")
930               .cast("int", 1)
931               .getArrayElement(current_nesting_level)
932               .castBase(ompd_type_int)
933               .getValue(bind_types_value);
934 
935     if (ret != ompd_rc_ok)
936       return ret;
937 
938     sprintf(temp_value, ",%d", bind_types_value);
939     strcat(proc_bind_list_str, temp_value);
940   }
941 
942   return ompd_rc_ok;
943 }
944 
945 static ompd_rc_t
ompd_is_implicit(ompd_task_handle_t * task_handle,ompd_word_t * val)946 ompd_is_implicit(ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
947                  ompd_word_t *val /* OUT: max number of threads */
948 ) {
949   if (!task_handle)
950     return ompd_rc_stale_handle;
951   if (!task_handle->ah)
952     return ompd_rc_stale_handle;
953   ompd_address_space_context_t *context = task_handle->ah->context;
954   if (!context)
955     return ompd_rc_stale_handle;
956   if (!callbacks) {
957     return ompd_rc_callback_error;
958   }
959 
960   ompd_rc_t ret = TValue(context, task_handle->th)
961                       .cast("kmp_taskdata_t") // td
962                       .access("td_flags")     // td->td_flags
963                       .cast("kmp_tasking_flags_t")
964                       .check("tasktype", val); // td->td_flags.tasktype
965   *val ^= 1; // tasktype: explicit = 1, implicit = 0 => invert the value
966   return ret;
967 }
968 
ompd_get_num_threads(ompd_parallel_handle_t * parallel_handle,ompd_word_t * val)969 ompd_rc_t ompd_get_num_threads(
970     ompd_parallel_handle_t *parallel_handle, /* IN: OpenMP parallel handle */
971     ompd_word_t *val                         /* OUT: number of threads */
972 ) {
973   if (!parallel_handle->ah)
974     return ompd_rc_stale_handle;
975   ompd_address_space_context_t *context = parallel_handle->ah->context;
976   if (!context)
977     return ompd_rc_stale_handle;
978   if (!callbacks) {
979     return ompd_rc_callback_error;
980   }
981 
982   ompd_rc_t ret = ompd_rc_ok;
983   if (parallel_handle->lwt.address != 0) {
984     *val = 1;
985   } else {
986     uint32_t res;
987     ret = TValue(context, parallel_handle->th)
988               .cast("kmp_base_team_t", 0) /*t*/
989               .access("t_nproc")          /*t.t_nproc*/
990               .castBase()
991               .getValue(res);
992     *val = res;
993   }
994   return ret;
995 }
996 
/* Map (handle, scope, icv_id) to the matching integer-valued ICV getter.
 * Returns ompd_rc_bad_input for an out-of-range icv_id or a scope that does
 * not match the ICV's declared scope, ompd_rc_incompatible for ICVs whose
 * value is only representable as a string (the tool should call
 * ompd_get_icv_string_from_scope instead), and ompd_rc_unsupported for
 * non-host devices or ICVs with no getter here. */
ompd_rc_t ompd_get_icv_from_scope(void *handle, ompd_scope_t scope,
                                  ompd_icv_id_t icv_id,
                                  ompd_word_t *icv_value) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }
  // icv_id 0 is the "undefined" sentinel; ompd_icv_after_last_icv bounds
  // the ICV table generated from FOREACH_OMPD_ICV.
  if (icv_id >= ompd_icv_after_last_icv || icv_id == 0) {
    return ompd_rc_bad_input;
  }
  // Each ICV is registered with exactly one scope; reject mismatches.
  if (scope != ompd_icv_scope_values[icv_id]) {
    return ompd_rc_bad_input;
  }

  ompd_device_t device_kind;

  // The cast per scope is safe because scope was just validated against
  // the ICV's registered scope; every handle kind exposes the owning
  // address space's device kind.
  switch (scope) {
  case ompd_scope_thread:
    device_kind = ((ompd_thread_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_parallel:
    device_kind = ((ompd_parallel_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_address_space:
    device_kind = ((ompd_address_space_handle_t *)handle)->kind;
    break;
  case ompd_scope_task:
    device_kind = ((ompd_task_handle_t *)handle)->ah->kind;
    break;
  default:
    return ompd_rc_bad_input;
  }

  // Only the host runtime is supported by these getters.
  if (device_kind == OMPD_DEVICE_KIND_HOST) {
    switch (icv_id) {
    case ompd_icv_dyn_var:
      return ompd_get_dynamic((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_run_sched_var:
      // String-only ICV (schedule kind + chunk); see the string variant.
      return ompd_rc_incompatible;
    case ompd_icv_stacksize_var:
      return ompd_get_stacksize((ompd_address_space_handle_t *)handle,
                                icv_value);
    case ompd_icv_cancel_var:
      return ompd_get_cancellation((ompd_address_space_handle_t *)handle,
                                   icv_value);
    case ompd_icv_max_task_priority_var:
      return ompd_get_max_task_priority((ompd_address_space_handle_t *)handle,
                                        icv_value);
    case ompd_icv_debug_var:
      return ompd_get_debug((ompd_address_space_handle_t *)handle, icv_value);
    case ompd_icv_nthreads_var:
      return ompd_get_nthreads((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_display_affinity_var:
      return ompd_get_display_affinity((ompd_address_space_handle_t *)handle,
                                       icv_value);
    case ompd_icv_affinity_format_var:
      // String-only ICV.
      return ompd_rc_incompatible;
    case ompd_icv_tool_libraries_var:
      // String-only ICV.
      return ompd_rc_incompatible;
    case ompd_icv_default_device_var:
      return ompd_get_default_device((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_tool_var:
      return ompd_get_tool((ompd_address_space_handle_t *)handle, icv_value);
    case ompd_icv_tool_verbose_init_var:
      // String-only ICV.
      return ompd_rc_incompatible;
    case ompd_icv_levels_var:
      return ompd_get_level((ompd_parallel_handle_t *)handle, icv_value);
    case ompd_icv_active_levels_var:
      return ompd_get_active_level((ompd_parallel_handle_t *)handle, icv_value);
    case ompd_icv_thread_limit_var:
      return ompd_get_thread_limit((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_max_active_levels_var:
      return ompd_get_max_active_levels((ompd_task_handle_t *)handle,
                                        icv_value);
    case ompd_icv_bind_var:
      return ompd_get_proc_bind((ompd_task_handle_t *)handle, icv_value);
    // The ompd_* aliases below exist for backward compatibility with the
    // 5.0 ICV names (see the note at the top of this file).
    case ompd_icv_num_procs_var:
    case ompd_icv_ompd_num_procs_var:
      return ompd_get_num_procs((ompd_address_space_handle_t *)handle,
                                icv_value);
    case ompd_icv_thread_num_var:
    case ompd_icv_ompd_thread_num_var:
      return ompd_get_thread_num((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_final_var:
    case ompd_icv_ompd_final_var:
    case ompd_icv_ompd_final_task_var:
      return ompd_in_final((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_implicit_var:
    case ompd_icv_ompd_implicit_var:
    case ompd_icv_ompd_implicit_task_var:
      return ompd_is_implicit((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_team_size_var:
    case ompd_icv_ompd_team_size_var:
      return ompd_get_num_threads((ompd_parallel_handle_t *)handle, icv_value);
    default:
      return ompd_rc_unsupported;
    }
  }
  return ompd_rc_unsupported;
}
1096 
/* Map (handle, scope, icv_id) to the matching string-valued ICV getter.
 * Mirrors ompd_get_icv_from_scope but serves the ICVs whose value is a
 * string (or a list that cannot be represented as a single integer).
 * Validation is identical: bad icv_id or mismatched scope yields
 * ompd_rc_bad_input; non-host devices or ICVs without a string getter
 * yield ompd_rc_unsupported. */
ompd_rc_t ompd_get_icv_string_from_scope(void *handle, ompd_scope_t scope,
                                         ompd_icv_id_t icv_id,
                                         const char **icv_string) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }
  // icv_id 0 is the "undefined" sentinel; ompd_icv_after_last_icv bounds
  // the ICV table generated from FOREACH_OMPD_ICV.
  if (icv_id >= ompd_icv_after_last_icv || icv_id == 0) {
    return ompd_rc_bad_input;
  }
  // Each ICV is registered with exactly one scope; reject mismatches.
  if (scope != ompd_icv_scope_values[icv_id]) {
    return ompd_rc_bad_input;
  }

  ompd_device_t device_kind;

  // The cast per scope is safe because scope was just validated against
  // the ICV's registered scope.
  switch (scope) {
  case ompd_scope_thread:
    device_kind = ((ompd_thread_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_parallel:
    device_kind = ((ompd_parallel_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_address_space:
    device_kind = ((ompd_address_space_handle_t *)handle)->kind;
    break;
  case ompd_scope_task:
    device_kind = ((ompd_task_handle_t *)handle)->ah->kind;
    break;
  default:
    return ompd_rc_bad_input;
  }

  // Only the host runtime is supported by these getters.
  if (device_kind == OMPD_DEVICE_KIND_HOST) {
    switch (icv_id) {
    case ompd_icv_run_sched_var:
      return ompd_get_run_schedule((ompd_task_handle_t *)handle, icv_string);
    case ompd_icv_nthreads_var:
      return ompd_get_nthreads((ompd_thread_handle_t *)handle, icv_string);
    case ompd_icv_bind_var:
      return ompd_get_proc_bind((ompd_task_handle_t *)handle, icv_string);
    case ompd_icv_affinity_format_var:
      return ompd_get_affinity_format((ompd_address_space_handle_t *)handle,
                                      icv_string);
    case ompd_icv_tool_libraries_var:
      return ompd_get_tool_libraries((ompd_address_space_handle_t *)handle,
                                     icv_string);
    case ompd_icv_tool_verbose_init_var:
      return ompd_get_tool_verbose_init((ompd_address_space_handle_t *)handle,
                                        icv_string);
    default:
      return ompd_rc_unsupported;
    }
  }
  return ompd_rc_unsupported;
}
1152 
__ompd_get_tool_data(TValue & dataValue,ompd_word_t * value,ompd_address_t * ptr)1153 static ompd_rc_t __ompd_get_tool_data(TValue &dataValue, ompd_word_t *value,
1154                                       ompd_address_t *ptr) {
1155   ompd_rc_t ret = dataValue.getError();
1156   if (ret != ompd_rc_ok)
1157     return ret;
1158   ret = dataValue.access("value").castBase().getValue(*value);
1159   if (ret != ompd_rc_ok)
1160     return ret;
1161   ptr->segment = OMPD_SEGMENT_UNSPECIFIED;
1162   ret = dataValue.access("ptr").castBase().getValue(ptr->address);
1163   return ret;
1164 }
1165 
ompd_get_task_data(ompd_task_handle_t * task_handle,ompd_word_t * value,ompd_address_t * ptr)1166 ompd_rc_t ompd_get_task_data(ompd_task_handle_t *task_handle,
1167                              ompd_word_t *value, ompd_address_t *ptr) {
1168   ompd_address_space_context_t *context = task_handle->ah->context;
1169   if (!context)
1170     return ompd_rc_stale_handle;
1171   if (!callbacks) {
1172     return ompd_rc_callback_error;
1173   }
1174 
1175   TValue dataValue;
1176   if (task_handle->lwt.address) {
1177     dataValue = TValue(context, task_handle->lwt)
1178                     .cast("ompt_lw_taskteam_t") /*lwt*/
1179                     .access("ompt_task_info")   // lwt->ompt_task_info
1180                     .cast("ompt_task_info_t")
1181                     .access("task_data") // lwt->ompd_task_info.task_data
1182                     .cast("ompt_data_t");
1183   } else {
1184     dataValue = TValue(context, task_handle->th)
1185                     .cast("kmp_taskdata_t")   /*td*/
1186                     .access("ompt_task_info") // td->ompt_task_info
1187                     .cast("ompt_task_info_t")
1188                     .access("task_data") // td->ompd_task_info.task_data
1189                     .cast("ompt_data_t");
1190   }
1191   return __ompd_get_tool_data(dataValue, value, ptr);
1192 }
1193 
ompd_get_parallel_data(ompd_parallel_handle_t * parallel_handle,ompd_word_t * value,ompd_address_t * ptr)1194 ompd_rc_t ompd_get_parallel_data(ompd_parallel_handle_t *parallel_handle,
1195                                  ompd_word_t *value, ompd_address_t *ptr) {
1196   ompd_address_space_context_t *context = parallel_handle->ah->context;
1197   if (!context)
1198     return ompd_rc_stale_handle;
1199   if (!callbacks) {
1200     return ompd_rc_callback_error;
1201   }
1202 
1203   TValue dataValue;
1204   if (parallel_handle->lwt.address) {
1205     dataValue =
1206         TValue(context, parallel_handle->lwt)
1207             .cast("ompt_lw_taskteam_t") /*lwt*/
1208             .access("ompt_team_info")   // lwt->ompt_team_info
1209             .cast("ompt_team_info_t")
1210             .access("parallel_data") // lwt->ompt_team_info.parallel_data
1211             .cast("ompt_data_t");
1212   } else {
1213     dataValue = TValue(context, parallel_handle->th)
1214                     .cast("kmp_base_team_t")  /*t*/
1215                     .access("ompt_team_info") // t->ompt_team_info
1216                     .cast("ompt_team_info_t")
1217                     .access("parallel_data") // t->ompt_team_info.parallel_data
1218                     .cast("ompt_data_t");
1219   }
1220   return __ompd_get_tool_data(dataValue, value, ptr);
1221 }
1222 
ompd_get_thread_data(ompd_thread_handle_t * thread_handle,ompd_word_t * value,ompd_address_t * ptr)1223 ompd_rc_t ompd_get_thread_data(ompd_thread_handle_t *thread_handle,
1224                                ompd_word_t *value, ompd_address_t *ptr) {
1225   ompd_address_space_context_t *context = thread_handle->ah->context;
1226   if (!context)
1227     return ompd_rc_stale_handle;
1228   if (!callbacks) {
1229     return ompd_rc_callback_error;
1230   }
1231 
1232   TValue dataValue =
1233       TValue(context, thread_handle->th)
1234           .cast("kmp_base_info_t")    /*th*/
1235           .access("ompt_thread_info") // th->ompt_thread_info
1236           .cast("ompt_thread_info_t")
1237           .access("thread_data") // th->ompt_thread_info.thread_data
1238           .cast("ompt_data_t");
1239   return __ompd_get_tool_data(dataValue, value, ptr);
1240 }
1241 
ompd_get_tool_data(void * handle,ompd_scope_t scope,ompd_word_t * value,ompd_address_t * ptr)1242 ompd_rc_t ompd_get_tool_data(void *handle, ompd_scope_t scope,
1243                              ompd_word_t *value, ompd_address_t *ptr) {
1244   if (!handle) {
1245     return ompd_rc_stale_handle;
1246   }
1247 
1248   ompd_device_t device_kind;
1249 
1250   switch (scope) {
1251   case ompd_scope_thread:
1252     device_kind = ((ompd_thread_handle_t *)handle)->ah->kind;
1253     break;
1254   case ompd_scope_parallel:
1255     device_kind = ((ompd_parallel_handle_t *)handle)->ah->kind;
1256     break;
1257   case ompd_scope_task:
1258     device_kind = ((ompd_task_handle_t *)handle)->ah->kind;
1259     break;
1260   default:
1261     return ompd_rc_bad_input;
1262   }
1263 
1264   if (device_kind == OMPD_DEVICE_KIND_HOST) {
1265     switch (scope) {
1266     case ompd_scope_thread:
1267       return ompd_get_thread_data((ompd_thread_handle_t *)handle, value, ptr);
1268     case ompd_scope_parallel:
1269       return ompd_get_parallel_data((ompd_parallel_handle_t *)handle, value,
1270                                     ptr);
1271     case ompd_scope_task:
1272       return ompd_get_task_data((ompd_task_handle_t *)handle, value, ptr);
1273     default:
1274       return ompd_rc_unsupported;
1275     }
1276   }
1277   return ompd_rc_unsupported;
1278 }
1279