/*
 * omp-icv.cpp -- OMPD Internal Control Variable handling
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// clang-format off
/* clang-format would sort kmp.h before omp.h, which results in a build break
 * due to a few redeclarations.
 */
#include "omp-debug.h"
// NOLINTNEXTLINE "to avoid clang tidy warning for the same reason as above."
#include "omp.h"
#include "ompd-private.h"
#include "TargetValue.h"
#include "kmp.h"
#include <cstring>

/* The ICVs ompd-final-var and ompd-implicit-var below are for backward
 * compatibility with 5.0.
 */
#define FOREACH_OMPD_ICV(macro)                                                \
  macro(dyn_var, "dyn-var", ompd_scope_thread, 0)                              \
  macro(run_sched_var, "run-sched-var", ompd_scope_task, 0)                    \
  macro(stacksize_var, "stacksize-var", ompd_scope_address_space, 0)           \
  macro(cancel_var, "cancel-var", ompd_scope_address_space, 0)                 \
  macro(max_task_priority_var, "max-task-priority-var", ompd_scope_address_space, 0)\
  macro(debug_var, "debug-var", ompd_scope_address_space, 0)                   \
  macro(nthreads_var, "nthreads-var", ompd_scope_thread, 0)                    \
  macro(display_affinity_var, "display-affinity-var", ompd_scope_address_space, 0)\
  macro(affinity_format_var, "affinity-format-var", ompd_scope_address_space, 0)\
  macro(default_device_var, "default-device-var", ompd_scope_thread, 0)        \
  macro(tool_var, "tool-var", ompd_scope_address_space, 0)                     \
  macro(tool_libraries_var, "tool-libraries-var", ompd_scope_address_space, 0) \
  macro(tool_verbose_init_var, "tool-verbose-init-var", ompd_scope_address_space, 0)\
  macro(levels_var, "levels-var", ompd_scope_parallel, 1)                      \
  macro(active_levels_var, "active-levels-var", ompd_scope_parallel, 0)        \
  macro(thread_limit_var, "thread-limit-var", ompd_scope_task, 0)              \
  macro(max_active_levels_var, "max-active-levels-var", ompd_scope_task, 0)    \
  macro(bind_var, "bind-var", ompd_scope_task, 0)                              \
  macro(num_procs_var, "num-procs-var", ompd_scope_address_space, 0)           \
  macro(ompd_num_procs_var, "ompd-num-procs-var", ompd_scope_address_space, 0) \
  macro(thread_num_var, "thread-num-var", ompd_scope_thread, 1)                \
  macro(ompd_thread_num_var, "ompd-thread-num-var", ompd_scope_thread, 1)      \
  macro(final_var, "final-task-var", ompd_scope_task, 0)                       \
  macro(ompd_final_var, "ompd-final-var", ompd_scope_task, 0)                  \
  macro(ompd_final_task_var, "ompd-final-task-var", ompd_scope_task, 0)        \
  macro(implicit_var, "implicit-task-var", ompd_scope_task, 0)                 \
  macro(ompd_implicit_var, "ompd-implicit-var", ompd_scope_task, 0)            \
  macro(ompd_implicit_task_var, "ompd-implicit-task-var", ompd_scope_task, 0)  \
  macro(team_size_var, "team-size-var", ompd_scope_parallel, 1)                \
  macro(ompd_team_size_var, "ompd-team-size-var", ompd_scope_parallel, 1)
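
// The table above is an X-macro: each consumer defines ompd_icv_macro to pick
// out the fields it needs, expands FOREACH_OMPD_ICV once, and undefines it
// again. As an illustrative sketch (not part of the build), the enum below
// expands roughly to:
//
//   #define ompd_icv_macro(v, n, s, d) ompd_icv_##v,
//   FOREACH_OMPD_ICV(ompd_icv_macro) // -> ompd_icv_dyn_var, ... in order
//   #undef ompd_icv_macro
//
// so the enum, the name table, and the scope table stay in sync by
// construction.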

void __ompd_init_icvs(const ompd_callbacks_t *table) { callbacks = table; }

enum ompd_icv {
  ompd_icv_undefined_marker =
      0, // ompd_icv_undefined is already defined in ompd.h
#define ompd_icv_macro(v, n, s, d) ompd_icv_##v,
  FOREACH_OMPD_ICV(ompd_icv_macro)
#undef ompd_icv_macro
  ompd_icv_after_last_icv
};

static const char *ompd_icv_string_values[] = {"undefined",
#define ompd_icv_macro(v, n, s, d) n,
                                               FOREACH_OMPD_ICV(ompd_icv_macro)
#undef ompd_icv_macro
};

static const ompd_scope_t ompd_icv_scope_values[] = {
    ompd_scope_global, // undefined marker
#define ompd_icv_macro(v, n, s, d) s,
    FOREACH_OMPD_ICV(ompd_icv_macro)
#undef ompd_icv_macro
};

// clang-format on
ompd_rc_t ompd_enumerate_icvs(ompd_address_space_handle_t *handle,
                              ompd_icv_id_t current, ompd_icv_id_t *next_id,
                              const char **next_icv_name,
                              ompd_scope_t *next_scope, int *more) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }
  if (!next_id || !next_icv_name || !next_scope || !more) {
    return ompd_rc_bad_input;
  }
  if (current + 1 >= ompd_icv_after_last_icv) {
    return ompd_rc_bad_input;
  }

  *next_id = current + 1;

  char *icv_name = NULL;
  ompd_rc_t ret = callbacks->alloc_memory(
      std::strlen(ompd_icv_string_values[*next_id]) + 1, (void **)&icv_name);
  *next_icv_name = icv_name;
  if (ret != ompd_rc_ok) {
    return ret;
  }
  std::strcpy(icv_name, ompd_icv_string_values[*next_id]);

  *next_scope = ompd_icv_scope_values[*next_id];

  if ((*next_id) + 1 >= ompd_icv_after_last_icv) {
    *more = 0;
  } else {
    *more = 1;
  }

  return ompd_rc_ok;
}
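
/* A minimal tool-side sketch of how a debugger might walk the ICV list with
 * ompd_enumerate_icvs. Hedged: obtaining addr_handle and installing the
 * callback table are assumed to have happened already, as the OMPD spec
 * requires.
 *
 *   ompd_icv_id_t id = 0; // start at the undefined marker
 *   const char *name;
 *   ompd_scope_t scope;
 *   int more = 1;
 *   while (more) {
 *     if (ompd_enumerate_icvs(addr_handle, id, &id, &name, &scope, &more) !=
 *         ompd_rc_ok)
 *       break;
 *     printf("ICV %s (scope %d)\n", name, (int)scope);
 *   }
 */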

static ompd_rc_t create_empty_string(const char **empty_string_ptr) {
  char *empty_str;
  ompd_rc_t ret;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ret = callbacks->alloc_memory(1, (void **)&empty_str);
  if (ret != ompd_rc_ok) {
    return ret;
  }
  empty_str[0] = '\0';
  *empty_string_ptr = empty_str;
  return ompd_rc_ok;
}

static ompd_rc_t ompd_get_dynamic(
    ompd_thread_handle_t *thread_handle, /* IN: OpenMP thread handle */
    ompd_word_t *dyn_val /* OUT: Dynamic adjustment of threads */
) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  int8_t dynamic;
  ompd_rc_t ret =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
          .cast("kmp_taskdata_t", 1)
          .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
          .cast("kmp_internal_control_t", 0)
          .access(
              "dynamic") /*__kmp_threads[t]->th.th_current_task->td_icvs.dynamic*/
          .castBase()
          .getValue(dynamic);
  *dyn_val = dynamic;
  return ret;
}
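
/* Most getters in this file follow the pattern just shown: a TValue chain
 * walks the debuggee's data structures one member at a time, mirroring a
 * source-level expression such as
 *   __kmp_threads[t]->th.th_current_task->td_icvs.dynamic
 * As used here, cast() names the runtime type (with the second argument as
 * the pointer level), access() selects a member through the debugger's
 * symbol callbacks, and castBase()/getValue() read the final scalar out of
 * the inferior. A hedged sketch of reading a hypothetical ICV "foo" the same
 * way:
 *
 *   int foo;
 *   ompd_rc_t rc = TValue(context, thread_handle->th)
 *                      .cast("kmp_base_info_t")
 *                      .access("th_current_task")
 *                      .cast("kmp_taskdata_t", 1)
 *                      .access("td_icvs")
 *                      .cast("kmp_internal_control_t", 0)
 *                      .access("foo") // hypothetical member, for illustration
 *                      .castBase()
 *                      .getValue(foo);
 */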

static ompd_rc_t
ompd_get_stacksize(ompd_address_space_handle_t
                       *addr_handle, /* IN: handle for the address space */
                   ompd_word_t *stacksize_val /* OUT: per thread stack size */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  ompd_rc_t ret;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  size_t stacksize;
  ret = TValue(context, "__kmp_stksize")
            .castBase("__kmp_stksize")
            .getValue(stacksize);
  *stacksize_val = stacksize;
  return ret;
}

static ompd_rc_t ompd_get_cancellation(
    ompd_address_space_handle_t
        *addr_handle, /* IN: handle for the address space */
    ompd_word_t *cancellation_val /* OUT: cancellation value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;

  int omp_cancellation;
  ret = TValue(context, "__kmp_omp_cancellation")
            .castBase("__kmp_omp_cancellation")
            .getValue(omp_cancellation);
  *cancellation_val = omp_cancellation;
  return ret;
}

static ompd_rc_t ompd_get_max_task_priority(
    ompd_address_space_handle_t
        *addr_handle, /* IN: handle for the address space */
    ompd_word_t *max_task_priority_val /* OUT: max task priority value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;

  int max_task_priority;
  ret = TValue(context, "__kmp_max_task_priority")
            .castBase("__kmp_max_task_priority")
            .getValue(max_task_priority);
  *max_task_priority_val = max_task_priority;
  return ret;
}

static ompd_rc_t
ompd_get_debug(ompd_address_space_handle_t
                   *addr_handle, /* IN: handle for the address space */
               ompd_word_t *debug_val /* OUT: debug value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;

  uint64_t ompd_state_val;
  ret = TValue(context, "ompd_state")
            .castBase("ompd_state")
            .getValue(ompd_state_val);
  if (ompd_state_val > 0) {
    *debug_val = 1;
  } else {
    *debug_val = 0;
  }
  return ret;
}

/* Helper routine for the ompd_get_nthreads routines */
static ompd_rc_t ompd_get_nthreads_aux(ompd_thread_handle_t *thread_handle,
                                       uint32_t *used,
                                       uint32_t *current_nesting_level,
                                       uint32_t *nproc) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, "__kmp_nested_nth")
                      .cast("kmp_nested_nthreads_t")
                      .access("used")
                      .castBase(ompd_type_int)
                      .getValue(*used);
  if (ret != ompd_rc_ok)
    return ret;

  TValue taskdata =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
          .cast("kmp_taskdata_t", 1);

  ret = taskdata
            .access("td_team") /*__kmp_threads[t]->th.th_current_task.td_team*/
            .cast("kmp_team_p", 1)
            .access("t") /*__kmp_threads[t]->th.th_current_task.td_team->t*/
            .cast("kmp_base_team_t", 0) /*t*/
            .access("t_level")          /*t.t_level*/
            .castBase(ompd_type_int)
            .getValue(*current_nesting_level);
  if (ret != ompd_rc_ok)
    return ret;

  ret = taskdata.cast("kmp_taskdata_t", 1)
            .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
            .cast("kmp_internal_control_t", 0)
            .access(
                "nproc") /*__kmp_threads[t]->th.th_current_task->td_icvs.nproc*/
            .castBase(ompd_type_int)
            .getValue(*nproc);
  if (ret != ompd_rc_ok)
    return ret;

  return ompd_rc_ok;
}

static ompd_rc_t ompd_get_nthreads(
    ompd_thread_handle_t *thread_handle, /* IN: handle for the thread */
    ompd_word_t *nthreads_var_val /* OUT: nthreads-var (of integer type)
                                     value */
) {
  uint32_t used;
  uint32_t nproc;
  uint32_t current_nesting_level;

  ompd_rc_t ret;
  ret = ompd_get_nthreads_aux(thread_handle, &used, &current_nesting_level,
                              &nproc);
  if (ret != ompd_rc_ok)
    return ret;

  /*__kmp_threads[t]->th.th_current_task->td_icvs.nproc*/
  *nthreads_var_val = nproc;
  /* If the nthreads-var is a list with more than one element, then the value
     of this ICV cannot be represented by an integer type. In this case,
     ompd_rc_incomplete is returned. The tool can check the return value and
     can choose to invoke ompd_get_icv_string_from_scope() if needed. */
  if (current_nesting_level < used - 1) {
    return ompd_rc_incomplete;
  }
  return ompd_rc_ok;
}

static ompd_rc_t ompd_get_nthreads(
    ompd_thread_handle_t *thread_handle, /* IN: handle for the thread */
    const char **nthreads_list_string /* OUT: string list of comma separated
                                         nthreads values */
) {
  uint32_t used;
  uint32_t nproc;
  uint32_t current_nesting_level;

  ompd_rc_t ret;
  ret = ompd_get_nthreads_aux(thread_handle, &used, &current_nesting_level,
                              &nproc);
  if (ret != ompd_rc_ok)
    return ret;

  uint32_t num_list_elems;
  if (used == 0 || current_nesting_level >= used) {
    num_list_elems = 1;
  } else {
    num_list_elems = used - current_nesting_level;
  }
  size_t buffer_size = 16 /* digits per element including the comma separator */
                           * num_list_elems +
                       1; /* string terminator NULL */
  char *nthreads_list_str;
  ret = callbacks->alloc_memory(buffer_size, (void **)&nthreads_list_str);
  if (ret != ompd_rc_ok)
    return ret;

  /* The nthreads-var list would be:
     [__kmp_threads[t]->th.th_current_task->td_icvs.nproc,
      __kmp_nested_nth.nth[current_nesting_level + 1],
      __kmp_nested_nth.nth[current_nesting_level + 2],
      …,
      __kmp_nested_nth.nth[used - 1]]*/

  sprintf(nthreads_list_str, "%d", nproc);
  *nthreads_list_string = nthreads_list_str;
  if (num_list_elems == 1) {
    return ompd_rc_ok;
  }

  char temp_value[16];
  uint32_t nth_value;

  for (current_nesting_level++; /* the list element for this nesting
                                   level has already been accounted for
                                   by nproc */
       current_nesting_level < used; current_nesting_level++) {

    ret = TValue(thread_handle->ah->context, "__kmp_nested_nth")
              .cast("kmp_nested_nthreads_t")
              .access("nth")
              .cast("int", 1)
              .getArrayElement(current_nesting_level)
              .castBase(ompd_type_int)
              .getValue(nth_value);

    if (ret != ompd_rc_ok)
      return ret;

    sprintf(temp_value, ",%d", nth_value);
    strcat(nthreads_list_str, temp_value);
  }

  return ompd_rc_ok;
}
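
/* For example (illustrative values only): with nproc == 4 at nesting level 0
 * and __kmp_nested_nth.nth == {4, 8, 2} (used == 3), the returned string is
 * "4,8,2" -- the current level's value followed by the remaining nested
 * levels. */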

static ompd_rc_t ompd_get_display_affinity(
    ompd_address_space_handle_t
        *addr_handle, /* IN: handle for the address space */
    ompd_word_t *display_affinity_val /* OUT: display affinity value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  ompd_rc_t ret;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ret = TValue(context, "__kmp_display_affinity")
            .castBase("__kmp_display_affinity")
            .getValue(*display_affinity_val);
  return ret;
}

static ompd_rc_t ompd_get_affinity_format(
    ompd_address_space_handle_t *addr_handle, /* IN: address space handle*/
    const char **affinity_format_string /* OUT: affinity format string */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;
  ret = TValue(context, "__kmp_affinity_format")
            .cast("char", 1)
            .getString(affinity_format_string);
  return ret;
}

static ompd_rc_t ompd_get_tool_libraries(
    ompd_address_space_handle_t *addr_handle, /* IN: address space handle*/
    const char **tool_libraries_string /* OUT: tool libraries string */
) {
  if (!tool_libraries_string)
    return ompd_rc_bad_input;

  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;
  ret = TValue(context, "__kmp_tool_libraries")
            .cast("char", 1)
            .getString(tool_libraries_string);
  if (ret == ompd_rc_unsupported) {
    ret = create_empty_string(tool_libraries_string);
  }
  return ret;
}

static ompd_rc_t ompd_get_default_device(
    ompd_thread_handle_t *thread_handle, /* IN: handle for the thread */
    ompd_word_t *default_device_val /* OUT: default device value */
) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks)
    return ompd_rc_callback_error;

  ompd_rc_t ret =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
          .cast("kmp_taskdata_t", 1)
          .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
          .cast("kmp_internal_control_t", 0)
          /*__kmp_threads[t]->th.th_current_task->td_icvs.default_device*/
          .access("default_device")
          .castBase()
          .getValue(*default_device_val);
  return ret;
}

static ompd_rc_t
ompd_get_tool(ompd_address_space_handle_t
                  *addr_handle, /* IN: handle for the address space */
              ompd_word_t *tool_val /* OUT: tool value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;

  ret =
      TValue(context, "__kmp_tool").castBase("__kmp_tool").getValue(*tool_val);
  return ret;
}

static ompd_rc_t ompd_get_tool_verbose_init(
    ompd_address_space_handle_t *addr_handle, /* IN: address space handle*/
    const char **tool_verbose_init_string /* OUT: tool verbose init string */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;
  ret = TValue(context, "__kmp_tool_verbose_init")
            .cast("char", 1)
            .getString(tool_verbose_init_string);
  if (ret == ompd_rc_unsupported) {
    ret = create_empty_string(tool_verbose_init_string);
  }
  return ret;
}

static ompd_rc_t ompd_get_level(
    ompd_parallel_handle_t *parallel_handle, /* IN: OpenMP parallel handle */
    ompd_word_t *val /* OUT: nesting level */
) {
  if (!parallel_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = parallel_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  uint32_t res;

  ompd_rc_t ret = TValue(context, parallel_handle->th)
                      .cast("kmp_base_team_t", 0) /*t*/
                      .access("t_level")          /*t.t_level*/
                      .castBase()
                      .getValue(res);
  *val = res;
  return ret;
}

static ompd_rc_t ompd_get_active_level(
    ompd_parallel_handle_t *parallel_handle, /* IN: OpenMP parallel handle */
    ompd_word_t *val /* OUT: active nesting level */
) {
  if (!parallel_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = parallel_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  uint32_t res;

  ompd_rc_t ret = TValue(context, parallel_handle->th)
                      .cast("kmp_base_team_t", 0) /*t*/
                      .access("t_active_level")   /*t.t_active_level*/
                      .castBase()
                      .getValue(res);
  *val = res;
  return ret;
}

static ompd_rc_t
ompd_get_num_procs(ompd_address_space_handle_t
                       *addr_handle, /* IN: handle for the address space */
                   ompd_word_t *val /* OUT: number of available processors */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  if (!val)
    return ompd_rc_bad_input;
  ompd_rc_t ret;

  int nth;
  ret = TValue(context, "__kmp_avail_proc")
            .castBase("__kmp_avail_proc")
            .getValue(nth);
  *val = nth;
  return ret;
}

static ompd_rc_t ompd_get_thread_limit(
    ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
    ompd_word_t *val /* OUT: max number of threads */
) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, task_handle->th)
                      .cast("kmp_taskdata_t") // td
                      .access("td_icvs")      // td->td_icvs
                      .cast("kmp_internal_control_t", 0)
                      .access("thread_limit") // td->td_icvs.thread_limit
                      .castBase()
                      .getValue(*val);

  return ret;
}

static ompd_rc_t ompd_get_thread_num(
    ompd_thread_handle_t *thread_handle, /* IN: OpenMP thread handle*/
    ompd_word_t *val /* OUT: number of the thread within the team */
) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_info") /*__kmp_threads[t]->th.th_info*/
          .cast("kmp_desc_t")
          .access("ds") /*__kmp_threads[t]->th.th_info.ds*/
          .cast("kmp_desc_base_t")
          .access("ds_tid") /*__kmp_threads[t]->th.th_info.ds.ds_tid*/
          .castBase()
          .getValue(*val);
  return ret;
}

static ompd_rc_t
ompd_in_final(ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
              ompd_word_t *val /* OUT: 1 if the task is final, 0 otherwise */
) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, task_handle->th)
                      .cast("kmp_taskdata_t") // td
                      .access("td_flags")     // td->td_flags
                      .cast("kmp_tasking_flags_t")
                      .check("final", val); // td->td_flags.final

  return ret;
}

static ompd_rc_t ompd_get_max_active_levels(
    ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
    ompd_word_t *val /* OUT: max number of active levels */
) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret =
      TValue(context, task_handle->th)
          .cast("kmp_taskdata_t") // td
          .access("td_icvs")      // td->td_icvs
          .cast("kmp_internal_control_t", 0)
          .access("max_active_levels") // td->td_icvs.max_active_levels
          .castBase()
          .getValue(*val);

  return ret;
}

static ompd_rc_t ompd_get_run_schedule(
    ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
    const char **run_sched_string /* OUT: Run Schedule String
                                     consisting of kind and modifier */
) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  int kind;

  TValue sched = TValue(context, task_handle->th)
                     .cast("kmp_taskdata_t") // td
                     .access("td_icvs")      // td->td_icvs
                     .cast("kmp_internal_control_t", 0)
                     .access("sched") // td->td_icvs.sched
                     .cast("kmp_r_sched_t", 0);

  ompd_rc_t ret = sched
                      .access("r_sched_type") // td->td_icvs.sched.r_sched_type
                      .castBase()
                      .getValue(kind);
  if (ret != ompd_rc_ok) {
    return ret;
  }
  int chunk = 0;
  ret = sched
            .access("chunk") // td->td_icvs.sched.chunk
            .castBase()
            .getValue(chunk);
  if (ret != ompd_rc_ok) {
    return ret;
  }
  char *run_sched_var_string;
  ret = callbacks->alloc_memory(100, (void **)&run_sched_var_string);
  if (ret != ompd_rc_ok) {
    return ret;
  }
  run_sched_var_string[0] = '\0';
  if (SCHEDULE_HAS_MONOTONIC(kind)) {
    strcpy(run_sched_var_string, "monotonic:");
  } else if (SCHEDULE_HAS_NONMONOTONIC(kind)) {
    strcpy(run_sched_var_string, "nonmonotonic:");
  }

  bool static_unchunked = false;
  switch (SCHEDULE_WITHOUT_MODIFIERS(kind)) {
  case kmp_sch_static:
  case kmp_sch_static_greedy:
  case kmp_sch_static_balanced:
    static_unchunked = true;
    strcat(run_sched_var_string, "static");
    break;
  case kmp_sch_static_chunked:
    strcat(run_sched_var_string, "static");
    break;
  case kmp_sch_dynamic_chunked:
    strcat(run_sched_var_string, "dynamic");
    break;
  case kmp_sch_guided_chunked:
  case kmp_sch_guided_iterative_chunked:
  case kmp_sch_guided_analytical_chunked:
    strcat(run_sched_var_string, "guided");
    break;
  case kmp_sch_auto:
    strcat(run_sched_var_string, "auto");
    break;
  case kmp_sch_trapezoidal:
    strcat(run_sched_var_string, "trapezoidal");
    break;
  case kmp_sch_static_steal:
    strcat(run_sched_var_string, "static_steal");
    break;
  default:
    ret = callbacks->free_memory((void *)(run_sched_var_string));
    if (ret != ompd_rc_ok) {
      return ret;
    }
    ret = create_empty_string(run_sched_string);
    return ret;
  }

  if (static_unchunked == true) {
    // To be in sync with what OMPT returns.
    // Chunk was not set. Shown with a zero value.
    chunk = 0;
  }

  char temp_str[16];
  sprintf(temp_str, ",%d", chunk);
  strcat(run_sched_var_string, temp_str);
  *run_sched_string = run_sched_var_string;
  return ret;
}
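
/* For example (illustrative values only): a nonmonotonic dynamic schedule
 * with chunk == 4 yields "nonmonotonic:dynamic,4", while an unchunked static
 * schedule with no modifier bits yields "static,0" (the chunk forced to zero
 * to match what OMPT reports). */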

/* Helper routine for the ompd_get_proc_bind routines */
static ompd_rc_t ompd_get_proc_bind_aux(ompd_task_handle_t *task_handle,
                                        uint32_t *used,
                                        uint32_t *current_nesting_level,
                                        uint32_t *proc_bind) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, "__kmp_nested_proc_bind")
                      .cast("kmp_nested_proc_bind_t")
                      .access("used")
                      .castBase(ompd_type_int)
                      .getValue(*used);
  if (ret != ompd_rc_ok)
    return ret;

  TValue taskdata = TValue(context, task_handle->th) /* td */
                        .cast("kmp_taskdata_t");

  ret = taskdata
            .access("td_team") /* td->td_team*/
            .cast("kmp_team_p", 1)
            .access("t") /* td->td_team->t*/
            .cast("kmp_base_team_t", 0) /*t*/
            .access("t_level")          /*t.t_level*/
            .castBase(ompd_type_int)
            .getValue(*current_nesting_level);
  if (ret != ompd_rc_ok)
    return ret;

  ret = taskdata
            .access("td_icvs") /* td->td_icvs */
            .cast("kmp_internal_control_t", 0)
            .access("proc_bind") /* td->td_icvs.proc_bind */
            .castBase()
            .getValue(*proc_bind);
  return ret;
}

static ompd_rc_t
ompd_get_proc_bind(ompd_task_handle_t *task_handle, /* IN: OpenMP task handle */
                   ompd_word_t *bind /* OUT: Kind of proc-binding */
) {
  uint32_t used;
  uint32_t proc_bind;
  uint32_t current_nesting_level;

  ompd_rc_t ret;
  ret = ompd_get_proc_bind_aux(task_handle, &used, &current_nesting_level,
                               &proc_bind);
  if (ret != ompd_rc_ok)
    return ret;

  *bind = proc_bind;
  /* If bind-var is a list with more than one element, then the value of
     this ICV cannot be represented by an integer type. In this case,
     ompd_rc_incomplete is returned. The tool can check the return value and
     can choose to invoke ompd_get_icv_string_from_scope() if needed. */
  if (current_nesting_level < used - 1) {
    return ompd_rc_incomplete;
  }
  return ompd_rc_ok;
}

static ompd_rc_t ompd_get_proc_bind(
    ompd_task_handle_t *task_handle, /* IN: OpenMP task handle */
    const char **proc_bind_list_string /* OUT: string list of comma separated
                                          bind-var values */
) {
  uint32_t used;
  uint32_t proc_bind;
  uint32_t current_nesting_level;

  ompd_rc_t ret;
  ret = ompd_get_proc_bind_aux(task_handle, &used, &current_nesting_level,
                               &proc_bind);
  if (ret != ompd_rc_ok)
    return ret;

  uint32_t num_list_elems;
  if (used == 0 || current_nesting_level >= used) {
    num_list_elems = 1;
  } else {
    num_list_elems = used - current_nesting_level;
  }
  size_t buffer_size = 16 /* digits per element including the comma separator */
                           * num_list_elems +
                       1; /* string terminator NULL */
  char *proc_bind_list_str;
  ret = callbacks->alloc_memory(buffer_size, (void **)&proc_bind_list_str);
  if (ret != ompd_rc_ok)
    return ret;

  /* The bind-var list would be:
     [td->td_icvs.proc_bind,
      __kmp_nested_proc_bind.bind_types[current_nesting_level + 1],
      __kmp_nested_proc_bind.bind_types[current_nesting_level + 2],
      …,
      __kmp_nested_proc_bind.bind_types[used - 1]]*/

  sprintf(proc_bind_list_str, "%d", proc_bind);
  *proc_bind_list_string = proc_bind_list_str;
  if (num_list_elems == 1) {
    return ompd_rc_ok;
  }

  char temp_value[16];
  uint32_t bind_types_value;

  for (current_nesting_level++; /* the list element for this nesting
                                   level has already been accounted for
                                   by proc_bind */
       current_nesting_level < used; current_nesting_level++) {

    ret = TValue(task_handle->ah->context, "__kmp_nested_proc_bind")
              .cast("kmp_nested_proc_bind_t")
              .access("bind_types")
              .cast("int", 1)
              .getArrayElement(current_nesting_level)
              .castBase(ompd_type_int)
              .getValue(bind_types_value);

    if (ret != ompd_rc_ok)
      return ret;

    sprintf(temp_value, ",%d", bind_types_value);
    strcat(proc_bind_list_str, temp_value);
  }

  return ompd_rc_ok;
}

static ompd_rc_t
ompd_is_implicit(ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
                 ompd_word_t *val /* OUT: 1 if the task is implicit,
                                     0 if it is explicit */
) {
  if (!task_handle)
    return ompd_rc_stale_handle;
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, task_handle->th)
                      .cast("kmp_taskdata_t") // td
                      .access("td_flags")     // td->td_flags
                      .cast("kmp_tasking_flags_t")
                      .check("tasktype", val); // td->td_flags.tasktype
  *val ^= 1; // tasktype: explicit = 1, implicit = 0 => invert the value
  return ret;
}

ompd_rc_t ompd_get_num_threads(
    ompd_parallel_handle_t *parallel_handle, /* IN: OpenMP parallel handle */
    ompd_word_t *val /* OUT: number of threads */
) {
  if (!parallel_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = parallel_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = ompd_rc_ok;
  if (parallel_handle->lwt.address != 0) {
    *val = 1;
  } else {
    uint32_t res;
    ret = TValue(context, parallel_handle->th)
              .cast("kmp_base_team_t", 0) /*t*/
              .access("t_nproc")          /*t.t_nproc*/
              .castBase()
              .getValue(res);
    *val = res;
  }
  return ret;
}

ompd_rc_t ompd_get_icv_from_scope(void *handle, ompd_scope_t scope,
                                  ompd_icv_id_t icv_id,
                                  ompd_word_t *icv_value) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }
  if (icv_id >= ompd_icv_after_last_icv || icv_id == 0) {
    return ompd_rc_bad_input;
  }
  if (scope != ompd_icv_scope_values[icv_id]) {
    return ompd_rc_bad_input;
  }

  ompd_device_t device_kind;

  switch (scope) {
  case ompd_scope_thread:
    device_kind = ((ompd_thread_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_parallel:
    device_kind = ((ompd_parallel_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_address_space:
    device_kind = ((ompd_address_space_handle_t *)handle)->kind;
    break;
  case ompd_scope_task:
    device_kind = ((ompd_task_handle_t *)handle)->ah->kind;
    break;
  default:
    return ompd_rc_bad_input;
  }

  if (device_kind == OMPD_DEVICE_KIND_HOST) {
    switch (icv_id) {
    case ompd_icv_dyn_var:
      return ompd_get_dynamic((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_run_sched_var:
      return ompd_rc_incompatible;
    case ompd_icv_stacksize_var:
      return ompd_get_stacksize((ompd_address_space_handle_t *)handle,
                                icv_value);
    case ompd_icv_cancel_var:
      return ompd_get_cancellation((ompd_address_space_handle_t *)handle,
                                   icv_value);
    case ompd_icv_max_task_priority_var:
      return ompd_get_max_task_priority((ompd_address_space_handle_t *)handle,
                                        icv_value);
    case ompd_icv_debug_var:
      return ompd_get_debug((ompd_address_space_handle_t *)handle, icv_value);
    case ompd_icv_nthreads_var:
      return ompd_get_nthreads((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_display_affinity_var:
      return ompd_get_display_affinity((ompd_address_space_handle_t *)handle,
                                       icv_value);
    case ompd_icv_affinity_format_var:
      return ompd_rc_incompatible;
    case ompd_icv_tool_libraries_var:
      return ompd_rc_incompatible;
    case ompd_icv_default_device_var:
      return ompd_get_default_device((ompd_thread_handle_t *)handle,
                                     icv_value);
    case ompd_icv_tool_var:
      return ompd_get_tool((ompd_address_space_handle_t *)handle, icv_value);
    case ompd_icv_tool_verbose_init_var:
      return ompd_rc_incompatible;
    case ompd_icv_levels_var:
      return ompd_get_level((ompd_parallel_handle_t *)handle, icv_value);
    case ompd_icv_active_levels_var:
      return ompd_get_active_level((ompd_parallel_handle_t *)handle,
                                   icv_value);
    case ompd_icv_thread_limit_var:
      return ompd_get_thread_limit((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_max_active_levels_var:
      return ompd_get_max_active_levels((ompd_task_handle_t *)handle,
                                        icv_value);
    case ompd_icv_bind_var:
      return ompd_get_proc_bind((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_num_procs_var:
    case ompd_icv_ompd_num_procs_var:
      return ompd_get_num_procs((ompd_address_space_handle_t *)handle,
                                icv_value);
    case ompd_icv_thread_num_var:
    case ompd_icv_ompd_thread_num_var:
      return ompd_get_thread_num((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_final_var:
    case ompd_icv_ompd_final_var:
    case ompd_icv_ompd_final_task_var:
      return ompd_in_final((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_implicit_var:
    case ompd_icv_ompd_implicit_var:
    case ompd_icv_ompd_implicit_task_var:
      return ompd_is_implicit((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_team_size_var:
    case ompd_icv_ompd_team_size_var:
      return ompd_get_num_threads((ompd_parallel_handle_t *)handle, icv_value);
    default:
      return ompd_rc_unsupported;
    }
  }
  return ompd_rc_unsupported;
}
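
/* A hedged tool-side sketch (handle acquisition assumed, icv_id obtained
 * from ompd_enumerate_icvs): nthreads-var has thread scope and may be
 * list-valued, in which case the integer query returns ompd_rc_incomplete
 * and the tool falls back to the string form below:
 *
 *   ompd_word_t nth;
 *   ompd_rc_t rc = ompd_get_icv_from_scope(thread_handle, ompd_scope_thread,
 *                                          icv_id, &nth);
 *   if (rc == ompd_rc_incomplete) {
 *     const char *nth_list;
 *     rc = ompd_get_icv_string_from_scope(thread_handle, ompd_scope_thread,
 *                                         icv_id, &nth_list);
 *   }
 */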

ompd_rc_t ompd_get_icv_string_from_scope(void *handle, ompd_scope_t scope,
                                         ompd_icv_id_t icv_id,
                                         const char **icv_string) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }
  if (icv_id >= ompd_icv_after_last_icv || icv_id == 0) {
    return ompd_rc_bad_input;
  }
  if (scope != ompd_icv_scope_values[icv_id]) {
    return ompd_rc_bad_input;
  }

  ompd_device_t device_kind;

  switch (scope) {
  case ompd_scope_thread:
    device_kind = ((ompd_thread_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_parallel:
    device_kind = ((ompd_parallel_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_address_space:
    device_kind = ((ompd_address_space_handle_t *)handle)->kind;
    break;
  case ompd_scope_task:
    device_kind = ((ompd_task_handle_t *)handle)->ah->kind;
    break;
  default:
    return ompd_rc_bad_input;
  }

  if (device_kind == OMPD_DEVICE_KIND_HOST) {
    switch (icv_id) {
    case ompd_icv_run_sched_var:
      return ompd_get_run_schedule((ompd_task_handle_t *)handle, icv_string);
    case ompd_icv_nthreads_var:
      return ompd_get_nthreads((ompd_thread_handle_t *)handle, icv_string);
    case ompd_icv_bind_var:
      return ompd_get_proc_bind((ompd_task_handle_t *)handle, icv_string);
    case ompd_icv_affinity_format_var:
      return ompd_get_affinity_format((ompd_address_space_handle_t *)handle,
                                      icv_string);
    case ompd_icv_tool_libraries_var:
      return ompd_get_tool_libraries((ompd_address_space_handle_t *)handle,
                                     icv_string);
    case ompd_icv_tool_verbose_init_var:
      return ompd_get_tool_verbose_init((ompd_address_space_handle_t *)handle,
                                        icv_string);
    default:
      return ompd_rc_unsupported;
    }
  }
  return ompd_rc_unsupported;
}

static ompd_rc_t __ompd_get_tool_data(TValue &dataValue, ompd_word_t *value,
                                      ompd_address_t *ptr) {
  ompd_rc_t ret = dataValue.getError();
  if (ret != ompd_rc_ok)
    return ret;
  ret = dataValue.access("value").castBase().getValue(*value);
  if (ret != ompd_rc_ok)
    return ret;
  ptr->segment = OMPD_SEGMENT_UNSPECIFIED;
  ret = dataValue.access("ptr").castBase().getValue(ptr->address);
  return ret;
}

ompd_rc_t ompd_get_task_data(ompd_task_handle_t *task_handle,
                             ompd_word_t *value, ompd_address_t *ptr) {
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  TValue dataValue;
  if (task_handle->lwt.address) {
    dataValue = TValue(context, task_handle->lwt)
                    .cast("ompt_lw_taskteam_t") /*lwt*/
                    .access("ompt_task_info") // lwt->ompt_task_info
                    .cast("ompt_task_info_t")
                    .access("task_data") // lwt->ompt_task_info.task_data
                    .cast("ompt_data_t");
  } else {
    dataValue = TValue(context, task_handle->th)
                    .cast("kmp_taskdata_t") /*td*/
                    .access("ompt_task_info") // td->ompt_task_info
                    .cast("ompt_task_info_t")
                    .access("task_data") // td->ompt_task_info.task_data
                    .cast("ompt_data_t");
  }
  return __ompd_get_tool_data(dataValue, value, ptr);
}

ompd_rc_t ompd_get_parallel_data(ompd_parallel_handle_t *parallel_handle,
                                 ompd_word_t *value, ompd_address_t *ptr) {
  ompd_address_space_context_t *context = parallel_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  TValue dataValue;
  if (parallel_handle->lwt.address) {
    dataValue =
        TValue(context, parallel_handle->lwt)
            .cast("ompt_lw_taskteam_t") /*lwt*/
            .access("ompt_team_info") // lwt->ompt_team_info
            .cast("ompt_team_info_t")
            .access("parallel_data") // lwt->ompt_team_info.parallel_data
            .cast("ompt_data_t");
  } else {
    dataValue = TValue(context, parallel_handle->th)
                    .cast("kmp_base_team_t") /*t*/
                    .access("ompt_team_info") // t->ompt_team_info
                    .cast("ompt_team_info_t")
                    .access("parallel_data") // t->ompt_team_info.parallel_data
                    .cast("ompt_data_t");
  }
  return __ompd_get_tool_data(dataValue, value, ptr);
}

ompd_rc_t ompd_get_thread_data(ompd_thread_handle_t *thread_handle,
                               ompd_word_t *value, ompd_address_t *ptr) {
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  TValue dataValue =
      TValue(context, thread_handle->th)
          .cast("kmp_base_info_t") /*th*/
          .access("ompt_thread_info") // th->ompt_thread_info
          .cast("ompt_thread_info_t")
          .access("thread_data") // th->ompt_thread_info.thread_data
          .cast("ompt_data_t");
  return __ompd_get_tool_data(dataValue, value, ptr);
}

ompd_rc_t ompd_get_tool_data(void *handle, ompd_scope_t scope,
                             ompd_word_t *value, ompd_address_t *ptr) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }

  ompd_device_t device_kind;

  switch (scope) {
  case ompd_scope_thread:
    device_kind = ((ompd_thread_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_parallel:
    device_kind = ((ompd_parallel_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_task:
    device_kind = ((ompd_task_handle_t *)handle)->ah->kind;
    break;
  default:
    return ompd_rc_bad_input;
  }

  if (device_kind == OMPD_DEVICE_KIND_HOST) {
    switch (scope) {
    case ompd_scope_thread:
      return ompd_get_thread_data((ompd_thread_handle_t *)handle, value, ptr);
    case ompd_scope_parallel:
      return ompd_get_parallel_data((ompd_parallel_handle_t *)handle, value,
                                    ptr);
    case ompd_scope_task:
      return ompd_get_task_data((ompd_task_handle_t *)handle, value, ptr);
    default:
      return ompd_rc_unsupported;
    }
  }
  return ompd_rc_unsupported;
}