//******************************************************************************
// include files
//******************************************************************************

#include "kmp.h"
#include "ompt-internal.h"
#include "ompt-specific.h"

//******************************************************************************
// macros
//******************************************************************************

#define GTID_TO_OMPT_THREAD_ID(id) ((id) >= 0 ? (ompt_thread_id_t)(id) + 1 : 0)

#define LWT_FROM_TEAM(team) (team)->t.ompt_serialized_team_info

#define OMPT_THREAD_ID_BITS 16

// 2013 08 24 - John Mellor-Crummey
// ideally, a thread should assign its own ids based on thread private data.
// however, the way the intel runtime reinitializes thread data structures
// when it creates teams makes it difficult to maintain persistent thread
// data. using a shared variable instead is simple. I leave it to intel to
// sort out how to implement a higher performance version in their runtime.

// when using fetch_and_add to generate the IDs, there isn't any reason to waste
// bits for thread id.
#if 0
#define NEXT_ID(id_ptr, tid)                                                   \
  ((KMP_TEST_THEN_INC64(id_ptr) << OMPT_THREAD_ID_BITS) | (tid))
#else
#define NEXT_ID(id_ptr, tid) (KMP_TEST_THEN_INC64((volatile kmp_int64 *)id_ptr))
#endif

//******************************************************************************
// private operations
//******************************************************************************

//----------------------------------------------------------
// traverse the team and task hierarchy
// note: __ompt_get_teaminfo and __ompt_get_taskinfo
//       traverse the hierarchy similarly and need to be
//       kept consistent
//----------------------------------------------------------

ompt_team_info_t *__ompt_get_teaminfo(int depth, int *size) {
  kmp_info_t *thr = ompt_get_thread();

  if (thr) {
    kmp_team *team = thr->th.th_team;
    if (team == NULL)
      return NULL;

    ompt_lw_taskteam_t *lwt = LWT_FROM_TEAM(team);

    while (depth > 0) {
      // next lightweight team (if any)
      if (lwt)
        lwt = lwt->parent;

      // next heavyweight team (if any) after
      // lightweight teams are exhausted
      if (!lwt && team) {
        team = team->t.t_parent;
        if (team) {
          lwt = LWT_FROM_TEAM(team);
        }
      }

      depth--;
    }

    if (lwt) {
      // lightweight teams have one task
      if (size)
        *size = 1;

      // return team info for lightweight team
      return &lwt->ompt_team_info;
    } else if (team) {
      // extract size from heavyweight team
      if (size)
        *size = team->t.t_nproc;

      // return team info for heavyweight team
      return &team->t.ompt_team_info;
    }
  }

  return NULL;
}

ompt_task_info_t *__ompt_get_taskinfo(int depth) {
  ompt_task_info_t *info = NULL;
  kmp_info_t *thr = ompt_get_thread();

  if (thr) {
    kmp_taskdata_t *taskdata = thr->th.th_current_task;
    ompt_lw_taskteam_t *lwt = LWT_FROM_TEAM(taskdata->td_team);

    while (depth > 0) {
      // next lightweight team (if any)
      if (lwt)
        lwt = lwt->parent;

      // next heavyweight team (if any) after
      // lightweight teams are exhausted
      if (!lwt && taskdata) {
        taskdata = taskdata->td_parent;
        if (taskdata) {
          lwt = LWT_FROM_TEAM(taskdata->td_team);
        }
      }
      depth--;
    }

    if (lwt) {
      info = &lwt->ompt_task_info;
    } else if (taskdata) {
      info = &taskdata->ompt_task_info;
    }
  }

  return info;
}
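//----------------------------------------------------------
// usage sketch (illustrative only, not part of the runtime):
// depth 0 denotes the innermost team/task; each increment in depth
// steps one level outward, consuming lightweight (serialized)
// ancestors before falling back to the parent heavyweight team.
// the hypothetical helper below shows how a caller could walk
// outward until __ompt_get_teaminfo returns NULL, i.e. until the
// requested depth passes the outermost team.
//----------------------------------------------------------
#if 0
static void __ompt_print_team_ancestry_example(void) {
  int depth = 0;
  int size;
  ompt_team_info_t *info;
  while ((info = __ompt_get_teaminfo(depth, &size)) != NULL) {
    // size is 1 for a lightweight (serialized) team and t_nproc for a
    // heavyweight team; parallel_id identifies the enclosing region.
    printf("depth %d: parallel_id=%llu size=%d\n", depth,
           (unsigned long long)info->parallel_id, size);
    depth++;
  }
}
#endif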
//******************************************************************************
// interface operations
//******************************************************************************

//----------------------------------------------------------
// thread support
//----------------------------------------------------------

ompt_thread_id_t __ompt_thread_id_new() {
  static uint64_t ompt_thread_id = 1;
  return NEXT_ID(&ompt_thread_id, 0);
}

void __ompt_thread_begin(ompt_thread_type_t thread_type, int gtid) {
  ompt_callbacks.ompt_callback(ompt_event_thread_begin)(
      thread_type, GTID_TO_OMPT_THREAD_ID(gtid));
}

void __ompt_thread_end(ompt_thread_type_t thread_type, int gtid) {
  ompt_callbacks.ompt_callback(ompt_event_thread_end)(
      thread_type, GTID_TO_OMPT_THREAD_ID(gtid));
}

ompt_thread_id_t __ompt_get_thread_id_internal() {
  // FIXME: until we have a better way of assigning ids, use __kmp_get_gtid.
  // since the return value might be negative, we need to test that before
  // assigning it to an ompt_thread_id_t, which is unsigned.
  int id = __kmp_get_gtid();
  assert(id >= 0);

  return GTID_TO_OMPT_THREAD_ID(id);
}

//----------------------------------------------------------
// state support
//----------------------------------------------------------

void __ompt_thread_assign_wait_id(void *variable) {
  int gtid = __kmp_gtid_get_specific();
  kmp_info_t *ti = ompt_get_thread_gtid(gtid);

  ti->th.ompt_thread_info.wait_id = (ompt_wait_id_t)variable;
}

ompt_state_t __ompt_get_state_internal(ompt_wait_id_t *ompt_wait_id) {
  kmp_info_t *ti = ompt_get_thread();

  if (ti) {
    if (ompt_wait_id)
      *ompt_wait_id = ti->th.ompt_thread_info.wait_id;
    return ti->th.ompt_thread_info.state;
  }
  return ompt_state_undefined;
}

//----------------------------------------------------------
// idle frame support
//----------------------------------------------------------

void *__ompt_get_idle_frame_internal(void) {
  kmp_info_t *ti = ompt_get_thread();
  return ti ? ti->th.ompt_thread_info.idle_frame : NULL;
}
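//----------------------------------------------------------
// usage sketch (illustrative only, not part of the runtime):
// the wait-id out-parameter of __ompt_get_state_internal is
// optional; when non-NULL it receives the wait id most recently
// recorded via __ompt_thread_assign_wait_id. the hypothetical
// wrapper below shows the expected calling pattern.
//----------------------------------------------------------
#if 0
static int __ompt_thread_is_waiting_example(void) {
  ompt_wait_id_t wait_id = 0;
  ompt_state_t state = __ompt_get_state_internal(&wait_id);

  // ompt_state_undefined means no OpenMP thread info is available;
  // otherwise the state describes what the current thread is doing
  // and wait_id identifies the object (e.g. a lock) it recorded.
  return state != ompt_state_undefined && wait_id != 0;
}
#endif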
//----------------------------------------------------------
// parallel region support
//----------------------------------------------------------

ompt_parallel_id_t __ompt_parallel_id_new(int gtid) {
  static uint64_t ompt_parallel_id = 1;
  return gtid >= 0 ? NEXT_ID(&ompt_parallel_id, gtid) : 0;
}

void *__ompt_get_parallel_function_internal(int depth) {
  ompt_team_info_t *info = __ompt_get_teaminfo(depth, NULL);
  void *function = info ? info->microtask : NULL;
  return function;
}

ompt_parallel_id_t __ompt_get_parallel_id_internal(int depth) {
  ompt_team_info_t *info = __ompt_get_teaminfo(depth, NULL);
  ompt_parallel_id_t id = info ? info->parallel_id : 0;
  return id;
}

int __ompt_get_parallel_team_size_internal(int depth) {
  // initialize the return value with the error value.
  // if there is a team at the specified depth, the default
  // value will be overwritten with the size of that team.
  int size = -1;
  (void)__ompt_get_teaminfo(depth, &size);
  return size;
}

//----------------------------------------------------------
// lightweight task team support
//----------------------------------------------------------

void __ompt_lw_taskteam_init(ompt_lw_taskteam_t *lwt, kmp_info_t *thr, int gtid,
                             void *microtask, ompt_parallel_id_t ompt_pid) {
  lwt->ompt_team_info.parallel_id = ompt_pid;
  lwt->ompt_team_info.microtask = microtask;
  lwt->ompt_task_info.task_id = 0;
  lwt->ompt_task_info.frame.reenter_runtime_frame = NULL;
  lwt->ompt_task_info.frame.exit_runtime_frame = NULL;
  lwt->ompt_task_info.function = NULL;
  lwt->parent = 0;
}

void __ompt_lw_taskteam_link(ompt_lw_taskteam_t *lwt, kmp_info_t *thr) {
  ompt_lw_taskteam_t *my_parent = thr->th.th_team->t.ompt_serialized_team_info;
  lwt->parent = my_parent;
  thr->th.th_team->t.ompt_serialized_team_info = lwt;
}

ompt_lw_taskteam_t *__ompt_lw_taskteam_unlink(kmp_info_t *thr) {
  ompt_lw_taskteam_t *lwtask = thr->th.th_team->t.ompt_serialized_team_info;
  if (lwtask)
    thr->th.th_team->t.ompt_serialized_team_info = lwtask->parent;
  return lwtask;
}

//----------------------------------------------------------
// task support
//----------------------------------------------------------

ompt_task_id_t __ompt_task_id_new(int gtid) {
  static uint64_t ompt_task_id = 1;
  return NEXT_ID(&ompt_task_id, gtid);
}

ompt_task_id_t __ompt_get_task_id_internal(int depth) {
  ompt_task_info_t *info = __ompt_get_taskinfo(depth);
  ompt_task_id_t task_id = info ? info->task_id : 0;
  return task_id;
}

void *__ompt_get_task_function_internal(int depth) {
  ompt_task_info_t *info = __ompt_get_taskinfo(depth);
  void *function = info ? info->function : NULL;
  return function;
}

ompt_frame_t *__ompt_get_task_frame_internal(int depth) {
  ompt_task_info_t *info = __ompt_get_taskinfo(depth);
  ompt_frame_t *frame = info ? &info->frame : NULL;
  return frame;
}

//----------------------------------------------------------
// team support
//----------------------------------------------------------

void __ompt_team_assign_id(kmp_team_t *team, ompt_parallel_id_t ompt_pid) {
  team->t.ompt_team_info.parallel_id = ompt_pid;
}
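//----------------------------------------------------------
// usage sketch (illustrative only, not part of the runtime):
// lightweight task teams represent serialized (nested) parallel
// regions without allocating a full kmp_team_t. the hypothetical
// sequence below shows the intended pairing: init and link before
// the serialized region runs, unlink once it completes, so that
// __ompt_get_teaminfo / __ompt_get_taskinfo can see the region
// while it is active.
//----------------------------------------------------------
#if 0
static void __ompt_serialized_region_example(kmp_info_t *thr, int gtid,
                                             void *microtask) {
  ompt_lw_taskteam_t lwt;

  // publish the serialized region: give it a fresh parallel id and
  // push it on the thread's chain of lightweight teams.
  __ompt_lw_taskteam_init(&lwt, thr, gtid, microtask,
                          __ompt_parallel_id_new(gtid));
  __ompt_lw_taskteam_link(&lwt, thr);

  // ... the serialized parallel region body would execute here ...

  // pop the lightweight team again once the region is done.
  __ompt_lw_taskteam_unlink(thr);
}
#endif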