1 /*
2  * kmp_affinity.cpp -- affinity management
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_affinity.h"
15 #include "kmp_i18n.h"
16 #include "kmp_io.h"
17 #include "kmp_str.h"
18 #include "kmp_wrapper_getpid.h"
19 #if KMP_USE_HIER_SCHED
20 #include "kmp_dispatch_hier.h"
21 #endif
22 
23 // Store the real or imagined machine hierarchy here
24 static hierarchy_info machine_hierarchy;
25 
26 void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }
27 
28 void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
29   kmp_uint32 depth;
30   // The test below is true if affinity is available, but set to "none".
31   // We need to init the hierarchy on first use of the hierarchical barrier.
32   if (TCR_1(machine_hierarchy.uninitialized))
33     machine_hierarchy.init(NULL, nproc);
34 
35   // Adjust the hierarchy in case num threads exceeds original
36   if (nproc > machine_hierarchy.base_num_threads)
37     machine_hierarchy.resize(nproc);
38 
39   depth = machine_hierarchy.depth;
40   KMP_DEBUG_ASSERT(depth > 0);
41 
42   thr_bar->depth = depth;
43   __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
44                      &(thr_bar->base_leaf_kids));
45   thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
46 }
47 
48 #if KMP_AFFINITY_SUPPORTED
49 
50 const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
51   switch (type) {
52   case KMP_HW_SOCKET:
53     return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
54   case KMP_HW_DIE:
55     return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
56   case KMP_HW_MODULE:
57     return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
58   case KMP_HW_TILE:
59     return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
60   case KMP_HW_NUMA:
61     return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
62   case KMP_HW_L3:
63     return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
64   case KMP_HW_L2:
65     return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
66   case KMP_HW_L1:
67     return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
68   case KMP_HW_CORE:
69     return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
70   case KMP_HW_THREAD:
71     return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
72   case KMP_HW_PROC_GROUP:
73     return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
74   }
75   return KMP_I18N_STR(Unknown);
76 }
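// Illustrative usage (not part of the runtime logic): the string returned is
// the localized message-catalog entry for the level name, so a call such as
// __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true) yields the catalog
// text for "Cores" (exact wording depends on the active locale).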
77 
78 // This function removes the topology levels that are radix 1 and don't offer
79 // further information about the topology. The most common example is when
80 // there is one thread context per core: the extra thread context level offers
81 // no unique labels, so it is removed.
82 // Return value: the new depth of address2os
83 static int __kmp_affinity_remove_radix_one_levels(AddrUnsPair *addrP, int nTh,
84                                                   int depth, kmp_hw_t *types) {
85   int preference[KMP_HW_LAST];
86   int top_index1, top_index2;
87   // Set up preference associative array
88   preference[KMP_HW_PROC_GROUP] = 110;
89   preference[KMP_HW_SOCKET] = 100;
90   preference[KMP_HW_CORE] = 95;
91   preference[KMP_HW_THREAD] = 90;
92   preference[KMP_HW_DIE] = 85;
93   preference[KMP_HW_NUMA] = 80;
94   preference[KMP_HW_TILE] = 75;
95   preference[KMP_HW_MODULE] = 73;
96   preference[KMP_HW_L3] = 70;
97   preference[KMP_HW_L2] = 65;
98   preference[KMP_HW_L1] = 60;
99   top_index1 = 0;
100   top_index2 = 1;
101   while (top_index1 < depth - 1 && top_index2 < depth) {
102     KMP_DEBUG_ASSERT(top_index1 >= 0 && top_index1 < depth);
103     KMP_DEBUG_ASSERT(top_index2 >= 0 && top_index2 < depth);
104     kmp_hw_t type1 = types[top_index1];
105     kmp_hw_t type2 = types[top_index2];
106     if (type1 == KMP_HW_SOCKET && type2 == KMP_HW_CORE) {
107       top_index1 = top_index2++;
108       continue;
109     }
110     bool radix1 = true;
111     bool all_same = true;
112     unsigned id1 = addrP[0].first.labels[top_index1];
113     unsigned id2 = addrP[0].first.labels[top_index2];
114     int pref1 = preference[type1];
115     int pref2 = preference[type2];
116     for (int hwidx = 1; hwidx < nTh; ++hwidx) {
117       if (addrP[hwidx].first.labels[top_index1] == id1 &&
118           addrP[hwidx].first.labels[top_index2] != id2) {
119         radix1 = false;
120         break;
121       }
122       if (addrP[hwidx].first.labels[top_index2] != id2)
123         all_same = false;
124       id1 = addrP[hwidx].first.labels[top_index1];
125       id2 = addrP[hwidx].first.labels[top_index2];
126     }
127     if (radix1) {
128       // Select the layer to remove based on preference
129       kmp_hw_t remove_type, keep_type;
130       int remove_layer, remove_layer_ids;
131       if (pref1 > pref2) {
132         remove_type = type2;
133         remove_layer = remove_layer_ids = top_index2;
134         keep_type = type1;
135       } else {
136         remove_type = type1;
137         remove_layer = remove_layer_ids = top_index1;
138         keep_type = type2;
139       }
140       // If all the indexes for the second (deeper) layer are the same,
141       // e.g., all are zero, then make sure to keep the first layer's ids.
142       if (all_same)
143         remove_layer_ids = top_index2;
144       // Remove the radix-one layer: remove its id from the hw threads and
145       // remove the layer from types, then decrement the depth
146       for (int idx = 0; idx < nTh; ++idx) {
147         Address &hw_thread = addrP[idx].first;
148         for (int d = remove_layer_ids; d < depth - 1; ++d)
149           hw_thread.labels[d] = hw_thread.labels[d + 1];
150         hw_thread.depth--;
151       }
152       for (int idx = remove_layer; idx < depth - 1; ++idx)
153         types[idx] = types[idx + 1];
154       depth--;
155     } else {
156       top_index1 = top_index2++;
157     }
158   }
159   KMP_ASSERT(depth > 0);
160   return depth;
161 }
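// Illustrative example (hypothetical machine, not derived from this file):
// with types = {SOCKET, CORE, THREAD} and exactly one thread context per core,
// the THREAD level is radix 1. Since CORE (preference 95) outranks THREAD
// (preference 90), the THREAD level is dropped, the remaining labels are
// shifted up, and the function returns the new depth of 2 with
// types = {SOCKET, CORE}.
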
162 // Gather the count of each topology layer and the ratio.
163 // ratio[i] is the maximum number of types[i] objects per types[i-1] object
164 // (ratio[0] is the total); count[i] is the absolute number of types[i].
165 static void __kmp_affinity_gather_enumeration_information(AddrUnsPair *addrP,
166                                                           int nTh, int depth,
167                                                           kmp_hw_t *types,
168                                                           int *ratio,
169                                                           int *count) {
170   int previous_id[KMP_HW_LAST];
171   int max[KMP_HW_LAST];
172 
173   for (int i = 0; i < depth; ++i) {
174     previous_id[i] = -1;
175     max[i] = 0;
176     count[i] = 0;
177     ratio[i] = 0;
178   }
179   for (int i = 0; i < nTh; ++i) {
180     Address &hw_thread = addrP[i].first;
181     for (int layer = 0; layer < depth; ++layer) {
182       int id = hw_thread.labels[layer];
183       if (id != previous_id[layer]) {
184         // Add an additional increment to each count
185         for (int l = layer; l < depth; ++l)
186           count[l]++;
187         // Keep track of topology layer ratio statistics
188         max[layer]++;
189         for (int l = layer + 1; l < depth; ++l) {
190           if (max[l] > ratio[l])
191             ratio[l] = max[l];
192           max[l] = 1;
193         }
194         break;
195       }
196     }
197     for (int layer = 0; layer < depth; ++layer) {
198       previous_id[layer] = hw_thread.labels[layer];
199     }
200   }
201   for (int layer = 0; layer < depth; ++layer) {
202     if (max[layer] > ratio[layer])
203       ratio[layer] = max[layer];
204   }
205 }
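// Illustrative example (hypothetical machine): for a sorted table describing
// 2 sockets x 2 cores/socket x 2 threads/core with types =
// {SOCKET, CORE, THREAD}, the routine above produces
//   count = {2, 4, 8}  (2 sockets, 4 cores total, 8 threads total)
//   ratio = {2, 2, 2}  (2 sockets, max 2 cores/socket, max 2 threads/core)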
206 
207 // Find out if the topology is uniform
208 static bool __kmp_affinity_discover_uniformity(int depth, int *ratio,
209                                                int *count) {
210   int num = 1;
211   for (int level = 0; level < depth; ++level)
212     num *= ratio[level];
213   return (num == count[depth - 1]);
214 }
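// Continuing the hypothetical example above: ratio = {2, 2, 2} gives
// num = 2 * 2 * 2 = 8, which equals count[depth - 1] = 8, so that topology
// would be reported as uniform.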
215 
216 // calculate the number of X's per Y
217 static inline int __kmp_affinity_calculate_ratio(int *ratio, int deep_level,
218                                                  int shallow_level) {
219   int retval = 1;
220   if (deep_level < 0 || shallow_level < 0)
221     return retval;
222   for (int level = deep_level; level > shallow_level; --level)
223     retval *= ratio[level];
224   return retval;
225 }
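// Illustrative use (hypothetical levels): with ratio = {2, 2, 2},
// deep_level = 2 (threads) and shallow_level = 0 (sockets), the result is
// ratio[2] * ratio[1] = 4, i.e. 4 threads per socket.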
226 
227 static void __kmp_affinity_print_topology(AddrUnsPair *addrP, int len,
228                                           int depth, kmp_hw_t *types) {
229   int proc;
230   kmp_str_buf_t buf;
231   __kmp_str_buf_init(&buf);
232   KMP_INFORM(OSProcToPhysicalThreadMap, "KMP_AFFINITY");
233   for (proc = 0; proc < len; proc++) {
234     for (int i = 0; i < depth; ++i) {
235       __kmp_str_buf_print(&buf, "%s %d ", __kmp_hw_get_catalog_string(types[i]),
236                           addrP[proc].first.labels[i]);
237     }
238     KMP_INFORM(OSProcMapToPack, "KMP_AFFINITY", addrP[proc].second, buf.str);
239     __kmp_str_buf_clear(&buf);
240   }
241   __kmp_str_buf_free(&buf);
242 }
243 
244 // Print out the detailed machine topology map, i.e. the physical locations
245 // of each OS proc.
246 static void __kmp_affinity_print_topology(AddrUnsPair *address2os, int len,
247                                           int depth, int pkgLevel,
248                                           int coreLevel, int threadLevel) {
249   int proc;
250 
251   KMP_INFORM(OSProcToPhysicalThreadMap, "KMP_AFFINITY");
252   for (proc = 0; proc < len; proc++) {
253     int level;
254     kmp_str_buf_t buf;
255     __kmp_str_buf_init(&buf);
256     for (level = 0; level < depth; level++) {
257       if (level == threadLevel) {
258         __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Thread));
259       } else if (level == coreLevel) {
260         __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Core));
261       } else if (level == pkgLevel) {
262         __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Package));
263       } else if (level > pkgLevel) {
264         __kmp_str_buf_print(&buf, "%s_%d ", KMP_I18N_STR(Node),
265                             level - pkgLevel - 1);
266       } else {
267         __kmp_str_buf_print(&buf, "L%d ", level);
268       }
269       __kmp_str_buf_print(&buf, "%d ", address2os[proc].first.labels[level]);
270     }
271     KMP_INFORM(OSProcMapToPack, "KMP_AFFINITY", address2os[proc].second,
272                buf.str);
273     __kmp_str_buf_free(&buf);
274   }
275 }
276 
277 bool KMPAffinity::picked_api = false;
278 
279 void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
280 void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
281 void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
282 void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
283 void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
284 void KMPAffinity::operator delete(void *p) { __kmp_free(p); }
285 
286 void KMPAffinity::pick_api() {
287   KMPAffinity *affinity_dispatch;
288   if (picked_api)
289     return;
290 #if KMP_USE_HWLOC
291   // Only use Hwloc if affinity isn't explicitly disabled and
292   // the user requests the Hwloc topology method
293   if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
294       __kmp_affinity_type != affinity_disabled) {
295     affinity_dispatch = new KMPHwlocAffinity();
296   } else
297 #endif
298   {
299     affinity_dispatch = new KMPNativeAffinity();
300   }
301   __kmp_affinity_dispatch = affinity_dispatch;
302   picked_api = true;
303 }
304 
305 void KMPAffinity::destroy_api() {
306   if (__kmp_affinity_dispatch != NULL) {
307     delete __kmp_affinity_dispatch;
308     __kmp_affinity_dispatch = NULL;
309     picked_api = false;
310   }
311 }
312 
313 #define KMP_ADVANCE_SCAN(scan)                                                 \
314   while (*scan != '\0') {                                                      \
315     scan++;                                                                    \
316   }
317 
318 // Print the affinity mask to the character array in a pretty format.
319 // The format is a comma separated list of non-negative integers or integer
320 // ranges: e.g., 1,2,3-5,7,9-15
321 // The format can also be the string "{<empty>}" if no bits are set in mask
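// e.g., a mask with bits {0, 1, 2, 5, 7, 8} set is printed as "0-2,5,7,8"
// (assuming buf_len is large enough to hold the whole string).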
322 char *__kmp_affinity_print_mask(char *buf, int buf_len,
323                                 kmp_affin_mask_t *mask) {
324   int start = 0, finish = 0, previous = 0;
325   bool first_range;
326   KMP_ASSERT(buf);
327   KMP_ASSERT(buf_len >= 40);
328   KMP_ASSERT(mask);
329   char *scan = buf;
330   char *end = buf + buf_len - 1;
331 
332   // Check for empty set.
333   if (mask->begin() == mask->end()) {
334     KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
335     KMP_ADVANCE_SCAN(scan);
336     KMP_ASSERT(scan <= end);
337     return buf;
338   }
339 
340   first_range = true;
341   start = mask->begin();
342   while (1) {
343     // Find next range
344     // [start, previous] is inclusive range of contiguous bits in mask
345     for (finish = mask->next(start), previous = start;
346          finish == previous + 1 && finish != mask->end();
347          finish = mask->next(finish)) {
348       previous = finish;
349     }
350 
351     // The first range does not need a comma printed before it, but the rest
352     // of the ranges do need a comma beforehand
353     if (!first_range) {
354       KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
355       KMP_ADVANCE_SCAN(scan);
356     } else {
357       first_range = false;
358     }
359     // Range with three or more contiguous bits in the affinity mask
360     if (previous - start > 1) {
361       KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
362     } else {
363       // Range with one or two contiguous bits in the affinity mask
364       KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
365       KMP_ADVANCE_SCAN(scan);
366       if (previous - start > 0) {
367         KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
368       }
369     }
370     KMP_ADVANCE_SCAN(scan);
371     // Start over with new start point
372     start = finish;
373     if (start == mask->end())
374       break;
375     // Check for overflow
376     if (end - scan < 2)
377       break;
378   }
379 
380   // Check for overflow
381   KMP_ASSERT(scan <= end);
382   return buf;
383 }
384 #undef KMP_ADVANCE_SCAN
385 
386 // Print the affinity mask to the string buffer object in a pretty format
387 // The format is a comma separated list of non-negative integers or integer
388 // ranges: e.g., 1,2,3-5,7,9-15
389 // The format can also be the string "{<empty>}" if no bits are set in mask
390 kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
391                                            kmp_affin_mask_t *mask) {
392   int start = 0, finish = 0, previous = 0;
393   bool first_range;
394   KMP_ASSERT(buf);
395   KMP_ASSERT(mask);
396 
397   __kmp_str_buf_clear(buf);
398 
399   // Check for empty set.
400   if (mask->begin() == mask->end()) {
401     __kmp_str_buf_print(buf, "%s", "{<empty>}");
402     return buf;
403   }
404 
405   first_range = true;
406   start = mask->begin();
407   while (1) {
408     // Find next range
409     // [start, previous] is inclusive range of contiguous bits in mask
410     for (finish = mask->next(start), previous = start;
411          finish == previous + 1 && finish != mask->end();
412          finish = mask->next(finish)) {
413       previous = finish;
414     }
415 
416     // The first range does not need a comma printed before it, but the rest
417     // of the ranges do need a comma beforehand
418     if (!first_range) {
419       __kmp_str_buf_print(buf, "%s", ",");
420     } else {
421       first_range = false;
422     }
423     // Range with three or more contiguous bits in the affinity mask
424     if (previous - start > 1) {
425       __kmp_str_buf_print(buf, "%u-%u", start, previous);
426     } else {
427       // Range with one or two contiguous bits in the affinity mask
428       __kmp_str_buf_print(buf, "%u", start);
429       if (previous - start > 0) {
430         __kmp_str_buf_print(buf, ",%u", previous);
431       }
432     }
433     // Start over with new start point
434     start = finish;
435     if (start == mask->end())
436       break;
437   }
438   return buf;
439 }
440 
441 void __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
442   KMP_CPU_ZERO(mask);
443 
444 #if KMP_GROUP_AFFINITY
445 
446   if (__kmp_num_proc_groups > 1) {
447     int group;
448     KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
449     for (group = 0; group < __kmp_num_proc_groups; group++) {
450       int i;
451       int num = __kmp_GetActiveProcessorCount(group);
452       for (i = 0; i < num; i++) {
453         KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
454       }
455     }
456   } else
457 
458 #endif /* KMP_GROUP_AFFINITY */
459 
460   {
461     int proc;
462     for (proc = 0; proc < __kmp_xproc; proc++) {
463       KMP_CPU_SET(proc, mask);
464     }
465   }
466 }
467 
468 // When sorting by labels, __kmp_affinity_assign_child_nums() must first be
469 // called to renumber the labels from [0..n] and place them into the child_num
470 // vector of the address object.  This is done in case the labels used for
471 // the children at one node of the hierarchy differ from those used for
472 // another node at the same level.  Example:  suppose the machine has 2 nodes
473 // with 2 packages each.  The first node contains packages 601 and 602, and
474 // the second node contains packages 603 and 604.  If we try to sort the table
475 // for "scatter" affinity, the table will still be sorted 601, 602, 603, 604
476 // because we are paying attention to the labels themselves, not the ordinal
477 // child numbers.  By using the child numbers in the sort, the result is
478 // {0,0}=601, {0,1}=603, {1,0}=602, {1,1}=604.
479 static void __kmp_affinity_assign_child_nums(AddrUnsPair *address2os,
480                                              int numAddrs) {
481   KMP_DEBUG_ASSERT(numAddrs > 0);
482   int depth = address2os->first.depth;
483   unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
484   unsigned *lastLabel = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
485   int labCt;
486   for (labCt = 0; labCt < depth; labCt++) {
487     address2os[0].first.childNums[labCt] = counts[labCt] = 0;
488     lastLabel[labCt] = address2os[0].first.labels[labCt];
489   }
490   int i;
491   for (i = 1; i < numAddrs; i++) {
492     for (labCt = 0; labCt < depth; labCt++) {
493       if (address2os[i].first.labels[labCt] != lastLabel[labCt]) {
494         int labCt2;
495         for (labCt2 = labCt + 1; labCt2 < depth; labCt2++) {
496           counts[labCt2] = 0;
497           lastLabel[labCt2] = address2os[i].first.labels[labCt2];
498         }
499         counts[labCt]++;
500         lastLabel[labCt] = address2os[i].first.labels[labCt];
501         break;
502       }
503     }
504     for (labCt = 0; labCt < depth; labCt++) {
505       address2os[i].first.childNums[labCt] = counts[labCt];
506     }
507     for (; labCt < (int)Address::maxDepth; labCt++) {
508       address2os[i].first.childNums[labCt] = 0;
509     }
510   }
511   __kmp_free(lastLabel);
512   __kmp_free(counts);
513 }
514 
515 // All of the __kmp_affinity_create_*_map() routines should set
516 // __kmp_affinity_masks to a vector of affinity mask objects of length
517 // __kmp_affinity_num_masks, if __kmp_affinity_type != affinity_none, and return
518 // the number of levels in the machine topology tree (zero if
519 // __kmp_affinity_type == affinity_none).
520 //
521 // All of the __kmp_affinity_create_*_map() routines should set
522 // *__kmp_affin_fullMask to the affinity mask for the initialization thread.
523 // They need to save and restore the mask, and it could be needed later, so
524 // saving it is just an optimization to avoid calling kmp_get_system_affinity()
525 // again.
526 kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
527 
528 static int nCoresPerPkg, nPackages;
529 static int __kmp_nThreadsPerCore;
530 #ifndef KMP_DFLT_NTH_CORES
531 static int __kmp_ncores;
532 #endif
533 static int *__kmp_pu_os_idx = NULL;
534 static int nDiesPerPkg = 1;
535 
536 // __kmp_affinity_uniform_topology() doesn't work when called from
537 // places which support arbitrarily many levels in the machine topology
538 // map, i.e. the non-default cases in __kmp_affinity_create_cpuinfo_map() and
539 // __kmp_affinity_create_x2apicid_map().
540 inline static bool __kmp_affinity_uniform_topology() {
541   return __kmp_avail_proc ==
542          (__kmp_nThreadsPerCore * nCoresPerPkg * nDiesPerPkg * nPackages);
543 }
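// e.g., with hypothetical counts of 2 packages, 1 die per package, 4 cores per
// package and 2 threads per core, the topology is uniform only if
// __kmp_avail_proc == 2 * 4 * 1 * 2 == 16.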
544 
545 #if KMP_USE_HWLOC
546 
547 static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
548 #if HWLOC_API_VERSION >= 0x00020000
549   return hwloc_obj_type_is_cache(obj->type);
550 #else
551   return obj->type == HWLOC_OBJ_CACHE;
552 #endif
553 }
554 
555 // Returns KMP_HW_* type derived from HWLOC_* type
556 static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {
557 
558   if (__kmp_hwloc_is_cache_type(obj)) {
559     if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
560       return KMP_HW_UNKNOWN;
561     switch (obj->attr->cache.depth) {
562     case 1:
563       return KMP_HW_L1;
564     case 2:
565 #if KMP_MIC_SUPPORTED
566       if (__kmp_mic_type == mic3) {
567         return KMP_HW_TILE;
568       }
569 #endif
570       return KMP_HW_L2;
571     case 3:
572       return KMP_HW_L3;
573     }
574     return KMP_HW_UNKNOWN;
575   }
576 
577   switch (obj->type) {
578   case HWLOC_OBJ_PACKAGE:
579     return KMP_HW_SOCKET;
580   case HWLOC_OBJ_NUMANODE:
581     return KMP_HW_NUMA;
582   case HWLOC_OBJ_CORE:
583     return KMP_HW_CORE;
584   case HWLOC_OBJ_PU:
585     return KMP_HW_THREAD;
586   }
587   return KMP_HW_UNKNOWN;
588 }
589 
590 // Returns the number of objects of type 'type' below 'obj' within the topology
591 // tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and type is
592 // HWLOC_OBJ_PU, then this will return the number of PU's under the PACKAGE
593 // object.
594 static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
595                                            hwloc_obj_type_t type) {
596   int retval = 0;
597   hwloc_obj_t first;
598   for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
599                                            obj->logical_index, type, 0);
600        first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
601                                                        obj->type, first) == obj;
602        first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
603                                           first)) {
604     ++retval;
605   }
606   return retval;
607 }
608 
609 static int __kmp_hwloc_count_children_by_depth(hwloc_topology_t t,
610                                                hwloc_obj_t o,
611                                                kmp_hwloc_depth_t depth,
612                                                hwloc_obj_t *f) {
613   if (o->depth == depth) {
614     if (*f == NULL)
615       *f = o; // output first descendant found
616     return 1;
617   }
618   int sum = 0;
619   for (unsigned i = 0; i < o->arity; i++)
620     sum += __kmp_hwloc_count_children_by_depth(t, o->children[i], depth, f);
621   return sum; // will be 0 if none found (a PU's arity is 0)
622 }
623 
624 static int __kmp_hwloc_count_children_by_type(hwloc_topology_t t, hwloc_obj_t o,
625                                               hwloc_obj_type_t type,
626                                               hwloc_obj_t *f) {
627   if (!hwloc_compare_types(o->type, type)) {
628     if (*f == NULL)
629       *f = o; // output first descendant found
630     return 1;
631   }
632   int sum = 0;
633   for (unsigned i = 0; i < o->arity; i++)
634     sum += __kmp_hwloc_count_children_by_type(t, o->children[i], type, f);
635   return sum; // will be 0 if none found (a PU's arity is 0)
636 }
637 
638 // This gets the sub_id for a lower object under a higher object in the
639 // topology tree
640 static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
641                                   hwloc_obj_t lower) {
642   hwloc_obj_t obj;
643   hwloc_obj_type_t ltype = lower->type;
644   int lindex = lower->logical_index - 1;
645   int sub_id = 0;
646   // Get the previous lower object
647   obj = hwloc_get_obj_by_type(t, ltype, lindex);
648   while (obj && lindex >= 0 &&
649          hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
650     if (obj->userdata) {
651       sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
652       break;
653     }
654     sub_id++;
655     lindex--;
656     obj = hwloc_get_obj_by_type(t, ltype, lindex);
657   }
658   // Store sub_id + 1 so that a value of 0 can be distinguished from NULL
659   lower->userdata = RCAST(void *, sub_id + 1);
660   return sub_id;
661 }
662 
663 static int __kmp_affinity_create_hwloc_map(AddrUnsPair **address2os,
664                                            kmp_i18n_id_t *const msg_id) {
665   kmp_hw_t type;
666   int hw_thread_index, sub_id, nActiveThreads;
667   int depth;
668   hwloc_obj_t pu, obj, root, prev;
669   int ratio[KMP_HW_LAST];
670   int count[KMP_HW_LAST];
671   kmp_hw_t types[KMP_HW_LAST];
672 
673   hwloc_topology_t tp = __kmp_hwloc_topology;
674   *msg_id = kmp_i18n_null;
675 
676   // Save the affinity mask for the current thread.
677   kmp_affin_mask_t *oldMask;
678   KMP_CPU_ALLOC(oldMask);
679   __kmp_get_system_affinity(oldMask, TRUE);
680 
681   if (!KMP_AFFINITY_CAPABLE()) {
682     // Hack to try and infer the machine topology using only the data
683     // available from hwloc on the current thread, and __kmp_xproc.
684     KMP_ASSERT(__kmp_affinity_type == affinity_none);
685     // hwloc only guarantees existence of PU object, so check PACKAGE and CORE
686     hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
687     if (o != NULL)
688       nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
689     else
690       nCoresPerPkg = 1; // no PACKAGE found
691     o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
692     if (o != NULL)
693       __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
694     else
695       __kmp_nThreadsPerCore = 1; // no CORE found
696     __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
697     if (nCoresPerPkg == 0)
698       nCoresPerPkg = 1; // to prevent possible division by 0
699     nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
700     if (__kmp_affinity_verbose) {
701       KMP_INFORM(AffNotUsingHwloc, "KMP_AFFINITY");
702       KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
703       if (__kmp_affinity_uniform_topology()) {
704         KMP_INFORM(Uniform, "KMP_AFFINITY");
705       } else {
706         KMP_INFORM(NonUniform, "KMP_AFFINITY");
707       }
708       KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
709                  __kmp_nThreadsPerCore, __kmp_ncores);
710     }
711     KMP_CPU_FREE(oldMask);
712     return 0;
713   }
714 
715   root = hwloc_get_root_obj(tp);
716 
717   // Figure out the depth and types in the topology
718   depth = 0;
719   pu = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
720   obj = pu;
721   types[depth] = KMP_HW_THREAD;
722   depth++;
723   while (obj != root && obj != NULL) {
724     obj = obj->parent;
725 #if HWLOC_API_VERSION >= 0x00020000
726     if (obj->memory_arity) {
727       hwloc_obj_t memory;
728       for (memory = obj->memory_first_child; memory;
729            memory = hwloc_get_next_child(tp, obj, memory)) {
730         if (memory->type == HWLOC_OBJ_NUMANODE)
731           break;
732       }
733       if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
734         types[depth] = KMP_HW_NUMA;
735         depth++;
736       }
737     }
738 #endif
739     type = __kmp_hwloc_type_2_topology_type(obj);
740     if (type != KMP_HW_UNKNOWN) {
741       types[depth] = type;
742       depth++;
743     }
744   }
745   KMP_ASSERT(depth > 0 && depth <= KMP_HW_LAST);
746 
747   // Reverse the types array so it is ordered from outermost to innermost
748   for (int i = 0, j = depth - 1; i < j; ++i, --j) {
749     kmp_hw_t temp = types[i];
750     types[i] = types[j];
751     types[j] = temp;
752   }
753 
754   // Allocate the data structure to be returned.
755   AddrUnsPair *retval =
756       (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);
757   KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
758   __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
759 
760   hw_thread_index = 0;
761   pu = NULL;
762   nActiveThreads = 0;
763   while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
764     int index = depth - 1;
765     bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
766     Address hw_thread(depth);
767     if (included) {
768       hw_thread.labels[index] = pu->logical_index;
769       __kmp_pu_os_idx[hw_thread_index] = pu->os_index;
770       index--;
771       nActiveThreads++;
772     }
773     obj = pu;
774     prev = obj;
775     while (obj != root && obj != NULL) {
776       obj = obj->parent;
777 #if HWLOC_API_VERSION >= 0x00020000
778       // NUMA Nodes are handled differently since they are not within the
779       // parent/child structure anymore.  They are separate children
780       // of obj (memory_first_child points to first memory child)
781       if (obj->memory_arity) {
782         hwloc_obj_t memory;
783         for (memory = obj->memory_first_child; memory;
784              memory = hwloc_get_next_child(tp, obj, memory)) {
785           if (memory->type == HWLOC_OBJ_NUMANODE)
786             break;
787         }
788         if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
789           sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
790           if (included) {
791             hw_thread.labels[index] = memory->logical_index;
792             hw_thread.labels[index + 1] = sub_id;
793             index--;
794           }
795           prev = memory;
796         }
797       }
798 #endif
799       type = __kmp_hwloc_type_2_topology_type(obj);
800       if (type != KMP_HW_UNKNOWN) {
801         sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
802         if (included) {
803           hw_thread.labels[index] = obj->logical_index;
804           hw_thread.labels[index + 1] = sub_id;
805           index--;
806         }
807         prev = obj;
808       }
809     }
810     if (included) {
811       retval[hw_thread_index] = AddrUnsPair(hw_thread, pu->os_index);
812       hw_thread_index++;
813     }
814   }
815 
816   // If there's only one thread context to bind to, return now.
817   KMP_DEBUG_ASSERT(nActiveThreads == __kmp_avail_proc);
818   KMP_ASSERT(nActiveThreads > 0);
819   if (nActiveThreads == 1) {
820     __kmp_ncores = nPackages = 1;
821     __kmp_nThreadsPerCore = nCoresPerPkg = 1;
822     if (__kmp_affinity_verbose) {
823       KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
824       KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
825       KMP_INFORM(Uniform, "KMP_AFFINITY");
826       KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
827                  __kmp_nThreadsPerCore, __kmp_ncores);
828     }
829 
830     if (__kmp_affinity_type == affinity_none) {
831       __kmp_free(retval);
832       KMP_CPU_FREE(oldMask);
833       return 0;
834     }
835 
836     // Form an Address object which only includes the package level.
837     Address addr(1);
838     addr.labels[0] = retval[0].first.labels[0];
839     retval[0].first = addr;
840 
841     if (__kmp_affinity_gran_levels < 0) {
842       __kmp_affinity_gran_levels = 0;
843     }
844 
845     if (__kmp_affinity_verbose) {
846       __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
847     }
848 
849     *address2os = retval;
850     KMP_CPU_FREE(oldMask);
851     return 1;
852   }
853 
854   // Sort the table by physical Id.
855   qsort(retval, nActiveThreads, sizeof(*retval),
856         __kmp_affinity_cmp_Address_labels);
857 
858   // Find any levels with radix 1, and remove them from the map
859   // (except for the package level).
860   depth = __kmp_affinity_remove_radix_one_levels(retval, nActiveThreads, depth,
861                                                  types);
862 
863   __kmp_affinity_gather_enumeration_information(retval, nActiveThreads, depth,
864                                                 types, ratio, count);
865 
866   for (int level = 0; level < depth; ++level) {
867     if ((types[level] == KMP_HW_L2 || types[level] == KMP_HW_L3))
868       __kmp_tile_depth = level;
869   }
870 
871   // This routine should set __kmp_ncores, as well as
872   // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
873   int thread_level, core_level, tile_level, numa_level, socket_level;
874   thread_level = core_level = tile_level = numa_level = socket_level = -1;
875   for (int level = 0; level < depth; ++level) {
876     if (types[level] == KMP_HW_THREAD)
877       thread_level = level;
878     else if (types[level] == KMP_HW_CORE)
879       core_level = level;
880     else if (types[level] == KMP_HW_SOCKET)
881       socket_level = level;
882     else if (types[level] == KMP_HW_TILE)
883       tile_level = level;
884     else if (types[level] == KMP_HW_NUMA)
885       numa_level = level;
886   }
887   __kmp_nThreadsPerCore =
888       __kmp_affinity_calculate_ratio(ratio, thread_level, core_level);
889   nCoresPerPkg =
890       __kmp_affinity_calculate_ratio(ratio, core_level, socket_level);
891   if (socket_level >= 0)
892     nPackages = count[socket_level];
893   else
894     nPackages = 1;
895   if (core_level >= 0)
896     __kmp_ncores = count[core_level];
897   else
898     __kmp_ncores = 1;
899 
900   unsigned uniform = __kmp_affinity_discover_uniformity(depth, ratio, count);
901 
902   // Print the machine topology summary.
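  // Illustrative summary (hypothetical uniform machine; exact wording comes
  // from the message catalog): for 2 sockets, 4 cores per socket and 2 threads
  // per core, the buffer built below would read roughly
  // "2 Sockets x 4 Cores/Socket x 2 Threads/Core", reported together with the
  // total core count.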
903   if (__kmp_affinity_verbose) {
904     kmp_hw_t numerator_type, denominator_type;
905     kmp_str_buf_t buf;
906     __kmp_str_buf_init(&buf);
907     KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
908     if (uniform) {
909       KMP_INFORM(Uniform, "KMP_AFFINITY");
910     } else {
911       KMP_INFORM(NonUniform, "KMP_AFFINITY");
912     }
913 
914     __kmp_str_buf_clear(&buf);
915 
916     if (core_level < 0)
917       core_level = depth - 1;
918     int ncores = count[core_level];
919 
920     denominator_type = KMP_HW_UNKNOWN;
921     for (int level = 0; level < depth; ++level) {
922       int c;
923       bool plural;
924       numerator_type = types[level];
925       c = ratio[level];
926       plural = (c > 1);
927       if (level == 0) {
928         __kmp_str_buf_print(
929             &buf, "%d %s", c,
930             __kmp_hw_get_catalog_string(numerator_type, plural));
931       } else {
932         __kmp_str_buf_print(&buf, " x %d %s/%s", c,
933                             __kmp_hw_get_catalog_string(numerator_type, plural),
934                             __kmp_hw_get_catalog_string(denominator_type));
935       }
936       denominator_type = numerator_type;
937     }
938     KMP_INFORM(TopologyGeneric, "KMP_AFFINITY", buf.str, ncores);
939     __kmp_str_buf_free(&buf);
940   }
941 
942   if (__kmp_affinity_type == affinity_none) {
943     __kmp_free(retval);
944     KMP_CPU_FREE(oldMask);
945     return 0;
946   }
947 
948   // Set the granularity level based on what levels are modeled
949   // in the machine topology map.
950   if (__kmp_affinity_gran == affinity_gran_node)
951     __kmp_affinity_gran = affinity_gran_numa;
952   KMP_DEBUG_ASSERT(__kmp_affinity_gran != affinity_gran_default);
953   if (__kmp_affinity_gran_levels < 0) {
954     __kmp_affinity_gran_levels = 0; // lowest level (e.g. fine)
955     if ((thread_level >= 0) && (__kmp_affinity_gran > affinity_gran_thread))
956       __kmp_affinity_gran_levels++;
957     if ((core_level >= 0) && (__kmp_affinity_gran > affinity_gran_core))
958       __kmp_affinity_gran_levels++;
959     if ((tile_level >= 0) && (__kmp_affinity_gran > affinity_gran_tile))
960       __kmp_affinity_gran_levels++;
961     if ((numa_level >= 0) && (__kmp_affinity_gran > affinity_gran_numa))
962       __kmp_affinity_gran_levels++;
963     if ((socket_level >= 0) && (__kmp_affinity_gran > affinity_gran_package))
964       __kmp_affinity_gran_levels++;
965   }
966 
967   if (__kmp_affinity_verbose)
968     __kmp_affinity_print_topology(retval, nActiveThreads, depth, types);
969 
970   KMP_CPU_FREE(oldMask);
971   *address2os = retval;
972   return depth;
973 }
974 #endif // KMP_USE_HWLOC
975 
976 // If we don't know how to retrieve the machine's processor topology, or
977 // encounter an error in doing so, this routine is called to form a "flat"
978 // mapping of os thread id's <-> processor id's.
979 static int __kmp_affinity_create_flat_map(AddrUnsPair **address2os,
980                                           kmp_i18n_id_t *const msg_id) {
981   *address2os = NULL;
982   *msg_id = kmp_i18n_null;
983 
984   // Even if __kmp_affinity_type == affinity_none, this routine might still be
985   // called to set __kmp_ncores, as well as
986   // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
987   if (!KMP_AFFINITY_CAPABLE()) {
988     KMP_ASSERT(__kmp_affinity_type == affinity_none);
989     __kmp_ncores = nPackages = __kmp_xproc;
990     __kmp_nThreadsPerCore = nCoresPerPkg = 1;
991     if (__kmp_affinity_verbose) {
992       KMP_INFORM(AffFlatTopology, "KMP_AFFINITY");
993       KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
994       KMP_INFORM(Uniform, "KMP_AFFINITY");
995       KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
996                  __kmp_nThreadsPerCore, __kmp_ncores);
997     }
998     return 0;
999   }
1000 
1001   // When affinity is off, this routine will still be called to set
1002   // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1003   // Make sure all these vars are set correctly, and return now if affinity is
1004   // not enabled.
1005   __kmp_ncores = nPackages = __kmp_avail_proc;
1006   __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1007   if (__kmp_affinity_verbose) {
1008     KMP_INFORM(AffCapableUseFlat, "KMP_AFFINITY");
1009     KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1010     KMP_INFORM(Uniform, "KMP_AFFINITY");
1011     KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1012                __kmp_nThreadsPerCore, __kmp_ncores);
1013   }
1014   KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
1015   __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
1016   if (__kmp_affinity_type == affinity_none) {
1017     int avail_ct = 0;
1018     int i;
1019     KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1020       if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask))
1021         continue;
1022       __kmp_pu_os_idx[avail_ct++] = i; // suppose indices are flat
1023     }
1024     return 0;
1025   }
1026 
1027   // Construct the data structure to be returned.
1028   *address2os =
1029       (AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
1030   int avail_ct = 0;
1031   int i;
1032   KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1033     // Skip this proc if it is not included in the machine model.
1034     if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1035       continue;
1036     }
1037     __kmp_pu_os_idx[avail_ct] = i; // suppose indices are flat
1038     Address addr(1);
1039     addr.labels[0] = i;
1040     (*address2os)[avail_ct++] = AddrUnsPair(addr, i);
1041   }
1042   if (__kmp_affinity_verbose) {
1043     KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
1044   }
1045 
1046   if (__kmp_affinity_gran_levels < 0) {
1047     // Only the package level is modeled in the machine topology map,
1048     // so the #levels of granularity is either 0 or 1.
1049     if (__kmp_affinity_gran > affinity_gran_package) {
1050       __kmp_affinity_gran_levels = 1;
1051     } else {
1052       __kmp_affinity_gran_levels = 0;
1053     }
1054   }
1055   return 1;
1056 }
1057 
1058 #if KMP_GROUP_AFFINITY
1059 
1060 // If multiple Windows* OS processor groups exist, we can create a 2-level
1061 // topology map with the groups at level 0 and the individual procs at level 1.
1062 // This facilitates letting the threads float among all procs in a group,
1063 // if granularity=group (the default when there are multiple groups).
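// For example, assuming a 64-bit DWORD_PTR (64 procs per group), OS proc 70
// is mapped to labels {1, 6}: processor group 1, processor 6 within the group.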
1064 static int __kmp_affinity_create_proc_group_map(AddrUnsPair **address2os,
1065                                                 kmp_i18n_id_t *const msg_id) {
1066   *address2os = NULL;
1067   *msg_id = kmp_i18n_null;
1068 
1069   // If we aren't affinity capable, then return now.
1070   // The flat mapping will be used.
1071   if (!KMP_AFFINITY_CAPABLE()) {
1072     // FIXME set *msg_id
1073     return -1;
1074   }
1075 
1076   // Construct the data structure to be returned.
1077   *address2os =
1078       (AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
1079   KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
1080   __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
1081   int avail_ct = 0;
1082   int i;
1083   KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1084     // Skip this proc if it is not included in the machine model.
1085     if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1086       continue;
1087     }
1088     __kmp_pu_os_idx[avail_ct] = i; // suppose indices are flat
1089     Address addr(2);
1090     addr.labels[0] = i / (CHAR_BIT * sizeof(DWORD_PTR));
1091     addr.labels[1] = i % (CHAR_BIT * sizeof(DWORD_PTR));
1092     (*address2os)[avail_ct++] = AddrUnsPair(addr, i);
1093 
1094     if (__kmp_affinity_verbose) {
1095       KMP_INFORM(AffOSProcToGroup, "KMP_AFFINITY", i, addr.labels[0],
1096                  addr.labels[1]);
1097     }
1098   }
1099 
1100   if (__kmp_affinity_gran_levels < 0) {
1101     if (__kmp_affinity_gran == affinity_gran_group) {
1102       __kmp_affinity_gran_levels = 1;
1103     } else if ((__kmp_affinity_gran == affinity_gran_fine) ||
1104                (__kmp_affinity_gran == affinity_gran_thread)) {
1105       __kmp_affinity_gran_levels = 0;
1106     } else {
1107       const char *gran_str = NULL;
1108       if (__kmp_affinity_gran == affinity_gran_core) {
1109         gran_str = "core";
1110       } else if (__kmp_affinity_gran == affinity_gran_package) {
1111         gran_str = "package";
1112       } else if (__kmp_affinity_gran == affinity_gran_node) {
1113         gran_str = "node";
1114       } else {
1115         KMP_ASSERT(0);
1116       }
1117 
1118       // Warning: can't use affinity granularity "gran" with the group topology
1119       // method; using "thread" instead.
1120       __kmp_affinity_gran_levels = 0;
1121     }
1122   }
1123   return 2;
1124 }
1125 
1126 #endif /* KMP_GROUP_AFFINITY */
1127 
1128 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1129 
1130 /*
1131  * CPUID.B or 1F, Input ECX (sub leaf # aka level number)
1132     Bits            Bits            Bits           Bits
1133     31-16           15-8            7-4            4-0
1134 ---+-----------+--------------+-------------+-----------------+
1135 EAX| reserved  |   reserved   |   reserved  |  Bits to Shift  |
1136 ---+-----------|--------------+-------------+-----------------|
1137 EBX| reserved  | Num logical processors at level (16 bits)    |
1138 ---+-----------|--------------+-------------------------------|
1139 ECX| reserved  |   Level Type |      Level Number (8 bits)    |
1140 ---+-----------+--------------+-------------------------------|
1141 EDX|                    X2APIC ID (32 bits)                   |
1142 ---+----------------------------------------------------------+
1143 */
1144 
1145 enum {
1146   INTEL_LEVEL_TYPE_INVALID = 0, // Package level
1147   INTEL_LEVEL_TYPE_SMT = 1,
1148   INTEL_LEVEL_TYPE_CORE = 2,
1149   INTEL_LEVEL_TYPE_TILE = 3,
1150   INTEL_LEVEL_TYPE_MODULE = 4,
1151   INTEL_LEVEL_TYPE_DIE = 5,
1152   INTEL_LEVEL_TYPE_LAST = 6,
1153 };
1154 
1155 struct cpuid_level_info_t {
1156   unsigned level_type, mask, mask_width, nitems, cache_mask;
1157 };
1158 
1159 template <kmp_uint32 LSB, kmp_uint32 MSB>
1160 static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
1161   const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
1162   const kmp_uint32 SHIFT_RIGHT = LSB;
1163   kmp_uint32 retval = v;
1164   retval <<= SHIFT_LEFT;
1165   retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
1166   return retval;
1167 }
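// Example: __kmp_extract_bits<8, 15>(0x0000A403) == 0xA4, i.e. bits 8..15 of
// the value (here SHIFT_LEFT == 16 and SHIFT_RIGHT == 8).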
1168 
1169 static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
1170   switch (intel_type) {
1171   case INTEL_LEVEL_TYPE_INVALID:
1172     return KMP_HW_SOCKET;
1173   case INTEL_LEVEL_TYPE_SMT:
1174     return KMP_HW_THREAD;
1175   case INTEL_LEVEL_TYPE_CORE:
1176     return KMP_HW_CORE;
1177   // TODO: add support for the tile and module
1178   case INTEL_LEVEL_TYPE_TILE:
1179     return KMP_HW_UNKNOWN;
1180   case INTEL_LEVEL_TYPE_MODULE:
1181     return KMP_HW_UNKNOWN;
1182   case INTEL_LEVEL_TYPE_DIE:
1183     return KMP_HW_DIE;
1184   }
1185   return KMP_HW_UNKNOWN;
1186 }
1187 
1188 // This function takes the topology leaf, a levels array to store the levels
1189 // detected and a bitmap of the known levels.
1190 // Returns the number of levels in the topology
1191 static unsigned
1192 __kmp_x2apicid_get_levels(int leaf,
1193                           cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST],
1194                           kmp_uint64 known_levels) {
1195   unsigned level, levels_index;
1196   unsigned level_type, mask_width, nitems;
1197   kmp_cpuid buf;
1198 
1199   // When unknown topology layers exist, the new algorithm has each known
1200   // layer act as the highest unknown topology layer directly above it.
1201   // e.g., suppose the layers are SMT CORE <Y> <Z> PACKAGE (<Y>, <Z> unknown).
1202   // Then CORE will take the characteristics (nitems and mask width) of <Z>.
1203   // In developing the id mask for each layer, this eliminates unknown portions
1204   // of the topology while still keeping the correct underlying structure.
1205   level = levels_index = 0;
1206   do {
1207     __kmp_x86_cpuid(leaf, level, &buf);
1208     level_type = __kmp_extract_bits<8, 15>(buf.ecx);
1209     mask_width = __kmp_extract_bits<0, 4>(buf.eax);
1210     nitems = __kmp_extract_bits<0, 15>(buf.ebx);
1211     if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0)
1212       return 0;
1213 
1214     if (known_levels & (1ull << level_type)) {
1215       // Add a new level to the topology
1216       KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
1217       levels[levels_index].level_type = level_type;
1218       levels[levels_index].mask_width = mask_width;
1219       levels[levels_index].nitems = nitems;
1220       levels_index++;
1221     } else {
1222       // If it is an unknown level, then logically move the previous layer up
1223       if (levels_index > 0) {
1224         levels[levels_index - 1].mask_width = mask_width;
1225         levels[levels_index - 1].nitems = nitems;
1226       }
1227     }
1228     level++;
1229   } while (level_type != INTEL_LEVEL_TYPE_INVALID);
1230 
1231   // Set the masks to & with apicid
1232   for (unsigned i = 0; i < levels_index; ++i) {
1233     if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
1234       levels[i].mask = ~((-1) << levels[i].mask_width);
1235       levels[i].cache_mask = (-1) << levels[i].mask_width;
1236       for (unsigned j = 0; j < i; ++j)
1237         levels[i].mask ^= levels[j].mask;
1238     } else {
1239       KMP_DEBUG_ASSERT(levels_index > 0);
1240       levels[i].mask = (-1) << levels[i - 1].mask_width;
1241       levels[i].cache_mask = 0;
1242     }
1243   }
1244   return levels_index;
1245 }
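// Illustrative mask computation (hypothetical widths, not taken from real
// hardware): with SMT mask_width = 1 and CORE mask_width = 4, the loop above
// yields SMT mask = 0x1, CORE mask = 0xF ^ 0x1 = 0xE, and the package
// (INVALID) level mask = ~0xF, so an x2apic id can be split into thread, core
// and package components by AND-ing it with each level's mask.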
1246 
1247 static int __kmp_cpuid_mask_width(int count) {
1248   int r = 0;
1249 
1250   while ((1 << r) < count)
1251     ++r;
1252   return r;
1253 }
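// Example: __kmp_cpuid_mask_width(6) == 3, since 2^3 = 8 is the smallest power
// of two >= 6; __kmp_cpuid_mask_width(1) == 0.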
1254 
1255 class apicThreadInfo {
1256 public:
1257   unsigned osId; // param to __kmp_affinity_bind_thread
1258   unsigned apicId; // from cpuid after binding
1259   unsigned maxCoresPerPkg; //      ""
1260   unsigned maxThreadsPerPkg; //      ""
1261   unsigned pkgId; // inferred from above values
1262   unsigned coreId; //      ""
1263   unsigned threadId; //      ""
1264 };
1265 
1266 static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
1267                                                      const void *b) {
1268   const apicThreadInfo *aa = (const apicThreadInfo *)a;
1269   const apicThreadInfo *bb = (const apicThreadInfo *)b;
1270   if (aa->pkgId < bb->pkgId)
1271     return -1;
1272   if (aa->pkgId > bb->pkgId)
1273     return 1;
1274   if (aa->coreId < bb->coreId)
1275     return -1;
1276   if (aa->coreId > bb->coreId)
1277     return 1;
1278   if (aa->threadId < bb->threadId)
1279     return -1;
1280   if (aa->threadId > bb->threadId)
1281     return 1;
1282   return 0;
1283 }
1284 
1285 // On IA-32 architecture and Intel(R) 64 architecture, we attempt to use
1286 // an algorithm which cycles through the available os threads, setting
1287 // the current thread's affinity mask to that thread, and then retrieves
1288 // the Apic Id for each thread context using the cpuid instruction.
1289 static int __kmp_affinity_create_apicid_map(AddrUnsPair **address2os,
1290                                             kmp_i18n_id_t *const msg_id) {
1291   kmp_cpuid buf;
1292   *address2os = NULL;
1293   *msg_id = kmp_i18n_null;
1294 
1295   // Check if cpuid leaf 4 is supported.
1296   __kmp_x86_cpuid(0, 0, &buf);
1297   if (buf.eax < 4) {
1298     *msg_id = kmp_i18n_str_NoLeaf4Support;
1299     return -1;
1300   }
1301 
1302   // The algorithm used starts by setting the affinity to each available thread
1303   // and retrieving info from the cpuid instruction, so if we are not capable of
1304   // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then we
1305   // need to do something else - use the defaults that we calculated from
1306   // issuing cpuid without binding to each proc.
1307   if (!KMP_AFFINITY_CAPABLE()) {
1308     // Hack to try and infer the machine topology using only the data
1309     // available from cpuid on the current thread, and __kmp_xproc.
1310     KMP_ASSERT(__kmp_affinity_type == affinity_none);
1311 
1312     // Get an upper bound on the number of threads per package using cpuid(1).
1313     // On some OS/chip combinations where HT is supported by the chip but is
1314     // disabled, this value will be 2 on a single core chip. Usually, it will be
1315     // 2 if HT is enabled and 1 if HT is disabled.
1316     __kmp_x86_cpuid(1, 0, &buf);
1317     int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
1318     if (maxThreadsPerPkg == 0) {
1319       maxThreadsPerPkg = 1;
1320     }
1321 
1322     // The num cores per pkg comes from cpuid(4). 1 must be added to the encoded
1323     // value.
1324     //
1325     // The author of cpu_count.cpp treated this as only an upper bound on the
1326     // number of cores, but I haven't seen any cases where it was greater than
1327     // the actual number of cores, so we will treat it as exact in this block of
1328     // code.
1329     //
1330     // First, we need to check if cpuid(4) is supported on this chip. To see if
1331     // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n or
1332     // greater.
1333     __kmp_x86_cpuid(0, 0, &buf);
1334     if (buf.eax >= 4) {
1335       __kmp_x86_cpuid(4, 0, &buf);
1336       nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
1337     } else {
1338       nCoresPerPkg = 1;
1339     }
1340 
1341     // There is no way to reliably tell if HT is enabled without issuing the
1342     // cpuid instruction from every thread and correlating the cpuid info, so
1343     // if the machine is not affinity capable, we assume that HT is off. We have
1344     // seen quite a few machines where maxThreadsPerPkg is 2, yet the machine
1345     // does not support HT.
1346     //
1347     // - Older OSes are usually found on machines with older chips, which do not
1348     //   support HT.
1349     // - The performance penalty for mistakenly identifying a machine as HT when
1350     //   it isn't (which results in blocktime being incorrectly set to 0) is
1351     //   greater than the penalty for mistakenly identifying a machine as
1352     //   being 1 thread/core when it is really HT enabled (which results in
1353     //   blocktime being incorrectly set to a positive value).
1354     __kmp_ncores = __kmp_xproc;
1355     nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
1356     __kmp_nThreadsPerCore = 1;
1357     if (__kmp_affinity_verbose) {
1358       KMP_INFORM(AffNotCapableUseLocCpuid, "KMP_AFFINITY");
1359       KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1360       if (__kmp_affinity_uniform_topology()) {
1361         KMP_INFORM(Uniform, "KMP_AFFINITY");
1362       } else {
1363         KMP_INFORM(NonUniform, "KMP_AFFINITY");
1364       }
1365       KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1366                  __kmp_nThreadsPerCore, __kmp_ncores);
1367     }
1368     return 0;
1369   }
1370 
1371   // From here on, we can assume that it is safe to call
1372   // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
1373   // __kmp_affinity_type = affinity_none.
1374 
1375   // Save the affinity mask for the current thread.
1376   kmp_affin_mask_t *oldMask;
1377   KMP_CPU_ALLOC(oldMask);
1378   KMP_ASSERT(oldMask != NULL);
1379   __kmp_get_system_affinity(oldMask, TRUE);
1380 
1381   // Run through each of the available contexts, binding the current thread
1382   // to it, and obtaining the pertinent information using the cpuid instr.
1383   //
1384   // The relevant information is:
1385   // - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context
1386   //     has a unique Apic Id, which is of the form pkg# : core# : thread#.
1387   // - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value
1388   //     of this field determines the width of the core# + thread# fields in the
1389   //     Apic Id. It is also an upper bound on the number of threads per
1390   //     package, but it has been verified that situations happen where it is not
1391   //     exact. In particular, on certain OS/chip combinations where Intel(R)
1392   //     Hyper-Threading Technology is supported by the chip but has been
1393   //     disabled, the value of this field will be 2 (for a single core chip).
1394   //     On other OS/chip combinations supporting Intel(R) Hyper-Threading
1395   //     Technology, the value of this field will be 1 when Intel(R)
1396   //     Hyper-Threading Technology is disabled and 2 when it is enabled.
1397   // - Max Cores Per Pkg:  Bits 26:31 of eax after issuing cpuid(4). The value
1398   //     of this field (+1) determines the width of the core# field in the Apic
1399   //     Id. The comments in "cpucount.cpp" say that this value is an upper
1400   //     bound, but the IA-32 architecture manual says that it is exactly the
1401   //     number of cores per package, and I haven't seen any case where it
1402   //     wasn't.
1403   //
1404   // From this information, deduce the package Id, core Id, and thread Id,
1405   // and set the corresponding fields in the apicThreadInfo struct.
1406   unsigned i;
1407   apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
1408       __kmp_avail_proc * sizeof(apicThreadInfo));
1409   unsigned nApics = 0;
1410   KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1411     // Skip this proc if it is not included in the machine model.
1412     if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1413       continue;
1414     }
1415     KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);
1416 
1417     __kmp_affinity_dispatch->bind_thread(i);
1418     threadInfo[nApics].osId = i;
1419 
1420     // The apic id and max threads per pkg come from cpuid(1).
1421     __kmp_x86_cpuid(1, 0, &buf);
1422     if (((buf.edx >> 9) & 1) == 0) {
1423       __kmp_set_system_affinity(oldMask, TRUE);
1424       __kmp_free(threadInfo);
1425       KMP_CPU_FREE(oldMask);
1426       *msg_id = kmp_i18n_str_ApicNotPresent;
1427       return -1;
1428     }
1429     threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
1430     threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
1431     if (threadInfo[nApics].maxThreadsPerPkg == 0) {
1432       threadInfo[nApics].maxThreadsPerPkg = 1;
1433     }
1434 
1435     // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded
1436     // value.
1437     //
1438     // First, we need to check if cpuid(4) is supported on this chip. To see if
1439     // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n
1440     // or greater.
1441     __kmp_x86_cpuid(0, 0, &buf);
1442     if (buf.eax >= 4) {
1443       __kmp_x86_cpuid(4, 0, &buf);
1444       threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
1445     } else {
1446       threadInfo[nApics].maxCoresPerPkg = 1;
1447     }
1448 
1449     // Infer the pkgId / coreId / threadId using only the info obtained locally.
1450     int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
1451     threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;
1452 
1453     int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
1454     int widthT = widthCT - widthC;
1455     if (widthT < 0) {
1456       // I've never seen this one happen, but I suppose it could, if the cpuid
1457       // instruction on a chip was really screwed up. Make sure to restore the
1458       // affinity mask before the tail call.
1459       __kmp_set_system_affinity(oldMask, TRUE);
1460       __kmp_free(threadInfo);
1461       KMP_CPU_FREE(oldMask);
1462       *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1463       return -1;
1464     }
1465 
1466     int maskC = (1 << widthC) - 1;
1467     threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;
1468 
1469     int maskT = (1 << widthT) - 1;
1470     threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;
1471 
1472     nApics++;
1473   }
1474 
1475   // We've collected all the info we need.
1476   // Restore the old affinity mask for this thread.
1477   __kmp_set_system_affinity(oldMask, TRUE);
1478 
1479   // If there's only one thread context to bind to, form an Address object
1480   // with depth 1 and return immediately (or, if affinity is off, set
1481   // address2os to NULL and return).
1482   //
1483   // If it is configured to omit the package level when there is only a single
1484   // package, the logic at the end of this routine won't work if there is only
1485   // a single thread - it would try to form an Address object with depth 0.
1486   KMP_ASSERT(nApics > 0);
1487   if (nApics == 1) {
1488     __kmp_ncores = nPackages = 1;
1489     __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1490     if (__kmp_affinity_verbose) {
1491       KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
1492       KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1493       KMP_INFORM(Uniform, "KMP_AFFINITY");
1494       KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1495                  __kmp_nThreadsPerCore, __kmp_ncores);
1496     }
1497 
1498     if (__kmp_affinity_type == affinity_none) {
1499       __kmp_free(threadInfo);
1500       KMP_CPU_FREE(oldMask);
1501       return 0;
1502     }
1503 
1504     *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair));
1505     Address addr(1);
1506     addr.labels[0] = threadInfo[0].pkgId;
1507     (*address2os)[0] = AddrUnsPair(addr, threadInfo[0].osId);
1508 
1509     if (__kmp_affinity_gran_levels < 0) {
1510       __kmp_affinity_gran_levels = 0;
1511     }
1512 
1513     if (__kmp_affinity_verbose) {
1514       __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
1515     }
1516 
1517     __kmp_free(threadInfo);
1518     KMP_CPU_FREE(oldMask);
1519     return 1;
1520   }
1521 
1522   // Sort the threadInfo table by physical Id.
1523   qsort(threadInfo, nApics, sizeof(*threadInfo),
1524         __kmp_affinity_cmp_apicThreadInfo_phys_id);
1525 
1526   // The table is now sorted by pkgId / coreId / threadId, but we really don't
1527   // know the radix of any of the fields. pkgId's may be sparsely assigned among
1528   // the chips on a system. Although coreId's are usually assigned
1529   // [0 .. coresPerPkg-1] and threadId's are usually assigned
1530   // [0..threadsPerCore-1], we don't want to make any such assumptions.
1531   //
1532   // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
1533   // total # packages) are at this point - we want to determine that now. We
1534   // only have an upper bound on the first two figures.
1535   //
1536   // We also perform a consistency check at this point: the values returned by
1537   // the cpuid instruction for any thread bound to a given package had better
1538   // return the same info for maxThreadsPerPkg and maxCoresPerPkg.
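  //
  // A small worked example (hypothetical IDs, for illustration only): if the
  // sorted table holds the (pkgId, coreId, threadId) triples
  //   (0,0,0) (0,0,1) (0,1,0) (0,1,1) (2,0,0) (2,0,1) (2,1,0) (2,1,1)
  // then the scan below yields nPackages = 2, nCoresPerPkg = 2,
  // __kmp_nThreadsPerCore = 2 and __kmp_ncores = 4, even though the pkgId
  // values (0 and 2) are sparsely assigned.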
1539   nPackages = 1;
1540   nCoresPerPkg = 1;
1541   __kmp_nThreadsPerCore = 1;
1542   unsigned nCores = 1;
1543 
1544   unsigned pkgCt = 1; // to determine radii
1545   unsigned lastPkgId = threadInfo[0].pkgId;
1546   unsigned coreCt = 1;
1547   unsigned lastCoreId = threadInfo[0].coreId;
1548   unsigned threadCt = 1;
1549   unsigned lastThreadId = threadInfo[0].threadId;
1550 
1551   // intra-pkg consistency checks
1552   unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
1553   unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;
1554 
1555   for (i = 1; i < nApics; i++) {
1556     if (threadInfo[i].pkgId != lastPkgId) {
1557       nCores++;
1558       pkgCt++;
1559       lastPkgId = threadInfo[i].pkgId;
1560       if ((int)coreCt > nCoresPerPkg)
1561         nCoresPerPkg = coreCt;
1562       coreCt = 1;
1563       lastCoreId = threadInfo[i].coreId;
1564       if ((int)threadCt > __kmp_nThreadsPerCore)
1565         __kmp_nThreadsPerCore = threadCt;
1566       threadCt = 1;
1567       lastThreadId = threadInfo[i].threadId;
1568 
1569       // This is a different package, so go on to the next iteration without
1570       // doing any consistency checks. Reset the consistency check vars, though.
1571       prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
1572       prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
1573       continue;
1574     }
1575 
1576     if (threadInfo[i].coreId != lastCoreId) {
1577       nCores++;
1578       coreCt++;
1579       lastCoreId = threadInfo[i].coreId;
1580       if ((int)threadCt > __kmp_nThreadsPerCore)
1581         __kmp_nThreadsPerCore = threadCt;
1582       threadCt = 1;
1583       lastThreadId = threadInfo[i].threadId;
1584     } else if (threadInfo[i].threadId != lastThreadId) {
1585       threadCt++;
1586       lastThreadId = threadInfo[i].threadId;
1587     } else {
1588       __kmp_free(threadInfo);
1589       KMP_CPU_FREE(oldMask);
1590       *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
1591       return -1;
1592     }
1593 
1594     // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
1595     // fields agree between all the threads bound to a given package.
1596     if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
1597         (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
1598       __kmp_free(threadInfo);
1599       KMP_CPU_FREE(oldMask);
1600       *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
1601       return -1;
1602     }
1603   }
1604   nPackages = pkgCt;
1605   if ((int)coreCt > nCoresPerPkg)
1606     nCoresPerPkg = coreCt;
1607   if ((int)threadCt > __kmp_nThreadsPerCore)
1608     __kmp_nThreadsPerCore = threadCt;
1609 
1610   // When affinity is off, this routine will still be called to set
1611   // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1612   // Make sure all these vars are set correctly, and return now if affinity is
1613   // not enabled.
1614   __kmp_ncores = nCores;
1615   if (__kmp_affinity_verbose) {
1616     KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
1617     KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1618     if (__kmp_affinity_uniform_topology()) {
1619       KMP_INFORM(Uniform, "KMP_AFFINITY");
1620     } else {
1621       KMP_INFORM(NonUniform, "KMP_AFFINITY");
1622     }
1623     KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1624                __kmp_nThreadsPerCore, __kmp_ncores);
1625   }
1626   KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
1627   KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);
1628   __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
1629   for (i = 0; i < nApics; ++i) {
1630     __kmp_pu_os_idx[i] = threadInfo[i].osId;
1631   }
1632   if (__kmp_affinity_type == affinity_none) {
1633     __kmp_free(threadInfo);
1634     KMP_CPU_FREE(oldMask);
1635     return 0;
1636   }
1637 
1638   // Now that we've determined the number of packages, the number of cores per
1639   // package, and the number of threads per core, we can construct the data
1640   // structure that is to be returned.
1641   int pkgLevel = 0;
1642   int coreLevel = (nCoresPerPkg <= 1) ? -1 : 1;
1643   int threadLevel =
1644       (__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1);
1645   unsigned depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
1646 
1647   KMP_ASSERT(depth > 0);
1648   *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * nApics);
1649 
1650   for (i = 0; i < nApics; ++i) {
1651     Address addr(depth);
1652     unsigned os = threadInfo[i].osId;
1653     int d = 0;
1654 
1655     if (pkgLevel >= 0) {
1656       addr.labels[d++] = threadInfo[i].pkgId;
1657     }
1658     if (coreLevel >= 0) {
1659       addr.labels[d++] = threadInfo[i].coreId;
1660     }
1661     if (threadLevel >= 0) {
1662       addr.labels[d++] = threadInfo[i].threadId;
1663     }
1664     (*address2os)[i] = AddrUnsPair(addr, os);
1665   }
1666 
1667   if (__kmp_affinity_gran_levels < 0) {
1668     // Set the granularity level based on what levels are modeled in the machine
1669     // topology map.
1670     __kmp_affinity_gran_levels = 0;
1671     if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
1672       __kmp_affinity_gran_levels++;
1673     }
1674     if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
1675       __kmp_affinity_gran_levels++;
1676     }
1677     if ((pkgLevel >= 0) && (__kmp_affinity_gran > affinity_gran_package)) {
1678       __kmp_affinity_gran_levels++;
1679     }
1680   }
1681 
1682   if (__kmp_affinity_verbose) {
1683     __kmp_affinity_print_topology(*address2os, nApics, depth, pkgLevel,
1684                                   coreLevel, threadLevel);
1685   }
1686 
1687   __kmp_free(threadInfo);
1688   KMP_CPU_FREE(oldMask);
1689   return depth;
1690 }
1691 
1692 // Intel(R) microarchitecture code name Nehalem, Dunnington and later
1693 // architectures support a newer interface for specifying the x2APIC Ids,
1694 // based on CPUID.B or CPUID.1F
1695 static int __kmp_affinity_create_x2apicid_map(AddrUnsPair **address2os,
1696                                               kmp_i18n_id_t *const msg_id) {
1697 
1698   cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
1699   int ratio[KMP_HW_LAST];
1700   int count[KMP_HW_LAST];
1701   kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
1702   unsigned levels_index = 0; // stays 0 if no usable topology leaf is found
1703   kmp_cpuid buf;
1704   kmp_uint64 known_levels;
1705   int topology_leaf, highest_leaf, apic_id;
1706   int num_leaves;
1707   static int leaves[] = {0, 0};
1708 
1709   kmp_i18n_id_t leaf_message_id;
1710 
1711   KMP_BUILD_ASSERT(sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST);
1712 
1713   *msg_id = kmp_i18n_null;
1714 
1715   // Figure out the known topology levels
1716   known_levels = 0ull;
1717   for (int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) {
1718     if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) {
1719       known_levels |= (1ull << i);
1720     }
1721   }
1722 
1723   // Get the highest cpuid leaf supported
1724   __kmp_x86_cpuid(0, 0, &buf);
1725   highest_leaf = buf.eax;
1726 
1727   // If a specific topology method was requested, only allow that specific leaf;
1728   // otherwise, try both leaves 31 and 11, in that order.
1729   num_leaves = 0;
1730   if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
1731     num_leaves = 1;
1732     leaves[0] = 11;
1733     leaf_message_id = kmp_i18n_str_NoLeaf11Support;
1734   } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
1735     num_leaves = 1;
1736     leaves[0] = 31;
1737     leaf_message_id = kmp_i18n_str_NoLeaf31Support;
1738   } else {
1739     num_leaves = 2;
1740     leaves[0] = 31;
1741     leaves[1] = 11;
1742     leaf_message_id = kmp_i18n_str_NoLeaf11Support;
1743   }
1744 
1745   // Check to see if cpuid leaf 31 or 11 is supported.
1746   __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
1747   topology_leaf = -1;
1748   for (int i = 0; i < num_leaves; ++i) {
1749     int leaf = leaves[i];
1750     if (highest_leaf < leaf)
1751       continue;
1752     __kmp_x86_cpuid(leaf, 0, &buf);
1753     if (buf.ebx == 0)
1754       continue;
1755     topology_leaf = leaf;
1756     levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels);
1757     if (levels_index == 0)
1758       continue;
1759     break;
1760   }
1761   if (topology_leaf == -1 || levels_index == 0) {
1762     *msg_id = leaf_message_id;
1763     return -1;
1764   }
1765   KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);
1766 
1767   // The algorithm used starts by setting the affinity to each available thread
1768   // and retrieving info from the cpuid instruction, so if we are not capable of
1769   // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then
1770   // we need to do something else - use the defaults that we calculated from
1771   // issuing cpuid without binding to each proc.
1772   if (!KMP_AFFINITY_CAPABLE()) {
1773     // Hack to try and infer the machine topology using only the data
1774     // available from cpuid on the current thread, and __kmp_xproc.
1775     KMP_ASSERT(__kmp_affinity_type == affinity_none);
1776 
1777     for (unsigned i = 0; i < levels_index; ++i) {
1778       if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
1779         __kmp_nThreadsPerCore = levels[i].nitems;
1780       } else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
1781         nCoresPerPkg = levels[i].nitems;
1782       } else if (levels[i].level_type == INTEL_LEVEL_TYPE_DIE) {
1783         nDiesPerPkg = levels[i].nitems;
1784       }
1785     }
1786     __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
1787     nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
1788     if (__kmp_affinity_verbose) {
1789       KMP_INFORM(AffNotCapableUseLocCpuidL, "KMP_AFFINITY", topology_leaf);
1790       KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1791       if (__kmp_affinity_uniform_topology()) {
1792         KMP_INFORM(Uniform, "KMP_AFFINITY");
1793       } else {
1794         KMP_INFORM(NonUniform, "KMP_AFFINITY");
1795       }
1796       KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1797                  __kmp_nThreadsPerCore, __kmp_ncores);
1798     }
1799     return 0;
1800   }
1801 
1802   // From here on, we can assume that it is safe to call
1803   // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
1804   // __kmp_affinity_type = affinity_none.
1805 
1806   // Save the affinity mask for the current thread.
1807   kmp_affin_mask_t *oldMask;
1808   KMP_CPU_ALLOC(oldMask);
1809   __kmp_get_system_affinity(oldMask, TRUE);
1810 
1811   // Allocate the data structure to be returned.
1812   int depth = levels_index;
1813   for (int i = depth - 1, j = 0; i >= 0; --i, ++j)
1814     types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type);
1815   AddrUnsPair *retval =
1816       (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);
1817 
1818   // Run through each of the available contexts, binding the current thread
1819   // to it, and obtaining the pertinent information using the cpuid instr.
1820   unsigned int proc;
1821   int nApics = 0;
1822   KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
1823     cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST];
1824     unsigned my_levels_index;
1825 
1826     // Skip this proc if it is not included in the machine model.
1827     if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
1828       continue;
1829     }
1830     KMP_DEBUG_ASSERT(nApics < __kmp_avail_proc);
1831 
1832     __kmp_affinity_dispatch->bind_thread(proc);
1833 
1834     // New algorithm
1835     __kmp_x86_cpuid(topology_leaf, 0, &buf);
1836     apic_id = buf.edx;
1837     Address addr(depth);
1838     my_levels_index =
1839         __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels);
1840     if (my_levels_index == 0 || my_levels_index != levels_index) {
1841       KMP_CPU_FREE(oldMask);
1842       *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1843       return -1;
1844     }
1845     // Put in topology information
1846     for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) {
1847       addr.labels[idx] = apic_id & my_levels[j].mask;
1848       if (j > 0)
1849         addr.labels[idx] >>= my_levels[j - 1].mask_width;
1850     }
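    // For illustration only (hypothetical mask values): if my_levels[0] is the
    // SMT level with mask 0x1 and mask_width 1, and my_levels[1] is the core
    // level with mask 0x3f, then an x2APIC id of 0x35 produces
    // labels[depth - 1] = 0x35 & 0x1 = 1 (thread within core) and
    // labels[depth - 2] = (0x35 & 0x3f) >> 1 = 0x1a (core id); each level's
    // label is the masked APIC id shifted down past the levels below it.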
1851     retval[nApics++] = AddrUnsPair(addr, proc);
1852   }
1853 
1854   // We've collected all the info we need.
1855   // Restore the old affinity mask for this thread.
1856   __kmp_set_system_affinity(oldMask, TRUE);
1857 
1858   // If there's only one thread context to bind to, return now.
1859   KMP_ASSERT(nApics > 0);
1860   if (nApics == 1) {
1861     int pkg_level;
1862     __kmp_ncores = nPackages = 1;
1863     __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1864     if (__kmp_affinity_verbose) {
1865       KMP_INFORM(AffUseGlobCpuidL, "KMP_AFFINITY", topology_leaf);
1866       KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1867       KMP_INFORM(Uniform, "KMP_AFFINITY");
1868       KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1869                  __kmp_nThreadsPerCore, __kmp_ncores);
1870     }
1871 
1872     if (__kmp_affinity_type == affinity_none) {
1873       __kmp_free(retval);
1874       KMP_CPU_FREE(oldMask);
1875       return 0;
1876     }
1877 
1878     pkg_level = 0;
1879     for (int i = 0; i < depth; ++i)
1880       if (types[i] == KMP_HW_SOCKET) {
1881         pkg_level = i;
1882         break;
1883       }
1884     // Form an Address object which only includes the package level.
1885     Address addr(1);
1886     addr.labels[0] = retval[0].first.labels[pkg_level];
1887     retval[0].first = addr;
1888 
1889     if (__kmp_affinity_gran_levels < 0) {
1890       __kmp_affinity_gran_levels = 0;
1891     }
1892 
1893     if (__kmp_affinity_verbose) {
1894       __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
1895     }
1896 
1897     *address2os = retval;
1898     KMP_CPU_FREE(oldMask);
1899     return 1;
1900   }
1901 
1902   // Sort the table by physical Id.
1903   qsort(retval, nApics, sizeof(*retval), __kmp_affinity_cmp_Address_labels);
1904 
1905   __kmp_affinity_gather_enumeration_information(retval, nApics, depth, types,
1906                                                 ratio, count);
1907 
1908   // When affinity is off, this routine will still be called to set
1909   // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1910   // Make sure all these vars are set correctly, and return if affinity is not
1911   // enabled.
1912   int thread_level, core_level, socket_level, die_level;
1913   thread_level = core_level = die_level = socket_level = -1;
1914   for (int level = 0; level < depth; ++level) {
1915     if (types[level] == KMP_HW_THREAD)
1916       thread_level = level;
1917     else if (types[level] == KMP_HW_CORE)
1918       core_level = level;
1919     else if (types[level] == KMP_HW_DIE)
1920       die_level = level;
1921     else if (types[level] == KMP_HW_SOCKET)
1922       socket_level = level;
1923   }
1924   __kmp_nThreadsPerCore =
1925       __kmp_affinity_calculate_ratio(ratio, thread_level, core_level);
1926   if (die_level > 0) {
1927     nDiesPerPkg =
1928         __kmp_affinity_calculate_ratio(ratio, die_level, socket_level);
1929     nCoresPerPkg = __kmp_affinity_calculate_ratio(ratio, core_level, die_level);
1930   } else {
1931     nCoresPerPkg =
1932         __kmp_affinity_calculate_ratio(ratio, core_level, socket_level);
1933   }
1934   if (socket_level >= 0)
1935     nPackages = count[socket_level];
1936   else
1937     nPackages = 1;
1938   if (core_level >= 0)
1939     __kmp_ncores = count[core_level];
1940   else
1941     __kmp_ncores = 1;
1942 
1943   // Check to see if the machine topology is uniform
1944   unsigned uniform = __kmp_affinity_discover_uniformity(depth, ratio, count);
1945 
1946   // Print the machine topology summary.
1947   if (__kmp_affinity_verbose) {
1948     kmp_hw_t numerator_type, denominator_type;
1949     KMP_INFORM(AffUseGlobCpuidL, "KMP_AFFINITY", topology_leaf);
1950     KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1951     if (uniform) {
1952       KMP_INFORM(Uniform, "KMP_AFFINITY");
1953     } else {
1954       KMP_INFORM(NonUniform, "KMP_AFFINITY");
1955     }
1956 
1957     kmp_str_buf_t buf;
1958     __kmp_str_buf_init(&buf);
1959 
1960     if (core_level < 0)
1961       core_level = depth - 1;
1962     int ncores = count[core_level];
1963 
1964     denominator_type = KMP_HW_UNKNOWN;
1965     for (int level = 0; level < depth; ++level) {
1966       int c;
1967       bool plural;
1968       numerator_type = types[level];
1969       c = ratio[level];
1970       plural = (c > 1);
1971       if (level == 0) {
1972         __kmp_str_buf_print(
1973             &buf, "%d %s", c,
1974             __kmp_hw_get_catalog_string(numerator_type, plural));
1975       } else {
1976         __kmp_str_buf_print(&buf, " x %d %s/%s", c,
1977                             __kmp_hw_get_catalog_string(numerator_type, plural),
1978                             __kmp_hw_get_catalog_string(denominator_type));
1979       }
1980       denominator_type = numerator_type;
1981     }
1982     KMP_INFORM(TopologyGeneric, "KMP_AFFINITY", buf.str, ncores);
1983     __kmp_str_buf_free(&buf);
1984   }
1985 
1986   KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
1987   KMP_DEBUG_ASSERT(nApics == __kmp_avail_proc);
1988   __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
1989   for (proc = 0; (int)proc < nApics; ++proc) {
1990     __kmp_pu_os_idx[proc] = retval[proc].second;
1991   }
1992   if (__kmp_affinity_type == affinity_none) {
1993     __kmp_free(retval);
1994     KMP_CPU_FREE(oldMask);
1995     return 0;
1996   }
1997 
1998   // Find any levels with radix 1, and remove them from the map
1999   // (except for the package level).
2000   depth = __kmp_affinity_remove_radix_one_levels(retval, nApics, depth, types);
2001   thread_level = core_level = die_level = socket_level = -1;
2002   for (int level = 0; level < depth; ++level) {
2003     if (types[level] == KMP_HW_THREAD)
2004       thread_level = level;
2005     else if (types[level] == KMP_HW_CORE)
2006       core_level = level;
2007     else if (types[level] == KMP_HW_DIE)
2008       die_level = level;
2009     else if (types[level] == KMP_HW_SOCKET)
2010       socket_level = level;
2011   }
2012 
2013   if (__kmp_affinity_gran_levels < 0) {
2014     // Set the granularity level based on what levels are modeled
2015     // in the machine topology map.
2016     __kmp_affinity_gran_levels = 0;
2017     if ((thread_level >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
2018       __kmp_affinity_gran_levels++;
2019     }
2020     if ((core_level >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
2021       __kmp_affinity_gran_levels++;
2022     }
2023     if ((die_level >= 0) && (__kmp_affinity_gran > affinity_gran_die)) {
2024       __kmp_affinity_gran_levels++;
2025     }
2026     if (__kmp_affinity_gran > affinity_gran_package) {
2027       __kmp_affinity_gran_levels++;
2028     }
2029   }
2030 
2031   if (__kmp_affinity_verbose) {
2032     __kmp_affinity_print_topology(retval, nApics, depth, types);
2033   }
2034 
2035   KMP_CPU_FREE(oldMask);
2036   *address2os = retval;
2037   return depth;
2038 }
2039 
2040 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
2041 
2042 #define osIdIndex 0
2043 #define threadIdIndex 1
2044 #define coreIdIndex 2
2045 #define pkgIdIndex 3
2046 #define nodeIdIndex 4
2047 
2048 typedef unsigned *ProcCpuInfo;
2049 static unsigned maxIndex = pkgIdIndex;
2050 
2051 static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
2052                                                   const void *b) {
2053   unsigned i;
2054   const unsigned *aa = *(unsigned *const *)a;
2055   const unsigned *bb = *(unsigned *const *)b;
2056   for (i = maxIndex;; i--) {
2057     if (aa[i] < bb[i])
2058       return -1;
2059     if (aa[i] > bb[i])
2060       return 1;
2061     if (i == osIdIndex)
2062       break;
2063   }
2064   return 0;
2065 }
2066 
2067 #if KMP_USE_HIER_SCHED
2068 // Set the array sizes for the hierarchy layers
2069 static void __kmp_dispatch_set_hierarchy_values() {
2070   // Set the maximum number of L1's to number of cores
2071   // Set the maximum number of L2's to either the number of cores / 2 for the
2072   // Intel(R) Xeon Phi(TM) coprocessor formerly codenamed Knights Landing,
2073   // or the number of cores for Intel(R) Xeon(R) processors
2074   // Set the maximum number of NUMA nodes and L3's to number of packages
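  //
  // For illustration only (hypothetical machine): with nPackages = 2,
  // nCoresPerPkg = 8 and __kmp_nThreadsPerCore = 2 (so __kmp_ncores = 16) on
  // a non-MIC part, the assignments below give max units of 32 (THREAD),
  // 16 (L1), 16 (L2), 2 (L3), 2 (NUMA) and 1 (LOOP), with 1, 2, 2, 16, 16
  // and 32 hardware threads per unit, respectively.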
2075   __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
2076       nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
2077   __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
2078 #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&   \
2079     KMP_MIC_SUPPORTED
2080   if (__kmp_mic_type >= mic3)
2081     __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
2082   else
2083 #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && KMP_MIC_SUPPORTED
2084     __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
2085   __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
2086   __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
2087   __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
2088   // Set the number of threads per unit
2089   // Number of hardware threads per L1/L2/L3/NUMA/LOOP
2090   __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
2091   __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
2092       __kmp_nThreadsPerCore;
2093 #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&   \
2094     KMP_MIC_SUPPORTED
2095   if (__kmp_mic_type >= mic3)
2096     __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
2097         2 * __kmp_nThreadsPerCore;
2098   else
2099 #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && KMP_MIC_SUPPORTED
2100     __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
2101         __kmp_nThreadsPerCore;
2102   __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
2103       nCoresPerPkg * __kmp_nThreadsPerCore;
2104   __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
2105       nCoresPerPkg * __kmp_nThreadsPerCore;
2106   __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
2107       nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
2108 }
2109 
2110 // Return the index into the hierarchy for this tid and layer type (L1, L2, etc)
2111 // i.e., this thread's L1 or this thread's L2, etc.
2112 int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
2113   int index = type + 1;
2114   int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
2115   KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
2116   if (type == kmp_hier_layer_e::LAYER_THREAD)
2117     return tid;
2118   else if (type == kmp_hier_layer_e::LAYER_LOOP)
2119     return 0;
2120   KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
2121   if (tid >= num_hw_threads)
2122     tid = tid % num_hw_threads;
2123   return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
2124 }
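// For illustration only (hypothetical topology): with 2 threads per core and
// 4 cores, __kmp_hier_threads_per[LAYER_L1 + 1] == 2 and
// __kmp_hier_max_units[LAYER_L1 + 1] == 4, so
//   __kmp_dispatch_get_index(5, kmp_hier_layer_e::LAYER_L1)
// returns (5 / 2) % 4 == 2, i.e. tids 4 and 5 map to the same L1 unit.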
2125 
2126 // Return the number of t1's per t2
2127 int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
2128   int i1 = t1 + 1;
2129   int i2 = t2 + 1;
2130   KMP_DEBUG_ASSERT(i1 <= i2);
2131   KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
2132   KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
2133   KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
2134   // (nthreads/t2) / (nthreads/t1) = t1 / t2
2135   return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
2136 }
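// Continuing the hypothetical topology above: the number of L1 units per L2
// unit is __kmp_hier_threads_per[LAYER_L2 + 1] /
// __kmp_hier_threads_per[LAYER_L1 + 1], which is (2 * __kmp_nThreadsPerCore) /
// __kmp_nThreadsPerCore == 2 on a Knights Landing style part and 1 otherwise.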
2137 #endif // KMP_USE_HIER_SCHED
2138 
2139 // Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
2140 // affinity map.
2141 static int __kmp_affinity_create_cpuinfo_map(AddrUnsPair **address2os,
2142                                              int *line,
2143                                              kmp_i18n_id_t *const msg_id,
2144                                              FILE *f) {
2145   *address2os = NULL;
2146   *msg_id = kmp_i18n_null;
2147 
2148   // First scan of the file: count the number of "processor" (osId) fields,
2149   // and find the highest value of <n> for a node_<n> field.
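  //
  // For reference, a typical x86 /proc/cpuinfo record contains lines such as
  //   processor   : 0
  //   physical id : 0
  //   core id     : 0
  // with records separated by blank lines; the parser below keys off these
  // leading tokens, along with "thread id" and "node_<n> id".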
2150   char buf[256];
2151   unsigned num_records = 0;
2152   while (!feof(f)) {
2153     buf[sizeof(buf) - 1] = 1;
2154     if (!fgets(buf, sizeof(buf), f)) {
2155       // Read errors presumably because of EOF
2156       break;
2157     }
2158 
2159     char s1[] = "processor";
2160     if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2161       num_records++;
2162       continue;
2163     }
2164 
2165     // FIXME - this will match "node_<n> <garbage>"
2166     unsigned level;
2167     if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
2168       if (nodeIdIndex + level >= maxIndex) {
2169         maxIndex = nodeIdIndex + level;
2170       }
2171       continue;
2172     }
2173   }
2174 
2175   // Check for empty file / no valid processor records, or too many. The number
2176   // of records can't exceed the number of valid bits in the affinity mask.
2177   if (num_records == 0) {
2178     *line = 0;
2179     *msg_id = kmp_i18n_str_NoProcRecords;
2180     return -1;
2181   }
2182   if (num_records > (unsigned)__kmp_xproc) {
2183     *line = 0;
2184     *msg_id = kmp_i18n_str_TooManyProcRecords;
2185     return -1;
2186   }
2187 
2188   // Set the file pointer back to the beginning, so that we can scan the file
2189   // again, this time performing a full parse of the data. Allocate a vector of
2190   // ProcCpuInfo objects, where we will place the data. Adding an extra element
2191   // at the end allows us to remove a lot of extra checks for termination
2192   // conditions.
2193   if (fseek(f, 0, SEEK_SET) != 0) {
2194     *line = 0;
2195     *msg_id = kmp_i18n_str_CantRewindCpuinfo;
2196     return -1;
2197   }
2198 
2199   // Allocate the array of records to store the proc info in.  The dummy
2200   // element at the end makes the logic in filling them out easier to code.
2201   unsigned **threadInfo =
2202       (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
2203   unsigned i;
2204   for (i = 0; i <= num_records; i++) {
2205     threadInfo[i] =
2206         (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2207   }
2208 
2209 #define CLEANUP_THREAD_INFO                                                    \
2210   for (i = 0; i <= num_records; i++) {                                         \
2211     __kmp_free(threadInfo[i]);                                                 \
2212   }                                                                            \
2213   __kmp_free(threadInfo);
2214 
2215   // A value of UINT_MAX means that we didn't find the field
2216   unsigned __index;
2217 
2218 #define INIT_PROC_INFO(p)                                                      \
2219   for (__index = 0; __index <= maxIndex; __index++) {                          \
2220     (p)[__index] = UINT_MAX;                                                   \
2221   }
2222 
2223   for (i = 0; i <= num_records; i++) {
2224     INIT_PROC_INFO(threadInfo[i]);
2225   }
2226 
2227   unsigned num_avail = 0;
2228   *line = 0;
2229   while (!feof(f)) {
2230     // Create an inner scoping level, so that all the goto targets at the end of
2231     // the loop appear in an outer scoping level. This avoids warnings about
2232     // jumping past an initialization to a target in the same block.
2233     {
2234       buf[sizeof(buf) - 1] = 1;
2235       bool long_line = false;
2236       if (!fgets(buf, sizeof(buf), f)) {
2237         // Read errors presumably because of EOF
2238         // If there is valid data in threadInfo[num_avail], then fake
2239         // a blank line to ensure that the last address gets parsed.
2240         bool valid = false;
2241         for (i = 0; i <= maxIndex; i++) {
2242           if (threadInfo[num_avail][i] != UINT_MAX) {
2243             valid = true;
2244           }
2245         }
2246         if (!valid) {
2247           break;
2248         }
2249         buf[0] = 0;
2250       } else if (!buf[sizeof(buf) - 1]) {
2251         // The line is longer than the buffer.  Set a flag and don't
2252         // emit an error if we were going to ignore the line, anyway.
2253         long_line = true;
2254 
2255 #define CHECK_LINE                                                             \
2256   if (long_line) {                                                             \
2257     CLEANUP_THREAD_INFO;                                                       \
2258     *msg_id = kmp_i18n_str_LongLineCpuinfo;                                    \
2259     return -1;                                                                 \
2260   }
2261       }
2262       (*line)++;
2263 
2264       char s1[] = "processor";
2265       if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2266         CHECK_LINE;
2267         char *p = strchr(buf + sizeof(s1) - 1, ':');
2268         unsigned val;
2269         if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2270           goto no_val;
2271         if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
2272 #if KMP_ARCH_AARCH64
2273           // Handle the old AArch64 /proc/cpuinfo layout differently:
2274           // it lists all of the 'processor' entries in a single
2275           // 'Processor' section, so the usual duplicate-field check
2276           // would always trigger on that layout.
2277           num_avail++;
2278 #else
2279           goto dup_field;
2280 #endif
2281         threadInfo[num_avail][osIdIndex] = val;
2282 #if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
2283         char path[256];
2284         KMP_SNPRINTF(
2285             path, sizeof(path),
2286             "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
2287             threadInfo[num_avail][osIdIndex]);
2288         __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);
2289 
2290         KMP_SNPRINTF(path, sizeof(path),
2291                      "/sys/devices/system/cpu/cpu%u/topology/core_id",
2292                      threadInfo[num_avail][osIdIndex]);
2293         __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
2294         continue;
2295 #else
2296       }
2297       char s2[] = "physical id";
2298       if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
2299         CHECK_LINE;
2300         char *p = strchr(buf + sizeof(s2) - 1, ':');
2301         unsigned val;
2302         if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2303           goto no_val;
2304         if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
2305           goto dup_field;
2306         threadInfo[num_avail][pkgIdIndex] = val;
2307         continue;
2308       }
2309       char s3[] = "core id";
2310       if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
2311         CHECK_LINE;
2312         char *p = strchr(buf + sizeof(s3) - 1, ':');
2313         unsigned val;
2314         if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2315           goto no_val;
2316         if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
2317           goto dup_field;
2318         threadInfo[num_avail][coreIdIndex] = val;
2319         continue;
2320 #endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
2321       }
2322       char s4[] = "thread id";
2323       if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
2324         CHECK_LINE;
2325         char *p = strchr(buf + sizeof(s4) - 1, ':');
2326         unsigned val;
2327         if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2328           goto no_val;
2329         if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
2330           goto dup_field;
2331         threadInfo[num_avail][threadIdIndex] = val;
2332         continue;
2333       }
2334       unsigned level;
2335       if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
2336         CHECK_LINE;
2337         char *p = strchr(buf + sizeof(s4) - 1, ':');
2338         unsigned val;
2339         if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2340           goto no_val;
2341         KMP_ASSERT(nodeIdIndex + level <= maxIndex);
2342         if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
2343           goto dup_field;
2344         threadInfo[num_avail][nodeIdIndex + level] = val;
2345         continue;
2346       }
2347 
2348       // We didn't recognize the leading token on the line. There are lots of
2349       // leading tokens that we don't recognize - if the line isn't empty, go on
2350       // to the next line.
2351       if ((*buf != 0) && (*buf != '\n')) {
2352         // If the line is longer than the buffer, read characters
2353         // until we find a newline.
2354         if (long_line) {
2355           int ch;
2356           while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
2357             ;
2358         }
2359         continue;
2360       }
2361 
2362       // A newline has signalled the end of the processor record.
2363       // Check that there aren't too many procs specified.
2364       if ((int)num_avail == __kmp_xproc) {
2365         CLEANUP_THREAD_INFO;
2366         *msg_id = kmp_i18n_str_TooManyEntries;
2367         return -1;
2368       }
2369 
2370       // Check for missing fields.  The osId field must be there, and we
2371       // currently require that the physical id field is specified, also.
2372       if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
2373         CLEANUP_THREAD_INFO;
2374         *msg_id = kmp_i18n_str_MissingProcField;
2375         return -1;
2376       }
2377       if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
2378         CLEANUP_THREAD_INFO;
2379         *msg_id = kmp_i18n_str_MissingPhysicalIDField;
2380         return -1;
2381       }
2382 
2383       // Skip this proc if it is not included in the machine model.
2384       if (!KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
2385                          __kmp_affin_fullMask)) {
2386         INIT_PROC_INFO(threadInfo[num_avail]);
2387         continue;
2388       }
2389 
2390       // We have a successful parse of this proc's info.
2391       // Increment the counter, and prepare for the next proc.
2392       num_avail++;
2393       KMP_ASSERT(num_avail <= num_records);
2394       INIT_PROC_INFO(threadInfo[num_avail]);
2395     }
2396     continue;
2397 
2398   no_val:
2399     CLEANUP_THREAD_INFO;
2400     *msg_id = kmp_i18n_str_MissingValCpuinfo;
2401     return -1;
2402 
2403   dup_field:
2404     CLEANUP_THREAD_INFO;
2405     *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
2406     return -1;
2407   }
2408   *line = 0;
2409 
2410 #if KMP_MIC && REDUCE_TEAM_SIZE
2411   unsigned teamSize = 0;
2412 #endif // KMP_MIC && REDUCE_TEAM_SIZE
2413 
2414   // check for num_records == __kmp_xproc ???
2415 
2416   // If there's only one thread context to bind to, form an Address object with
2417   // depth 1 and return immediately (or, if affinity is off, set address2os to
2418   // NULL and return).
2419   //
2420   // If it is configured to omit the package level when there is only a single
2421   // package, the logic at the end of this routine won't work if there is only a
2422   // single thread - it would try to form an Address object with depth 0.
2423   KMP_ASSERT(num_avail > 0);
2424   KMP_ASSERT(num_avail <= num_records);
2425   if (num_avail == 1) {
2426     __kmp_ncores = 1;
2427     __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
2428     if (__kmp_affinity_verbose) {
2429       if (!KMP_AFFINITY_CAPABLE()) {
2430         KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
2431         KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2432         KMP_INFORM(Uniform, "KMP_AFFINITY");
2433       } else {
2434         KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
2435         KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2436         KMP_INFORM(Uniform, "KMP_AFFINITY");
2437       }
2438       int index;
2439       kmp_str_buf_t buf;
2440       __kmp_str_buf_init(&buf);
2441       __kmp_str_buf_print(&buf, "1");
2442       for (index = maxIndex - 1; index > pkgIdIndex; index--) {
2443         __kmp_str_buf_print(&buf, " x 1");
2444       }
2445       KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, 1, 1, 1);
2446       __kmp_str_buf_free(&buf);
2447     }
2448 
2449     if (__kmp_affinity_type == affinity_none) {
2450       CLEANUP_THREAD_INFO;
2451       return 0;
2452     }
2453 
2454     *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair));
2455     Address addr(1);
2456     addr.labels[0] = threadInfo[0][pkgIdIndex];
2457     (*address2os)[0] = AddrUnsPair(addr, threadInfo[0][osIdIndex]);
2458 
2459     if (__kmp_affinity_gran_levels < 0) {
2460       __kmp_affinity_gran_levels = 0;
2461     }
2462 
2463     if (__kmp_affinity_verbose) {
2464       __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
2465     }
2466 
2467     CLEANUP_THREAD_INFO;
2468     return 1;
2469   }
2470 
2471   // Sort the threadInfo table by physical Id.
2472   qsort(threadInfo, num_avail, sizeof(*threadInfo),
2473         __kmp_affinity_cmp_ProcCpuInfo_phys_id);
2474 
2475   // The table is now sorted by pkgId / coreId / threadId, but we really don't
2476   // know the radix of any of the fields. pkgId's may be sparsely assigned among
2477   // the chips on a system. Although coreId's are usually assigned
2478   // [0 .. coresPerPkg-1] and threadId's are usually assigned
2479   // [0..threadsPerCore-1], we don't want to make any such assumptions.
2480   //
2481   // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
2482   // total # packages) are at this point - we want to determine that now. We
2483   // only have an upper bound on the first two figures.
2484   unsigned *counts =
2485       (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2486   unsigned *maxCt =
2487       (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2488   unsigned *totals =
2489       (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2490   unsigned *lastId =
2491       (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2492 
2493   bool assign_thread_ids = false;
2494   unsigned threadIdCt;
2495   unsigned index;
2496 
2497 restart_radix_check:
2498   threadIdCt = 0;
2499 
2500   // Initialize the counter arrays with data from threadInfo[0].
2501   if (assign_thread_ids) {
2502     if (threadInfo[0][threadIdIndex] == UINT_MAX) {
2503       threadInfo[0][threadIdIndex] = threadIdCt++;
2504     } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
2505       threadIdCt = threadInfo[0][threadIdIndex] + 1;
2506     }
2507   }
2508   for (index = 0; index <= maxIndex; index++) {
2509     counts[index] = 1;
2510     maxCt[index] = 1;
2511     totals[index] = 1;
2512     lastId[index] = threadInfo[0][index];
2514   }
2515 
2516   // Run through the rest of the OS procs.
2517   for (i = 1; i < num_avail; i++) {
2518     // Find the most significant index whose id differs from the id for the
2519     // previous OS proc.
2520     for (index = maxIndex; index >= threadIdIndex; index--) {
2521       if (assign_thread_ids && (index == threadIdIndex)) {
2522         // Auto-assign the thread id field if it wasn't specified.
2523         if (threadInfo[i][threadIdIndex] == UINT_MAX) {
2524           threadInfo[i][threadIdIndex] = threadIdCt++;
2525         }
2526         // Apparently the thread id field was specified for some entries and not
2527         // others. Start the thread id counter off at the next higher thread id.
2528         else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
2529           threadIdCt = threadInfo[i][threadIdIndex] + 1;
2530         }
2531       }
2532       if (threadInfo[i][index] != lastId[index]) {
2533         // Run through all indices which are less significant, and reset the
2534         // counts to 1. At all levels up to and including index, we need to
2535         // increment the totals and record the last id.
2536         unsigned index2;
2537         for (index2 = threadIdIndex; index2 < index; index2++) {
2538           totals[index2]++;
2539           if (counts[index2] > maxCt[index2]) {
2540             maxCt[index2] = counts[index2];
2541           }
2542           counts[index2] = 1;
2543           lastId[index2] = threadInfo[i][index2];
2544         }
2545         counts[index]++;
2546         totals[index]++;
2547         lastId[index] = threadInfo[i][index];
2548 
2549         if (assign_thread_ids && (index > threadIdIndex)) {
2550 
2551 #if KMP_MIC && REDUCE_TEAM_SIZE
2552           // The default team size is the total #threads in the machine
2553           // minus 1 thread for every core that has 3 or more threads.
2554           teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
2555 #endif // KMP_MIC && REDUCE_TEAM_SIZE
2556 
2557           // Restart the thread counter, as we are on a new core.
2558           threadIdCt = 0;
2559 
2560           // Auto-assign the thread id field if it wasn't specified.
2561           if (threadInfo[i][threadIdIndex] == UINT_MAX) {
2562             threadInfo[i][threadIdIndex] = threadIdCt++;
2563           }
2564 
2565           // Apparently the thread id field was specified for some entries and
2566           // not others. Start the thread id counter off at the next higher
2567           // thread id.
2568           else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
2569             threadIdCt = threadInfo[i][threadIdIndex] + 1;
2570           }
2571         }
2572         break;
2573       }
2574     }
2575     if (index < threadIdIndex) {
2576       // If thread ids were specified, it is an error if they are not unique.
2577       // Also, check that we haven't already restarted the loop (to be safe -
2578       // shouldn't need to).
2579       if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
2580         __kmp_free(lastId);
2581         __kmp_free(totals);
2582         __kmp_free(maxCt);
2583         __kmp_free(counts);
2584         CLEANUP_THREAD_INFO;
2585         *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
2586         return -1;
2587       }
2588 
2589       // If the thread ids were not specified and we see entries that
2590       // are duplicates, start the loop over and assign the thread ids manually.
2591       assign_thread_ids = true;
2592       goto restart_radix_check;
2593     }
2594   }
2595 
2596 #if KMP_MIC && REDUCE_TEAM_SIZE
2597   // The default team size is the total #threads in the machine
2598   // minus 1 thread for every core that has 3 or more threads.
2599   teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
2600 #endif // KMP_MIC && REDUCE_TEAM_SIZE
2601 
2602   for (index = threadIdIndex; index <= maxIndex; index++) {
2603     if (counts[index] > maxCt[index]) {
2604       maxCt[index] = counts[index];
2605     }
2606   }
2607 
2608   __kmp_nThreadsPerCore = maxCt[threadIdIndex];
2609   nCoresPerPkg = maxCt[coreIdIndex];
2610   nPackages = totals[pkgIdIndex];
2611 
2612   // Check to see if the machine topology is uniform
2613   unsigned prod = totals[maxIndex];
2614   for (index = threadIdIndex; index < maxIndex; index++) {
2615     prod *= maxCt[index];
2616   }
2617   bool uniform = (prod == totals[threadIdIndex]);
2618 
2619   // When affinity is off, this routine will still be called to set
2620   // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
2621   // Make sure all these vars are set correctly, and return now if affinity is
2622   // not enabled.
2623   __kmp_ncores = totals[coreIdIndex];
2624 
2625   if (__kmp_affinity_verbose) {
2626     if (!KMP_AFFINITY_CAPABLE()) {
2627       KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
2628       KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2629       if (uniform) {
2630         KMP_INFORM(Uniform, "KMP_AFFINITY");
2631       } else {
2632         KMP_INFORM(NonUniform, "KMP_AFFINITY");
2633       }
2634     } else {
2635       KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
2636       KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2637       if (uniform) {
2638         KMP_INFORM(Uniform, "KMP_AFFINITY");
2639       } else {
2640         KMP_INFORM(NonUniform, "KMP_AFFINITY");
2641       }
2642     }
2643     kmp_str_buf_t buf;
2644     __kmp_str_buf_init(&buf);
2645 
2646     __kmp_str_buf_print(&buf, "%d", totals[maxIndex]);
2647     for (index = maxIndex - 1; index >= pkgIdIndex; index--) {
2648       __kmp_str_buf_print(&buf, " x %d", maxCt[index]);
2649     }
2650     KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, maxCt[coreIdIndex],
2651                maxCt[threadIdIndex], __kmp_ncores);
2652 
2653     __kmp_str_buf_free(&buf);
2654   }
2655 
2656 #if KMP_MIC && REDUCE_TEAM_SIZE
2657   // Set the default team size.
2658   if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
2659     __kmp_dflt_team_nth = teamSize;
2660     KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
2661                   "__kmp_dflt_team_nth = %d\n",
2662                   __kmp_dflt_team_nth));
2663   }
2664 #endif // KMP_MIC && REDUCE_TEAM_SIZE
2665 
2666   KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
2667   KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);
2668   __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
2669   for (i = 0; i < num_avail; ++i) { // fill the os indices
2670     __kmp_pu_os_idx[i] = threadInfo[i][osIdIndex];
2671   }
2672 
2673   if (__kmp_affinity_type == affinity_none) {
2674     __kmp_free(lastId);
2675     __kmp_free(totals);
2676     __kmp_free(maxCt);
2677     __kmp_free(counts);
2678     CLEANUP_THREAD_INFO;
2679     return 0;
2680   }
2681 
2682   // Count the number of levels which have more nodes at that level than at the
2683   // parent's level (with an implicit root node above the top level).
2684   // This is equivalent to saying that there is at least one node at this level
2685   // which has a sibling. These levels are in the map, and the package level is
2686   // always in the map.
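  //
  // For illustration only (hypothetical totals): with totals[threadIdIndex] =
  // 16, totals[coreIdIndex] = 8 and totals[pkgIdIndex] = 2, and no node_<n>
  // fields (maxIndex == pkgIdIndex), every level has a sibling, so inMap is
  // true at all three indices and depth comes out as 3. If every core had a
  // single thread context (totals[threadIdIndex] == totals[coreIdIndex]),
  // the thread level would be left out of the map instead.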
2687   bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
2688   for (index = threadIdIndex; index < maxIndex; index++) {
2689     KMP_ASSERT(totals[index] >= totals[index + 1]);
2690     inMap[index] = (totals[index] > totals[index + 1]);
2691   }
2692   inMap[maxIndex] = (totals[maxIndex] > 1);
2693   inMap[pkgIdIndex] = true;
2694 
2695   int depth = 0;
2696   for (index = threadIdIndex; index <= maxIndex; index++) {
2697     if (inMap[index]) {
2698       depth++;
2699     }
2700   }
2701   KMP_ASSERT(depth > 0);
2702 
2703   // Construct the data structure that is to be returned.
2704   *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * num_avail);
2705   int pkgLevel = -1;
2706   int coreLevel = -1;
2707   int threadLevel = -1;
2708 
2709   for (i = 0; i < num_avail; ++i) {
2710     Address addr(depth);
2711     unsigned os = threadInfo[i][osIdIndex];
2712     int src_index;
2713     int dst_index = 0;
2714 
2715     for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
2716       if (!inMap[src_index]) {
2717         continue;
2718       }
2719       addr.labels[dst_index] = threadInfo[i][src_index];
2720       if (src_index == pkgIdIndex) {
2721         pkgLevel = dst_index;
2722       } else if (src_index == coreIdIndex) {
2723         coreLevel = dst_index;
2724       } else if (src_index == threadIdIndex) {
2725         threadLevel = dst_index;
2726       }
2727       dst_index++;
2728     }
2729     (*address2os)[i] = AddrUnsPair(addr, os);
2730   }
2731 
2732   if (__kmp_affinity_gran_levels < 0) {
2733     // Set the granularity level based on what levels are modeled
2734     // in the machine topology map.
2735     unsigned src_index;
2736     __kmp_affinity_gran_levels = 0;
2737     for (src_index = threadIdIndex; src_index <= maxIndex; src_index++) {
2738       if (!inMap[src_index]) {
2739         continue;
2740       }
2741       switch (src_index) {
2742       case threadIdIndex:
2743         if (__kmp_affinity_gran > affinity_gran_thread) {
2744           __kmp_affinity_gran_levels++;
2745         }
2747         break;
2748       case coreIdIndex:
2749         if (__kmp_affinity_gran > affinity_gran_core) {
2750           __kmp_affinity_gran_levels++;
2751         }
2752         break;
2754       case pkgIdIndex:
2755         if (__kmp_affinity_gran > affinity_gran_package) {
2756           __kmp_affinity_gran_levels++;
2757         }
2758         break;
2759       }
2760     }
2761   }
2762 
2763   if (__kmp_affinity_verbose) {
2764     __kmp_affinity_print_topology(*address2os, num_avail, depth, pkgLevel,
2765                                   coreLevel, threadLevel);
2766   }
2767 
2768   __kmp_free(inMap);
2769   __kmp_free(lastId);
2770   __kmp_free(totals);
2771   __kmp_free(maxCt);
2772   __kmp_free(counts);
2773   CLEANUP_THREAD_INFO;
2774   return depth;
2775 }
2776 
2777 // Create and return a table of affinity masks, indexed by OS thread ID.
2778 // This routine handles OR'ing together all the affinity masks of threads
2779 // that are sufficiently close, if granularity > fine.
2780 static kmp_affin_mask_t *__kmp_create_masks(unsigned *maxIndex,
2781                                             unsigned *numUnique,
2782                                             AddrUnsPair *address2os,
2783                                             unsigned numAddrs) {
2784   // First form a table of affinity masks in order of OS thread id.
2785   unsigned depth;
2786   unsigned maxOsId;
2787   unsigned i;
2788 
2789   KMP_ASSERT(numAddrs > 0);
2790   depth = address2os[0].first.depth;
2791 
2792   maxOsId = 0;
2793   for (i = numAddrs - 1;; --i) {
2794     unsigned osId = address2os[i].second;
2795     if (osId > maxOsId) {
2796       maxOsId = osId;
2797     }
2798     if (i == 0)
2799       break;
2800   }
2801   kmp_affin_mask_t *osId2Mask;
2802   KMP_CPU_ALLOC_ARRAY(osId2Mask, (maxOsId + 1));
2803 
2804   // Sort the address2os table according to physical order. Doing so will put
2805   // all threads on the same core/package/node in consecutive locations.
2806   qsort(address2os, numAddrs, sizeof(*address2os),
2807         __kmp_affinity_cmp_Address_labels);
2808 
2809   KMP_ASSERT(__kmp_affinity_gran_levels >= 0);
2810   if (__kmp_affinity_verbose && (__kmp_affinity_gran_levels > 0)) {
2811     KMP_INFORM(ThreadsMigrate, "KMP_AFFINITY", __kmp_affinity_gran_levels);
2812   }
2813   if (__kmp_affinity_gran_levels >= (int)depth) {
2814     if (__kmp_affinity_verbose ||
2815         (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
2816       KMP_WARNING(AffThreadsMayMigrate);
2817     }
2818   }
2819 
2820   // Run through the table, forming the masks for all threads on each core.
2821   // Threads on the same core will have identical "Address" objects, not
2822   // considering the last level, which must be the thread id. All threads on a
2823   // core will appear consecutively.
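  //
  // For illustration only (hypothetical layout): with a granularity of "core"
  // on a machine that has 2 thread contexts per core, OS procs 0 and 1 share
  // a core, so both of their entries in osId2Mask end up holding the same
  // two-bit mask {0, 1}, and the pair counts as one group toward *numUnique.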
2824   unsigned unique = 0;
2825   unsigned j = 0; // index of 1st thread on core
2826   unsigned leader = 0;
2827   Address *leaderAddr = &(address2os[0].first);
2828   kmp_affin_mask_t *sum;
2829   KMP_CPU_ALLOC_ON_STACK(sum);
2830   KMP_CPU_ZERO(sum);
2831   KMP_CPU_SET(address2os[0].second, sum);
2832   for (i = 1; i < numAddrs; i++) {
2833     // If this thread is sufficiently close to the leader (within the
2834     // granularity setting), then set the bit for this os thread in the
2835     // affinity mask for this group, and go on to the next thread.
2836     if (leaderAddr->isClose(address2os[i].first, __kmp_affinity_gran_levels)) {
2837       KMP_CPU_SET(address2os[i].second, sum);
2838       continue;
2839     }
2840 
2841     // For every thread in this group, copy the mask to the thread's entry in
2842     // the osId2Mask table.  Mark the first address as a leader.
2843     for (; j < i; j++) {
2844       unsigned osId = address2os[j].second;
2845       KMP_DEBUG_ASSERT(osId <= maxOsId);
2846       kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
2847       KMP_CPU_COPY(mask, sum);
2848       address2os[j].first.leader = (j == leader);
2849     }
2850     unique++;
2851 
2852     // Start a new mask.
2853     leader = i;
2854     leaderAddr = &(address2os[i].first);
2855     KMP_CPU_ZERO(sum);
2856     KMP_CPU_SET(address2os[i].second, sum);
2857   }
2858 
2859   // For every thread in last group, copy the mask to the thread's
2860   // entry in the osId2Mask table.
2861   for (; j < i; j++) {
2862     unsigned osId = address2os[j].second;
2863     KMP_DEBUG_ASSERT(osId <= maxOsId);
2864     kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
2865     KMP_CPU_COPY(mask, sum);
2866     address2os[j].first.leader = (j == leader);
2867   }
2868   unique++;
2869   KMP_CPU_FREE_FROM_STACK(sum);
2870 
2871   *maxIndex = maxOsId;
2872   *numUnique = unique;
2873   return osId2Mask;
2874 }
2875 
2876 // State for the affinity proclist parsers.  It's easier to declare these vars
2877 // as file-static than to try to pass them through the calling sequence of
2878 // the recursive-descent OMP_PLACES parser.
2879 static kmp_affin_mask_t *newMasks;
2880 static int numNewMasks;
2881 static int nextNewMask;
2882 
2883 #define ADD_MASK(_mask)                                                        \
2884   {                                                                            \
2885     if (nextNewMask >= numNewMasks) {                                          \
2886       int i;                                                                   \
2887       numNewMasks *= 2;                                                        \
2888       kmp_affin_mask_t *temp;                                                  \
2889       KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks);                         \
2890       for (i = 0; i < numNewMasks / 2; i++) {                                  \
2891         kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);                    \
2892         kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i);                       \
2893         KMP_CPU_COPY(dest, src);                                               \
2894       }                                                                        \
2895       KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2);                  \
2896       newMasks = temp;                                                         \
2897     }                                                                          \
2898     KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask));               \
2899     nextNewMask++;                                                             \
2900   }
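// For illustration: newMasks initially has room for numNewMasks == 2 entries;
// the third ADD_MASK doubles the array to 4, copying the existing masks into
// the new storage before appending, so growth costs amortized O(1) copies per
// added mask.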
2901 
2902 #define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId)                             \
2903   {                                                                            \
2904     if (((_osId) > _maxOsId) ||                                                \
2905         (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) {     \
2906       if (__kmp_affinity_verbose ||                                            \
2907           (__kmp_affinity_warnings &&                                          \
2908            (__kmp_affinity_type != affinity_none))) {                          \
2909         KMP_WARNING(AffIgnoreInvalidProcID, _osId);                            \
2910       }                                                                        \
2911     } else {                                                                   \
2912       ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId)));                            \
2913     }                                                                          \
2914   }
2915 
2916 // Re-parse the proclist (for the explicit affinity type), and form the list
2917 // of affinity masks (newMasks), indexed by gtid.
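// Example proclists this parser accepts (illustrative):
//   "3"            a single mask, copied from osId2Mask[3]
//   "0,2,4-6"      one mask per listed or ranged proc: 0, 2, 4, 5, 6
//   "0-30:2"       a range with stride 2: procs 0, 2, 4, ..., 30
//   "{0,1},{2,3}"  one mask per {...} set; each mask is the union of the
//                  listed procs' osId2Mask entries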
2918 static void __kmp_affinity_process_proclist(kmp_affin_mask_t **out_masks,
2919                                             unsigned int *out_numMasks,
2920                                             const char *proclist,
2921                                             kmp_affin_mask_t *osId2Mask,
2922                                             int maxOsId) {
2923   int i;
2924   const char *scan = proclist;
2925   const char *next = proclist;
2926 
2927   // The temporary mask vector (newMasks) starts small and is doubled by
2928   // ADD_MASK whenever it fills up.
2929   numNewMasks = 2;
2930   KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
2931   nextNewMask = 0;
2932   kmp_affin_mask_t *sumMask;
2933   KMP_CPU_ALLOC(sumMask);
2934   int setSize = 0;
2935 
2936   for (;;) {
2937     int start, end, stride;
2938 
2939     SKIP_WS(scan);
2940     next = scan;
2941     if (*next == '\0') {
2942       break;
2943     }
2944 
2945     if (*next == '{') {
2946       int num;
2947       setSize = 0;
2948       next++; // skip '{'
2949       SKIP_WS(next);
2950       scan = next;
2951 
2952       // Read the first integer in the set.
2953       KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
2954       SKIP_DIGITS(next);
2955       num = __kmp_str_to_int(scan, *next);
2956       KMP_ASSERT2(num >= 0, "bad explicit proc list");
2957 
2958       // Copy the mask for that osId to the sum (union) mask.
2959       if ((num > maxOsId) ||
2960           (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
2961         if (__kmp_affinity_verbose ||
2962             (__kmp_affinity_warnings &&
2963              (__kmp_affinity_type != affinity_none))) {
2964           KMP_WARNING(AffIgnoreInvalidProcID, num);
2965         }
2966         KMP_CPU_ZERO(sumMask);
2967       } else {
2968         KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
2969         setSize = 1;
2970       }
2971 
2972       for (;;) {
2973         // Check for end of set.
2974         SKIP_WS(next);
2975         if (*next == '}') {
2976           next++; // skip '}'
2977           break;
2978         }
2979 
2980         // Skip optional comma.
2981         if (*next == ',') {
2982           next++;
2983         }
2984         SKIP_WS(next);
2985 
2986         // Read the next integer in the set.
2987         scan = next;
2988         KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2989 
2990         SKIP_DIGITS(next);
2991         num = __kmp_str_to_int(scan, *next);
2992         KMP_ASSERT2(num >= 0, "bad explicit proc list");
2993 
2994         // Add the mask for that osId to the sum mask.
2995         if ((num > maxOsId) ||
2996             (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
2997           if (__kmp_affinity_verbose ||
2998               (__kmp_affinity_warnings &&
2999                (__kmp_affinity_type != affinity_none))) {
3000             KMP_WARNING(AffIgnoreInvalidProcID, num);
3001           }
3002         } else {
3003           KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
3004           setSize++;
3005         }
3006       }
3007       if (setSize > 0) {
3008         ADD_MASK(sumMask);
3009       }
3010 
3011       SKIP_WS(next);
3012       if (*next == ',') {
3013         next++;
3014       }
3015       scan = next;
3016       continue;
3017     }
3018 
3019     // Read the first integer.
3020     KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3021     SKIP_DIGITS(next);
3022     start = __kmp_str_to_int(scan, *next);
3023     KMP_ASSERT2(start >= 0, "bad explicit proc list");
3024     SKIP_WS(next);
3025 
3026     // If this isn't a range, then add a mask to the list and go on.
3027     if (*next != '-') {
3028       ADD_MASK_OSID(start, osId2Mask, maxOsId);
3029 
3030       // Skip optional comma.
3031       if (*next == ',') {
3032         next++;
3033       }
3034       scan = next;
3035       continue;
3036     }
3037 
3038     // This is a range.  Skip over the '-' and read in the 2nd int.
3039     next++; // skip '-'
3040     SKIP_WS(next);
3041     scan = next;
3042     KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3043     SKIP_DIGITS(next);
3044     end = __kmp_str_to_int(scan, *next);
3045     KMP_ASSERT2(end >= 0, "bad explicit proc list");
3046 
3047     // Check for a stride parameter
3048     stride = 1;
3049     SKIP_WS(next);
3050     if (*next == ':') {
3051       // A stride is specified.  Skip over the ':' and read the 3rd int.
3052       int sign = +1;
3053       next++; // skip ':'
3054       SKIP_WS(next);
3055       scan = next;
3056       if (*next == '-') {
3057         sign = -1;
3058         next++;
3059         SKIP_WS(next);
3060         scan = next;
3061       }
3062       KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3063       SKIP_DIGITS(next);
3064       stride = __kmp_str_to_int(scan, *next);
3065       KMP_ASSERT2(stride >= 0, "bad explicit proc list");
3066       stride *= sign;
3067     }
3068 
3069     // Do some range checks.
3070     KMP_ASSERT2(stride != 0, "bad explicit proc list");
3071     if (stride > 0) {
3072       KMP_ASSERT2(start <= end, "bad explicit proc list");
3073     } else {
3074       KMP_ASSERT2(start >= end, "bad explicit proc list");
3075     }
3076     KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");
3077 
3078     // Add the mask for each OS proc # to the list.
3079     if (stride > 0) {
3080       do {
3081         ADD_MASK_OSID(start, osId2Mask, maxOsId);
3082         start += stride;
3083       } while (start <= end);
3084     } else {
3085       do {
3086         ADD_MASK_OSID(start, osId2Mask, maxOsId);
3087         start += stride;
3088       } while (start >= end);
3089     }
3090 
3091     // Skip optional comma.
3092     SKIP_WS(next);
3093     if (*next == ',') {
3094       next++;
3095     }
3096     scan = next;
3097   }
3098 
3099   *out_numMasks = nextNewMask;
3100   if (nextNewMask == 0) {
3101     *out_masks = NULL;
3102     KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    KMP_CPU_FREE(sumMask);
3103     return;
3104   }
3105   KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3106   for (i = 0; i < nextNewMask; i++) {
3107     kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3108     kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3109     KMP_CPU_COPY(dest, src);
3110   }
3111   KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3112   KMP_CPU_FREE(sumMask);
3113 }
3114 
3115 /*-----------------------------------------------------------------------------
3116 Re-parse the OMP_PLACES proc id list, forming the newMasks for the different
3117 places.  Again, here is the grammar:
3118 
3119 place_list := place
3120 place_list := place , place_list
3121 place := num
3122 place := place : num
3123 place := place : num : signed
3124 place := { subplace_list }
3125 place := ! place                  // (lowest priority)
3126 subplace_list := subplace
3127 subplace_list := subplace , subplace_list
3128 subplace := num
3129 subplace := num : num
3130 subplace := num : num : signed
3131 signed := num
3132 signed := + signed
3133 signed := - signed
3134 -----------------------------------------------------------------------------*/
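/* Example place lists that match this grammar (illustrative):
     "{0,1,2,3},{4,5,6,7}"   two explicit places of four OS procs each
     "{0:4},{4:4}"           the same two places written as start:count
     "{0:4}:4:4"             the place {0,1,2,3} replicated 4 times with a
                             stride of 4 procs => {0:4},{4:4},{8:4},{12:4}
     "!{0}"                  the complement of OS proc 0 (all other proc ids
                             up to the highest known OS proc id)              */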
3135 static void __kmp_process_subplace_list(const char **scan,
3136                                         kmp_affin_mask_t *osId2Mask,
3137                                         int maxOsId, kmp_affin_mask_t *tempMask,
3138                                         int *setSize) {
3139   const char *next;
3140 
3141   for (;;) {
3142     int start, count, stride, i;
3143 
3144     // Read in the starting proc id
3145     SKIP_WS(*scan);
3146     KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3147     next = *scan;
3148     SKIP_DIGITS(next);
3149     start = __kmp_str_to_int(*scan, *next);
3150     KMP_ASSERT(start >= 0);
3151     *scan = next;
3152 
3153     // valid follow sets are ',' ':' and '}'
3154     SKIP_WS(*scan);
3155     if (**scan == '}' || **scan == ',') {
3156       if ((start > maxOsId) ||
3157           (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3158         if (__kmp_affinity_verbose ||
3159             (__kmp_affinity_warnings &&
3160              (__kmp_affinity_type != affinity_none))) {
3161           KMP_WARNING(AffIgnoreInvalidProcID, start);
3162         }
3163       } else {
3164         KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3165         (*setSize)++;
3166       }
3167       if (**scan == '}') {
3168         break;
3169       }
3170       (*scan)++; // skip ','
3171       continue;
3172     }
3173     KMP_ASSERT2(**scan == ':', "bad explicit places list");
3174     (*scan)++; // skip ':'
3175 
3176     // Read count parameter
3177     SKIP_WS(*scan);
3178     KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3179     next = *scan;
3180     SKIP_DIGITS(next);
3181     count = __kmp_str_to_int(*scan, *next);
3182     KMP_ASSERT(count >= 0);
3183     *scan = next;
3184 
3185     // valid follow sets are ',' ':' and '}'
3186     SKIP_WS(*scan);
3187     if (**scan == '}' || **scan == ',') {
3188       for (i = 0; i < count; i++) {
3189         if ((start > maxOsId) ||
3190             (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3191           if (__kmp_affinity_verbose ||
3192               (__kmp_affinity_warnings &&
3193                (__kmp_affinity_type != affinity_none))) {
3194             KMP_WARNING(AffIgnoreInvalidProcID, start);
3195           }
3196           break; // don't proliferate warnings for large count
3197         } else {
3198           KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3199           start++;
3200           (*setSize)++;
3201         }
3202       }
3203       if (**scan == '}') {
3204         break;
3205       }
3206       (*scan)++; // skip ','
3207       continue;
3208     }
3209     KMP_ASSERT2(**scan == ':', "bad explicit places list");
3210     (*scan)++; // skip ':'
3211 
3212     // Read stride parameter
3213     int sign = +1;
3214     for (;;) {
3215       SKIP_WS(*scan);
3216       if (**scan == '+') {
3217         (*scan)++; // skip '+'
3218         continue;
3219       }
3220       if (**scan == '-') {
3221         sign *= -1;
3222         (*scan)++; // skip '-'
3223         continue;
3224       }
3225       break;
3226     }
3227     SKIP_WS(*scan);
3228     KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3229     next = *scan;
3230     SKIP_DIGITS(next);
3231     stride = __kmp_str_to_int(*scan, *next);
3232     KMP_ASSERT(stride >= 0);
3233     *scan = next;
3234     stride *= sign;
3235 
3236     // valid follow sets are ',' and '}'
3237     SKIP_WS(*scan);
3238     if (**scan == '}' || **scan == ',') {
3239       for (i = 0; i < count; i++) {
3240         if ((start > maxOsId) ||
3241             (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3242           if (__kmp_affinity_verbose ||
3243               (__kmp_affinity_warnings &&
3244                (__kmp_affinity_type != affinity_none))) {
3245             KMP_WARNING(AffIgnoreInvalidProcID, start);
3246           }
3247           break; // don't proliferate warnings for large count
3248         } else {
3249           KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3250           start += stride;
3251           (*setSize)++;
3252         }
3253       }
3254       if (**scan == '}') {
3255         break;
3256       }
3257       (*scan)++; // skip ','
3258       continue;
3259     }
3260 
3261     KMP_ASSERT2(0, "bad explicit places list");
3262   }
3263 }
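// For illustration: a subplace written as "0:4:2" expands, in the loop above,
// to OS procs 0, 2, 4 and 6 (start 0, count 4, stride 2); each valid proc's
// entry in osId2Mask is OR'ed into tempMask, and invalid ids only produce a
// warning.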
3264 
3265 static void __kmp_process_place(const char **scan, kmp_affin_mask_t *osId2Mask,
3266                                 int maxOsId, kmp_affin_mask_t *tempMask,
3267                                 int *setSize) {
3268   const char *next;
3269 
3270   // valid follow sets are '{' '!' and num
3271   SKIP_WS(*scan);
3272   if (**scan == '{') {
3273     (*scan)++; // skip '{'
3274     __kmp_process_subplace_list(scan, osId2Mask, maxOsId, tempMask, setSize);
3275     KMP_ASSERT2(**scan == '}', "bad explicit places list");
3276     (*scan)++; // skip '}'
3277   } else if (**scan == '!') {
3278     (*scan)++; // skip '!'
3279     __kmp_process_place(scan, osId2Mask, maxOsId, tempMask, setSize);
3280     KMP_CPU_COMPLEMENT(maxOsId, tempMask);
3281   } else if ((**scan >= '0') && (**scan <= '9')) {
3282     next = *scan;
3283     SKIP_DIGITS(next);
3284     int num = __kmp_str_to_int(*scan, *next);
3285     KMP_ASSERT(num >= 0);
3286     if ((num > maxOsId) ||
3287         (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3288       if (__kmp_affinity_verbose ||
3289           (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
3290         KMP_WARNING(AffIgnoreInvalidProcID, num);
3291       }
3292     } else {
3293       KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
3294       (*setSize)++;
3295     }
3296     *scan = next; // skip num
3297   } else {
3298     KMP_ASSERT2(0, "bad explicit places list");
3299   }
3300 }
3301 
3303 void __kmp_affinity_process_placelist(kmp_affin_mask_t **out_masks,
3304                                       unsigned int *out_numMasks,
3305                                       const char *placelist,
3306                                       kmp_affin_mask_t *osId2Mask,
3307                                       int maxOsId) {
3308   int i, j, count, stride, sign;
3309   const char *scan = placelist;
3310   const char *next = placelist;
3311 
3312   numNewMasks = 2;
3313   KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
3314   nextNewMask = 0;
3315 
3316   // tempMask is modified based on the previous or initial
3317   //   place to form the current place
3318   // previousMask contains the previous place
3319   kmp_affin_mask_t *tempMask;
3320   kmp_affin_mask_t *previousMask;
3321   KMP_CPU_ALLOC(tempMask);
3322   KMP_CPU_ZERO(tempMask);
3323   KMP_CPU_ALLOC(previousMask);
3324   KMP_CPU_ZERO(previousMask);
3325   int setSize = 0;
3326 
3327   for (;;) {
3328     __kmp_process_place(&scan, osId2Mask, maxOsId, tempMask, &setSize);
3329 
3330     // valid follow sets are ',' ':' and EOL
3331     SKIP_WS(scan);
3332     if (*scan == '\0' || *scan == ',') {
3333       if (setSize > 0) {
3334         ADD_MASK(tempMask);
3335       }
3336       KMP_CPU_ZERO(tempMask);
3337       setSize = 0;
3338       if (*scan == '\0') {
3339         break;
3340       }
3341       scan++; // skip ','
3342       continue;
3343     }
3344 
3345     KMP_ASSERT2(*scan == ':', "bad explicit places list");
3346     scan++; // skip ':'
3347 
3348     // Read count parameter
3349     SKIP_WS(scan);
3350     KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3351     next = scan;
3352     SKIP_DIGITS(next);
3353     count = __kmp_str_to_int(scan, *next);
3354     KMP_ASSERT(count >= 0);
3355     scan = next;
3356 
3357     // valid follow sets are ',' ':' and EOL
3358     SKIP_WS(scan);
3359     if (*scan == '\0' || *scan == ',') {
3360       stride = +1;
3361     } else {
3362       KMP_ASSERT2(*scan == ':', "bad explicit places list");
3363       scan++; // skip ':'
3364 
3365       // Read stride parameter
3366       sign = +1;
3367       for (;;) {
3368         SKIP_WS(scan);
3369         if (*scan == '+') {
3370           scan++; // skip '+'
3371           continue;
3372         }
3373         if (*scan == '-') {
3374           sign *= -1;
3375           scan++; // skip '-'
3376           continue;
3377         }
3378         break;
3379       }
3380       SKIP_WS(scan);
3381       KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3382       next = scan;
3383       SKIP_DIGITS(next);
3384       stride = __kmp_str_to_int(scan, *next);
3385       KMP_DEBUG_ASSERT(stride >= 0);
3386       scan = next;
3387       stride *= sign;
3388     }
3389 
3390     // Add places determined by initial_place : count : stride
3391     for (i = 0; i < count; i++) {
3392       if (setSize == 0) {
3393         break;
3394       }
3395       // Add the current place, then build the next place (tempMask) from that
3396       KMP_CPU_COPY(previousMask, tempMask);
3397       ADD_MASK(previousMask);
3398       KMP_CPU_ZERO(tempMask);
3399       setSize = 0;
3400       KMP_CPU_SET_ITERATE(j, previousMask) {
3401         if (!KMP_CPU_ISSET(j, previousMask)) {
3402           continue;
3403         }
3404         if ((j + stride > maxOsId) || (j + stride < 0) ||
3405             (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
3406             (!KMP_CPU_ISSET(j + stride,
3407                             KMP_CPU_INDEX(osId2Mask, j + stride)))) {
3408           if ((__kmp_affinity_verbose ||
3409                (__kmp_affinity_warnings &&
3410                 (__kmp_affinity_type != affinity_none))) &&
3411               i < count - 1) {
3412             KMP_WARNING(AffIgnoreInvalidProcID, j + stride);
3413           }
3414           continue;
3415         }
3416         KMP_CPU_SET(j + stride, tempMask);
3417         setSize++;
3418       }
3419     }
3420     KMP_CPU_ZERO(tempMask);
3421     setSize = 0;
3422 
3423     // valid follow sets are ',' and EOL
3424     SKIP_WS(scan);
3425     if (*scan == '\0') {
3426       break;
3427     }
3428     if (*scan == ',') {
3429       scan++; // skip ','
3430       continue;
3431     }
3432 
3433     KMP_ASSERT2(0, "bad explicit places list");
3434   }
3435 
3436   *out_numMasks = nextNewMask;
3437   if (nextNewMask == 0) {
3438     *out_masks = NULL;
3439     KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    KMP_CPU_FREE(tempMask);
    KMP_CPU_FREE(previousMask);
3440     return;
3441   }
3442   KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3443   KMP_CPU_FREE(tempMask);
3444   KMP_CPU_FREE(previousMask);
3445   for (i = 0; i < nextNewMask; i++) {
3446     kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3447     kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3448     KMP_CPU_COPY(dest, src);
3449   }
3450   KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3451 }
3452 
3453 #undef ADD_MASK
3454 #undef ADD_MASK_OSID
3455 
3456 #if KMP_USE_HWLOC
3457 static int __kmp_hwloc_skip_PUs_obj(hwloc_topology_t t, hwloc_obj_t o) {
3458   // skip the PU descendants of object o
3459   int skipped = 0;
3460   hwloc_obj_t hT = NULL;
3461   int N = __kmp_hwloc_count_children_by_type(t, o, HWLOC_OBJ_PU, &hT);
3462   for (int i = 0; i < N; ++i) {
3463     KMP_DEBUG_ASSERT(hT);
3464     unsigned idx = hT->os_index;
3465     if (KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3466       KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3467       KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3468       ++skipped;
3469     }
3470     hT = hwloc_get_next_obj_by_type(t, HWLOC_OBJ_PU, hT);
3471   }
3472   return skipped; // count number of skipped units
3473 }
3474 
3475 static int __kmp_hwloc_obj_has_PUs(hwloc_topology_t t, hwloc_obj_t o) {
3476   // check if obj has PUs present in fullMask
3477   hwloc_obj_t hT = NULL;
3478   int N = __kmp_hwloc_count_children_by_type(t, o, HWLOC_OBJ_PU, &hT);
3479   for (int i = 0; i < N; ++i) {
3480     KMP_DEBUG_ASSERT(hT);
3481     unsigned idx = hT->os_index;
3482     if (KMP_CPU_ISSET(idx, __kmp_affin_fullMask))
3483       return 1; // found PU
3484     hT = hwloc_get_next_obj_by_type(t, HWLOC_OBJ_PU, hT);
3485   }
3486   return 0; // no PUs found
3487 }
3488 #endif // KMP_USE_HWLOC
3489 
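// Trim the topology according to KMP_HW_SUBSET, whose parsed values live in
// the __kmp_hws_* globals.  Illustrative settings (the string itself is parsed
// in kmp_settings.cpp, not here):
//   KMP_HW_SUBSET=2s,4c,2t       keep 2 sockets, 4 cores per socket and
//                                2 threads per core
//   KMP_HW_SUBSET=2s@2,4c@8,2t   the same counts, but skip the first 2 sockets
//                                and the first 8 cores in each kept socket
// Procs outside the requested subset are cleared from __kmp_affin_fullMask and
// dropped from the address2os table.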
3490 static void __kmp_apply_thread_places(AddrUnsPair **pAddr, int depth) {
3491   AddrUnsPair *newAddr;
3492   if (__kmp_hws_requested == 0)
3493     goto _exit; // no topology limiting actions requested, exit
3494 #if KMP_USE_HWLOC
3495   if (__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
3496     // The number of subobjects is calculated dynamically, so this works for
3497     // any non-uniform topology.
3498     // L2 cache objects are located by depth; other objects by type.
3499     hwloc_topology_t tp = __kmp_hwloc_topology;
3500     int nS = 0, nN = 0, nL = 0, nC = 0,
3501         nT = 0; // logical index including skipped
3502     int nCr = 0, nTr = 0; // number of requested units
3503     int nPkg = 0, nCo = 0, n_new = 0, n_old = 0, nCpP = 0, nTpC = 0; // counters
3504     hwloc_obj_t hT, hC, hL, hN, hS; // hwloc objects (pointers to)
3505     int L2depth, idx;
3506 
3507     // check support of extensions ----------------------------------
3508     int numa_support = 0, tile_support = 0;
3509     if (__kmp_pu_os_idx)
3510       hT = hwloc_get_pu_obj_by_os_index(tp,
3511                                         __kmp_pu_os_idx[__kmp_avail_proc - 1]);
3512     else
3513       hT = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PU, __kmp_avail_proc - 1);
3514     if (hT == NULL) { // something's gone wrong
3515       KMP_WARNING(AffHWSubsetUnsupported);
3516       goto _exit;
3517     }
3518     // check NUMA node
3519     hN = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hT);
3520     hS = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_PACKAGE, hT);
3521     if (hN != NULL && hN->depth > hS->depth) {
3522       numa_support = 1; // 1 in case socket includes node(s)
3523     } else if (__kmp_hws_node.num > 0) {
3524       // don't support sockets inside NUMA node (no such HW found for testing)
3525       KMP_WARNING(AffHWSubsetUnsupported);
3526       goto _exit;
3527     }
3528     // check L2 cache, get object by depth because there are multiple caches
3529     L2depth = hwloc_get_cache_type_depth(tp, 2, HWLOC_OBJ_CACHE_UNIFIED);
3530     hL = hwloc_get_ancestor_obj_by_depth(tp, L2depth, hT);
3531     if (hL != NULL &&
3532         __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE, &hC) > 1) {
3533       tile_support = 1; // no point counting L2 if it contains a single core
3534     } else if (__kmp_hws_tile.num > 0) {
3535       if (__kmp_hws_core.num == 0) {
3536         __kmp_hws_core = __kmp_hws_tile; // replace L2 with core
3537         __kmp_hws_tile.num = 0;
3538       } else {
3539         // L2 and core are both requested, but represent same object
3540         KMP_WARNING(AffHWSubsetInvalid);
3541         goto _exit;
3542       }
3543     }
3544     // end of check of extensions -----------------------------------
3545 
3546     // fill in unset items, validate settings -----------------------
3547     if (__kmp_hws_socket.num == 0)
3548       __kmp_hws_socket.num = nPackages; // use all available sockets
3549     if (__kmp_hws_socket.offset >= nPackages) {
3550       KMP_WARNING(AffHWSubsetManySockets);
3551       goto _exit;
3552     }
3553     if (numa_support) {
3554       hN = NULL;
3555       int NN = __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_NUMANODE,
3556                                                   &hN); // num nodes in socket
3557       if (__kmp_hws_node.num == 0)
3558         __kmp_hws_node.num = NN; // use all available nodes
3559       if (__kmp_hws_node.offset >= NN) {
3560         KMP_WARNING(AffHWSubsetManyNodes);
3561         goto _exit;
3562       }
3563       if (tile_support) {
3564         // get num tiles in node
3565         int NL = __kmp_hwloc_count_children_by_depth(tp, hN, L2depth, &hL);
3566         if (__kmp_hws_tile.num == 0) {
3567           __kmp_hws_tile.num = NL + 1;
3568         } // use all available tiles, some node may have more tiles, thus +1
3569         if (__kmp_hws_tile.offset >= NL) {
3570           KMP_WARNING(AffHWSubsetManyTiles);
3571           goto _exit;
3572         }
3573         int NC = __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE,
3574                                                     &hC); // num cores in tile
3575         if (__kmp_hws_core.num == 0)
3576           __kmp_hws_core.num = NC; // use all available cores
3577         if (__kmp_hws_core.offset >= NC) {
3578           KMP_WARNING(AffHWSubsetManyCores);
3579           goto _exit;
3580         }
3581       } else { // tile_support
3582         int NC = __kmp_hwloc_count_children_by_type(tp, hN, HWLOC_OBJ_CORE,
3583                                                     &hC); // num cores in node
3584         if (__kmp_hws_core.num == 0)
3585           __kmp_hws_core.num = NC; // use all available cores
3586         if (__kmp_hws_core.offset >= NC) {
3587           KMP_WARNING(AffHWSubsetManyCores);
3588           goto _exit;
3589         }
3590       } // tile_support
3591     } else { // numa_support
3592       if (tile_support) {
3593         // get num tiles in socket
3594         int NL = __kmp_hwloc_count_children_by_depth(tp, hS, L2depth, &hL);
3595         if (__kmp_hws_tile.num == 0)
3596           __kmp_hws_tile.num = NL; // use all available tiles
3597         if (__kmp_hws_tile.offset >= NL) {
3598           KMP_WARNING(AffHWSubsetManyTiles);
3599           goto _exit;
3600         }
3601         int NC = __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE,
3602                                                     &hC); // num cores in tile
3603         if (__kmp_hws_core.num == 0)
3604           __kmp_hws_core.num = NC; // use all available cores
3605         if (__kmp_hws_core.offset >= NC) {
3606           KMP_WARNING(AffHWSubsetManyCores);
3607           goto _exit;
3608         }
3609       } else { // tile_support
3610         int NC = __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_CORE,
3611                                                     &hC); // num cores in socket
3612         if (__kmp_hws_core.num == 0)
3613           __kmp_hws_core.num = NC; // use all available cores
3614         if (__kmp_hws_core.offset >= NC) {
3615           KMP_WARNING(AffHWSubsetManyCores);
3616           goto _exit;
3617         }
3618       } // tile_support
3619     }
3620     if (__kmp_hws_proc.num == 0)
3621       __kmp_hws_proc.num = __kmp_nThreadsPerCore; // use all available procs
3622     if (__kmp_hws_proc.offset >= __kmp_nThreadsPerCore) {
3623       KMP_WARNING(AffHWSubsetManyProcs);
3624       goto _exit;
3625     }
3626     // end of validation --------------------------------------------
3627 
3628     if (pAddr) // pAddr is NULL in case of affinity_none
3629       newAddr = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) *
3630                                               __kmp_avail_proc); // max size
3631     // main loop to form HW subset ----------------------------------
3632     hS = NULL;
3633     int NP = hwloc_get_nbobjs_by_type(tp, HWLOC_OBJ_PACKAGE);
3634     for (int s = 0; s < NP; ++s) {
3635       // Check Socket -----------------------------------------------
3636       hS = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PACKAGE, hS);
3637       if (!__kmp_hwloc_obj_has_PUs(tp, hS))
3638         continue; // skip socket if all PUs are out of fullMask
3639       ++nS; // only count objects that have PUs in the affinity mask
3640       if (nS <= __kmp_hws_socket.offset ||
3641           nS > __kmp_hws_socket.num + __kmp_hws_socket.offset) {
3642         n_old += __kmp_hwloc_skip_PUs_obj(tp, hS); // skip socket
3643         continue; // move to next socket
3644       }
3645       nCr = 0; // count number of cores per socket
3646       // socket requested, go down the topology tree
3647       // check 4 cases: (+NUMA+Tile), (+NUMA-Tile), (-NUMA+Tile), (-NUMA-Tile)
3648       if (numa_support) {
3649         nN = 0;
3650         hN = NULL;
3651         // num nodes in current socket
3652         int NN =
3653             __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_NUMANODE, &hN);
3654         for (int n = 0; n < NN; ++n) {
3655           // Check NUMA Node ----------------------------------------
3656           if (!__kmp_hwloc_obj_has_PUs(tp, hN)) {
3657             hN = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hN);
3658             continue; // skip node if all PUs are out of fullMask
3659           }
3660           ++nN;
3661           if (nN <= __kmp_hws_node.offset ||
3662               nN > __kmp_hws_node.num + __kmp_hws_node.offset) {
3663             // skip node as not requested
3664             n_old += __kmp_hwloc_skip_PUs_obj(tp, hN); // skip node
3665             hN = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hN);
3666             continue; // move to next node
3667           }
3668           // node requested, go down the topology tree
3669           if (tile_support) {
3670             nL = 0;
3671             hL = NULL;
3672             int NL = __kmp_hwloc_count_children_by_depth(tp, hN, L2depth, &hL);
3673             for (int l = 0; l < NL; ++l) {
3674               // Check L2 (tile) ------------------------------------
3675               if (!__kmp_hwloc_obj_has_PUs(tp, hL)) {
3676                 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3677                 continue; // skip tile if all PUs are out of fullMask
3678               }
3679               ++nL;
3680               if (nL <= __kmp_hws_tile.offset ||
3681                   nL > __kmp_hws_tile.num + __kmp_hws_tile.offset) {
3682                 // skip tile as not requested
3683                 n_old += __kmp_hwloc_skip_PUs_obj(tp, hL); // skip tile
3684                 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3685                 continue; // move to next tile
3686               }
3687               // tile requested, go down the topology tree
3688               nC = 0;
3689               hC = NULL;
3690               // num cores in current tile
3691               int NC = __kmp_hwloc_count_children_by_type(tp, hL,
3692                                                           HWLOC_OBJ_CORE, &hC);
3693               for (int c = 0; c < NC; ++c) {
3694                 // Check Core ---------------------------------------
3695                 if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3696                   hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3697                   continue; // skip core if all PUs are out of fullMask
3698                 }
3699                 ++nC;
3700                 if (nC <= __kmp_hws_core.offset ||
3701                     nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3702                   // skip core as not requested
3703                   n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3704                   hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3705                   continue; // move to next core
3706                 }
3707                 // core requested, go down to PUs
3708                 nT = 0;
3709                 nTr = 0;
3710                 hT = NULL;
3711                 // num procs in current core
3712                 int NT = __kmp_hwloc_count_children_by_type(tp, hC,
3713                                                             HWLOC_OBJ_PU, &hT);
3714                 for (int t = 0; t < NT; ++t) {
3715                   // Check PU ---------------------------------------
3716                   idx = hT->os_index;
3717                   if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3718                     hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3719                     continue; // skip PU if not in fullMask
3720                   }
3721                   ++nT;
3722                   if (nT <= __kmp_hws_proc.offset ||
3723                       nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3724                     // skip PU
3725                     KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3726                     ++n_old;
3727                     KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3728                     hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3729                     continue; // move to next PU
3730                   }
3731                   ++nTr;
3732                   if (pAddr) // collect requested thread's data
3733                     newAddr[n_new] = (*pAddr)[n_old];
3734                   ++n_new;
3735                   ++n_old;
3736                   hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3737                 } // threads loop
3738                 if (nTr > 0) {
3739                   ++nCr; // num cores per socket
3740                   ++nCo; // total num cores
3741                   if (nTr > nTpC)
3742                     nTpC = nTr; // calc max threads per core
3743                 }
3744                 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3745               } // cores loop
3746               hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3747             } // tiles loop
3748           } else { // tile_support
3749             // no tiles, check cores
3750             nC = 0;
3751             hC = NULL;
3752             // num cores in current node
3753             int NC =
3754                 __kmp_hwloc_count_children_by_type(tp, hN, HWLOC_OBJ_CORE, &hC);
3755             for (int c = 0; c < NC; ++c) {
3756               // Check Core ---------------------------------------
3757               if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3758                 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3759                 continue; // skip core if all PUs are out of fullMask
3760               }
3761               ++nC;
3762               if (nC <= __kmp_hws_core.offset ||
3763                   nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3764                 // skip core as not requested
3765                 n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3766                 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3767                 continue; // move to next core
3768               }
3769               // core requested, go down to PUs
3770               nT = 0;
3771               nTr = 0;
3772               hT = NULL;
3773               int NT =
3774                   __kmp_hwloc_count_children_by_type(tp, hC, HWLOC_OBJ_PU, &hT);
3775               for (int t = 0; t < NT; ++t) {
3776                 // Check PU ---------------------------------------
3777                 idx = hT->os_index;
3778                 if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3779                   hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3780                   continue; // skip PU if not in fullMask
3781                 }
3782                 ++nT;
3783                 if (nT <= __kmp_hws_proc.offset ||
3784                     nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3785                   // skip PU
3786                   KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3787                   ++n_old;
3788                   KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3789                   hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3790                   continue; // move to next PU
3791                 }
3792                 ++nTr;
3793                 if (pAddr) // collect requested thread's data
3794                   newAddr[n_new] = (*pAddr)[n_old];
3795                 ++n_new;
3796                 ++n_old;
3797                 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3798               } // threads loop
3799               if (nTr > 0) {
3800                 ++nCr; // num cores per socket
3801                 ++nCo; // total num cores
3802                 if (nTr > nTpC)
3803                   nTpC = nTr; // calc max threads per core
3804               }
3805               hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3806             } // cores loop
3807           } // tiles support
3808           hN = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hN);
3809         } // nodes loop
3810       } else { // numa_support
3811         // no NUMA support
3812         if (tile_support) {
3813           nL = 0;
3814           hL = NULL;
3815           // num tiles in current socket
3816           int NL = __kmp_hwloc_count_children_by_depth(tp, hS, L2depth, &hL);
3817           for (int l = 0; l < NL; ++l) {
3818             // Check L2 (tile) ------------------------------------
3819             if (!__kmp_hwloc_obj_has_PUs(tp, hL)) {
3820               hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3821               continue; // skip tile if all PUs are out of fullMask
3822             }
3823             ++nL;
3824             if (nL <= __kmp_hws_tile.offset ||
3825                 nL > __kmp_hws_tile.num + __kmp_hws_tile.offset) {
3826               // skip tile as not requested
3827               n_old += __kmp_hwloc_skip_PUs_obj(tp, hL); // skip tile
3828               hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3829               continue; // move to next tile
3830             }
3831             // tile requested, go down the topology tree
3832             nC = 0;
3833             hC = NULL;
3834             // num cores per tile
3835             int NC =
3836                 __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE, &hC);
3837             for (int c = 0; c < NC; ++c) {
3838               // Check Core ---------------------------------------
3839               if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3840                 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3841                 continue; // skip core if all PUs are out of fullMask
3842               }
3843               ++nC;
3844               if (nC <= __kmp_hws_core.offset ||
3845                   nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3846                 // skip core as not requested
3847                 n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3848                 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3849                 continue; // move to next core
3850               }
3851               // core requested, go down to PUs
3852               nT = 0;
3853               nTr = 0;
3854               hT = NULL;
3855               // num procs per core
3856               int NT =
3857                   __kmp_hwloc_count_children_by_type(tp, hC, HWLOC_OBJ_PU, &hT);
3858               for (int t = 0; t < NT; ++t) {
3859                 // Check PU ---------------------------------------
3860                 idx = hT->os_index;
3861                 if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3862                   hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3863                   continue; // skip PU if not in fullMask
3864                 }
3865                 ++nT;
3866                 if (nT <= __kmp_hws_proc.offset ||
3867                     nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3868                   // skip PU
3869                   KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3870                   ++n_old;
3871                   KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3872                   hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3873                   continue; // move to next PU
3874                 }
3875                 ++nTr;
3876                 if (pAddr) // collect requested thread's data
3877                   newAddr[n_new] = (*pAddr)[n_old];
3878                 ++n_new;
3879                 ++n_old;
3880                 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3881               } // threads loop
3882               if (nTr > 0) {
3883                 ++nCr; // num cores per socket
3884                 ++nCo; // total num cores
3885                 if (nTr > nTpC)
3886                   nTpC = nTr; // calc max threads per core
3887               }
3888               hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3889             } // cores loop
3890             hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3891           } // tiles loop
3892         } else { // tile_support
3893           // no tiles, check cores
3894           nC = 0;
3895           hC = NULL;
3896           // num cores in socket
3897           int NC =
3898               __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_CORE, &hC);
3899           for (int c = 0; c < NC; ++c) {
3900             // Check Core -------------------------------------------
3901             if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3902               hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3903               continue; // skip core if all PUs are out of fullMask
3904             }
3905             ++nC;
3906             if (nC <= __kmp_hws_core.offset ||
3907                 nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3908               // skip core as not requested
3909               n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3910               hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3911               continue; // move to next core
3912             }
3913             // core requested, go down to PUs
3914             nT = 0;
3915             nTr = 0;
3916             hT = NULL;
3917             // num procs per core
3918             int NT =
3919                 __kmp_hwloc_count_children_by_type(tp, hC, HWLOC_OBJ_PU, &hT);
3920             for (int t = 0; t < NT; ++t) {
3921               // Check PU ---------------------------------------
3922               idx = hT->os_index;
3923               if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3924                 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3925                 continue; // skip PU if not in fullMask
3926               }
3927               ++nT;
3928               if (nT <= __kmp_hws_proc.offset ||
3929                   nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3930                 // skip PU
3931                 KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3932                 ++n_old;
3933                 KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3934                 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3935                 continue; // move to next PU
3936               }
3937               ++nTr;
3938               if (pAddr) // collect requested thread's data
3939                 newAddr[n_new] = (*pAddr)[n_old];
3940               ++n_new;
3941               ++n_old;
3942               hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3943             } // threads loop
3944             if (nTr > 0) {
3945               ++nCr; // num cores per socket
3946               ++nCo; // total num cores
3947               if (nTr > nTpC)
3948                 nTpC = nTr; // calc max threads per core
3949             }
3950             hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3951           } // cores loop
3952         } // tiles support
3953       } // numa_support
3954       if (nCr > 0) { // found cores?
3955         ++nPkg; // num sockets
3956         if (nCr > nCpP)
3957           nCpP = nCr; // calc max cores per socket
3958       }
3959     } // sockets loop
3960 
3961     // check the subset is valid
3962     KMP_DEBUG_ASSERT(n_old == __kmp_avail_proc);
3963     KMP_DEBUG_ASSERT(nPkg > 0);
3964     KMP_DEBUG_ASSERT(nCpP > 0);
3965     KMP_DEBUG_ASSERT(nTpC > 0);
3966     KMP_DEBUG_ASSERT(nCo > 0);
3967     KMP_DEBUG_ASSERT(nPkg <= nPackages);
3968     KMP_DEBUG_ASSERT(nCpP <= nCoresPerPkg);
3969     KMP_DEBUG_ASSERT(nTpC <= __kmp_nThreadsPerCore);
3970     KMP_DEBUG_ASSERT(nCo <= __kmp_ncores);
3971 
3972     nPackages = nPkg; // correct num sockets
3973     nCoresPerPkg = nCpP; // correct num cores per socket
3974     __kmp_nThreadsPerCore = nTpC; // correct num threads per core
3975     __kmp_avail_proc = n_new; // correct num procs
3976     __kmp_ncores = nCo; // correct num cores
3977     // hwloc topology method end
3978   } else
3979 #endif // KMP_USE_HWLOC
3980   {
3981     int n_old = 0, n_new = 0, proc_num = 0;
3982     if (__kmp_hws_node.num > 0 || __kmp_hws_tile.num > 0) {
3983       KMP_WARNING(AffHWSubsetNoHWLOC);
3984       goto _exit;
3985     }
3986     if (__kmp_hws_socket.num == 0)
3987       __kmp_hws_socket.num = nPackages; // use all available sockets
3988     if (__kmp_hws_die.num == 0)
3989       __kmp_hws_die.num = nDiesPerPkg; // use all available dies
3990     if (__kmp_hws_core.num == 0)
3991       __kmp_hws_core.num = nCoresPerPkg; // use all available cores
3992     if (__kmp_hws_proc.num == 0 || __kmp_hws_proc.num > __kmp_nThreadsPerCore)
3993       __kmp_hws_proc.num = __kmp_nThreadsPerCore; // use all HW contexts
3994     if (!__kmp_affinity_uniform_topology()) {
3995       KMP_WARNING(AffHWSubsetNonUniform);
3996       goto _exit; // don't support non-uniform topology
3997     }
3998     if (depth > 4) {
3999       KMP_WARNING(AffHWSubsetNonThreeLevel);
4000       goto _exit; // only 3- and 4-level (with die) topologies are supported
4001     }
4002     if (__kmp_hws_socket.offset + __kmp_hws_socket.num > nPackages) {
4003       KMP_WARNING(AffHWSubsetManySockets);
4004       goto _exit;
4005     }
4006     if (depth == 4 && __kmp_hws_die.offset + __kmp_hws_die.num > nDiesPerPkg) {
4007       KMP_WARNING(AffHWSubsetManyDies);
4008       goto _exit;
4009     }
4010     if (__kmp_hws_core.offset + __kmp_hws_core.num > nCoresPerPkg) {
4011       KMP_WARNING(AffHWSubsetManyCores);
4012       goto _exit;
4013     }
4014     // Form the requested subset
4015     if (pAddr) // pAddr is NULL in case of affinity_none
4016       newAddr = (AddrUnsPair *)__kmp_allocate(
4017           sizeof(AddrUnsPair) * __kmp_hws_socket.num * __kmp_hws_die.num *
4018           __kmp_hws_core.num * __kmp_hws_proc.num);
4019     for (int i = 0; i < nPackages; ++i) {
4020       if (i < __kmp_hws_socket.offset ||
4021           i >= __kmp_hws_socket.offset + __kmp_hws_socket.num) {
4022         // skip not-requested socket
4023         n_old += nDiesPerPkg * nCoresPerPkg * __kmp_nThreadsPerCore;
4024         if (__kmp_pu_os_idx != NULL) {
4025           // walk through skipped socket
4026           for (int l = 0; l < nDiesPerPkg; ++l) {
4027             for (int j = 0; j < nCoresPerPkg; ++j) {
4028               for (int k = 0; k < __kmp_nThreadsPerCore; ++k) {
4029                 KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
4030                 ++proc_num;
4031               }
4032             }
4033           }
4034         }
4035       } else {
4036         // walk through requested socket
4037         for (int l = 0; l < nDiesPerPkg; ++l) {
4038           // skip unwanted die
4039           if (l < __kmp_hws_die.offset ||
4040               l >= __kmp_hws_die.offset + __kmp_hws_die.num) {
4041             n_old += nCoresPerPkg * __kmp_nThreadsPerCore;
4042             if (__kmp_pu_os_idx != NULL) {
4043               for (int k = 0; k < nCoresPerPkg * __kmp_nThreadsPerCore; ++k) {
4044                 KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
4045                 ++proc_num;
4046               }
4047             }
4048           } else {
4049             for (int j = 0; j < nCoresPerPkg; ++j) {
4050               if (j < __kmp_hws_core.offset ||
4051                   j >= __kmp_hws_core.offset +
4052                            __kmp_hws_core.num) { // skip not-requested core
4053                 n_old += __kmp_nThreadsPerCore;
4054                 if (__kmp_pu_os_idx != NULL) {
4055                   for (int k = 0; k < __kmp_nThreadsPerCore; ++k) {
4056                     KMP_CPU_CLR(__kmp_pu_os_idx[proc_num],
4057                                 __kmp_affin_fullMask);
4058                     ++proc_num;
4059                   }
4060                 }
4061               } else {
4062                 // walk through requested core
4063                 for (int k = 0; k < __kmp_nThreadsPerCore; ++k) {
4064                   if (k < __kmp_hws_proc.num) {
4065                     if (pAddr) // collect requested thread's data
4066                       newAddr[n_new] = (*pAddr)[n_old];
4067                     n_new++;
4068                   } else {
4069                     if (__kmp_pu_os_idx != NULL)
4070                       KMP_CPU_CLR(__kmp_pu_os_idx[proc_num],
4071                                   __kmp_affin_fullMask);
4072                   }
4073                   n_old++;
4074                   ++proc_num;
4075                 }
4076               }
4077             }
4078           }
4079         }
4080       }
4081     }
4082     KMP_DEBUG_ASSERT(n_old == nPackages * nDiesPerPkg * nCoresPerPkg *
4083                                   __kmp_nThreadsPerCore);
4084     KMP_DEBUG_ASSERT(n_new == __kmp_hws_socket.num * __kmp_hws_die.num *
4085                                   __kmp_hws_core.num * __kmp_hws_proc.num);
4086     nPackages = __kmp_hws_socket.num; // correct nPackages
4087     nCoresPerPkg = __kmp_hws_core.num; // correct nCoresPerPkg
4088     nDiesPerPkg = __kmp_hws_die.num; // correct nDiesPerPkg
4089     __kmp_nThreadsPerCore = __kmp_hws_proc.num; // correct __kmp_nThreadsPerCore
4090     __kmp_avail_proc = n_new; // correct avail_proc
4091     __kmp_ncores =
4092         nPackages * nDiesPerPkg * __kmp_hws_core.num; // correct ncores
4093   } // non-hwloc topology method
4094   if (pAddr) {
4095     __kmp_free(*pAddr);
4096     *pAddr = newAddr; // replace old topology with new one
4097   }
4098   if (__kmp_affinity_verbose) {
4099     KMP_INFORM(AvailableOSProc, "KMP_HW_SUBSET", __kmp_avail_proc);
4100     kmp_str_buf_t buf;
4101     __kmp_str_buf_init(&buf);
4102     __kmp_str_buf_print(&buf, "%d", nPackages);
4103     KMP_INFORM(TopologyExtra, "KMP_HW_SUBSET", buf.str, nCoresPerPkg,
4104                __kmp_nThreadsPerCore, __kmp_ncores);
4105     __kmp_str_buf_free(&buf);
4106   }
4107 _exit:
4108   if (__kmp_pu_os_idx != NULL) {
4109     __kmp_free(__kmp_pu_os_idx);
4110     __kmp_pu_os_idx = NULL;
4111   }
4112 }
4113 
4114 // This function figures out the deepest level at which there is at least one
4115 // cluster/core with more than one processing unit bound to it.
4116 static int __kmp_affinity_find_core_level(const AddrUnsPair *address2os,
4117                                           int nprocs, int bottom_level) {
4118   int core_level = 0;
4119 
4120   for (int i = 0; i < nprocs; i++) {
4121     for (int j = bottom_level; j > 0; j--) {
4122       if (address2os[i].first.labels[j] > 0) {
4123         if (core_level < (j - 1)) {
4124           core_level = j - 1;
4125         }
4126       }
4127     }
4128   }
4129   return core_level;
4130 }
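// For example (illustrative): with three levels labeled (package, core,
// thread) and bottom_level == 2, any proc whose thread label is nonzero (a
// core carrying more than one PU) raises core_level to 1, while a machine with
// one PU per core but several cores per package leaves core_level at 0.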
4131 
4132 // This function counts the number of clusters/cores at the given level.
4133 static int __kmp_affinity_compute_ncores(const AddrUnsPair *address2os,
4134                                          int nprocs, int bottom_level,
4135                                          int core_level) {
4136   int ncores = 0;
4137   int i, j;
4138 
4139   j = bottom_level;
4140   for (i = 0; i < nprocs; i++) {
4141     for (j = bottom_level; j > core_level; j--) {
4142       if ((i + 1) < nprocs) {
4143         if (address2os[i + 1].first.labels[j] > 0) {
4144           break;
4145         }
4146       }
4147     }
4148     if (j == core_level) {
4149       ncores++;
4150     }
4151   }
4152   if (j > core_level) {
4153     // In case of ( nprocs < __kmp_avail_proc ) we may end up too deep and
4154     // miss one core. This may occur when called from __kmp_affinity_find_core().
4155     ncores++;
4156   }
4157   return ncores;
4158 }
4159 
4160 // This function finds the cluster/core a given processing unit is bound to.
4161 static int __kmp_affinity_find_core(const AddrUnsPair *address2os, int proc,
4162                                     int bottom_level, int core_level) {
4163   return __kmp_affinity_compute_ncores(address2os, proc + 1, bottom_level,
4164                                        core_level) -
4165          1;
4166 }
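// For illustration: with 2 PUs per core, address2os entries 0 and 1 map to
// core 0, entries 2 and 3 to core 1, and so on -- the result is the number of
// distinct cores seen in address2os[0..proc] minus one.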
4167 
4168 // This function finds the maximal number of processing units bound to a
4169 // cluster/core at the given level.
4170 static int __kmp_affinity_max_proc_per_core(const AddrUnsPair *address2os,
4171                                             int nprocs, int bottom_level,
4172                                             int core_level) {
4173   int maxprocpercore = 0;
4174 
4175   if (core_level < bottom_level) {
4176     for (int i = 0; i < nprocs; i++) {
4177       int percore = address2os[i].first.labels[core_level + 1] + 1;
4178 
4179       if (percore > maxprocpercore) {
4180         maxprocpercore = percore;
4181       }
4182     }
4183   } else {
4184     maxprocpercore = 1;
4185   }
4186   return maxprocpercore;
4187 }
4188 
4189 static AddrUnsPair *address2os = NULL;
4190 static int *procarr = NULL;
4191 static int __kmp_aff_depth = 0;
4192 
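// Helper for the topology detection code below: when a discovery method
// reports depth == 0 (affinity has been reduced to "none"), publish a single
// place holding the full mask and return from the initialization routine.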
4193 #if KMP_USE_HIER_SCHED
4194 #define KMP_EXIT_AFF_NONE                                                      \
4195   KMP_ASSERT(__kmp_affinity_type == affinity_none);                            \
4196   KMP_ASSERT(address2os == NULL);                                              \
4197   __kmp_apply_thread_places(NULL, 0);                                          \
4198   __kmp_create_affinity_none_places();                                         \
4199   __kmp_dispatch_set_hierarchy_values();                                       \
4200   return;
4201 #else
4202 #define KMP_EXIT_AFF_NONE                                                      \
4203   KMP_ASSERT(__kmp_affinity_type == affinity_none);                            \
4204   KMP_ASSERT(address2os == NULL);                                              \
4205   __kmp_apply_thread_places(NULL, 0);                                          \
4206   __kmp_create_affinity_none_places();                                         \
4207   return;
4208 #endif
4209 
// Create a one-element mask array (set of places) that contains only the
// initial process's affinity mask
4212 static void __kmp_create_affinity_none_places() {
4213   KMP_ASSERT(__kmp_affin_fullMask != NULL);
4214   KMP_ASSERT(__kmp_affinity_type == affinity_none);
4215   __kmp_affinity_num_masks = 1;
4216   KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4217   kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, 0);
4218   KMP_CPU_COPY(dest, __kmp_affin_fullMask);
4219 }
4220 
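// Comparator used to sort address2os according to __kmp_affinity_compact: the
// innermost __kmp_affinity_compact levels are the most significant sort keys
// (deepest level first), followed by the remaining levels from the top of the
// tree down. With __kmp_affinity_compact == 0 this is plain topological order;
// larger values move inner levels (e.g. the thread context) to the front of
// the key, which is how scatter-style orderings are produced.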
4221 static int __kmp_affinity_cmp_Address_child_num(const void *a, const void *b) {
4222   const Address *aa = &(((const AddrUnsPair *)a)->first);
4223   const Address *bb = &(((const AddrUnsPair *)b)->first);
4224   unsigned depth = aa->depth;
4225   unsigned i;
4226   KMP_DEBUG_ASSERT(depth == bb->depth);
4227   KMP_DEBUG_ASSERT((unsigned)__kmp_affinity_compact <= depth);
4228   KMP_DEBUG_ASSERT(__kmp_affinity_compact >= 0);
4229   for (i = 0; i < (unsigned)__kmp_affinity_compact; i++) {
4230     int j = depth - i - 1;
4231     if (aa->childNums[j] < bb->childNums[j])
4232       return -1;
4233     if (aa->childNums[j] > bb->childNums[j])
4234       return 1;
4235   }
4236   for (; i < depth; i++) {
4237     int j = i - __kmp_affinity_compact;
4238     if (aa->childNums[j] < bb->childNums[j])
4239       return -1;
4240     if (aa->childNums[j] > bb->childNums[j])
4241       return 1;
4242   }
4243   return 0;
4244 }
4245 
4246 static void __kmp_aux_affinity_initialize(void) {
4247   if (__kmp_affinity_masks != NULL) {
4248     KMP_ASSERT(__kmp_affin_fullMask != NULL);
4249     return;
4250   }
4251 
4252   // Create the "full" mask - this defines all of the processors that we
4253   // consider to be in the machine model. If respect is set, then it is the
4254   // initialization thread's affinity mask. Otherwise, it is all processors that
4255   // we know about on the machine.
4256   if (__kmp_affin_fullMask == NULL) {
4257     KMP_CPU_ALLOC(__kmp_affin_fullMask);
4258   }
4259   if (KMP_AFFINITY_CAPABLE()) {
4260     __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
4261     if (__kmp_affinity_respect_mask) {
4262       // Count the number of available processors.
4263       unsigned i;
4264       __kmp_avail_proc = 0;
4265       KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
4266         if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
4267           continue;
4268         }
4269         __kmp_avail_proc++;
4270       }
4271       if (__kmp_avail_proc > __kmp_xproc) {
4272         if (__kmp_affinity_verbose ||
4273             (__kmp_affinity_warnings &&
4274              (__kmp_affinity_type != affinity_none))) {
4275           KMP_WARNING(ErrorInitializeAffinity);
4276         }
4277         __kmp_affinity_type = affinity_none;
4278         KMP_AFFINITY_DISABLE();
4279         return;
4280       }
4281 
4282       if (__kmp_affinity_verbose) {
4283         char buf[KMP_AFFIN_MASK_PRINT_LEN];
4284         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4285                                   __kmp_affin_fullMask);
4286         KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
4287       }
4288     } else {
4289       if (__kmp_affinity_verbose) {
4290         char buf[KMP_AFFIN_MASK_PRINT_LEN];
4291         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4292                                   __kmp_affin_fullMask);
4293         KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
4294       }
4295       __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
4296       __kmp_avail_proc = __kmp_xproc;
4297 #if KMP_OS_WINDOWS
      // Set the process affinity mask since threads' affinity masks
      // must be a subset of the process mask on Windows* OS.
4300       __kmp_affin_fullMask->set_process_affinity(true);
4301 #endif
4302     }
4303   }
4304 
4305   if (__kmp_affinity_gran == affinity_gran_tile &&
4306       // check if user's request is valid
4307       __kmp_affinity_dispatch->get_api_type() == KMPAffinity::NATIVE_OS) {
4308     KMP_WARNING(AffTilesNoHWLOC, "KMP_AFFINITY");
4309     __kmp_affinity_gran = affinity_gran_package;
4310   }
4311 
4312   int depth = -1;
4313   kmp_i18n_id_t msg_id = kmp_i18n_null;
4314 
4315   // For backward compatibility, setting KMP_CPUINFO_FILE =>
4316   // KMP_TOPOLOGY_METHOD=cpuinfo
4317   if ((__kmp_cpuinfo_file != NULL) &&
4318       (__kmp_affinity_top_method == affinity_top_method_all)) {
4319     __kmp_affinity_top_method = affinity_top_method_cpuinfo;
4320   }
4321 
4322   if (__kmp_affinity_top_method == affinity_top_method_all) {
4323     // In the default code path, errors are not fatal - we just try using
4324     // another method. We only emit a warning message if affinity is on, or the
4325     // verbose flag is set, and the nowarnings flag was not set.
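    // The methods below are tried in order: hwloc (if enabled), x2APIC ids,
    // legacy APIC ids, parsing /proc/cpuinfo (Linux), Windows processor
    // groups, and finally a flat OS-proc enumeration as the last resort.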
4326     const char *file_name = NULL;
4327     int line = 0;
4328 #if KMP_USE_HWLOC
4329     if (depth < 0 &&
4330         __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
4331       if (__kmp_affinity_verbose) {
4332         KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
4333       }
4334       if (!__kmp_hwloc_error) {
4335         depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
4336         if (depth == 0) {
4337           KMP_EXIT_AFF_NONE;
4338         } else if (depth < 0 && __kmp_affinity_verbose) {
4339           KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
4340         }
4341       } else if (__kmp_affinity_verbose) {
4342         KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
4343       }
4344     }
4345 #endif
4346 
4347 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4348 
4349     if (depth < 0) {
4350       if (__kmp_affinity_verbose) {
4351         KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
4352       }
4353 
4354       file_name = NULL;
4355       depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
4356       if (depth == 0) {
4357         KMP_EXIT_AFF_NONE;
4358       }
4359 
4360       if (depth < 0) {
4361         if (__kmp_affinity_verbose) {
4362           if (msg_id != kmp_i18n_null) {
4363             KMP_INFORM(AffInfoStrStr, "KMP_AFFINITY",
4364                        __kmp_i18n_catgets(msg_id),
4365                        KMP_I18N_STR(DecodingLegacyAPIC));
4366           } else {
4367             KMP_INFORM(AffInfoStr, "KMP_AFFINITY",
4368                        KMP_I18N_STR(DecodingLegacyAPIC));
4369           }
4370         }
4371 
4372         file_name = NULL;
4373         depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
4374         if (depth == 0) {
4375           KMP_EXIT_AFF_NONE;
4376         }
4377       }
4378     }
4379 
4380 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4381 
4382 #if KMP_OS_LINUX
4383 
4384     if (depth < 0) {
4385       if (__kmp_affinity_verbose) {
4386         if (msg_id != kmp_i18n_null) {
4387           KMP_INFORM(AffStrParseFilename, "KMP_AFFINITY",
4388                      __kmp_i18n_catgets(msg_id), "/proc/cpuinfo");
4389         } else {
4390           KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "/proc/cpuinfo");
4391         }
4392       }
4393 
4394       kmp_safe_raii_file_t f("/proc/cpuinfo", "r");
4395       depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
4396       if (depth == 0) {
4397         KMP_EXIT_AFF_NONE;
4398       }
4399     }
4400 
4401 #endif /* KMP_OS_LINUX */
4402 
4403 #if KMP_GROUP_AFFINITY
4404 
4405     if ((depth < 0) && (__kmp_num_proc_groups > 1)) {
4406       if (__kmp_affinity_verbose) {
4407         KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
4408       }
4409 
4410       depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
4411       KMP_ASSERT(depth != 0);
4412     }
4413 
4414 #endif /* KMP_GROUP_AFFINITY */
4415 
4416     if (depth < 0) {
4417       if (__kmp_affinity_verbose && (msg_id != kmp_i18n_null)) {
4418         if (file_name == NULL) {
4419           KMP_INFORM(UsingFlatOS, __kmp_i18n_catgets(msg_id));
4420         } else if (line == 0) {
4421           KMP_INFORM(UsingFlatOSFile, file_name, __kmp_i18n_catgets(msg_id));
4422         } else {
4423           KMP_INFORM(UsingFlatOSFileLine, file_name, line,
4424                      __kmp_i18n_catgets(msg_id));
4425         }
4426       }
4427       // FIXME - print msg if msg_id = kmp_i18n_null ???
4428 
4429       file_name = "";
4430       depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
4431       if (depth == 0) {
4432         KMP_EXIT_AFF_NONE;
4433       }
4434       KMP_ASSERT(depth > 0);
4435       KMP_ASSERT(address2os != NULL);
4436     }
4437   }
4438 
4439 #if KMP_USE_HWLOC
4440   else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
4441     KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
4442     if (__kmp_affinity_verbose) {
4443       KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
4444     }
4445     depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
4446     if (depth == 0) {
4447       KMP_EXIT_AFF_NONE;
4448     }
4449   }
4450 #endif // KMP_USE_HWLOC
4451 
4452   // If the user has specified that a particular topology discovery method is to
4453   // be used, then we abort if that method fails. The exception is group
4454   // affinity, which might have been implicitly set.
4455 
4456 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4457 
4458   else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
4459            __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
4460     if (__kmp_affinity_verbose) {
4461       KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
4462     }
4463 
4464     depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
4465     if (depth == 0) {
4466       KMP_EXIT_AFF_NONE;
4467     }
4468     if (depth < 0) {
4469       KMP_ASSERT(msg_id != kmp_i18n_null);
4470       KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4471     }
4472   } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
4473     if (__kmp_affinity_verbose) {
4474       KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
4475     }
4476 
4477     depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
4478     if (depth == 0) {
4479       KMP_EXIT_AFF_NONE;
4480     }
4481     if (depth < 0) {
4482       KMP_ASSERT(msg_id != kmp_i18n_null);
4483       KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4484     }
4485   }
4486 
4487 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4488 
4489   else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
4490     const char *filename;
4491     const char *env_var = nullptr;
4492     if (__kmp_cpuinfo_file != NULL) {
4493       filename = __kmp_cpuinfo_file;
4494       env_var = "KMP_CPUINFO_FILE";
4495     } else {
4496       filename = "/proc/cpuinfo";
4497     }
4498 
4499     if (__kmp_affinity_verbose) {
4500       KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
4501     }
4502 
4503     kmp_safe_raii_file_t f(filename, "r", env_var);
4504     int line = 0;
4505     depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
4506     if (depth < 0) {
4507       KMP_ASSERT(msg_id != kmp_i18n_null);
4508       if (line > 0) {
4509         KMP_FATAL(FileLineMsgExiting, filename, line,
4510                   __kmp_i18n_catgets(msg_id));
4511       } else {
4512         KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
4513       }
4514     }
4515     if (__kmp_affinity_type == affinity_none) {
4516       KMP_ASSERT(depth == 0);
4517       KMP_EXIT_AFF_NONE;
4518     }
4519   }
4520 
4521 #if KMP_GROUP_AFFINITY
4522 
4523   else if (__kmp_affinity_top_method == affinity_top_method_group) {
4524     if (__kmp_affinity_verbose) {
4525       KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
4526     }
4527 
4528     depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
4529     KMP_ASSERT(depth != 0);
4530     if (depth < 0) {
4531       KMP_ASSERT(msg_id != kmp_i18n_null);
4532       KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4533     }
4534   }
4535 
4536 #endif /* KMP_GROUP_AFFINITY */
4537 
4538   else if (__kmp_affinity_top_method == affinity_top_method_flat) {
4539     if (__kmp_affinity_verbose) {
4540       KMP_INFORM(AffUsingFlatOS, "KMP_AFFINITY");
4541     }
4542 
4543     depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
4544     if (depth == 0) {
4545       KMP_EXIT_AFF_NONE;
4546     }
4547     // should not fail
4548     KMP_ASSERT(depth > 0);
4549     KMP_ASSERT(address2os != NULL);
4550   }
4551 
4552 #if KMP_USE_HIER_SCHED
4553   __kmp_dispatch_set_hierarchy_values();
4554 #endif
4555 
4556   if (address2os == NULL) {
4557     if (KMP_AFFINITY_CAPABLE() &&
4558         (__kmp_affinity_verbose ||
4559          (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none)))) {
4560       KMP_WARNING(ErrorInitializeAffinity);
4561     }
4562     __kmp_affinity_type = affinity_none;
4563     __kmp_create_affinity_none_places();
4564     KMP_AFFINITY_DISABLE();
4565     return;
4566   }
4567 
4568   if (__kmp_affinity_gran == affinity_gran_tile
4569 #if KMP_USE_HWLOC
4570       && __kmp_tile_depth == 0
4571 #endif
4572   ) {
    // Tiles were requested but not detected; warn the user.
4574     KMP_WARNING(AffTilesNoTiles, "KMP_AFFINITY");
4575   }
4576 
4577   __kmp_apply_thread_places(&address2os, depth);
4578 
4579   // Create the table of masks, indexed by thread Id.
4580   unsigned maxIndex;
4581   unsigned numUnique;
4582   kmp_affin_mask_t *osId2Mask =
4583       __kmp_create_masks(&maxIndex, &numUnique, address2os, __kmp_avail_proc);
4584   if (__kmp_affinity_gran_levels == 0) {
4585     KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
4586   }
4587 
4588   // Set the childNums vector in all Address objects. This must be done before
4589   // we can sort using __kmp_affinity_cmp_Address_child_num(), which takes into
4590   // account the setting of __kmp_affinity_compact.
4591   __kmp_affinity_assign_child_nums(address2os, __kmp_avail_proc);
4592 
4593   switch (__kmp_affinity_type) {
4594 
4595   case affinity_explicit:
4596     KMP_DEBUG_ASSERT(__kmp_affinity_proclist != NULL);
4597     if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
4598       __kmp_affinity_process_proclist(
4599           &__kmp_affinity_masks, &__kmp_affinity_num_masks,
4600           __kmp_affinity_proclist, osId2Mask, maxIndex);
4601     } else {
4602       __kmp_affinity_process_placelist(
4603           &__kmp_affinity_masks, &__kmp_affinity_num_masks,
4604           __kmp_affinity_proclist, osId2Mask, maxIndex);
4605     }
4606     if (__kmp_affinity_num_masks == 0) {
4607       if (__kmp_affinity_verbose ||
4608           (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
4609         KMP_WARNING(AffNoValidProcID);
4610       }
4611       __kmp_affinity_type = affinity_none;
4612       __kmp_create_affinity_none_places();
4613       return;
4614     }
4615     break;
4616 
4617     // The other affinity types rely on sorting the Addresses according to some
4618     // permutation of the machine topology tree. Set __kmp_affinity_compact and
4619     // __kmp_affinity_offset appropriately, then jump to a common code fragment
4620     // to do the sort and create the array of affinity masks.
4621 
4622   case affinity_logical:
4623     __kmp_affinity_compact = 0;
4624     if (__kmp_affinity_offset) {
4625       __kmp_affinity_offset =
4626           __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
4627     }
4628     goto sortAddresses;
4629 
4630   case affinity_physical:
4631     if (__kmp_nThreadsPerCore > 1) {
4632       __kmp_affinity_compact = 1;
4633       if (__kmp_affinity_compact >= depth) {
4634         __kmp_affinity_compact = 0;
4635       }
4636     } else {
4637       __kmp_affinity_compact = 0;
4638     }
4639     if (__kmp_affinity_offset) {
4640       __kmp_affinity_offset =
4641           __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
4642     }
4643     goto sortAddresses;
4644 
4645   case affinity_scatter:
4646     if (__kmp_affinity_compact >= depth) {
4647       __kmp_affinity_compact = 0;
4648     } else {
4649       __kmp_affinity_compact = depth - 1 - __kmp_affinity_compact;
4650     }
4651     goto sortAddresses;
4652 
4653   case affinity_compact:
4654     if (__kmp_affinity_compact >= depth) {
4655       __kmp_affinity_compact = depth - 1;
4656     }
4657     goto sortAddresses;
4658 
4659   case affinity_balanced:
4660     if (depth <= 1) {
4661       if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
4662         KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
4663       }
4664       __kmp_affinity_type = affinity_none;
4665       __kmp_create_affinity_none_places();
4666       return;
4667     } else if (!__kmp_affinity_uniform_topology()) {
4668       // Save the depth for further usage
4669       __kmp_aff_depth = depth;
4670 
4671       int core_level = __kmp_affinity_find_core_level(
4672           address2os, __kmp_avail_proc, depth - 1);
4673       int ncores = __kmp_affinity_compute_ncores(address2os, __kmp_avail_proc,
4674                                                  depth - 1, core_level);
4675       int maxprocpercore = __kmp_affinity_max_proc_per_core(
4676           address2os, __kmp_avail_proc, depth - 1, core_level);
4677 
4678       int nproc = ncores * maxprocpercore;
4679       if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
4680         if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
4681           KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
4682         }
4683         __kmp_affinity_type = affinity_none;
4684         return;
4685       }
4686 
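      // procarr is a flattened [ncores][maxprocpercore] table: entry
      // [core * maxprocpercore + k] holds the OS proc id of the k-th
      // processing unit on that core, or -1 if the slot is unused.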
4687       procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
4688       for (int i = 0; i < nproc; i++) {
4689         procarr[i] = -1;
4690       }
4691 
4692       int lastcore = -1;
4693       int inlastcore = 0;
4694       for (int i = 0; i < __kmp_avail_proc; i++) {
4695         int proc = address2os[i].second;
4696         int core =
4697             __kmp_affinity_find_core(address2os, i, depth - 1, core_level);
4698 
4699         if (core == lastcore) {
4700           inlastcore++;
4701         } else {
4702           inlastcore = 0;
4703         }
4704         lastcore = core;
4705 
4706         procarr[core * maxprocpercore + inlastcore] = proc;
4707       }
4708     }
4709     if (__kmp_affinity_compact >= depth) {
4710       __kmp_affinity_compact = depth - 1;
4711     }
4712 
4713   sortAddresses:
4714     // Allocate the gtid->affinity mask table.
4715     if (__kmp_affinity_dups) {
4716       __kmp_affinity_num_masks = __kmp_avail_proc;
4717     } else {
4718       __kmp_affinity_num_masks = numUnique;
4719     }
4720 
4721     if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
4722         (__kmp_affinity_num_places > 0) &&
4723         ((unsigned)__kmp_affinity_num_places < __kmp_affinity_num_masks)) {
4724       __kmp_affinity_num_masks = __kmp_affinity_num_places;
4725     }
4726 
4727     KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4728 
4729     // Sort the address2os table according to the current setting of
4730     // __kmp_affinity_compact, then fill out __kmp_affinity_masks.
4731     qsort(address2os, __kmp_avail_proc, sizeof(*address2os),
4732           __kmp_affinity_cmp_Address_child_num);
4733     {
4734       int i;
4735       unsigned j;
4736       for (i = 0, j = 0; i < __kmp_avail_proc; i++) {
4737         if ((!__kmp_affinity_dups) && (!address2os[i].first.leader)) {
4738           continue;
4739         }
4740         unsigned osId = address2os[i].second;
4741         kmp_affin_mask_t *src = KMP_CPU_INDEX(osId2Mask, osId);
4742         kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, j);
4743         KMP_ASSERT(KMP_CPU_ISSET(osId, src));
4744         KMP_CPU_COPY(dest, src);
4745         if (++j >= __kmp_affinity_num_masks) {
4746           break;
4747         }
4748       }
4749       KMP_DEBUG_ASSERT(j == __kmp_affinity_num_masks);
4750     }
4751     break;
4752 
4753   default:
4754     KMP_ASSERT2(0, "Unexpected affinity setting");
4755   }
4756 
4757   KMP_CPU_FREE_ARRAY(osId2Mask, maxIndex + 1);
4758   machine_hierarchy.init(address2os, __kmp_avail_proc);
4759 }
4760 #undef KMP_EXIT_AFF_NONE
4761 
4762 void __kmp_affinity_initialize(void) {
4763   // Much of the code above was written assuming that if a machine was not
4764   // affinity capable, then __kmp_affinity_type == affinity_none.  We now
4765   // explicitly represent this as __kmp_affinity_type == affinity_disabled.
4766   // There are too many checks for __kmp_affinity_type == affinity_none
4767   // in this code.  Instead of trying to change them all, check if
4768   // __kmp_affinity_type == affinity_disabled, and if so, slam it with
4769   // affinity_none, call the real initialization routine, then restore
4770   // __kmp_affinity_type to affinity_disabled.
4771   int disabled = (__kmp_affinity_type == affinity_disabled);
4772   if (!KMP_AFFINITY_CAPABLE()) {
4773     KMP_ASSERT(disabled);
4774   }
4775   if (disabled) {
4776     __kmp_affinity_type = affinity_none;
4777   }
4778   __kmp_aux_affinity_initialize();
4779   if (disabled) {
4780     __kmp_affinity_type = affinity_disabled;
4781   }
4782 }
4783 
4784 void __kmp_affinity_uninitialize(void) {
4785   if (__kmp_affinity_masks != NULL) {
4786     KMP_CPU_FREE_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4787     __kmp_affinity_masks = NULL;
4788   }
4789   if (__kmp_affin_fullMask != NULL) {
4790     KMP_CPU_FREE(__kmp_affin_fullMask);
4791     __kmp_affin_fullMask = NULL;
4792   }
4793   __kmp_affinity_num_masks = 0;
4794   __kmp_affinity_type = affinity_default;
4795   __kmp_affinity_num_places = 0;
4796   if (__kmp_affinity_proclist != NULL) {
4797     __kmp_free(__kmp_affinity_proclist);
4798     __kmp_affinity_proclist = NULL;
4799   }
4800   if (address2os != NULL) {
4801     __kmp_free(address2os);
4802     address2os = NULL;
4803   }
4804   if (procarr != NULL) {
4805     __kmp_free(procarr);
4806     procarr = NULL;
4807   }
4808 #if KMP_USE_HWLOC
4809   if (__kmp_hwloc_topology != NULL) {
4810     hwloc_topology_destroy(__kmp_hwloc_topology);
4811     __kmp_hwloc_topology = NULL;
4812   }
4813 #endif
4814   KMPAffinity::destroy_api();
4815 }
4816 
4817 void __kmp_affinity_set_init_mask(int gtid, int isa_root) {
4818   if (!KMP_AFFINITY_CAPABLE()) {
4819     return;
4820   }
4821 
4822   kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4823   if (th->th.th_affin_mask == NULL) {
4824     KMP_CPU_ALLOC(th->th.th_affin_mask);
4825   } else {
4826     KMP_CPU_ZERO(th->th.th_affin_mask);
4827   }
4828 
  // Copy the thread mask to the kmp_info_t structure. If
  // __kmp_affinity_type == affinity_none, copy the "full" mask, i.e. one that
  // has all of the OS proc ids set; if __kmp_affinity_respect_mask is set,
  // the full mask is the same as the mask of the initialization thread.
4833   kmp_affin_mask_t *mask;
4834   int i;
4835 
4836   if (KMP_AFFINITY_NON_PROC_BIND) {
4837     if ((__kmp_affinity_type == affinity_none) ||
4838         (__kmp_affinity_type == affinity_balanced)) {
4839 #if KMP_GROUP_AFFINITY
4840       if (__kmp_num_proc_groups > 1) {
4841         return;
4842       }
4843 #endif
4844       KMP_ASSERT(__kmp_affin_fullMask != NULL);
4845       i = 0;
4846       mask = __kmp_affin_fullMask;
4847     } else {
4848       KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
4849       i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
4850       mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
4851     }
4852   } else {
4853     if ((!isa_root) ||
4854         (__kmp_nested_proc_bind.bind_types[0] == proc_bind_false)) {
4855 #if KMP_GROUP_AFFINITY
4856       if (__kmp_num_proc_groups > 1) {
4857         return;
4858       }
4859 #endif
4860       KMP_ASSERT(__kmp_affin_fullMask != NULL);
4861       i = KMP_PLACE_ALL;
4862       mask = __kmp_affin_fullMask;
4863     } else {
4864       // int i = some hash function or just a counter that doesn't
4865       // always start at 0.  Use gtid for now.
4866       KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
4867       i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
4868       mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
4869     }
4870   }
4871 
4872   th->th.th_current_place = i;
4873   if (isa_root) {
4874     th->th.th_new_place = i;
4875     th->th.th_first_place = 0;
4876     th->th.th_last_place = __kmp_affinity_num_masks - 1;
4877   } else if (KMP_AFFINITY_NON_PROC_BIND) {
4878     // When using a Non-OMP_PROC_BIND affinity method,
4879     // set all threads' place-partition-var to the entire place list
4880     th->th.th_first_place = 0;
4881     th->th.th_last_place = __kmp_affinity_num_masks - 1;
4882   }
4883 
4884   if (i == KMP_PLACE_ALL) {
4885     KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
4886                    gtid));
4887   } else {
4888     KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
4889                    gtid, i));
4890   }
4891 
4892   KMP_CPU_COPY(th->th.th_affin_mask, mask);
4893 
4894   if (__kmp_affinity_verbose
4895       /* to avoid duplicate printing (will be correctly printed on barrier) */
4896       && (__kmp_affinity_type == affinity_none ||
4897           (i != KMP_PLACE_ALL && __kmp_affinity_type != affinity_balanced))) {
4898     char buf[KMP_AFFIN_MASK_PRINT_LEN];
4899     __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4900                               th->th.th_affin_mask);
4901     KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
4902                __kmp_gettid(), gtid, buf);
4903   }
4904 
4905 #if KMP_OS_WINDOWS
4906   // On Windows* OS, the process affinity mask might have changed. If the user
4907   // didn't request affinity and this call fails, just continue silently.
4908   // See CQ171393.
4909   if (__kmp_affinity_type == affinity_none) {
4910     __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
4911   } else
4912 #endif
4913     __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4914 }
4915 
4916 void __kmp_affinity_set_place(int gtid) {
4917   if (!KMP_AFFINITY_CAPABLE()) {
4918     return;
4919   }
4920 
4921   kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4922 
4923   KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current "
4924                  "place = %d)\n",
4925                  gtid, th->th.th_new_place, th->th.th_current_place));
4926 
4927   // Check that the new place is within this thread's partition.
4928   KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4929   KMP_ASSERT(th->th.th_new_place >= 0);
4930   KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
4931   if (th->th.th_first_place <= th->th.th_last_place) {
4932     KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
4933                (th->th.th_new_place <= th->th.th_last_place));
4934   } else {
4935     KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
4936                (th->th.th_new_place >= th->th.th_last_place));
4937   }
4938 
4939   // Copy the thread mask to the kmp_info_t structure,
4940   // and set this thread's affinity.
4941   kmp_affin_mask_t *mask =
4942       KMP_CPU_INDEX(__kmp_affinity_masks, th->th.th_new_place);
4943   KMP_CPU_COPY(th->th.th_affin_mask, mask);
4944   th->th.th_current_place = th->th.th_new_place;
4945 
4946   if (__kmp_affinity_verbose) {
4947     char buf[KMP_AFFIN_MASK_PRINT_LEN];
4948     __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4949                               th->th.th_affin_mask);
4950     KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
4951                __kmp_gettid(), gtid, buf);
4952   }
4953   __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4954 }
4955 
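// __kmp_aux_set_affinity() and the helpers that follow back the kmp_* affinity
// API exported to user code. A minimal illustrative sketch of how that API is
// typically driven (assuming the kmp_* declarations from omp.h; not a
// normative example):
//
//   kmp_affinity_mask_t mask;
//   kmp_create_affinity_mask(&mask);
//   kmp_set_affinity_mask_proc(0, &mask); // add OS proc 0 to the mask
//   if (kmp_set_affinity(&mask) != 0) {
//     // the mask was rejected or affinity is not capable
//   }
//   kmp_destroy_affinity_mask(&mask);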
4956 int __kmp_aux_set_affinity(void **mask) {
4957   int gtid;
4958   kmp_info_t *th;
4959   int retval;
4960 
4961   if (!KMP_AFFINITY_CAPABLE()) {
4962     return -1;
4963   }
4964 
4965   gtid = __kmp_entry_gtid();
4966   KA_TRACE(
4967       1000, (""); {
4968         char buf[KMP_AFFIN_MASK_PRINT_LEN];
4969         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4970                                   (kmp_affin_mask_t *)(*mask));
4971         __kmp_debug_printf(
4972             "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
4973             gtid, buf);
4974       });
4975 
4976   if (__kmp_env_consistency_check) {
4977     if ((mask == NULL) || (*mask == NULL)) {
4978       KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4979     } else {
4980       unsigned proc;
4981       int num_procs = 0;
4982 
4983       KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
4984         if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4985           KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4986         }
4987         if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
4988           continue;
4989         }
4990         num_procs++;
4991       }
4992       if (num_procs == 0) {
4993         KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4994       }
4995 
4996 #if KMP_GROUP_AFFINITY
4997       if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
4998         KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4999       }
5000 #endif /* KMP_GROUP_AFFINITY */
5001     }
5002   }
5003 
5004   th = __kmp_threads[gtid];
5005   KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
5006   retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
5007   if (retval == 0) {
5008     KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
5009   }
5010 
5011   th->th.th_current_place = KMP_PLACE_UNDEFINED;
5012   th->th.th_new_place = KMP_PLACE_UNDEFINED;
5013   th->th.th_first_place = 0;
5014   th->th.th_last_place = __kmp_affinity_num_masks - 1;
5015 
  // Turn off 4.0 affinity for the current thread at this parallel level.
5017   th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;
5018 
5019   return retval;
5020 }
5021 
5022 int __kmp_aux_get_affinity(void **mask) {
5023   int gtid;
5024   int retval;
5025   kmp_info_t *th;
5026 
5027   if (!KMP_AFFINITY_CAPABLE()) {
5028     return -1;
5029   }
5030 
5031   gtid = __kmp_entry_gtid();
5032   th = __kmp_threads[gtid];
5033   KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
5034 
5035   KA_TRACE(
5036       1000, (""); {
5037         char buf[KMP_AFFIN_MASK_PRINT_LEN];
5038         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5039                                   th->th.th_affin_mask);
5040         __kmp_printf(
5041             "kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid,
5042             buf);
5043       });
5044 
5045   if (__kmp_env_consistency_check) {
5046     if ((mask == NULL) || (*mask == NULL)) {
5047       KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
5048     }
5049   }
5050 
5051 #if !KMP_OS_WINDOWS
5052 
5053   retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
5054   KA_TRACE(
5055       1000, (""); {
5056         char buf[KMP_AFFIN_MASK_PRINT_LEN];
5057         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5058                                   (kmp_affin_mask_t *)(*mask));
5059         __kmp_printf(
5060             "kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid,
5061             buf);
5062       });
5063   return retval;
5064 
5065 #else
5066   (void)retval;
5067 
5068   KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
5069   return 0;
5070 
5071 #endif /* KMP_OS_WINDOWS */
5072 }
5073 
5074 int __kmp_aux_get_affinity_max_proc() {
5075   if (!KMP_AFFINITY_CAPABLE()) {
5076     return 0;
5077   }
5078 #if KMP_GROUP_AFFINITY
5079   if (__kmp_num_proc_groups > 1) {
5080     return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
5081   }
5082 #endif
5083   return __kmp_xproc;
5084 }
5085 
5086 int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
5087   if (!KMP_AFFINITY_CAPABLE()) {
5088     return -1;
5089   }
5090 
5091   KA_TRACE(
5092       1000, (""); {
5093         int gtid = __kmp_entry_gtid();
5094         char buf[KMP_AFFIN_MASK_PRINT_LEN];
5095         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5096                                   (kmp_affin_mask_t *)(*mask));
5097         __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
5098                            "affinity mask for thread %d = %s\n",
5099                            proc, gtid, buf);
5100       });
5101 
5102   if (__kmp_env_consistency_check) {
5103     if ((mask == NULL) || (*mask == NULL)) {
5104       KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
5105     }
5106   }
5107 
5108   if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5109     return -1;
5110   }
5111   if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5112     return -2;
5113   }
5114 
5115   KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
5116   return 0;
5117 }
5118 
5119 int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
5120   if (!KMP_AFFINITY_CAPABLE()) {
5121     return -1;
5122   }
5123 
5124   KA_TRACE(
5125       1000, (""); {
5126         int gtid = __kmp_entry_gtid();
5127         char buf[KMP_AFFIN_MASK_PRINT_LEN];
5128         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5129                                   (kmp_affin_mask_t *)(*mask));
5130         __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
5131                            "affinity mask for thread %d = %s\n",
5132                            proc, gtid, buf);
5133       });
5134 
5135   if (__kmp_env_consistency_check) {
5136     if ((mask == NULL) || (*mask == NULL)) {
5137       KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
5138     }
5139   }
5140 
5141   if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5142     return -1;
5143   }
5144   if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5145     return -2;
5146   }
5147 
5148   KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
5149   return 0;
5150 }
5151 
5152 int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
5153   if (!KMP_AFFINITY_CAPABLE()) {
5154     return -1;
5155   }
5156 
5157   KA_TRACE(
5158       1000, (""); {
5159         int gtid = __kmp_entry_gtid();
5160         char buf[KMP_AFFIN_MASK_PRINT_LEN];
5161         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5162                                   (kmp_affin_mask_t *)(*mask));
5163         __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
5164                            "affinity mask for thread %d = %s\n",
5165                            proc, gtid, buf);
5166       });
5167 
5168   if (__kmp_env_consistency_check) {
5169     if ((mask == NULL) || (*mask == NULL)) {
5170       KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
5171     }
5172   }
5173 
5174   if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5175     return -1;
5176   }
5177   if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5178     return 0;
5179   }
5180 
5181   return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
5182 }
5183 
5184 // Dynamic affinity settings - Affinity balanced
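// Threads are spread as evenly as possible across the machine's cores; each
// thread is then bound either to a single hardware context of its core (fine
// granularity) or to all contexts of that core (coarser granularities).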
5185 void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
5186   KMP_DEBUG_ASSERT(th);
5187   bool fine_gran = true;
5188   int tid = th->th.th_info.ds.ds_tid;
5189 
5190   switch (__kmp_affinity_gran) {
5191   case affinity_gran_fine:
5192   case affinity_gran_thread:
5193     break;
5194   case affinity_gran_core:
5195     if (__kmp_nThreadsPerCore > 1) {
5196       fine_gran = false;
5197     }
5198     break;
5199   case affinity_gran_package:
5200     if (nCoresPerPkg > 1) {
5201       fine_gran = false;
5202     }
5203     break;
5204   default:
5205     fine_gran = false;
5206   }
5207 
5208   if (__kmp_affinity_uniform_topology()) {
5209     int coreID;
5210     int threadID;
    // Number of hyper-threads per core on an HT machine
5212     int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
5213     // Number of cores
5214     int ncores = __kmp_ncores;
5215     if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
5216       __kmp_nth_per_core = __kmp_avail_proc / nPackages;
5217       ncores = nPackages;
5218     }
5219     // How many threads will be bound to each core
5220     int chunk = nthreads / ncores;
    // How many cores will have an additional thread bound to them - "big cores"
5222     int big_cores = nthreads % ncores;
5223     // Number of threads on the big cores
5224     int big_nth = (chunk + 1) * big_cores;
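    // Illustrative example: nthreads == 10 on 4 cores gives chunk == 2,
    // big_cores == 2 and big_nth == 6, so tids 0-5 land on cores 0-1 (three
    // threads each) and tids 6-9 on cores 2-3 (two threads each).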
5225     if (tid < big_nth) {
5226       coreID = tid / (chunk + 1);
5227       threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
5228     } else { // tid >= big_nth
5229       coreID = (tid - big_cores) / chunk;
5230       threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
5231     }
5232 
5233     KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
5234                       "Illegal set affinity operation when not capable");
5235 
5236     kmp_affin_mask_t *mask = th->th.th_affin_mask;
5237     KMP_CPU_ZERO(mask);
5238 
5239     if (fine_gran) {
5240       int osID = address2os[coreID * __kmp_nth_per_core + threadID].second;
5241       KMP_CPU_SET(osID, mask);
5242     } else {
5243       for (int i = 0; i < __kmp_nth_per_core; i++) {
5244         int osID;
5245         osID = address2os[coreID * __kmp_nth_per_core + i].second;
5246         KMP_CPU_SET(osID, mask);
5247       }
5248     }
5249     if (__kmp_affinity_verbose) {
5250       char buf[KMP_AFFIN_MASK_PRINT_LEN];
5251       __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5252       KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
5253                  __kmp_gettid(), tid, buf);
5254     }
5255     __kmp_set_system_affinity(mask, TRUE);
5256   } else { // Non-uniform topology
5257 
5258     kmp_affin_mask_t *mask = th->th.th_affin_mask;
5259     KMP_CPU_ZERO(mask);
5260 
5261     int core_level = __kmp_affinity_find_core_level(
5262         address2os, __kmp_avail_proc, __kmp_aff_depth - 1);
5263     int ncores = __kmp_affinity_compute_ncores(address2os, __kmp_avail_proc,
5264                                                __kmp_aff_depth - 1, core_level);
5265     int nth_per_core = __kmp_affinity_max_proc_per_core(
5266         address2os, __kmp_avail_proc, __kmp_aff_depth - 1, core_level);
5267 
    // For a performance gain, consider the special case
    // nthreads == __kmp_avail_proc
5270     if (nthreads == __kmp_avail_proc) {
5271       if (fine_gran) {
5272         int osID = address2os[tid].second;
5273         KMP_CPU_SET(osID, mask);
5274       } else {
5275         int core = __kmp_affinity_find_core(address2os, tid,
5276                                             __kmp_aff_depth - 1, core_level);
5277         for (int i = 0; i < __kmp_avail_proc; i++) {
5278           int osID = address2os[i].second;
5279           if (__kmp_affinity_find_core(address2os, i, __kmp_aff_depth - 1,
5280                                        core_level) == core) {
5281             KMP_CPU_SET(osID, mask);
5282           }
5283         }
5284       }
5285     } else if (nthreads <= ncores) {
5286 
5287       int core = 0;
5288       for (int i = 0; i < ncores; i++) {
        // Check whether this core has any available processors in procarr[]
5290         int in_mask = 0;
5291         for (int j = 0; j < nth_per_core; j++) {
5292           if (procarr[i * nth_per_core + j] != -1) {
5293             in_mask = 1;
5294             break;
5295           }
5296         }
5297         if (in_mask) {
5298           if (tid == core) {
5299             for (int j = 0; j < nth_per_core; j++) {
5300               int osID = procarr[i * nth_per_core + j];
5301               if (osID != -1) {
5302                 KMP_CPU_SET(osID, mask);
5303                 // For fine granularity it is enough to set the first available
5304                 // osID for this core
5305                 if (fine_gran) {
5306                   break;
5307                 }
5308               }
5309             }
5310             break;
5311           } else {
5312             core++;
5313           }
5314         }
5315       }
5316     } else { // nthreads > ncores
5317       // Array to save the number of processors at each core
5318       int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
5319       // Array to save the number of cores with "x" available processors;
5320       int *ncores_with_x_procs =
5321           (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5322       // Array to save the number of cores with # procs from x to nth_per_core
5323       int *ncores_with_x_to_max_procs =
5324           (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5325 
5326       for (int i = 0; i <= nth_per_core; i++) {
5327         ncores_with_x_procs[i] = 0;
5328         ncores_with_x_to_max_procs[i] = 0;
5329       }
5330 
5331       for (int i = 0; i < ncores; i++) {
5332         int cnt = 0;
5333         for (int j = 0; j < nth_per_core; j++) {
5334           if (procarr[i * nth_per_core + j] != -1) {
5335             cnt++;
5336           }
5337         }
5338         nproc_at_core[i] = cnt;
5339         ncores_with_x_procs[cnt]++;
5340       }
5341 
5342       for (int i = 0; i <= nth_per_core; i++) {
5343         for (int j = i; j <= nth_per_core; j++) {
5344           ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
5345         }
5346       }
5347 
5348       // Max number of processors
5349       int nproc = nth_per_core * ncores;
      // An array to keep the number of threads assigned to each context
5351       int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
5352       for (int i = 0; i < nproc; i++) {
5353         newarr[i] = 0;
5354       }
5355 
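      // Distribute the threads over the available contexts. Roughly: the first
      // pass of the while loop places at most one thread per context, one core
      // at a time, so cores with more contexts receive more threads; on later
      // passes (flag != 0) the remaining threads are stacked onto
      // already-populated contexts until all nthreads are placed.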
5356       int nth = nthreads;
5357       int flag = 0;
5358       while (nth > 0) {
5359         for (int j = 1; j <= nth_per_core; j++) {
5360           int cnt = ncores_with_x_to_max_procs[j];
5361           for (int i = 0; i < ncores; i++) {
            // Skip cores with 0 processors
5363             if (nproc_at_core[i] == 0) {
5364               continue;
5365             }
5366             for (int k = 0; k < nth_per_core; k++) {
5367               if (procarr[i * nth_per_core + k] != -1) {
5368                 if (newarr[i * nth_per_core + k] == 0) {
5369                   newarr[i * nth_per_core + k] = 1;
5370                   cnt--;
5371                   nth--;
5372                   break;
5373                 } else {
5374                   if (flag != 0) {
5375                     newarr[i * nth_per_core + k]++;
5376                     cnt--;
5377                     nth--;
5378                     break;
5379                   }
5380                 }
5381               }
5382             }
5383             if (cnt == 0 || nth == 0) {
5384               break;
5385             }
5386           }
5387           if (nth == 0) {
5388             break;
5389           }
5390         }
5391         flag = 1;
5392       }
5393       int sum = 0;
5394       for (int i = 0; i < nproc; i++) {
5395         sum += newarr[i];
5396         if (sum > tid) {
5397           if (fine_gran) {
5398             int osID = procarr[i];
5399             KMP_CPU_SET(osID, mask);
5400           } else {
5401             int coreID = i / nth_per_core;
5402             for (int ii = 0; ii < nth_per_core; ii++) {
5403               int osID = procarr[coreID * nth_per_core + ii];
5404               if (osID != -1) {
5405                 KMP_CPU_SET(osID, mask);
5406               }
5407             }
5408           }
5409           break;
5410         }
5411       }
5412       __kmp_free(newarr);
5413     }
5414 
5415     if (__kmp_affinity_verbose) {
5416       char buf[KMP_AFFIN_MASK_PRINT_LEN];
5417       __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5418       KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
5419                  __kmp_gettid(), tid, buf);
5420     }
5421     __kmp_set_system_affinity(mask, TRUE);
5422   }
5423 }
5424 
5425 #if KMP_OS_LINUX || KMP_OS_FREEBSD
// We don't need this entry point on Windows because the
// GetProcessAffinityMask() API is available there.
5428 //
// The intended usage is indicated by these steps (see the sketch below):
5430 // 1) The user gets the current affinity mask
5431 // 2) Then sets the affinity by calling this function
5432 // 3) Error check the return value
5433 // 4) Use non-OpenMP parallelization
5434 // 5) Reset the affinity to what was stored in step 1)
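// An illustrative sketch of these steps on Linux (assumes _GNU_SOURCE and
// glibc's pthread_{get,set}affinity_np; run_non_openmp_parallel_work() stands
// in for the user's code and is not part of the runtime):
//
//   cpu_set_t saved;
//   pthread_getaffinity_np(pthread_self(), sizeof(saved), &saved);  // step 1
//   if (kmp_set_thread_affinity_mask_initial() == 0) {              // steps 2-3
//     run_non_openmp_parallel_work();                               // step 4
//   }
//   pthread_setaffinity_np(pthread_self(), sizeof(saved), &saved);  // step 5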
5435 #ifdef __cplusplus
5436 extern "C"
5437 #endif
5438     int
5439     kmp_set_thread_affinity_mask_initial()
5440 // the function returns 0 on success,
5441 //   -1 if we cannot bind thread
5442 //   >0 (errno) if an error happened during binding
5443 {
5444   int gtid = __kmp_get_gtid();
5445   if (gtid < 0) {
5446     // Do not touch non-omp threads
5447     KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5448                   "non-omp thread, returning\n"));
5449     return -1;
5450   }
5451   if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
5452     KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5453                   "affinity not initialized, returning\n"));
5454     return -1;
5455   }
5456   KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5457                 "set full mask for thread %d\n",
5458                 gtid));
5459   KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
5460   return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
5461 }
5462 #endif
5463 
5464 #endif // KMP_AFFINITY_SUPPORTED
5465