/*
 * kmp_affinity.cpp -- affinity management
 */


//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#include "kmp_wrapper_getpid.h"
#include "kmp_affinity.h"

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() {
    machine_hierarchy.fini();
}

void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
    kmp_uint32 depth;
    // The test below is true if affinity is available, but set to "none".
    // Need to init on first use of hierarchical barrier.
    if (TCR_1(machine_hierarchy.uninitialized))
        machine_hierarchy.init(NULL, nproc);

    // Adjust the hierarchy in case num threads exceeds original
    if (nproc > machine_hierarchy.base_num_threads)
        machine_hierarchy.resize(nproc);

    depth = machine_hierarchy.depth;
    KMP_DEBUG_ASSERT(depth > 0);

    thr_bar->depth = depth;
    thr_bar->base_leaf_kids = (kmp_uint8)machine_hierarchy.numPerLevel[0] - 1;
    thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}

#if KMP_AFFINITY_SUPPORTED

//
// Print the affinity mask to the character array in a pretty format.
//
#if KMP_USE_HWLOC
char *
__kmp_affinity_print_mask(char *buf, int buf_len, kmp_affin_mask_t *mask)
{
    int num_chars_to_write, num_chars_written;
    char *scan;
    KMP_ASSERT(buf_len >= 40);

    // bufsize of 0 just retrieves the needed buffer size.
    num_chars_to_write = hwloc_bitmap_list_snprintf(buf, 0, (hwloc_bitmap_t)mask);

    // need '{', "xxxxxxxx...xx", '}', '\0' = num_chars_to_write + 3 bytes
    // * num_chars_to_write returned by hwloc_bitmap_list_snprintf does not
    //   take into account the '\0' character.
    if (hwloc_bitmap_iszero((hwloc_bitmap_t)mask)) {
        KMP_SNPRINTF(buf, buf_len, "{<empty>}");
    } else if (num_chars_to_write < buf_len - 3) {
        // no problem fitting the mask into buf_len number of characters
        buf[0] = '{';
        // use buf_len-3 because we have the three characters '{', '}', and
        // '\0' to add to the buffer
        num_chars_written = hwloc_bitmap_list_snprintf(buf+1, buf_len-3, (hwloc_bitmap_t)mask);
        buf[num_chars_written+1] = '}';
        buf[num_chars_written+2] = '\0';
    } else {
        // Need to truncate the affinity mask string and add ellipsis.
        // To do this, we first write out the '{' + str(mask), then go to the
        // 7th-to-last character and walk backwards until we are NOT on a
        // digit, and write "...}\0" there. This way the ellipsis is added
        // cleanly and we don't overwrite part of an affinity number, i.e. we
        // avoid something like { 45, 67, 8...} and get { 45, 67,...} instead.
        buf[0] = '{';
        hwloc_bitmap_list_snprintf(buf+1, buf_len-1, (hwloc_bitmap_t)mask);
        // The scan >= buf test comes first so we never read before the start
        // of the buffer.
        scan = buf + buf_len - 7;
        while (scan >= buf && *scan >= '0' && *scan <= '9')
            scan--;
        *(scan+1) = '.';
        *(scan+2) = '.';
        *(scan+3) = '.';
        *(scan+4) = '}';
        *(scan+5) = '\0';
    }
    return buf;
}
#else
char *
__kmp_affinity_print_mask(char *buf, int buf_len, kmp_affin_mask_t *mask)
{
    KMP_ASSERT(buf_len >= 40);
    char *scan = buf;
    char *end = buf + buf_len - 1;

    //
    // Find first element / check for empty set.
    //
    size_t i;
    for (i = 0; i < KMP_CPU_SETSIZE; i++) {
        if (KMP_CPU_ISSET(i, mask)) {
            break;
        }
    }
    if (i == KMP_CPU_SETSIZE) {
        KMP_SNPRINTF(scan, end-scan+1, "{<empty>}");
        while (*scan != '\0') scan++;
        KMP_ASSERT(scan <= end);
        return buf;
    }

    KMP_SNPRINTF(scan, end-scan+1, "{%ld", (long)i);
    while (*scan != '\0') scan++;
    i++;
    for (; i < KMP_CPU_SETSIZE; i++) {
        if (! KMP_CPU_ISSET(i, mask)) {
            continue;
        }

        //
        // Check for buffer overflow. A string of the form ",<n>" will have
        // at most 10 characters, plus we want to leave room to print ",...}"
        // if the set is too large to print for a total of 15 characters.
        // We already left room for '\0' in setting end.
        //
        if (end - scan < 15) {
            break;
        }
        KMP_SNPRINTF(scan, end-scan+1, ",%-ld", (long)i);
        while (*scan != '\0') scan++;
    }
    if (i < KMP_CPU_SETSIZE) {
        KMP_SNPRINTF(scan, end-scan+1, ",...");
        while (*scan != '\0') scan++;
    }
    KMP_SNPRINTF(scan, end-scan+1, "}");
    while (*scan != '\0') scan++;
    KMP_ASSERT(scan <= end);
    return buf;
}
#endif // KMP_USE_HWLOC
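
// For illustration: the non-hwloc version prints each proc id, e.g.
// "{0,1,2,3}", truncating to the form "{0,1,...}" when the full list would
// overflow buf_len; the hwloc version uses hwloc's list syntax (e.g. "{0-3}").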


void
__kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask)
{
    KMP_CPU_ZERO(mask);

# if KMP_GROUP_AFFINITY

    if (__kmp_num_proc_groups > 1) {
        int group;
        KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
        for (group = 0; group < __kmp_num_proc_groups; group++) {
            int i;
            int num = __kmp_GetActiveProcessorCount(group);
            for (i = 0; i < num; i++) {
                KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
            }
        }
    }
    else

# endif /* KMP_GROUP_AFFINITY */

    {
        int proc;
        for (proc = 0; proc < __kmp_xproc; proc++) {
            KMP_CPU_SET(proc, mask);
        }
    }
}

//
// When sorting by labels, __kmp_affinity_assign_child_nums() must first be
// called to renumber the labels from [0..n] and place them into the child_num
// vector of the address object. This is done in case the labels used for
// the children at one node of the hierarchy differ from those used for
// another node at the same level. Example: suppose the machine has 2 nodes
// with 2 packages each. The first node contains packages 601 and 602, and
// the second node contains packages 603 and 604. If we try to sort the table
// for "scatter" affinity, the table will still be sorted 601, 602, 603, 604
// because we are paying attention to the labels themselves, not the ordinal
// child numbers. By using the child numbers in the sort, the result is
// {0,0}=601, {0,1}=603, {1,0}=602, {1,1}=604.
//
static void
__kmp_affinity_assign_child_nums(AddrUnsPair *address2os,
  int numAddrs)
{
    KMP_DEBUG_ASSERT(numAddrs > 0);
    int depth = address2os->first.depth;
    unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
    unsigned *lastLabel = (unsigned *)__kmp_allocate(depth
      * sizeof(unsigned));
    int labCt;
    for (labCt = 0; labCt < depth; labCt++) {
        address2os[0].first.childNums[labCt] = counts[labCt] = 0;
        lastLabel[labCt] = address2os[0].first.labels[labCt];
    }
    int i;
    for (i = 1; i < numAddrs; i++) {
        for (labCt = 0; labCt < depth; labCt++) {
            if (address2os[i].first.labels[labCt] != lastLabel[labCt]) {
                int labCt2;
                for (labCt2 = labCt + 1; labCt2 < depth; labCt2++) {
                    counts[labCt2] = 0;
                    lastLabel[labCt2] = address2os[i].first.labels[labCt2];
                }
                counts[labCt]++;
                lastLabel[labCt] = address2os[i].first.labels[labCt];
                break;
            }
        }
        for (labCt = 0; labCt < depth; labCt++) {
            address2os[i].first.childNums[labCt] = counts[labCt];
        }
        for (; labCt < (int)Address::maxDepth; labCt++) {
            address2os[i].first.childNums[labCt] = 0;
        }
    }
}


//
// All of the __kmp_affinity_create_*_map() routines should set
// __kmp_affinity_masks to a vector of affinity mask objects of length
// __kmp_affinity_num_masks, if __kmp_affinity_type != affinity_none, and
// return the number of levels in the machine topology tree (zero if
// __kmp_affinity_type == affinity_none).
//
// All of the __kmp_affinity_create_*_map() routines should set
// *__kmp_affin_fullMask to the affinity mask for the initialization thread.
// They need to save and restore the mask, and it could be needed later, so
// saving it is just an optimization to avoid calling
// kmp_get_system_affinity() again.
//
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;

static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif

//
// __kmp_affinity_uniform_topology() doesn't work when called from
// places which support arbitrarily many levels in the machine topology
// map, i.e. the non-default cases in __kmp_affinity_create_cpuinfo_map()
// and __kmp_affinity_create_x2apicid_map().
//
inline static bool
__kmp_affinity_uniform_topology()
{
    return __kmp_avail_proc == (__kmp_nThreadsPerCore * nCoresPerPkg * nPackages);
}
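
// For illustration: with 2 packages x 8 cores x 2 hw threads the product is
// 32, so the topology is reported as uniform only if all 32 procs are
// available to this process.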


//
// Print out the detailed machine topology map, i.e. the physical locations
// of each OS proc.
//
static void
__kmp_affinity_print_topology(AddrUnsPair *address2os, int len, int depth,
  int pkgLevel, int coreLevel, int threadLevel)
{
    int proc;

    KMP_INFORM(OSProcToPhysicalThreadMap, "KMP_AFFINITY");
    for (proc = 0; proc < len; proc++) {
        int level;
        kmp_str_buf_t buf;
        __kmp_str_buf_init(&buf);
        for (level = 0; level < depth; level++) {
            if (level == threadLevel) {
                __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Thread));
            }
            else if (level == coreLevel) {
                __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Core));
            }
            else if (level == pkgLevel) {
                __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Package));
            }
            else if (level > pkgLevel) {
                __kmp_str_buf_print(&buf, "%s_%d ", KMP_I18N_STR(Node),
                  level - pkgLevel - 1);
            }
            else {
                __kmp_str_buf_print(&buf, "L%d ", level);
            }
            __kmp_str_buf_print(&buf, "%d ",
              address2os[proc].first.labels[level]);
        }
        KMP_INFORM(OSProcMapToPack, "KMP_AFFINITY", address2os[proc].second,
          buf.str);
        __kmp_str_buf_free(&buf);
    }
}

#if KMP_USE_HWLOC

// This function removes the topology levels that are radix 1 and don't offer
// further information about the topology. The most common example is when
// there is one thread context per core: the extra thread-context level offers
// no unique labels, so it is removed.
// return value: the new depth of address2os
static int
__kmp_affinity_remove_radix_one_levels(AddrUnsPair *address2os, int nActiveThreads, int depth, int* pkgLevel, int* coreLevel, int* threadLevel) {
    int level;
    int i;
    int radix1_detected;

    for (level = depth-1; level >= 0; --level) {
        // Always keep the package level
        if (level == *pkgLevel)
            continue;
        // Detect if this level is radix 1
        radix1_detected = 1;
        for (i = 1; i < nActiveThreads; ++i) {
            if (address2os[0].first.labels[level] != address2os[i].first.labels[level]) {
                // There are differing label values for this level so it stays
                radix1_detected = 0;
                break;
            }
        }
        if (!radix1_detected)
            continue;
        // Radix 1 was detected
        if (level == *threadLevel) {
            // If only one thread per core, then just decrement
            // the depth which removes the threadlevel from address2os
            for (i = 0; i < nActiveThreads; ++i) {
                address2os[i].first.depth--;
            }
            *threadLevel = -1;
        } else if (level == *coreLevel) {
            // For core level, we move the thread labels over if they are still
            // valid (*threadLevel != -1), and also reduce the depth another level
            for (i = 0; i < nActiveThreads; ++i) {
                if (*threadLevel != -1) {
                    address2os[i].first.labels[*coreLevel] = address2os[i].first.labels[*threadLevel];
                }
                address2os[i].first.depth--;
            }
            *coreLevel = -1;
        }
    }
    return address2os[0].first.depth;
}
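
// For illustration: with 3-level addresses {package, core, pu} and exactly one
// PU per core, every pu label is identical, so the PU level is removed and the
// depth returned above drops from 3 to 2.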

// Returns the number of objects of type 'type' below 'obj' within the topology
// tree structure. e.g., if obj is a HWLOC_OBJ_SOCKET object, and type is
// HWLOC_OBJ_PU, then this will return the number of PU's under the SOCKET
// object.
static int
__kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj, hwloc_obj_type_t type) {
    int retval = 0;
    hwloc_obj_t first;
    for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type, obj->logical_index, type, 0);
         first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, obj->type, first) == obj;
         first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type, first))
    {
        ++retval;
    }
    return retval;
}
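
// For illustration (the counts are hypothetical): on a machine whose socket 0
// has 8 cores with 2 hw threads each, the following call would return 16:
//   __kmp_hwloc_get_nobjs_under_obj(
//       hwloc_get_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_SOCKET, 0),
//       HWLOC_OBJ_PU);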

static int
__kmp_affinity_create_hwloc_map(AddrUnsPair **address2os,
  kmp_i18n_id_t *const msg_id)
{
    *address2os = NULL;
    *msg_id = kmp_i18n_null;

    //
    // Save the affinity mask for the current thread.
    //
    kmp_affin_mask_t *oldMask;
    KMP_CPU_ALLOC(oldMask);
    __kmp_get_system_affinity(oldMask, TRUE);

    int depth = 3;
    int pkgLevel = 0;
    int coreLevel = 1;
    int threadLevel = 2;
    nPackages = __kmp_hwloc_get_nobjs_under_obj(hwloc_get_root_obj(__kmp_hwloc_topology), HWLOC_OBJ_SOCKET);
    nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(hwloc_get_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_SOCKET, 0), HWLOC_OBJ_CORE);
    __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(hwloc_get_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_CORE, 0), HWLOC_OBJ_PU);

    if (! KMP_AFFINITY_CAPABLE())
    {
        //
        // Hack to try and infer the machine topology using only the data
        // available from cpuid on the current thread, and __kmp_xproc.
        //
        KMP_ASSERT(__kmp_affinity_type == affinity_none);

        __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
        nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffNotCapableUseLocCpuidL11, "KMP_AFFINITY");
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            if (__kmp_affinity_uniform_topology()) {
                KMP_INFORM(Uniform, "KMP_AFFINITY");
            } else {
                KMP_INFORM(NonUniform, "KMP_AFFINITY");
            }
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }
        KMP_CPU_FREE(oldMask);
        return 0;
    }

    //
    // Allocate the data structure to be returned.
    //
    AddrUnsPair *retval = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);

    hwloc_obj_t pu;
    hwloc_obj_t core;
    hwloc_obj_t socket;
    int nActiveThreads = 0;
    int socket_identifier = 0;
    for (socket = hwloc_get_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_SOCKET, 0);
         socket != NULL;
         socket = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_SOCKET, socket),
         socket_identifier++)
    {
        int core_identifier = 0;
        for (core = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, socket->type, socket->logical_index, HWLOC_OBJ_CORE, 0);
             core != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, socket->type, core) == socket;
             core = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_CORE, core),
             core_identifier++)
        {
            int pu_identifier = 0;
            for (pu = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, core->type, core->logical_index, HWLOC_OBJ_PU, 0);
                 pu != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, core->type, pu) == core;
                 pu = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_PU, pu),
                 pu_identifier++)
            {
                Address addr(3);
                if (! KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask))
                    continue;
                KA_TRACE(20, ("Hwloc inserting %d (%d) %d (%d) %d (%d) into address2os\n",
                  socket->os_index, socket->logical_index, core->os_index, core->logical_index, pu->os_index, pu->logical_index));
                addr.labels[0] = socket_identifier; // package
                addr.labels[1] = core_identifier;   // core
                addr.labels[2] = pu_identifier;     // pu
                retval[nActiveThreads] = AddrUnsPair(addr, pu->os_index);
                nActiveThreads++;
            }
        }
    }
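    // For illustration: the second active hw thread of core 3 on socket 1
    // would be stored as labels {1, 3, 1}, paired with that PU's os_index.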

    //
    // If there's only one thread context to bind to, return now.
    //
    KMP_ASSERT(nActiveThreads > 0);
    if (nActiveThreads == 1) {
        __kmp_ncores = nPackages = 1;
        __kmp_nThreadsPerCore = nCoresPerPkg = 1;
        if (__kmp_affinity_verbose) {
            char buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

            KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
            if (__kmp_affinity_respect_mask) {
                KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
            } else {
                KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
            }
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            KMP_INFORM(Uniform, "KMP_AFFINITY");
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }

        if (__kmp_affinity_type == affinity_none) {
            __kmp_free(retval);
            KMP_CPU_FREE(oldMask);
            return 0;
        }

        //
        // Form an Address object which only includes the package level.
        //
        Address addr(1);
        addr.labels[0] = retval[0].first.labels[pkgLevel];
        retval[0].first = addr;

        if (__kmp_affinity_gran_levels < 0) {
            __kmp_affinity_gran_levels = 0;
        }

        if (__kmp_affinity_verbose) {
            __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
        }

        *address2os = retval;
        KMP_CPU_FREE(oldMask);
        return 1;
    }

    //
    // Sort the table by physical Id.
    //
    qsort(retval, nActiveThreads, sizeof(*retval), __kmp_affinity_cmp_Address_labels);

    //
    // When affinity is off, this routine will still be called to set
    // __kmp_ncores, as well as __kmp_nThreadsPerCore,
    // nCoresPerPkg, & nPackages. Make sure all these vars are set
    // correctly, and return if affinity is not enabled.
    //
    __kmp_ncores = hwloc_get_nbobjs_by_type(__kmp_hwloc_topology, HWLOC_OBJ_CORE);

    //
    // Check to see if the machine topology is uniform
    //
    unsigned npackages = hwloc_get_nbobjs_by_type(__kmp_hwloc_topology, HWLOC_OBJ_SOCKET);
    unsigned ncores = __kmp_ncores;
    unsigned nthreads = hwloc_get_nbobjs_by_type(__kmp_hwloc_topology, HWLOC_OBJ_PU);
    unsigned uniform = (npackages * nCoresPerPkg * __kmp_nThreadsPerCore == nthreads);

    //
    // Print the machine topology summary.
    //
    if (__kmp_affinity_verbose) {
        char mask[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(mask, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

        KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
        if (__kmp_affinity_respect_mask) {
            KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", mask);
        } else {
            KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", mask);
        }
        KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
        if (uniform) {
            KMP_INFORM(Uniform, "KMP_AFFINITY");
        } else {
            KMP_INFORM(NonUniform, "KMP_AFFINITY");
        }

        kmp_str_buf_t buf;
        __kmp_str_buf_init(&buf);

        __kmp_str_buf_print(&buf, "%d", npackages);
        //for (level = 1; level <= pkgLevel; level++) {
        //    __kmp_str_buf_print(&buf, " x %d", maxCt[level]);
        //}
        KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, nCoresPerPkg,
          __kmp_nThreadsPerCore, __kmp_ncores);

        __kmp_str_buf_free(&buf);
    }

    if (__kmp_affinity_type == affinity_none) {
        __kmp_free(retval);
        KMP_CPU_FREE(oldMask);
        return 0;
    }

    //
    // Find any levels with radix 1, and remove them from the map
    // (except for the package level).
    //
    depth = __kmp_affinity_remove_radix_one_levels(retval, nActiveThreads, depth, &pkgLevel, &coreLevel, &threadLevel);

    if (__kmp_affinity_gran_levels < 0) {
        //
        // Set the granularity level based on what levels are modeled
        // in the machine topology map.
        //
        __kmp_affinity_gran_levels = 0;
        if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
            __kmp_affinity_gran_levels++;
        }
        if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
            __kmp_affinity_gran_levels++;
        }
        if (__kmp_affinity_gran > affinity_gran_package) {
            __kmp_affinity_gran_levels++;
        }
    }

    if (__kmp_affinity_verbose) {
        __kmp_affinity_print_topology(retval, nActiveThreads, depth, pkgLevel,
          coreLevel, threadLevel);
    }

    KMP_CPU_FREE(oldMask);
    *address2os = retval;
    return depth;
}
#endif // KMP_USE_HWLOC

//
// If we don't know how to retrieve the machine's processor topology, or
// encounter an error in doing so, this routine is called to form a "flat"
// mapping of os thread id's <-> processor id's.
//
static int
__kmp_affinity_create_flat_map(AddrUnsPair **address2os,
  kmp_i18n_id_t *const msg_id)
{
    *address2os = NULL;
    *msg_id = kmp_i18n_null;

    //
    // Even if __kmp_affinity_type == affinity_none, this routine might still
    // be called to set __kmp_ncores, as well as
    // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
    //
    if (! KMP_AFFINITY_CAPABLE()) {
        KMP_ASSERT(__kmp_affinity_type == affinity_none);
        __kmp_ncores = nPackages = __kmp_xproc;
        __kmp_nThreadsPerCore = nCoresPerPkg = 1;
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffFlatTopology, "KMP_AFFINITY");
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            KMP_INFORM(Uniform, "KMP_AFFINITY");
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }
        return 0;
    }

    //
    // When affinity is off, this routine will still be called to set
    // __kmp_ncores, as well as __kmp_nThreadsPerCore,
    // nCoresPerPkg, & nPackages. Make sure all these vars are set
    // correctly, and return now if affinity is not enabled.
    //
    __kmp_ncores = nPackages = __kmp_avail_proc;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, __kmp_affin_fullMask);

        KMP_INFORM(AffCapableUseFlat, "KMP_AFFINITY");
        if (__kmp_affinity_respect_mask) {
            KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
        } else {
            KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
        }
        KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
        KMP_INFORM(Uniform, "KMP_AFFINITY");
        KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
          __kmp_nThreadsPerCore, __kmp_ncores);
    }
    if (__kmp_affinity_type == affinity_none) {
        return 0;
    }

    //
    // Construct the data structure to be returned.
    //
    *address2os = (AddrUnsPair*)
      __kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
    int avail_ct = 0;
    unsigned int i;
    KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
        //
        // Skip this proc if it is not included in the machine model.
        //
        if (! KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
            continue;
        }

        Address addr(1);
        addr.labels[0] = i;
        (*address2os)[avail_ct++] = AddrUnsPair(addr, i);
    }
    if (__kmp_affinity_verbose) {
        KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
    }

    if (__kmp_affinity_gran_levels < 0) {
        //
        // Only the package level is modeled in the machine topology map,
        // so the #levels of granularity is either 0 or 1.
        //
        if (__kmp_affinity_gran > affinity_gran_package) {
            __kmp_affinity_gran_levels = 1;
        }
        else {
            __kmp_affinity_gran_levels = 0;
        }
    }
    return 1;
}


# if KMP_GROUP_AFFINITY

//
// If multiple Windows* OS processor groups exist, we can create a 2-level
// topology map with the groups at level 0 and the individual procs at
// level 1.
//
// This facilitates letting the threads float among all procs in a group,
// if granularity=group (the default when there are multiple groups).
//
static int
__kmp_affinity_create_proc_group_map(AddrUnsPair **address2os,
  kmp_i18n_id_t *const msg_id)
{
    *address2os = NULL;
    *msg_id = kmp_i18n_null;

    //
    // If we don't have multiple processor groups, return now.
    // The flat mapping will be used.
    //
    if ((! KMP_AFFINITY_CAPABLE()) || (__kmp_get_proc_group(__kmp_affin_fullMask) >= 0)) {
        // FIXME set *msg_id
        return -1;
    }

    //
    // Construct the data structure to be returned.
    //
    *address2os = (AddrUnsPair*)
      __kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
    int avail_ct = 0;
    int i;
    KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
        //
        // Skip this proc if it is not included in the machine model.
        //
        if (! KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
            continue;
        }

        Address addr(2);
        addr.labels[0] = i / (CHAR_BIT * sizeof(DWORD_PTR));
        addr.labels[1] = i % (CHAR_BIT * sizeof(DWORD_PTR));
        (*address2os)[avail_ct++] = AddrUnsPair(addr, i);

        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffOSProcToGroup, "KMP_AFFINITY", i, addr.labels[0],
              addr.labels[1]);
        }
    }

    if (__kmp_affinity_gran_levels < 0) {
        if (__kmp_affinity_gran == affinity_gran_group) {
            __kmp_affinity_gran_levels = 1;
        }
        else if ((__kmp_affinity_gran == affinity_gran_fine)
          || (__kmp_affinity_gran == affinity_gran_thread)) {
            __kmp_affinity_gran_levels = 0;
        }
        else {
            const char *gran_str = NULL;
            if (__kmp_affinity_gran == affinity_gran_core) {
                gran_str = "core";
            }
            else if (__kmp_affinity_gran == affinity_gran_package) {
                gran_str = "package";
            }
            else if (__kmp_affinity_gran == affinity_gran_node) {
                gran_str = "node";
            }
            else {
                KMP_ASSERT(0);
            }

            // Warning: can't use affinity granularity \"gran\" with group topology method, using "thread"
            __kmp_affinity_gran_levels = 0;
        }
    }
    return 2;
}

# endif /* KMP_GROUP_AFFINITY */


# if KMP_ARCH_X86 || KMP_ARCH_X86_64

static int
__kmp_cpuid_mask_width(int count) {
    int r = 0;

    while ((1 << r) < count)
        ++r;
    return r;
}
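
// For illustration: __kmp_cpuid_mask_width(6) == 3, since 1<<3 == 8 is the
// smallest power of 2 >= 6; i.e. the function returns ceil(log2(count)).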


class apicThreadInfo {
public:
    unsigned osId;              // param to __kmp_affinity_bind_thread
    unsigned apicId;            // from cpuid after binding
    unsigned maxCoresPerPkg;    // ""
    unsigned maxThreadsPerPkg;  // ""
    unsigned pkgId;             // inferred from above values
    unsigned coreId;            // ""
    unsigned threadId;          // ""
};


static int
__kmp_affinity_cmp_apicThreadInfo_os_id(const void *a, const void *b)
{
    const apicThreadInfo *aa = (const apicThreadInfo *)a;
    const apicThreadInfo *bb = (const apicThreadInfo *)b;
    if (aa->osId < bb->osId) return -1;
    if (aa->osId > bb->osId) return 1;
    return 0;
}


static int
__kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a, const void *b)
{
    const apicThreadInfo *aa = (const apicThreadInfo *)a;
    const apicThreadInfo *bb = (const apicThreadInfo *)b;
    if (aa->pkgId < bb->pkgId) return -1;
    if (aa->pkgId > bb->pkgId) return 1;
    if (aa->coreId < bb->coreId) return -1;
    if (aa->coreId > bb->coreId) return 1;
    if (aa->threadId < bb->threadId) return -1;
    if (aa->threadId > bb->threadId) return 1;
    return 0;
}


//
// On IA-32 architecture and Intel(R) 64 architecture, we attempt to use
// an algorithm which cycles through the available os threads, setting
// the current thread's affinity mask to that thread, and then retrieves
// the Apic Id for each thread context using the cpuid instruction.
//
static int
__kmp_affinity_create_apicid_map(AddrUnsPair **address2os,
  kmp_i18n_id_t *const msg_id)
{
    kmp_cpuid buf;
    int rc;
    *address2os = NULL;
    *msg_id = kmp_i18n_null;

    //
    // Check if cpuid leaf 4 is supported.
    //
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax < 4) {
        *msg_id = kmp_i18n_str_NoLeaf4Support;
        return -1;
    }

    //
    // The algorithm used starts by setting the affinity to each available
    // thread and retrieving info from the cpuid instruction, so if we are
    // not capable of calling __kmp_get_system_affinity() and
    // __kmp_set_system_affinity(), then we need to do something else - use
    // the defaults that we calculated from issuing cpuid without binding
    // to each proc.
    //
    if (! KMP_AFFINITY_CAPABLE()) {
        //
        // Hack to try and infer the machine topology using only the data
        // available from cpuid on the current thread, and __kmp_xproc.
        //
        KMP_ASSERT(__kmp_affinity_type == affinity_none);

        //
        // Get an upper bound on the number of threads per package using
        // cpuid(1).
        //
        // On some OS/chip combinations where HT is supported by the chip
        // but is disabled, this value will be 2 on a single core chip.
        // Usually, it will be 2 if HT is enabled and 1 if HT is disabled.
        //
        __kmp_x86_cpuid(1, 0, &buf);
        int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
        if (maxThreadsPerPkg == 0) {
            maxThreadsPerPkg = 1;
        }

        //
        // The num cores per pkg comes from cpuid(4).
        // 1 must be added to the encoded value.
        //
        // The author of cpu_count.cpp treated this only as an upper bound
        // on the number of cores, but I haven't seen any cases where it
        // was greater than the actual number of cores, so we will treat
        // it as exact in this block of code.
        //
        // First, we need to check if cpuid(4) is supported on this chip.
        // To see if cpuid(n) is supported, issue cpuid(0) and check if eax
        // has the value n or greater.
        //
        __kmp_x86_cpuid(0, 0, &buf);
        if (buf.eax >= 4) {
            __kmp_x86_cpuid(4, 0, &buf);
            nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
        }
        else {
            nCoresPerPkg = 1;
        }

        //
        // There is no way to reliably tell if HT is enabled without issuing
        // the cpuid instruction from every thread, and correlating the cpuid
        // info, so if the machine is not affinity capable, we assume that HT
        // is off. We have seen quite a few machines where maxThreadsPerPkg
        // is 2, yet the machine does not support HT.
        //
        // - Older OSes are usually found on machines with older chips, which
        //   do not support HT.
        //
        // - The performance penalty for mistakenly identifying a machine as
        //   HT when it isn't (which results in blocktime being incorrectly set
        //   to 0) is greater than the penalty for mistakenly identifying
        //   a machine as being 1 thread/core when it is really HT enabled
        //   (which results in blocktime being incorrectly set to a positive
        //   value).
        //
        __kmp_ncores = __kmp_xproc;
        nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
        __kmp_nThreadsPerCore = 1;
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffNotCapableUseLocCpuid, "KMP_AFFINITY");
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            if (__kmp_affinity_uniform_topology()) {
                KMP_INFORM(Uniform, "KMP_AFFINITY");
            } else {
                KMP_INFORM(NonUniform, "KMP_AFFINITY");
            }
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }
        return 0;
    }

    //
    // From here on, we can assume that it is safe to call
    // __kmp_get_system_affinity() and __kmp_set_system_affinity(),
    // even if __kmp_affinity_type = affinity_none.
    //

    //
    // Save the affinity mask for the current thread.
    //
    kmp_affin_mask_t *oldMask;
    KMP_CPU_ALLOC(oldMask);
    KMP_ASSERT(oldMask != NULL);
    __kmp_get_system_affinity(oldMask, TRUE);

    //
    // Run through each of the available contexts, binding the current thread
    // to it, and obtaining the pertinent information using the cpuid instr.
    //
    // The relevant information is:
    //
    // Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context
    //     has a unique Apic Id, which is of the form pkg# : core# : thread#.
    //
    // Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The
    //     value of this field determines the width of the core# + thread#
    //     fields in the Apic Id. It is also an upper bound on the number
    //     of threads per package, but it has been verified that situations
    //     happen where it is not exact. In particular, on certain OS/chip
    //     combinations where Intel(R) Hyper-Threading Technology is supported
    //     by the chip but has been disabled, the value of this field will be
    //     2 (for a single core chip). On other OS/chip combinations supporting
    //     Intel(R) Hyper-Threading Technology, the value of this field will be
    //     1 when Intel(R) Hyper-Threading Technology is disabled and 2 when
    //     it is enabled.
    //
    // Max Cores Per Pkg: Bits 26:31 of eax after issuing cpuid(4). The
    //     value of this field (+1) determines the width of the core# field in
    //     the Apic Id. The comments in "cpucount.cpp" say that this value is
    //     an upper bound, but the IA-32 architecture manual says that it is
    //     exactly the number of cores per package, and I haven't seen any
    //     case where it wasn't.
    //
    // From this information, deduce the package Id, core Id, and thread Id,
    // and set the corresponding fields in the apicThreadInfo struct.
    //
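    // For illustration: if maxThreadsPerPkg == 16 and maxCoresPerPkg == 8,
    // the widths computed below are widthCT = 4, widthC = 3, widthT = 1, so
    // an apicId of 0x5d decodes to pkgId = 0x5d >> 4 = 5,
    // coreId = (0x5d >> 1) & 0x7 = 6, and threadId = 0x5d & 0x1 = 1.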
    unsigned i;
    apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
      __kmp_avail_proc * sizeof(apicThreadInfo));
    unsigned nApics = 0;
    KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
        //
        // Skip this proc if it is not included in the machine model.
        //
        if (! KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
            continue;
        }
        KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);

        __kmp_affinity_bind_thread(i);
        threadInfo[nApics].osId = i;

        //
        // The apic id and max threads per pkg come from cpuid(1).
        //
        __kmp_x86_cpuid(1, 0, &buf);
        if (! ((buf.edx >> 9) & 1)) {
            __kmp_set_system_affinity(oldMask, TRUE);
            __kmp_free(threadInfo);
            KMP_CPU_FREE(oldMask);
            *msg_id = kmp_i18n_str_ApicNotPresent;
            return -1;
        }
        threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
        threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
        if (threadInfo[nApics].maxThreadsPerPkg == 0) {
            threadInfo[nApics].maxThreadsPerPkg = 1;
        }

        //
        // Max cores per pkg comes from cpuid(4).
        // 1 must be added to the encoded value.
        //
        // First, we need to check if cpuid(4) is supported on this chip.
        // To see if cpuid(n) is supported, issue cpuid(0) and check if eax
        // has the value n or greater.
        //
        __kmp_x86_cpuid(0, 0, &buf);
        if (buf.eax >= 4) {
            __kmp_x86_cpuid(4, 0, &buf);
            threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
        }
        else {
            threadInfo[nApics].maxCoresPerPkg = 1;
        }

        //
        // Infer the pkgId / coreId / threadId using only the info
        // obtained locally.
        //
        int widthCT = __kmp_cpuid_mask_width(
          threadInfo[nApics].maxThreadsPerPkg);
        threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

        int widthC = __kmp_cpuid_mask_width(
          threadInfo[nApics].maxCoresPerPkg);
        int widthT = widthCT - widthC;
        if (widthT < 0) {
            //
            // I've never seen this one happen, but I suppose it could, if
            // the cpuid instruction on a chip was really screwed up.
            // Make sure to restore the affinity mask before the tail call.
            //
            __kmp_set_system_affinity(oldMask, TRUE);
            __kmp_free(threadInfo);
            KMP_CPU_FREE(oldMask);
            *msg_id = kmp_i18n_str_InvalidCpuidInfo;
            return -1;
        }

        int maskC = (1 << widthC) - 1;
        threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT)
          & maskC;

        int maskT = (1 << widthT) - 1;
        threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;

        nApics++;
    }

    //
    // We've collected all the info we need.
    // Restore the old affinity mask for this thread.
    //
    __kmp_set_system_affinity(oldMask, TRUE);

    //
    // If there's only one thread context to bind to, form an Address object
    // with depth 1 and return immediately (or, if affinity is off, set
    // address2os to NULL and return).
    //
    // If it is configured to omit the package level when there is only a
    // single package, the logic at the end of this routine won't work if
    // there is only a single thread - it would try to form an Address
    // object with depth 0.
    //
    KMP_ASSERT(nApics > 0);
    if (nApics == 1) {
        __kmp_ncores = nPackages = 1;
        __kmp_nThreadsPerCore = nCoresPerPkg = 1;
        if (__kmp_affinity_verbose) {
            char buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

            KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
            if (__kmp_affinity_respect_mask) {
                KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
            } else {
                KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
            }
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            KMP_INFORM(Uniform, "KMP_AFFINITY");
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }

        if (__kmp_affinity_type == affinity_none) {
            __kmp_free(threadInfo);
            KMP_CPU_FREE(oldMask);
            return 0;
        }

        *address2os = (AddrUnsPair*)__kmp_allocate(sizeof(AddrUnsPair));
        Address addr(1);
        addr.labels[0] = threadInfo[0].pkgId;
        (*address2os)[0] = AddrUnsPair(addr, threadInfo[0].osId);

        if (__kmp_affinity_gran_levels < 0) {
            __kmp_affinity_gran_levels = 0;
        }

        if (__kmp_affinity_verbose) {
            __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
        }

        __kmp_free(threadInfo);
        KMP_CPU_FREE(oldMask);
        return 1;
    }

    //
    // Sort the threadInfo table by physical Id.
    //
    qsort(threadInfo, nApics, sizeof(*threadInfo),
      __kmp_affinity_cmp_apicThreadInfo_phys_id);

    //
    // The table is now sorted by pkgId / coreId / threadId, but we really
    // don't know the radix of any of the fields. pkgId's may be sparsely
    // assigned among the chips on a system. Although coreId's are usually
    // assigned [0 .. coresPerPkg-1] and threadId's are usually assigned
    // [0..threadsPerCore-1], we don't want to make any such assumptions.
    //
    // For that matter, we don't know what coresPerPkg and threadsPerCore
    // (or the total # packages) are at this point - we want to determine
    // that now. We only have an upper bound on the first two figures.
    //
    // We also perform a consistency check at this point: the values returned
    // by the cpuid instruction for any thread bound to a given package had
    // better return the same info for maxThreadsPerPkg and maxCoresPerPkg.
    //
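    // For illustration: for a sorted table from 2 packages x 2 cores x 2
    // threads, the loop below bumps threadCt within a core, coreCt on each
    // new coreId, and pkgCt on each new pkgId, ending with nPackages = 2,
    // nCoresPerPkg = 2, and __kmp_nThreadsPerCore = 2.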
    nPackages = 1;
    nCoresPerPkg = 1;
    __kmp_nThreadsPerCore = 1;
    unsigned nCores = 1;

    unsigned pkgCt = 1;     // to determine radii
    unsigned lastPkgId = threadInfo[0].pkgId;
    unsigned coreCt = 1;
    unsigned lastCoreId = threadInfo[0].coreId;
    unsigned threadCt = 1;
    unsigned lastThreadId = threadInfo[0].threadId;

    // intra-pkg consistency checks
    unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
    unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;

    for (i = 1; i < nApics; i++) {
        if (threadInfo[i].pkgId != lastPkgId) {
            nCores++;
            pkgCt++;
            lastPkgId = threadInfo[i].pkgId;
            if ((int)coreCt > nCoresPerPkg) nCoresPerPkg = coreCt;
            coreCt = 1;
            lastCoreId = threadInfo[i].coreId;
            if ((int)threadCt > __kmp_nThreadsPerCore) __kmp_nThreadsPerCore = threadCt;
            threadCt = 1;
            lastThreadId = threadInfo[i].threadId;

            //
            // This is a different package, so go on to the next iteration
            // without doing any consistency checks. Reset the consistency
            // check vars, though.
            //
            prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
            prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
            continue;
        }

        if (threadInfo[i].coreId != lastCoreId) {
            nCores++;
            coreCt++;
            lastCoreId = threadInfo[i].coreId;
            if ((int)threadCt > __kmp_nThreadsPerCore) __kmp_nThreadsPerCore = threadCt;
            threadCt = 1;
            lastThreadId = threadInfo[i].threadId;
        }
        else if (threadInfo[i].threadId != lastThreadId) {
            threadCt++;
            lastThreadId = threadInfo[i].threadId;
        }
        else {
            __kmp_free(threadInfo);
            KMP_CPU_FREE(oldMask);
            *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
            return -1;
        }

        //
        // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
        // fields agree between all the threads bound to a given package.
        //
        if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg)
          || (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
            __kmp_free(threadInfo);
            KMP_CPU_FREE(oldMask);
            *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
            return -1;
        }
    }
    nPackages = pkgCt;
    if ((int)coreCt > nCoresPerPkg) nCoresPerPkg = coreCt;
    if ((int)threadCt > __kmp_nThreadsPerCore) __kmp_nThreadsPerCore = threadCt;

    //
    // When affinity is off, this routine will still be called to set
    // __kmp_ncores, as well as __kmp_nThreadsPerCore,
    // nCoresPerPkg, & nPackages. Make sure all these vars are set
    // correctly, and return now if affinity is not enabled.
    //
    __kmp_ncores = nCores;
    if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

        KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
        if (__kmp_affinity_respect_mask) {
            KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
        } else {
            KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
        }
        KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
        if (__kmp_affinity_uniform_topology()) {
            KMP_INFORM(Uniform, "KMP_AFFINITY");
        } else {
            KMP_INFORM(NonUniform, "KMP_AFFINITY");
        }
        KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
          __kmp_nThreadsPerCore, __kmp_ncores);

    }

    if (__kmp_affinity_type == affinity_none) {
        __kmp_free(threadInfo);
        KMP_CPU_FREE(oldMask);
        return 0;
    }

    //
    // Now that we've determined the number of packages, the number of cores
    // per package, and the number of threads per core, we can construct the
    // data structure that is to be returned.
    //
    int pkgLevel = 0;
    int coreLevel = (nCoresPerPkg <= 1) ? -1 : 1;
    int threadLevel = (__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1);
    unsigned depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
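    // For illustration: a multi-core package with HT disabled yields
    // pkgLevel = 0, coreLevel = 1, threadLevel = -1, hence depth = 2 and each
    // Address built below carries just {pkgId, coreId}.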

    KMP_ASSERT(depth > 0);
    *address2os = (AddrUnsPair*)__kmp_allocate(sizeof(AddrUnsPair) * nApics);

    for (i = 0; i < nApics; ++i) {
        Address addr(depth);
        unsigned os = threadInfo[i].osId;
        int d = 0;

        if (pkgLevel >= 0) {
            addr.labels[d++] = threadInfo[i].pkgId;
        }
        if (coreLevel >= 0) {
            addr.labels[d++] = threadInfo[i].coreId;
        }
        if (threadLevel >= 0) {
            addr.labels[d++] = threadInfo[i].threadId;
        }
        (*address2os)[i] = AddrUnsPair(addr, os);
    }

    if (__kmp_affinity_gran_levels < 0) {
        //
        // Set the granularity level based on what levels are modeled
        // in the machine topology map.
        //
        __kmp_affinity_gran_levels = 0;
        if ((threadLevel >= 0)
          && (__kmp_affinity_gran > affinity_gran_thread)) {
            __kmp_affinity_gran_levels++;
        }
        if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
            __kmp_affinity_gran_levels++;
        }
        if ((pkgLevel >= 0) && (__kmp_affinity_gran > affinity_gran_package)) {
            __kmp_affinity_gran_levels++;
        }
    }

    if (__kmp_affinity_verbose) {
        __kmp_affinity_print_topology(*address2os, nApics, depth, pkgLevel,
          coreLevel, threadLevel);
    }

    __kmp_free(threadInfo);
    KMP_CPU_FREE(oldMask);
    return depth;
}


//
// Intel(R) microarchitecture code name Nehalem, Dunnington and later
// architectures support a newer interface for specifying the x2APIC Ids,
// based on cpuid leaf 11.
//
static int
__kmp_affinity_create_x2apicid_map(AddrUnsPair **address2os,
  kmp_i18n_id_t *const msg_id)
{
    kmp_cpuid buf;

    *address2os = NULL;
    *msg_id = kmp_i18n_null;

    //
    // Check to see if cpuid leaf 11 is supported.
    //
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax < 11) {
        *msg_id = kmp_i18n_str_NoLeaf11Support;
        return -1;
    }
    __kmp_x86_cpuid(11, 0, &buf);
    if (buf.ebx == 0) {
        *msg_id = kmp_i18n_str_NoLeaf11Support;
        return -1;
    }

    //
    // Find the number of levels in the machine topology. While we're at it,
    // get the default values for __kmp_nThreadsPerCore & nCoresPerPkg. We will
    // try to get more accurate values later by explicitly counting them,
    // but get reasonable defaults now, in case we return early.
    //
    int level;
    int threadLevel = -1;
    int coreLevel = -1;
    int pkgLevel = -1;
    __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;

    for (level = 0;; level++) {
        if (level > 31) {
            //
            // FIXME: Hack for DPD200163180
            //
            // If level is big then something went wrong -> exiting
            //
            // There could actually be 32 valid levels in the machine topology,
            // but so far, the only machine we have seen which does not exit
            // this loop before iteration 32 has fubar x2APIC settings.
            //
            // For now, just reject this case based upon loop trip count.
            //
            *msg_id = kmp_i18n_str_InvalidCpuidInfo;
            return -1;
        }
        __kmp_x86_cpuid(11, level, &buf);
        if (buf.ebx == 0) {
            if (pkgLevel < 0) {
                //
                // Will infer nPackages from __kmp_xproc
                //
                pkgLevel = level;
                level++;
            }
            break;
        }
        int kind = (buf.ecx >> 8) & 0xff;
        if (kind == 1) {
            //
            // SMT level
            //
            threadLevel = level;
            coreLevel = -1;
            pkgLevel = -1;
            __kmp_nThreadsPerCore = buf.ebx & 0xff;
            if (__kmp_nThreadsPerCore == 0) {
                *msg_id = kmp_i18n_str_InvalidCpuidInfo;
                return -1;
            }
        }
        else if (kind == 2) {
            //
            // core level
            //
            coreLevel = level;
            pkgLevel = -1;
            nCoresPerPkg = buf.ebx & 0xff;
            if (nCoresPerPkg == 0) {
                *msg_id = kmp_i18n_str_InvalidCpuidInfo;
                return -1;
            }
        }
        else {
            if (level <= 0) {
                *msg_id = kmp_i18n_str_InvalidCpuidInfo;
                return -1;
            }
            if (pkgLevel >= 0) {
                continue;
            }
            pkgLevel = level;
            nPackages = buf.ebx & 0xff;
            if (nPackages == 0) {
                *msg_id = kmp_i18n_str_InvalidCpuidInfo;
                return -1;
            }
        }
    }
    int depth = level;

    //
    // In the above loop, "level" was counted from the finest level (usually
    // thread) to the coarsest. The caller expects that we will place the
    // labels in (*address2os)[].first.labels[] in the inverse order, so
    // we need to invert the vars saying which level means what.
    //
    if (threadLevel >= 0) {
        threadLevel = depth - threadLevel - 1;
    }
    if (coreLevel >= 0) {
        coreLevel = depth - coreLevel - 1;
    }
    KMP_DEBUG_ASSERT(pkgLevel >= 0);
    pkgLevel = depth - pkgLevel - 1;
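    // For illustration: if the loop above saw SMT at level 0 and core at
    // level 1, then hit ebx == 0 at level 2, we get pkgLevel = 2 and
    // depth = 3; after the inversion, pkgLevel = 0, coreLevel = 1, and
    // threadLevel = 2 (coarsest-first, matching the label order).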

    //
    // The algorithm used starts by setting the affinity to each available
    // thread and retrieving info from the cpuid instruction, so if we are
    // not capable of calling __kmp_get_system_affinity() and
    // __kmp_set_system_affinity(), then we need to do something else - use
    // the defaults that we calculated from issuing cpuid without binding
    // to each proc.
    //
    if (! KMP_AFFINITY_CAPABLE())
    {
        //
        // Hack to try and infer the machine topology using only the data
        // available from cpuid on the current thread, and __kmp_xproc.
        //
        KMP_ASSERT(__kmp_affinity_type == affinity_none);

        __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
        nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffNotCapableUseLocCpuidL11, "KMP_AFFINITY");
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            if (__kmp_affinity_uniform_topology()) {
                KMP_INFORM(Uniform, "KMP_AFFINITY");
            } else {
                KMP_INFORM(NonUniform, "KMP_AFFINITY");
            }
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }
        return 0;
    }

    //
    // From here on, we can assume that it is safe to call
    // __kmp_get_system_affinity() and __kmp_set_system_affinity(),
    // even if __kmp_affinity_type = affinity_none.
    //

    //
    // Save the affinity mask for the current thread.
    //
    kmp_affin_mask_t *oldMask;
    KMP_CPU_ALLOC(oldMask);
    __kmp_get_system_affinity(oldMask, TRUE);

    //
    // Allocate the data structure to be returned.
    //
    AddrUnsPair *retval = (AddrUnsPair *)
      __kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);

    //
    // Run through each of the available contexts, binding the current thread
    // to it, and obtaining the pertinent information using the cpuid instr.
    //
    unsigned int proc;
    int nApics = 0;
    KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
        //
        // Skip this proc if it is not included in the machine model.
        //
        if (! KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
            continue;
        }
        KMP_DEBUG_ASSERT(nApics < __kmp_avail_proc);

        __kmp_affinity_bind_thread(proc);

        //
        // Extract the labels for each level in the machine topology map
        // from the Apic ID.
        //
        Address addr(depth);
        int prev_shift = 0;

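        // For illustration: with per-level shift widths of 1 (SMT) and 4
        // (core+SMT), an x2APIC id of 0x5d yields thread = 0x5d & 0x1,
        // core = (0x5d & 0xf) >> 1, and pkg = 0x5d >> 4, stored into
        // labels[] coarsest-first by the loop below.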
1526 for (level = 0; level < depth; level++) {
1527 __kmp_x86_cpuid(11, level, &buf);
1528 unsigned apicId = buf.edx;
1529 if (buf.ebx == 0) {
1530 if (level != depth - 1) {
1531 KMP_CPU_FREE(oldMask);
1532 *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
1533 return -1;
1534 }
1535 addr.labels[depth - level - 1] = apicId >> prev_shift;
1536 level++;
1537 break;
1538 }
1539 int shift = buf.eax & 0x1f;
1540 int mask = (1 << shift) - 1;
1541 addr.labels[depth - level - 1] = (apicId & mask) >> prev_shift;
1542 prev_shift = shift;
1543 }
1544 if (level != depth) {
1545 KMP_CPU_FREE(oldMask);
1546 *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
1547 return -1;
1548 }
1549
1550 retval[nApics] = AddrUnsPair(addr, proc);
1551 nApics++;
1552 }
1553
1554 //
1555 // We've collected all the info we need.
1556 // Restore the old affinity mask for this thread.
1557 //
1558 __kmp_set_system_affinity(oldMask, TRUE);
1559
1560 //
1561 // If there's only one thread context to bind to, return now.
1562 //
1563 KMP_ASSERT(nApics > 0);
1564 if (nApics == 1) {
1565 __kmp_ncores = nPackages = 1;
1566 __kmp_nThreadsPerCore = nCoresPerPkg = 1;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001567 if (__kmp_affinity_verbose) {
1568 char buf[KMP_AFFIN_MASK_PRINT_LEN];
1569 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
1570
1571 KMP_INFORM(AffUseGlobCpuidL11, "KMP_AFFINITY");
1572 if (__kmp_affinity_respect_mask) {
1573 KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
1574 } else {
1575 KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
1576 }
1577 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1578 KMP_INFORM(Uniform, "KMP_AFFINITY");
1579 KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1580 __kmp_nThreadsPerCore, __kmp_ncores);
1581 }
1582
1583 if (__kmp_affinity_type == affinity_none) {
1584 __kmp_free(retval);
1585 KMP_CPU_FREE(oldMask);
1586 return 0;
1587 }
1588
1589 //
1590 // Form an Address object which only includes the package level.
1591 //
1592 Address addr(1);
1593 addr.labels[0] = retval[0].first.labels[pkgLevel];
1594 retval[0].first = addr;
1595
1596 if (__kmp_affinity_gran_levels < 0) {
1597 __kmp_affinity_gran_levels = 0;
1598 }
1599
1600 if (__kmp_affinity_verbose) {
1601 __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
1602 }
1603
1604 *address2os = retval;
1605 KMP_CPU_FREE(oldMask);
1606 return 1;
1607 }
1608
1609 //
1610 // Sort the table by physical Id.
1611 //
1612 qsort(retval, nApics, sizeof(*retval), __kmp_affinity_cmp_Address_labels);
1613
1614 //
1615 // Find the radix at each of the levels.
1616 //
1617 unsigned *totals = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
1618 unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
1619 unsigned *maxCt = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
1620 unsigned *last = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
1621 for (level = 0; level < depth; level++) {
1622 totals[level] = 1;
1623 maxCt[level] = 1;
1624 counts[level] = 1;
1625 last[level] = retval[0].first.labels[level];
1626 }
1627
1628 //
1629 // From here on, the iteration variable "level" runs from the finest
1630 // level to the coarsest, i.e. we iterate forward through
1631 // (*address2os)[].first.labels[] - in the previous loops, we iterated
1632 // backwards.
1633 //
1634 for (proc = 1; (int)proc < nApics; proc++) {
1635 int level;
1636 for (level = 0; level < depth; level++) {
1637 if (retval[proc].first.labels[level] != last[level]) {
1638 int j;
1639 for (j = level + 1; j < depth; j++) {
1640 totals[j]++;
1641 counts[j] = 1;
1642 // Resetting maxCt[j] here (the commented-out line below) causes
1643 // incorrect topology information to be printed whenever the maximum
1644 // count for a level is encountered before a smaller count later in
1645 // the array. For example, if pkg0 has 4 cores and pkg1 has 2 cores,
1646 // maxCt[1] would end up as 2 when it must be 4.
1647 // TODO: verify that leaving this line commented out is always safe.
1648 //maxCt[j] = 1;
1649 last[j] = retval[proc].first.labels[j];
1650 }
1651 totals[level]++;
1652 counts[level]++;
1653 if (counts[level] > maxCt[level]) {
1654 maxCt[level] = counts[level];
1655 }
1656 last[level] = retval[proc].first.labels[level];
1657 break;
1658 }
1659 else if (level == depth - 1) {
1660 __kmp_free(last);
1661 __kmp_free(maxCt);
1662 __kmp_free(counts);
1663 __kmp_free(totals);
1664 __kmp_free(retval);
1665 KMP_CPU_FREE(oldMask);
1666 *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
1667 return -1;
1668 }
1669 }
1670 }
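    // Illustrative walk-through of the counting loop above: for six sorted
    // (pkg, core, thread) label tuples
    //     {0,0,0} {0,0,1} {0,1,0} {0,1,1} {1,0,0} {1,0,1}
    // (a hypothetical machine where pkg 0 has two cores and pkg 1 has one),
    // the loop ends with totals = {2, 3, 6} (2 packages, 3 cores, 6 threads)
    // and maxCt = {2, 2, 2} (at most 2 cores/package and 2 threads/core).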
1671
1672 //
1673 // When affinity is off, this routine will still be called to set
Andrey Churbanovf696c822015-01-27 16:55:43 +00001674 // __kmp_ncores, as well as __kmp_nThreadsPerCore,
Jim Cownie5e8470a2013-09-27 10:38:44 +00001675 // nCoresPerPkg, & nPackages. Make sure all these vars are set
1676 // correctly, and return if affinity is not enabled.
1677 //
1678 if (threadLevel >= 0) {
1679 __kmp_nThreadsPerCore = maxCt[threadLevel];
1680 }
1681 else {
1682 __kmp_nThreadsPerCore = 1;
1683 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00001684 nPackages = totals[pkgLevel];
1685
1686 if (coreLevel >= 0) {
1687 __kmp_ncores = totals[coreLevel];
1688 nCoresPerPkg = maxCt[coreLevel];
1689 }
1690 else {
1691 __kmp_ncores = nPackages;
1692 nCoresPerPkg = 1;
1693 }
1694
1695 //
1696 // Check to see if the machine topology is uniform
1697 //
1698 unsigned prod = maxCt[0];
1699 for (level = 1; level < depth; level++) {
1700 prod *= maxCt[level];
1701 }
1702 bool uniform = (prod == totals[level - 1]);
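    // Continuing the illustrative example above: prod = 2 * 2 * 2 = 8 while
    // totals[depth - 1] = 6, so that hypothetical machine is non-uniform.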
1703
1704 //
1705 // Print the machine topology summary.
1706 //
1707 if (__kmp_affinity_verbose) {
1708 char mask[KMP_AFFIN_MASK_PRINT_LEN];
1709 __kmp_affinity_print_mask(mask, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
1710
1711 KMP_INFORM(AffUseGlobCpuidL11, "KMP_AFFINITY");
1712 if (__kmp_affinity_respect_mask) {
1713 KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", mask);
1714 } else {
1715 KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", mask);
1716 }
1717 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1718 if (uniform) {
1719 KMP_INFORM(Uniform, "KMP_AFFINITY");
1720 } else {
1721 KMP_INFORM(NonUniform, "KMP_AFFINITY");
1722 }
1723
1724 kmp_str_buf_t buf;
1725 __kmp_str_buf_init(&buf);
1726
1727 __kmp_str_buf_print(&buf, "%d", totals[0]);
1728 for (level = 1; level <= pkgLevel; level++) {
1729 __kmp_str_buf_print(&buf, " x %d", maxCt[level]);
1730 }
1731 KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, nCoresPerPkg,
1732 __kmp_nThreadsPerCore, __kmp_ncores);
1733
1734 __kmp_str_buf_free(&buf);
1735 }
1736
1737 if (__kmp_affinity_type == affinity_none) {
1738 __kmp_free(last);
1739 __kmp_free(maxCt);
1740 __kmp_free(counts);
1741 __kmp_free(totals);
1742 __kmp_free(retval);
1743 KMP_CPU_FREE(oldMask);
1744 return 0;
1745 }
1746
1747 //
1748 // Find any levels with radix 1, and remove them from the map
1749 // (except for the package level).
1750 //
1751 int new_depth = 0;
1752 for (level = 0; level < depth; level++) {
1753 if ((maxCt[level] == 1) && (level != pkgLevel)) {
1754 continue;
1755 }
1756 new_depth++;
1757 }
1758
1759 //
1760 // If we are removing any levels, allocate a new vector to return,
1761 // and copy the relevant information to it.
1762 //
1763 if (new_depth != depth) {
1764 AddrUnsPair *new_retval = (AddrUnsPair *)__kmp_allocate(
1765 sizeof(AddrUnsPair) * nApics);
1766 for (proc = 0; (int)proc < nApics; proc++) {
1767 Address addr(new_depth);
1768 new_retval[proc] = AddrUnsPair(addr, retval[proc].second);
1769 }
1770 int new_level = 0;
Jonathan Peyton62f38402015-08-25 18:44:41 +00001771 int newPkgLevel = -1;
1772 int newCoreLevel = -1;
1773 int newThreadLevel = -1;
1774 int i;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001775 for (level = 0; level < depth; level++) {
Jonathan Peyton62f38402015-08-25 18:44:41 +00001776 if ((maxCt[level] == 1)
1777 && (level != pkgLevel)) {
1778 //
1779 // Remove this level. Never remove the package level
1780 //
1781 continue;
1782 }
1783 if (level == pkgLevel) {
1784 newPkgLevel = level;
1785 }
1786 if (level == coreLevel) {
1787 newCoreLevel = level;
1788 }
1789 if (level == threadLevel) {
1790 newThreadLevel = level;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001791 }
1792 for (proc = 0; (int)proc < nApics; proc++) {
1793 new_retval[proc].first.labels[new_level]
1794 = retval[proc].first.labels[level];
1795 }
1796 new_level++;
1797 }
1798
1799 __kmp_free(retval);
1800 retval = new_retval;
1801 depth = new_depth;
Jonathan Peyton62f38402015-08-25 18:44:41 +00001802 pkgLevel = newPkgLevel;
1803 coreLevel = newCoreLevel;
1804 threadLevel = newThreadLevel;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001805 }
1806
1807 if (__kmp_affinity_gran_levels < 0) {
1808 //
1809 // Set the granularity level based on what levels are modeled
1810 // in the machine topology map.
1811 //
1812 __kmp_affinity_gran_levels = 0;
1813 if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
1814 __kmp_affinity_gran_levels++;
1815 }
1816 if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
1817 __kmp_affinity_gran_levels++;
1818 }
1819 if (__kmp_affinity_gran > affinity_gran_package) {
1820 __kmp_affinity_gran_levels++;
1821 }
1822 }
1823
1824 if (__kmp_affinity_verbose) {
1825 __kmp_affinity_print_topology(retval, nApics, depth, pkgLevel,
1826 coreLevel, threadLevel);
1827 }
1828
1829 __kmp_free(last);
1830 __kmp_free(maxCt);
1831 __kmp_free(counts);
1832 __kmp_free(totals);
1833 KMP_CPU_FREE(oldMask);
1834 *address2os = retval;
1835 return depth;
1836}
1837
1838
1839# endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1840
1841
1842#define osIdIndex 0
1843#define threadIdIndex 1
1844#define coreIdIndex 2
1845#define pkgIdIndex 3
1846#define nodeIdIndex 4
1847
1848typedef unsigned *ProcCpuInfo;
1849static unsigned maxIndex = pkgIdIndex;
1850
1851
1852static int
1853__kmp_affinity_cmp_ProcCpuInfo_os_id(const void *a, const void *b)
1854{
1855 const unsigned *aa = (const unsigned *)a;
1856 const unsigned *bb = (const unsigned *)b;
1857 if (aa[osIdIndex] < bb[osIdIndex]) return -1;
1858 if (aa[osIdIndex] > bb[osIdIndex]) return 1;
1859 return 0;
1860};
1861
1862
1863static int
1864__kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a, const void *b)
1865{
1866 unsigned i;
1867 const unsigned *aa = *((const unsigned **)a);
1868 const unsigned *bb = *((const unsigned **)b);
1869 for (i = maxIndex; ; i--) {
1870 if (aa[i] < bb[i]) return -1;
1871 if (aa[i] > bb[i]) return 1;
1872 if (i == osIdIndex) break;
1873 }
1874 return 0;
1875}
1876
1877
1878//
1879// Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
1880// affinity map.
1881//
1882static int
1883__kmp_affinity_create_cpuinfo_map(AddrUnsPair **address2os, int *line,
1884 kmp_i18n_id_t *const msg_id, FILE *f)
1885{
1886 *address2os = NULL;
1887 *msg_id = kmp_i18n_null;
1888
1889 //
 1890 // Scan the file, count the number of "processor" (osId) fields,
Alp Toker8f2d3f02014-02-24 10:40:15 +00001891 // and find the highest value of <n> for a node_<n> field.
Jim Cownie5e8470a2013-09-27 10:38:44 +00001892 //
1893 char buf[256];
1894 unsigned num_records = 0;
1895 while (! feof(f)) {
1896 buf[sizeof(buf) - 1] = 1;
1897 if (! fgets(buf, sizeof(buf), f)) {
1898 //
 1899 // Read error, presumably due to EOF.
1900 //
1901 break;
1902 }
1903
1904 char s1[] = "processor";
1905 if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
1906 num_records++;
1907 continue;
1908 }
1909
1910 //
1911 // FIXME - this will match "node_<n> <garbage>"
1912 //
1913 unsigned level;
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00001914 if (KMP_SSCANF(buf, "node_%d id", &level) == 1) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00001915 if (nodeIdIndex + level >= maxIndex) {
1916 maxIndex = nodeIdIndex + level;
1917 }
1918 continue;
1919 }
1920 }
1921
1922 //
1923 // Check for empty file / no valid processor records, or too many.
1924 // The number of records can't exceed the number of valid bits in the
1925 // affinity mask.
1926 //
1927 if (num_records == 0) {
1928 *line = 0;
1929 *msg_id = kmp_i18n_str_NoProcRecords;
1930 return -1;
1931 }
1932 if (num_records > (unsigned)__kmp_xproc) {
1933 *line = 0;
1934 *msg_id = kmp_i18n_str_TooManyProcRecords;
1935 return -1;
1936 }
1937
1938 //
 1939 // Set the file pointer back to the beginning, so that we can scan the
 1940 // file again, this time performing a full parse of the data.
 1941 // Allocate a vector of ProcCpuInfo objects, where we will place the data.
1942 // Adding an extra element at the end allows us to remove a lot of extra
1943 // checks for termination conditions.
1944 //
1945 if (fseek(f, 0, SEEK_SET) != 0) {
1946 *line = 0;
1947 *msg_id = kmp_i18n_str_CantRewindCpuinfo;
1948 return -1;
1949 }
1950
1951 //
1952 // Allocate the array of records to store the proc info in. The dummy
1953 // element at the end makes the logic in filling them out easier to code.
1954 //
1955 unsigned **threadInfo = (unsigned **)__kmp_allocate((num_records + 1)
1956 * sizeof(unsigned *));
1957 unsigned i;
1958 for (i = 0; i <= num_records; i++) {
1959 threadInfo[i] = (unsigned *)__kmp_allocate((maxIndex + 1)
1960 * sizeof(unsigned));
1961 }
1962
1963#define CLEANUP_THREAD_INFO \
1964 for (i = 0; i <= num_records; i++) { \
1965 __kmp_free(threadInfo[i]); \
1966 } \
1967 __kmp_free(threadInfo);
1968
1969 //
1970 // A value of UINT_MAX means that we didn't find the field
1971 //
1972 unsigned __index;
1973
1974#define INIT_PROC_INFO(p) \
1975 for (__index = 0; __index <= maxIndex; __index++) { \
1976 (p)[__index] = UINT_MAX; \
1977 }
1978
1979 for (i = 0; i <= num_records; i++) {
1980 INIT_PROC_INFO(threadInfo[i]);
1981 }
1982
1983 unsigned num_avail = 0;
1984 *line = 0;
1985 while (! feof(f)) {
1986 //
1987 // Create an inner scoping level, so that all the goto targets at the
1988 // end of the loop appear in an outer scoping level. This avoids
1989 // warnings about jumping past an initialization to a target in the
1990 // same block.
1991 //
1992 {
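            // Sentinel byte: fgets() writes its terminating '\0' over the
            // last byte of the buffer only when a line fills it completely,
            // so a cleared sentinel below marks an over-long line.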
1993 buf[sizeof(buf) - 1] = 1;
1994 bool long_line = false;
1995 if (! fgets(buf, sizeof(buf), f)) {
1996 //
 1997 // Read error, presumably due to EOF.
 1998 //
 1999 // If there is valid data in threadInfo[num_avail], then fake
 2000 // a blank line to ensure that the last address gets parsed.
2001 //
2002 bool valid = false;
2003 for (i = 0; i <= maxIndex; i++) {
2004 if (threadInfo[num_avail][i] != UINT_MAX) {
2005 valid = true;
2006 }
2007 }
2008 if (! valid) {
2009 break;
2010 }
2011 buf[0] = 0;
2012 } else if (!buf[sizeof(buf) - 1]) {
2013 //
2014 // The line is longer than the buffer. Set a flag and don't
2015 // emit an error if we were going to ignore the line, anyway.
2016 //
2017 long_line = true;
2018
2019#define CHECK_LINE \
2020 if (long_line) { \
2021 CLEANUP_THREAD_INFO; \
2022 *msg_id = kmp_i18n_str_LongLineCpuinfo; \
2023 return -1; \
2024 }
2025 }
2026 (*line)++;
2027
2028 char s1[] = "processor";
2029 if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2030 CHECK_LINE;
2031 char *p = strchr(buf + sizeof(s1) - 1, ':');
2032 unsigned val;
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002033 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) goto no_val;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002034 if (threadInfo[num_avail][osIdIndex] != UINT_MAX) goto dup_field;
2035 threadInfo[num_avail][osIdIndex] = val;
Jim Cownie181b4bb2013-12-23 17:28:57 +00002036#if KMP_OS_LINUX && USE_SYSFS_INFO
2037 char path[256];
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002038 KMP_SNPRINTF(path, sizeof(path),
Jim Cownie181b4bb2013-12-23 17:28:57 +00002039 "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
2040 threadInfo[num_avail][osIdIndex]);
2041 __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);
2042
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002043 KMP_SNPRINTF(path, sizeof(path),
Jim Cownie181b4bb2013-12-23 17:28:57 +00002044 "/sys/devices/system/cpu/cpu%u/topology/core_id",
2045 threadInfo[num_avail][osIdIndex]);
2046 __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002047 continue;
Jim Cownie181b4bb2013-12-23 17:28:57 +00002048#else
Jim Cownie5e8470a2013-09-27 10:38:44 +00002049 }
2050 char s2[] = "physical id";
2051 if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
2052 CHECK_LINE;
2053 char *p = strchr(buf + sizeof(s2) - 1, ':');
2054 unsigned val;
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002055 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) goto no_val;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002056 if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX) goto dup_field;
2057 threadInfo[num_avail][pkgIdIndex] = val;
2058 continue;
2059 }
2060 char s3[] = "core id";
2061 if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
2062 CHECK_LINE;
2063 char *p = strchr(buf + sizeof(s3) - 1, ':');
2064 unsigned val;
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002065 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) goto no_val;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002066 if (threadInfo[num_avail][coreIdIndex] != UINT_MAX) goto dup_field;
2067 threadInfo[num_avail][coreIdIndex] = val;
2068 continue;
Jim Cownie181b4bb2013-12-23 17:28:57 +00002069#endif // KMP_OS_LINUX && USE_SYSFS_INFO
Jim Cownie5e8470a2013-09-27 10:38:44 +00002070 }
2071 char s4[] = "thread id";
2072 if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
2073 CHECK_LINE;
2074 char *p = strchr(buf + sizeof(s4) - 1, ':');
2075 unsigned val;
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002076 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) goto no_val;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002077 if (threadInfo[num_avail][threadIdIndex] != UINT_MAX) goto dup_field;
2078 threadInfo[num_avail][threadIdIndex] = val;
2079 continue;
2080 }
2081 unsigned level;
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002082 if (KMP_SSCANF(buf, "node_%d id", &level) == 1) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00002083 CHECK_LINE;
2084 char *p = strchr(buf + sizeof(s4) - 1, ':');
2085 unsigned val;
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002086 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) goto no_val;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002087 KMP_ASSERT(nodeIdIndex + level <= maxIndex);
2088 if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX) goto dup_field;
2089 threadInfo[num_avail][nodeIdIndex + level] = val;
2090 continue;
2091 }
2092
2093 //
2094 // We didn't recognize the leading token on the line.
2095 // There are lots of leading tokens that we don't recognize -
2096 // if the line isn't empty, go on to the next line.
2097 //
2098 if ((*buf != 0) && (*buf != '\n')) {
2099 //
2100 // If the line is longer than the buffer, read characters
2101 // until we find a newline.
2102 //
2103 if (long_line) {
2104 int ch;
2105 while (((ch = fgetc(f)) != EOF) && (ch != '\n'));
2106 }
2107 continue;
2108 }
2109
2110 //
2111 // A newline has signalled the end of the processor record.
2112 // Check that there aren't too many procs specified.
2113 //
Jim Cownie4cc4bb42014-10-07 16:25:50 +00002114 if ((int)num_avail == __kmp_xproc) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00002115 CLEANUP_THREAD_INFO;
2116 *msg_id = kmp_i18n_str_TooManyEntries;
2117 return -1;
2118 }
2119
2120 //
2121 // Check for missing fields. The osId field must be there, and we
2122 // currently require that the physical id field is specified, also.
2123 //
2124 if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
2125 CLEANUP_THREAD_INFO;
2126 *msg_id = kmp_i18n_str_MissingProcField;
2127 return -1;
2128 }
2129 if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
2130 CLEANUP_THREAD_INFO;
2131 *msg_id = kmp_i18n_str_MissingPhysicalIDField;
2132 return -1;
2133 }
2134
2135 //
2136 // Skip this proc if it is not included in the machine model.
2137 //
Jonathan Peytonc5304aa2016-06-13 21:28:03 +00002138 if (! KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex], __kmp_affin_fullMask)) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00002139 INIT_PROC_INFO(threadInfo[num_avail]);
2140 continue;
2141 }
2142
2143 //
2144 // We have a successful parse of this proc's info.
2145 // Increment the counter, and prepare for the next proc.
2146 //
2147 num_avail++;
2148 KMP_ASSERT(num_avail <= num_records);
2149 INIT_PROC_INFO(threadInfo[num_avail]);
2150 }
2151 continue;
2152
2153 no_val:
2154 CLEANUP_THREAD_INFO;
2155 *msg_id = kmp_i18n_str_MissingValCpuinfo;
2156 return -1;
2157
2158 dup_field:
2159 CLEANUP_THREAD_INFO;
2160 *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
2161 return -1;
2162 }
2163 *line = 0;
2164
2165# if KMP_MIC && REDUCE_TEAM_SIZE
2166 unsigned teamSize = 0;
2167# endif // KMP_MIC && REDUCE_TEAM_SIZE
2168
2169 // check for num_records == __kmp_xproc ???
2170
2171 //
2172 // If there's only one thread context to bind to, form an Address object
2173 // with depth 1 and return immediately (or, if affinity is off, set
2174 // address2os to NULL and return).
2175 //
2176 // If it is configured to omit the package level when there is only a
2177 // single package, the logic at the end of this routine won't work if
2178 // there is only a single thread - it would try to form an Address
2179 // object with depth 0.
2180 //
2181 KMP_ASSERT(num_avail > 0);
2182 KMP_ASSERT(num_avail <= num_records);
2183 if (num_avail == 1) {
2184 __kmp_ncores = 1;
2185 __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002186 if (__kmp_affinity_verbose) {
2187 if (! KMP_AFFINITY_CAPABLE()) {
2188 KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
2189 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2190 KMP_INFORM(Uniform, "KMP_AFFINITY");
2191 }
2192 else {
2193 char buf[KMP_AFFIN_MASK_PRINT_LEN];
2194 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
Jonathan Peytonc5304aa2016-06-13 21:28:03 +00002195 __kmp_affin_fullMask);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002196 KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
2197 if (__kmp_affinity_respect_mask) {
2198 KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
2199 } else {
2200 KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
2201 }
2202 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2203 KMP_INFORM(Uniform, "KMP_AFFINITY");
2204 }
2205 int index;
2206 kmp_str_buf_t buf;
2207 __kmp_str_buf_init(&buf);
2208 __kmp_str_buf_print(&buf, "1");
2209 for (index = maxIndex - 1; index > pkgIdIndex; index--) {
2210 __kmp_str_buf_print(&buf, " x 1");
2211 }
2212 KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, 1, 1, 1);
2213 __kmp_str_buf_free(&buf);
2214 }
2215
2216 if (__kmp_affinity_type == affinity_none) {
2217 CLEANUP_THREAD_INFO;
2218 return 0;
2219 }
2220
2221 *address2os = (AddrUnsPair*)__kmp_allocate(sizeof(AddrUnsPair));
2222 Address addr(1);
2223 addr.labels[0] = threadInfo[0][pkgIdIndex];
2224 (*address2os)[0] = AddrUnsPair(addr, threadInfo[0][osIdIndex]);
2225
2226 if (__kmp_affinity_gran_levels < 0) {
2227 __kmp_affinity_gran_levels = 0;
2228 }
2229
2230 if (__kmp_affinity_verbose) {
2231 __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
2232 }
2233
2234 CLEANUP_THREAD_INFO;
2235 return 1;
2236 }
2237
2238 //
2239 // Sort the threadInfo table by physical Id.
2240 //
2241 qsort(threadInfo, num_avail, sizeof(*threadInfo),
2242 __kmp_affinity_cmp_ProcCpuInfo_phys_id);
2243
2244 //
2245 // The table is now sorted by pkgId / coreId / threadId, but we really
2246 // don't know the radix of any of the fields. pkgId's may be sparsely
2247 // assigned among the chips on a system. Although coreId's are usually
2248 // assigned [0 .. coresPerPkg-1] and threadId's are usually assigned
2249 // [0..threadsPerCore-1], we don't want to make any such assumptions.
2250 //
2251 // For that matter, we don't know what coresPerPkg and threadsPerCore
2252 // (or the total # packages) are at this point - we want to determine
2253 // that now. We only have an upper bound on the first two figures.
2254 //
2255 unsigned *counts = (unsigned *)__kmp_allocate((maxIndex + 1)
2256 * sizeof(unsigned));
2257 unsigned *maxCt = (unsigned *)__kmp_allocate((maxIndex + 1)
2258 * sizeof(unsigned));
2259 unsigned *totals = (unsigned *)__kmp_allocate((maxIndex + 1)
2260 * sizeof(unsigned));
2261 unsigned *lastId = (unsigned *)__kmp_allocate((maxIndex + 1)
2262 * sizeof(unsigned));
2263
2264 bool assign_thread_ids = false;
2265 unsigned threadIdCt;
2266 unsigned index;
2267
2268 restart_radix_check:
2269 threadIdCt = 0;
2270
2271 //
2272 // Initialize the counter arrays with data from threadInfo[0].
2273 //
2274 if (assign_thread_ids) {
2275 if (threadInfo[0][threadIdIndex] == UINT_MAX) {
2276 threadInfo[0][threadIdIndex] = threadIdCt++;
2277 }
2278 else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
2279 threadIdCt = threadInfo[0][threadIdIndex] + 1;
2280 }
2281 }
2282 for (index = 0; index <= maxIndex; index++) {
2283 counts[index] = 1;
2284 maxCt[index] = 1;
2285 totals[index] = 1;
 2286 lastId[index] = threadInfo[0][index];
2287 }
2288
2289 //
2290 // Run through the rest of the OS procs.
2291 //
2292 for (i = 1; i < num_avail; i++) {
2293 //
2294 // Find the most significant index whose id differs
2295 // from the id for the previous OS proc.
2296 //
2297 for (index = maxIndex; index >= threadIdIndex; index--) {
2298 if (assign_thread_ids && (index == threadIdIndex)) {
2299 //
2300 // Auto-assign the thread id field if it wasn't specified.
2301 //
2302 if (threadInfo[i][threadIdIndex] == UINT_MAX) {
2303 threadInfo[i][threadIdIndex] = threadIdCt++;
2304 }
2305
2306 //
 2307 // Apparently the thread id field was specified for some
2308 // entries and not others. Start the thread id counter
2309 // off at the next higher thread id.
2310 //
2311 else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
2312 threadIdCt = threadInfo[i][threadIdIndex] + 1;
2313 }
2314 }
2315 if (threadInfo[i][index] != lastId[index]) {
2316 //
2317 // Run through all indices which are less significant,
2318 // and reset the counts to 1.
2319 //
2320 // At all levels up to and including index, we need to
2321 // increment the totals and record the last id.
2322 //
2323 unsigned index2;
2324 for (index2 = threadIdIndex; index2 < index; index2++) {
2325 totals[index2]++;
2326 if (counts[index2] > maxCt[index2]) {
2327 maxCt[index2] = counts[index2];
2328 }
2329 counts[index2] = 1;
2330 lastId[index2] = threadInfo[i][index2];
2331 }
2332 counts[index]++;
2333 totals[index]++;
2334 lastId[index] = threadInfo[i][index];
2335
2336 if (assign_thread_ids && (index > threadIdIndex)) {
2337
2338# if KMP_MIC && REDUCE_TEAM_SIZE
2339 //
2340 // The default team size is the total #threads in the machine
2341 // minus 1 thread for every core that has 3 or more threads.
2342 //
2343 teamSize += ( threadIdCt <= 2 ) ? ( threadIdCt ) : ( threadIdCt - 1 );
2344# endif // KMP_MIC && REDUCE_TEAM_SIZE
2345
2346 //
2347 // Restart the thread counter, as we are on a new core.
2348 //
2349 threadIdCt = 0;
2350
2351 //
2352 // Auto-assign the thread id field if it wasn't specified.
2353 //
2354 if (threadInfo[i][threadIdIndex] == UINT_MAX) {
2355 threadInfo[i][threadIdIndex] = threadIdCt++;
2356 }
2357
2358 //
 2359 // Apparently the thread id field was specified for some
2360 // entries and not others. Start the thread id counter
2361 // off at the next higher thread id.
2362 //
2363 else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
2364 threadIdCt = threadInfo[i][threadIdIndex] + 1;
2365 }
2366 }
2367 break;
2368 }
2369 }
2370 if (index < threadIdIndex) {
2371 //
2372 // If thread ids were specified, it is an error if they are not
 2373 // unique. Also, check that we haven't already restarted the
2374 // loop (to be safe - shouldn't need to).
2375 //
2376 if ((threadInfo[i][threadIdIndex] != UINT_MAX)
2377 || assign_thread_ids) {
2378 __kmp_free(lastId);
2379 __kmp_free(totals);
2380 __kmp_free(maxCt);
2381 __kmp_free(counts);
2382 CLEANUP_THREAD_INFO;
2383 *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
2384 return -1;
2385 }
2386
2387 //
 2388 // If the thread ids were not specified and we see entries
 2389 // that are duplicates, start the loop over and
2390 // assign the thread ids manually.
2391 //
2392 assign_thread_ids = true;
2393 goto restart_radix_check;
2394 }
2395 }
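    // Illustrative scenario: if two records share the same physical id and
    // core id and neither has a "thread id" field, the first pass falls
    // through to index < threadIdIndex for the duplicate, sets
    // assign_thread_ids, and restarts; the second pass then labels the two
    // records thread 0 and thread 1 of that core via threadIdCt.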
2396
2397# if KMP_MIC && REDUCE_TEAM_SIZE
2398 //
2399 // The default team size is the total #threads in the machine
2400 // minus 1 thread for every core that has 3 or more threads.
2401 //
2402 teamSize += ( threadIdCt <= 2 ) ? ( threadIdCt ) : ( threadIdCt - 1 );
2403# endif // KMP_MIC && REDUCE_TEAM_SIZE
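    // Illustrative arithmetic: on a hypothetical machine with 60 cores and 4
    // hardware threads per core, each core contributes 4 - 1 = 3, so the
    // computed default team size would be 60 * 3 = 180 of the 240 threads.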
2404
2405 for (index = threadIdIndex; index <= maxIndex; index++) {
2406 if (counts[index] > maxCt[index]) {
2407 maxCt[index] = counts[index];
2408 }
2409 }
2410
2411 __kmp_nThreadsPerCore = maxCt[threadIdIndex];
2412 nCoresPerPkg = maxCt[coreIdIndex];
2413 nPackages = totals[pkgIdIndex];
2414
2415 //
2416 // Check to see if the machine topology is uniform
2417 //
2418 unsigned prod = totals[maxIndex];
2419 for (index = threadIdIndex; index < maxIndex; index++) {
2420 prod *= maxCt[index];
2421 }
2422 bool uniform = (prod == totals[threadIdIndex]);
2423
2424 //
2425 // When affinity is off, this routine will still be called to set
Andrey Churbanovf696c822015-01-27 16:55:43 +00002426 // __kmp_ncores, as well as __kmp_nThreadsPerCore,
Jim Cownie5e8470a2013-09-27 10:38:44 +00002427 // nCoresPerPkg, & nPackages. Make sure all these vars are set
2428 // correctly, and return now if affinity is not enabled.
2429 //
Jim Cownie5e8470a2013-09-27 10:38:44 +00002430 __kmp_ncores = totals[coreIdIndex];
2431
2432 if (__kmp_affinity_verbose) {
2433 if (! KMP_AFFINITY_CAPABLE()) {
2434 KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
2435 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2436 if (uniform) {
2437 KMP_INFORM(Uniform, "KMP_AFFINITY");
2438 } else {
2439 KMP_INFORM(NonUniform, "KMP_AFFINITY");
2440 }
2441 }
2442 else {
2443 char buf[KMP_AFFIN_MASK_PRINT_LEN];
Jonathan Peytonc5304aa2016-06-13 21:28:03 +00002444 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, __kmp_affin_fullMask);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002445 KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
2446 if (__kmp_affinity_respect_mask) {
2447 KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
2448 } else {
2449 KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
2450 }
2451 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2452 if (uniform) {
2453 KMP_INFORM(Uniform, "KMP_AFFINITY");
2454 } else {
2455 KMP_INFORM(NonUniform, "KMP_AFFINITY");
2456 }
2457 }
2458 kmp_str_buf_t buf;
2459 __kmp_str_buf_init(&buf);
2460
2461 __kmp_str_buf_print(&buf, "%d", totals[maxIndex]);
2462 for (index = maxIndex - 1; index >= pkgIdIndex; index--) {
2463 __kmp_str_buf_print(&buf, " x %d", maxCt[index]);
2464 }
2465 KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, maxCt[coreIdIndex],
2466 maxCt[threadIdIndex], __kmp_ncores);
2467
2468 __kmp_str_buf_free(&buf);
2469 }
2470
2471# if KMP_MIC && REDUCE_TEAM_SIZE
2472 //
2473 // Set the default team size.
2474 //
2475 if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
2476 __kmp_dflt_team_nth = teamSize;
2477 KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting __kmp_dflt_team_nth = %d\n",
2478 __kmp_dflt_team_nth));
2479 }
2480# endif // KMP_MIC && REDUCE_TEAM_SIZE
2481
2482 if (__kmp_affinity_type == affinity_none) {
2483 __kmp_free(lastId);
2484 __kmp_free(totals);
2485 __kmp_free(maxCt);
2486 __kmp_free(counts);
2487 CLEANUP_THREAD_INFO;
2488 return 0;
2489 }
2490
2491 //
2492 // Count the number of levels which have more nodes at that level than
 2493 // at the parent's level (with an implicit root node above the top
 2494 // level). This is equivalent to saying that there is at least
2495 // one node at this level which has a sibling. These levels are in the
2496 // map, and the package level is always in the map.
2497 //
2498 bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
2499 int level = 0;
2500 for (index = threadIdIndex; index < maxIndex; index++) {
2501 KMP_ASSERT(totals[index] >= totals[index + 1]);
2502 inMap[index] = (totals[index] > totals[index + 1]);
2503 }
2504 inMap[maxIndex] = (totals[maxIndex] > 1);
2505 inMap[pkgIdIndex] = true;
2506
2507 int depth = 0;
2508 for (index = threadIdIndex; index <= maxIndex; index++) {
2509 if (inMap[index]) {
2510 depth++;
2511 }
2512 }
2513 KMP_ASSERT(depth > 0);
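    // Illustrative example: with maxIndex == pkgIdIndex and totals of
    // {16, 8, 2} at the thread, core, and package indices (16 threads,
    // 8 cores, 2 packages), every level has a sibling, so inMap is all
    // true and depth == 3. With one thread per core, totals would be
    // {8, 8, 2}, the thread level would drop out, and depth would be 2.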
2514
2515 //
2516 // Construct the data structure that is to be returned.
2517 //
2518 *address2os = (AddrUnsPair*)
2519 __kmp_allocate(sizeof(AddrUnsPair) * num_avail);
2520 int pkgLevel = -1;
2521 int coreLevel = -1;
2522 int threadLevel = -1;
2523
2524 for (i = 0; i < num_avail; ++i) {
2525 Address addr(depth);
2526 unsigned os = threadInfo[i][osIdIndex];
2527 int src_index;
2528 int dst_index = 0;
2529
2530 for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
2531 if (! inMap[src_index]) {
2532 continue;
2533 }
2534 addr.labels[dst_index] = threadInfo[i][src_index];
2535 if (src_index == pkgIdIndex) {
2536 pkgLevel = dst_index;
2537 }
2538 else if (src_index == coreIdIndex) {
2539 coreLevel = dst_index;
2540 }
2541 else if (src_index == threadIdIndex) {
2542 threadLevel = dst_index;
2543 }
2544 dst_index++;
2545 }
2546 (*address2os)[i] = AddrUnsPair(addr, os);
2547 }
2548
2549 if (__kmp_affinity_gran_levels < 0) {
2550 //
2551 // Set the granularity level based on what levels are modeled
2552 // in the machine topology map.
2553 //
2554 unsigned src_index;
2555 __kmp_affinity_gran_levels = 0;
2556 for (src_index = threadIdIndex; src_index <= maxIndex; src_index++) {
2557 if (! inMap[src_index]) {
2558 continue;
2559 }
2560 switch (src_index) {
2561 case threadIdIndex:
2562 if (__kmp_affinity_gran > affinity_gran_thread) {
2563 __kmp_affinity_gran_levels++;
2564 }
2565
2566 break;
2567 case coreIdIndex:
2568 if (__kmp_affinity_gran > affinity_gran_core) {
2569 __kmp_affinity_gran_levels++;
2570 }
2571 break;
2572
2573 case pkgIdIndex:
2574 if (__kmp_affinity_gran > affinity_gran_package) {
2575 __kmp_affinity_gran_levels++;
2576 }
2577 break;
2578 }
2579 }
2580 }
2581
2582 if (__kmp_affinity_verbose) {
2583 __kmp_affinity_print_topology(*address2os, num_avail, depth, pkgLevel,
2584 coreLevel, threadLevel);
2585 }
2586
2587 __kmp_free(inMap);
2588 __kmp_free(lastId);
2589 __kmp_free(totals);
2590 __kmp_free(maxCt);
2591 __kmp_free(counts);
2592 CLEANUP_THREAD_INFO;
2593 return depth;
2594}
2595
2596
2597//
2598// Create and return a table of affinity masks, indexed by OS thread ID.
2599// This routine handles OR'ing together all the affinity masks of threads
2600// that are sufficiently close, if granularity > fine.
2601//
2602static kmp_affin_mask_t *
2603__kmp_create_masks(unsigned *maxIndex, unsigned *numUnique,
2604 AddrUnsPair *address2os, unsigned numAddrs)
2605{
2606 //
2607 // First form a table of affinity masks in order of OS thread id.
2608 //
2609 unsigned depth;
2610 unsigned maxOsId;
2611 unsigned i;
2612
2613 KMP_ASSERT(numAddrs > 0);
2614 depth = address2os[0].first.depth;
2615
2616 maxOsId = 0;
2617 for (i = 0; i < numAddrs; i++) {
2618 unsigned osId = address2os[i].second;
2619 if (osId > maxOsId) {
2620 maxOsId = osId;
2621 }
2622 }
Jonathan Peyton01dcf362015-11-30 20:02:59 +00002623 kmp_affin_mask_t *osId2Mask;
2624 KMP_CPU_ALLOC_ARRAY(osId2Mask, (maxOsId+1));
Jim Cownie5e8470a2013-09-27 10:38:44 +00002625
2626 //
2627 // Sort the address2os table according to physical order. Doing so
2628 // will put all threads on the same core/package/node in consecutive
2629 // locations.
2630 //
2631 qsort(address2os, numAddrs, sizeof(*address2os),
2632 __kmp_affinity_cmp_Address_labels);
2633
2634 KMP_ASSERT(__kmp_affinity_gran_levels >= 0);
2635 if (__kmp_affinity_verbose && (__kmp_affinity_gran_levels > 0)) {
2636 KMP_INFORM(ThreadsMigrate, "KMP_AFFINITY", __kmp_affinity_gran_levels);
2637 }
2638 if (__kmp_affinity_gran_levels >= (int)depth) {
2639 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
2640 && (__kmp_affinity_type != affinity_none))) {
2641 KMP_WARNING(AffThreadsMayMigrate);
2642 }
2643 }
2644
2645 //
2646 // Run through the table, forming the masks for all threads on each
2647 // core. Threads on the same core will have identical "Address"
2648 // objects, not considering the last level, which must be the thread
2649 // id. All threads on a core will appear consecutively.
2650 //
2651 unsigned unique = 0;
2652 unsigned j = 0; // index of 1st thread on core
2653 unsigned leader = 0;
2654 Address *leaderAddr = &(address2os[0].first);
Jonathan Peyton01dcf362015-11-30 20:02:59 +00002655 kmp_affin_mask_t *sum;
2656 KMP_CPU_ALLOC_ON_STACK(sum);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002657 KMP_CPU_ZERO(sum);
2658 KMP_CPU_SET(address2os[0].second, sum);
2659 for (i = 1; i < numAddrs; i++) {
2660 //
Alp Toker8f2d3f02014-02-24 10:40:15 +00002661 // If this thread is sufficiently close to the leader (within the
Jim Cownie5e8470a2013-09-27 10:38:44 +00002662 // granularity setting), then set the bit for this os thread in the
2663 // affinity mask for this group, and go on to the next thread.
2664 //
2665 if (leaderAddr->isClose(address2os[i].first,
2666 __kmp_affinity_gran_levels)) {
2667 KMP_CPU_SET(address2os[i].second, sum);
2668 continue;
2669 }
2670
2671 //
2672 // For every thread in this group, copy the mask to the thread's
2673 // entry in the osId2Mask table. Mark the first address as a
2674 // leader.
2675 //
2676 for (; j < i; j++) {
2677 unsigned osId = address2os[j].second;
2678 KMP_DEBUG_ASSERT(osId <= maxOsId);
2679 kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
2680 KMP_CPU_COPY(mask, sum);
2681 address2os[j].first.leader = (j == leader);
2682 }
2683 unique++;
2684
2685 //
2686 // Start a new mask.
2687 //
2688 leader = i;
2689 leaderAddr = &(address2os[i].first);
2690 KMP_CPU_ZERO(sum);
2691 KMP_CPU_SET(address2os[i].second, sum);
2692 }
2693
2694 //
2695 // For every thread in last group, copy the mask to the thread's
2696 // entry in the osId2Mask table.
2697 //
2698 for (; j < i; j++) {
2699 unsigned osId = address2os[j].second;
2700 KMP_DEBUG_ASSERT(osId <= maxOsId);
2701 kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
2702 KMP_CPU_COPY(mask, sum);
2703 address2os[j].first.leader = (j == leader);
2704 }
2705 unique++;
Jonathan Peyton01dcf362015-11-30 20:02:59 +00002706 KMP_CPU_FREE_FROM_STACK(sum);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002707
2708 *maxIndex = maxOsId;
2709 *numUnique = unique;
2710 return osId2Mask;
2711}
2712
2713
2714//
2715// Stuff for the affinity proclist parsers. It's easier to declare these vars
 2716 // as file-static than to try to pass them through the calling sequence of
2717// the recursive-descent OMP_PLACES parser.
2718//
2719static kmp_affin_mask_t *newMasks;
2720static int numNewMasks;
2721static int nextNewMask;
2722
2723#define ADD_MASK(_mask) \
2724 { \
2725 if (nextNewMask >= numNewMasks) { \
Jonathan Peyton01dcf362015-11-30 20:02:59 +00002726 int i; \
Jim Cownie5e8470a2013-09-27 10:38:44 +00002727 numNewMasks *= 2; \
Jonathan Peyton01dcf362015-11-30 20:02:59 +00002728 kmp_affin_mask_t* temp; \
2729 KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks); \
2730 for(i=0;i<numNewMasks/2;i++) { \
2731 kmp_affin_mask_t* src = KMP_CPU_INDEX(newMasks, i); \
2732 kmp_affin_mask_t* dest = KMP_CPU_INDEX(temp, i); \
2733 KMP_CPU_COPY(dest, src); \
2734 } \
2735 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks/2); \
2736 newMasks = temp; \
Jim Cownie5e8470a2013-09-27 10:38:44 +00002737 } \
2738 KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask)); \
2739 nextNewMask++; \
2740 }
2741
2742#define ADD_MASK_OSID(_osId,_osId2Mask,_maxOsId) \
2743 { \
2744 if (((_osId) > _maxOsId) || \
Jim Cownie4cc4bb42014-10-07 16:25:50 +00002745 (! KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) { \
Jim Cownie5e8470a2013-09-27 10:38:44 +00002746 if (__kmp_affinity_verbose || (__kmp_affinity_warnings \
2747 && (__kmp_affinity_type != affinity_none))) { \
2748 KMP_WARNING(AffIgnoreInvalidProcID, _osId); \
2749 } \
2750 } \
2751 else { \
2752 ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId))); \
2753 } \
2754 }
2755
2756
2757//
2758// Re-parse the proclist (for the explicit affinity type), and form the list
2759// of affinity newMasks indexed by gtid.
2760//
2761static void
2762__kmp_affinity_process_proclist(kmp_affin_mask_t **out_masks,
2763 unsigned int *out_numMasks, const char *proclist,
2764 kmp_affin_mask_t *osId2Mask, int maxOsId)
2765{
Jonathan Peyton01dcf362015-11-30 20:02:59 +00002766 int i;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002767 const char *scan = proclist;
2768 const char *next = proclist;
2769
2770 //
2771 // We use malloc() for the temporary mask vector,
2772 // so that we can use realloc() to extend it.
2773 //
2774 numNewMasks = 2;
Jonathan Peyton01dcf362015-11-30 20:02:59 +00002775 KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002776 nextNewMask = 0;
Jonathan Peyton01dcf362015-11-30 20:02:59 +00002777 kmp_affin_mask_t *sumMask;
2778 KMP_CPU_ALLOC(sumMask);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002779 int setSize = 0;
2780
2781 for (;;) {
2782 int start, end, stride;
2783
2784 SKIP_WS(scan);
2785 next = scan;
2786 if (*next == '\0') {
2787 break;
2788 }
2789
2790 if (*next == '{') {
2791 int num;
2792 setSize = 0;
2793 next++; // skip '{'
2794 SKIP_WS(next);
2795 scan = next;
2796
2797 //
2798 // Read the first integer in the set.
2799 //
2800 KMP_ASSERT2((*next >= '0') && (*next <= '9'),
2801 "bad proclist");
2802 SKIP_DIGITS(next);
2803 num = __kmp_str_to_int(scan, *next);
2804 KMP_ASSERT2(num >= 0, "bad explicit proc list");
2805
2806 //
2807 // Copy the mask for that osId to the sum (union) mask.
2808 //
2809 if ((num > maxOsId) ||
2810 (! KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
2811 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
2812 && (__kmp_affinity_type != affinity_none))) {
2813 KMP_WARNING(AffIgnoreInvalidProcID, num);
2814 }
2815 KMP_CPU_ZERO(sumMask);
2816 }
2817 else {
2818 KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
2819 setSize = 1;
2820 }
2821
2822 for (;;) {
2823 //
2824 // Check for end of set.
2825 //
2826 SKIP_WS(next);
2827 if (*next == '}') {
2828 next++; // skip '}'
2829 break;
2830 }
2831
2832 //
2833 // Skip optional comma.
2834 //
2835 if (*next == ',') {
2836 next++;
2837 }
2838 SKIP_WS(next);
2839
2840 //
2841 // Read the next integer in the set.
2842 //
2843 scan = next;
2844 KMP_ASSERT2((*next >= '0') && (*next <= '9'),
2845 "bad explicit proc list");
2846
2847 SKIP_DIGITS(next);
2848 num = __kmp_str_to_int(scan, *next);
2849 KMP_ASSERT2(num >= 0, "bad explicit proc list");
2850
2851 //
2852 // Add the mask for that osId to the sum mask.
2853 //
2854 if ((num > maxOsId) ||
2855 (! KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
2856 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
2857 && (__kmp_affinity_type != affinity_none))) {
2858 KMP_WARNING(AffIgnoreInvalidProcID, num);
2859 }
2860 }
2861 else {
2862 KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
2863 setSize++;
2864 }
2865 }
2866 if (setSize > 0) {
2867 ADD_MASK(sumMask);
2868 }
2869
2870 SKIP_WS(next);
2871 if (*next == ',') {
2872 next++;
2873 }
2874 scan = next;
2875 continue;
2876 }
2877
2878 //
2879 // Read the first integer.
2880 //
2881 KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2882 SKIP_DIGITS(next);
2883 start = __kmp_str_to_int(scan, *next);
2884 KMP_ASSERT2(start >= 0, "bad explicit proc list");
2885 SKIP_WS(next);
2886
2887 //
2888 // If this isn't a range, then add a mask to the list and go on.
2889 //
2890 if (*next != '-') {
2891 ADD_MASK_OSID(start, osId2Mask, maxOsId);
2892
2893 //
2894 // Skip optional comma.
2895 //
2896 if (*next == ',') {
2897 next++;
2898 }
2899 scan = next;
2900 continue;
2901 }
2902
2903 //
2904 // This is a range. Skip over the '-' and read in the 2nd int.
2905 //
2906 next++; // skip '-'
2907 SKIP_WS(next);
2908 scan = next;
2909 KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2910 SKIP_DIGITS(next);
2911 end = __kmp_str_to_int(scan, *next);
2912 KMP_ASSERT2(end >= 0, "bad explicit proc list");
2913
2914 //
2915 // Check for a stride parameter
2916 //
2917 stride = 1;
2918 SKIP_WS(next);
2919 if (*next == ':') {
2920 //
 2921 // A stride is specified. Skip over the ':' and read the 3rd int.
2922 //
2923 int sign = +1;
2924 next++; // skip ':'
2925 SKIP_WS(next);
2926 scan = next;
2927 if (*next == '-') {
2928 sign = -1;
2929 next++;
2930 SKIP_WS(next);
2931 scan = next;
2932 }
2933 KMP_ASSERT2((*next >= '0') && (*next <= '9'),
2934 "bad explicit proc list");
2935 SKIP_DIGITS(next);
2936 stride = __kmp_str_to_int(scan, *next);
2937 KMP_ASSERT2(stride >= 0, "bad explicit proc list");
2938 stride *= sign;
2939 }
2940
2941 //
2942 // Do some range checks.
2943 //
2944 KMP_ASSERT2(stride != 0, "bad explicit proc list");
2945 if (stride > 0) {
2946 KMP_ASSERT2(start <= end, "bad explicit proc list");
2947 }
2948 else {
2949 KMP_ASSERT2(start >= end, "bad explicit proc list");
2950 }
2951 KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");
2952
2953 //
2954 // Add the mask for each OS proc # to the list.
2955 //
2956 if (stride > 0) {
2957 do {
2958 ADD_MASK_OSID(start, osId2Mask, maxOsId);
2959 start += stride;
2960 } while (start <= end);
2961 }
2962 else {
2963 do {
2964 ADD_MASK_OSID(start, osId2Mask, maxOsId);
2965 start += stride;
2966 } while (start >= end);
2967 }
2968
2969 //
2970 // Skip optional comma.
2971 //
2972 SKIP_WS(next);
2973 if (*next == ',') {
2974 next++;
2975 }
2976 scan = next;
2977 }
2978
2979 *out_numMasks = nextNewMask;
2980 if (nextNewMask == 0) {
2981 *out_masks = NULL;
Jonathan Peyton01dcf362015-11-30 20:02:59 +00002982 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002983 return;
2984 }
Jonathan Peyton01dcf362015-11-30 20:02:59 +00002985 KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
2986 for(i = 0; i < nextNewMask; i++) {
2987 kmp_affin_mask_t* src = KMP_CPU_INDEX(newMasks, i);
2988 kmp_affin_mask_t* dest = KMP_CPU_INDEX((*out_masks), i);
2989 KMP_CPU_COPY(dest, src);
2990 }
2991 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
2992 KMP_CPU_FREE(sumMask);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002993}
2994
2995
2996# if OMP_40_ENABLED
2997
2998/*-----------------------------------------------------------------------------
2999
3000Re-parse the OMP_PLACES proc id list, forming the newMasks for the different
3001 places. Again, here is the grammar:
3002
3003place_list := place
3004place_list := place , place_list
3005place := num
3006place := place : num
3007place := place : num : signed
3008 place := { subplace_list }
3009place := ! place // (lowest priority)
3010subplace_list := subplace
3011subplace_list := subplace , subplace_list
3012subplace := num
3013subplace := num : num
3014subplace := num : num : signed
3015signed := num
3016signed := + signed
3017signed := - signed
3018
3019-----------------------------------------------------------------------------*/
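/*
Examples (illustrative, assuming OS procs 0..7 are all available and each
osId2Mask entry is the singleton mask for that proc):

    "{0,1},{2,3}"   two places of two OS procs each
    "{0}:4:2"       four places: {0}, {2}, {4}, {6}
    "!{6,7}"        one place: the complement, {0,1,2,3,4,5}
*/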
3020
3021static void
3022__kmp_process_subplace_list(const char **scan, kmp_affin_mask_t *osId2Mask,
3023 int maxOsId, kmp_affin_mask_t *tempMask, int *setSize)
3024{
3025 const char *next;
3026
3027 for (;;) {
3028 int start, count, stride, i;
3029
3030 //
3031 // Read in the starting proc id
3032 //
3033 SKIP_WS(*scan);
3034 KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
3035 "bad explicit places list");
3036 next = *scan;
3037 SKIP_DIGITS(next);
3038 start = __kmp_str_to_int(*scan, *next);
3039 KMP_ASSERT(start >= 0);
3040 *scan = next;
3041
3042 //
3043 // valid follow sets are ',' ':' and '}'
3044 //
3045 SKIP_WS(*scan);
3046 if (**scan == '}' || **scan == ',') {
3047 if ((start > maxOsId) ||
3048 (! KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3049 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
3050 && (__kmp_affinity_type != affinity_none))) {
3051 KMP_WARNING(AffIgnoreInvalidProcID, start);
3052 }
3053 }
3054 else {
3055 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3056 (*setSize)++;
3057 }
3058 if (**scan == '}') {
3059 break;
3060 }
3061 (*scan)++; // skip ','
3062 continue;
3063 }
3064 KMP_ASSERT2(**scan == ':', "bad explicit places list");
3065 (*scan)++; // skip ':'
3066
3067 //
3068 // Read count parameter
3069 //
3070 SKIP_WS(*scan);
3071 KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
3072 "bad explicit places list");
3073 next = *scan;
3074 SKIP_DIGITS(next);
3075 count = __kmp_str_to_int(*scan, *next);
3076 KMP_ASSERT(count >= 0);
3077 *scan = next;
3078
3079 //
3080 // valid follow sets are ',' ':' and '}'
3081 //
3082 SKIP_WS(*scan);
3083 if (**scan == '}' || **scan == ',') {
3084 for (i = 0; i < count; i++) {
3085 if ((start > maxOsId) ||
3086 (! KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3087 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
3088 && (__kmp_affinity_type != affinity_none))) {
3089 KMP_WARNING(AffIgnoreInvalidProcID, start);
3090 }
3091 break; // don't proliferate warnings for large count
3092 }
3093 else {
3094 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3095 start++;
3096 (*setSize)++;
3097 }
3098 }
3099 if (**scan == '}') {
3100 break;
3101 }
3102 (*scan)++; // skip ','
3103 continue;
3104 }
3105 KMP_ASSERT2(**scan == ':', "bad explicit places list");
3106 (*scan)++; // skip ':'
3107
3108 //
3109 // Read stride parameter
3110 //
3111 int sign = +1;
3112 for (;;) {
3113 SKIP_WS(*scan);
3114 if (**scan == '+') {
3115 (*scan)++; // skip '+'
3116 continue;
3117 }
3118 if (**scan == '-') {
3119 sign *= -1;
3120 (*scan)++; // skip '-'
3121 continue;
3122 }
3123 break;
3124 }
3125 SKIP_WS(*scan);
3126 KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
3127 "bad explicit places list");
3128 next = *scan;
3129 SKIP_DIGITS(next);
3130 stride = __kmp_str_to_int(*scan, *next);
3131 KMP_ASSERT(stride >= 0);
3132 *scan = next;
3133 stride *= sign;
3134
3135 //
3136 // valid follow sets are ',' and '}'
3137 //
3138 SKIP_WS(*scan);
3139 if (**scan == '}' || **scan == ',') {
3140 for (i = 0; i < count; i++) {
3141 if ((start > maxOsId) ||
3142 (! KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3143 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
3144 && (__kmp_affinity_type != affinity_none))) {
3145 KMP_WARNING(AffIgnoreInvalidProcID, start);
3146 }
3147 break; // don't proliferate warnings for large count
3148 }
3149 else {
3150 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3151 start += stride;
3152 (*setSize)++;
3153 }
3154 }
3155 if (**scan == '}') {
3156 break;
3157 }
3158 (*scan)++; // skip ','
3159 continue;
3160 }
3161
3162 KMP_ASSERT2(0, "bad explicit places list");
3163 }
3164}
3165
3166
3167static void
3168__kmp_process_place(const char **scan, kmp_affin_mask_t *osId2Mask,
3169 int maxOsId, kmp_affin_mask_t *tempMask, int *setSize)
3170{
3171 const char *next;
3172
3173 //
3174 // valid follow sets are '{' '!' and num
3175 //
3176 SKIP_WS(*scan);
3177 if (**scan == '{') {
3178 (*scan)++; // skip '{'
3179 __kmp_process_subplace_list(scan, osId2Mask, maxOsId , tempMask,
3180 setSize);
3181 KMP_ASSERT2(**scan == '}', "bad explicit places list");
3182 (*scan)++; // skip '}'
3183 }
3184 else if (**scan == '!') {
Jonathan Peyton6778c732015-10-19 19:43:01 +00003185 (*scan)++; // skip '!'
Jim Cownie5e8470a2013-09-27 10:38:44 +00003186 __kmp_process_place(scan, osId2Mask, maxOsId, tempMask, setSize);
Jonathan Peyton01dcf362015-11-30 20:02:59 +00003187 KMP_CPU_COMPLEMENT(maxOsId, tempMask);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003188 }
3189 else if ((**scan >= '0') && (**scan <= '9')) {
3190 next = *scan;
3191 SKIP_DIGITS(next);
3192 int num = __kmp_str_to_int(*scan, *next);
3193 KMP_ASSERT(num >= 0);
3194 if ((num > maxOsId) ||
3195 (! KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3196 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
3197 && (__kmp_affinity_type != affinity_none))) {
3198 KMP_WARNING(AffIgnoreInvalidProcID, num);
3199 }
3200 }
3201 else {
3202 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
3203 (*setSize)++;
3204 }
3205 *scan = next; // skip num
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003206 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00003207 else {
3208 KMP_ASSERT2(0, "bad explicit places list");
3209 }
3210}
3211
3212
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003213//static void
3214void
Jim Cownie5e8470a2013-09-27 10:38:44 +00003215__kmp_affinity_process_placelist(kmp_affin_mask_t **out_masks,
3216 unsigned int *out_numMasks, const char *placelist,
3217 kmp_affin_mask_t *osId2Mask, int maxOsId)
3218{
Jonathan Peyton01dcf362015-11-30 20:02:59 +00003219 int i,j,count,stride,sign;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003220 const char *scan = placelist;
3221 const char *next = placelist;
3222
3223 numNewMasks = 2;
Jonathan Peyton01dcf362015-11-30 20:02:59 +00003224 KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003225 nextNewMask = 0;
3226
Jonathan Peyton01dcf362015-11-30 20:02:59 +00003227 // tempMask is modified based on the previous or initial
3228 // place to form the current place
3229 // previousMask contains the previous place
3230 kmp_affin_mask_t *tempMask;
3231 kmp_affin_mask_t *previousMask;
3232 KMP_CPU_ALLOC(tempMask);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003233 KMP_CPU_ZERO(tempMask);
Jonathan Peyton01dcf362015-11-30 20:02:59 +00003234 KMP_CPU_ALLOC(previousMask);
3235 KMP_CPU_ZERO(previousMask);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003236 int setSize = 0;
3237
3238 for (;;) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003239 __kmp_process_place(&scan, osId2Mask, maxOsId, tempMask, &setSize);
3240
3241 //
3242 // valid follow sets are ',' ':' and EOL
3243 //
3244 SKIP_WS(scan);
3245 if (*scan == '\0' || *scan == ',') {
3246 if (setSize > 0) {
3247 ADD_MASK(tempMask);
3248 }
3249 KMP_CPU_ZERO(tempMask);
3250 setSize = 0;
3251 if (*scan == '\0') {
3252 break;
3253 }
3254 scan++; // skip ','
3255 continue;
3256 }
3257
3258 KMP_ASSERT2(*scan == ':', "bad explicit places list");
3259 scan++; // skip ':'
3260
3261 //
3262 // Read count parameter
3263 //
3264 SKIP_WS(scan);
3265 KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
3266 "bad explicit places list");
3267 next = scan;
3268 SKIP_DIGITS(next);
Jonathan Peyton01dcf362015-11-30 20:02:59 +00003269 count = __kmp_str_to_int(scan, *next);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003270 KMP_ASSERT(count >= 0);
3271 scan = next;
3272
3273 //
3274 // valid follow sets are ',' ':' and EOL
3275 //
3276 SKIP_WS(scan);
3277 if (*scan == '\0' || *scan == ',') {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003278 stride = +1;
3279 }
3280 else {
3281 KMP_ASSERT2(*scan == ':', "bad explicit places list");
3282 scan++; // skip ':'
Jim Cownie5e8470a2013-09-27 10:38:44 +00003283
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003284 //
3285 // Read stride parameter
3286 //
Jonathan Peyton01dcf362015-11-30 20:02:59 +00003287 sign = +1;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003288 for (;;) {
3289 SKIP_WS(scan);
3290 if (*scan == '+') {
3291 scan++; // skip '+'
3292 continue;
3293 }
3294 if (*scan == '-') {
3295 sign *= -1;
3296 scan++; // skip '-'
3297 continue;
3298 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00003299 break;
3300 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00003301 SKIP_WS(scan);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003302 KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
3303 "bad explicit places list");
3304 next = scan;
3305 SKIP_DIGITS(next);
3306 stride = __kmp_str_to_int(scan, *next);
3307 KMP_DEBUG_ASSERT(stride >= 0);
3308 scan = next;
3309 stride *= sign;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003310 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00003311
Jonathan Peyton01dcf362015-11-30 20:02:59 +00003312 // Add places determined by initial_place : count : stride
3313 for (i = 0; i < count; i++) {
3314 if (setSize == 0) {
3315 break;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003316 }
Jonathan Peyton01dcf362015-11-30 20:02:59 +00003317 // Add the current place, then build the next place (tempMask) from that
3318 KMP_CPU_COPY(previousMask, tempMask);
3319 ADD_MASK(previousMask);
3320 KMP_CPU_ZERO(tempMask);
3321 setSize = 0;
3322 KMP_CPU_SET_ITERATE(j, previousMask) {
3323 if (! KMP_CPU_ISSET(j, previousMask)) {
3324 continue;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003325 }
Jonathan Peytonc5304aa2016-06-13 21:28:03 +00003326 if ((j+stride > maxOsId) || (j+stride < 0) ||
3327 (! KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
Jonathan Peyton01dcf362015-11-30 20:02:59 +00003328 (! KMP_CPU_ISSET(j+stride, KMP_CPU_INDEX(osId2Mask, j+stride)))) {
3329 if ((__kmp_affinity_verbose || (__kmp_affinity_warnings
3330 && (__kmp_affinity_type != affinity_none))) && i < count - 1) {
3331 KMP_WARNING(AffIgnoreInvalidProcID, j+stride);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003332 }
Jonathan Peytonc5304aa2016-06-13 21:28:03 +00003333 continue;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003334 }
Jonathan Peytonc5304aa2016-06-13 21:28:03 +00003335 KMP_CPU_SET(j+stride, tempMask);
3336 setSize++;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003337 }
3338 }
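        // e.g. (illustrative): starting from the place {0,1} with count = 3
        // and stride = 2, the loop above emits {0,1}, {2,3}, and {4,5},
        // assuming all of those OS procs are present in osId2Mask.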
3339 KMP_CPU_ZERO(tempMask);
3340 setSize = 0;
3341
3342 //
3343 // valid follow sets are ',' and EOL
3344 //
3345 SKIP_WS(scan);
3346 if (*scan == '\0') {
3347 break;
3348 }
3349 if (*scan == ',') {
3350 scan++; // skip ','
3351 continue;
3352 }
3353
3354 KMP_ASSERT2(0, "bad explicit places list");
3355 }
3356
3357 *out_numMasks = nextNewMask;
3358 if (nextNewMask == 0) {
3359 *out_masks = NULL;
Jonathan Peyton01dcf362015-11-30 20:02:59 +00003360 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003361 return;
3362 }
Jonathan Peyton01dcf362015-11-30 20:02:59 +00003363 KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3364 KMP_CPU_FREE(tempMask);
3365 KMP_CPU_FREE(previousMask);
3366 for(i = 0; i < nextNewMask; i++) {
3367 kmp_affin_mask_t* src = KMP_CPU_INDEX(newMasks, i);
3368 kmp_affin_mask_t* dest = KMP_CPU_INDEX((*out_masks), i);
3369 KMP_CPU_COPY(dest, src);
3370 }
3371 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003372}
3373
3374# endif /* OMP_40_ENABLED */
3375
3376#undef ADD_MASK
3377#undef ADD_MASK_OSID
3378
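//
// Restrict the machine topology to the subset requested through the
// __kmp_place_* globals (set from the KMP_PLACE_THREADS-style environment
// control; the exact syntax, e.g. "1s,4c,1t", is parsed elsewhere and is
// only illustrative here).  For example, on a uniform machine with
// nPackages = 2, nCoresPerPkg = 8 and __kmp_nThreadsPerCore = 2, requesting
// 1 socket, 4 cores and 1 thread per core keeps 4 of the 32 hardware
// threads and rewrites the global topology counts to match.
//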
static void
__kmp_apply_thread_places(AddrUnsPair **pAddr, int depth)
{
    if (__kmp_place_num_sockets == 0 &&
      __kmp_place_num_cores == 0 &&
      __kmp_place_num_threads_per_core == 0 )
        return;   // no topology limiting actions requested, exit
    if (__kmp_place_num_sockets == 0)
        __kmp_place_num_sockets = nPackages;    // use all available sockets
    if (__kmp_place_num_cores == 0)
        __kmp_place_num_cores = nCoresPerPkg;   // use all available cores
    if (__kmp_place_num_threads_per_core == 0 ||
      __kmp_place_num_threads_per_core > __kmp_nThreadsPerCore)
        __kmp_place_num_threads_per_core = __kmp_nThreadsPerCore; // use all HW contexts

    if ( !__kmp_affinity_uniform_topology() ) {
        KMP_WARNING( AffHWSubsetNonUniform );
        return; // don't support non-uniform topology
    }
    if ( depth != 3 ) {
        KMP_WARNING( AffHWSubsetNonThreeLevel );
        return; // don't support not-3-level topology
    }
    if (__kmp_place_socket_offset + __kmp_place_num_sockets > nPackages) {
        KMP_WARNING(AffHWSubsetManySockets);
        return;
    }
    if ( __kmp_place_core_offset + __kmp_place_num_cores > nCoresPerPkg ) {
        KMP_WARNING( AffHWSubsetManyCores );
        return;
    }

    AddrUnsPair *newAddr = (AddrUnsPair *)__kmp_allocate( sizeof(AddrUnsPair) *
      __kmp_place_num_sockets * __kmp_place_num_cores * __kmp_place_num_threads_per_core);

    int i, j, k, n_old = 0, n_new = 0;
    for (i = 0; i < nPackages; ++i)
        if (i < __kmp_place_socket_offset ||
          i >= __kmp_place_socket_offset + __kmp_place_num_sockets)
            n_old += nCoresPerPkg * __kmp_nThreadsPerCore; // skip not-requested socket
        else
            for (j = 0; j < nCoresPerPkg; ++j) // walk through requested socket
                if (j < __kmp_place_core_offset ||
                  j >= __kmp_place_core_offset + __kmp_place_num_cores)
                    n_old += __kmp_nThreadsPerCore; // skip not-requested core
                else
                    for (k = 0; k < __kmp_nThreadsPerCore; ++k) { // walk through requested core
                        if (k < __kmp_place_num_threads_per_core) {
                            newAddr[n_new] = (*pAddr)[n_old]; // collect requested thread's data
                            n_new++;
                        }
                        n_old++;
                    }
    KMP_DEBUG_ASSERT(n_old == nPackages * nCoresPerPkg * __kmp_nThreadsPerCore);
    KMP_DEBUG_ASSERT(n_new == __kmp_place_num_sockets * __kmp_place_num_cores *
      __kmp_place_num_threads_per_core);

    nPackages = __kmp_place_num_sockets;                      // correct nPackages
    nCoresPerPkg = __kmp_place_num_cores;                     // correct nCoresPerPkg
    __kmp_nThreadsPerCore = __kmp_place_num_threads_per_core; // correct __kmp_nThreadsPerCore
    __kmp_avail_proc = n_new;                                 // correct avail_proc
    __kmp_ncores = nPackages * __kmp_place_num_cores;         // correct ncores

    __kmp_free( *pAddr );
    *pAddr = newAddr;   // replace old topology with new one
}

static AddrUnsPair *address2os = NULL;
static int * procarr = NULL;
static int __kmp_aff_depth = 0;

static void
__kmp_aux_affinity_initialize(void)
{
    if (__kmp_affinity_masks != NULL) {
        KMP_ASSERT(__kmp_affin_fullMask != NULL);
        return;
    }

    //
    // Create the "full" mask - this defines all of the processors that we
    // consider to be in the machine model. If respect is set, then it is
    // the initialization thread's affinity mask. Otherwise, it is all
    // processors that we know about on the machine.
    //
    if (__kmp_affin_fullMask == NULL) {
        KMP_CPU_ALLOC(__kmp_affin_fullMask);
    }
    if (KMP_AFFINITY_CAPABLE()) {
        if (__kmp_affinity_respect_mask) {
            __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);

            //
            // Count the number of available processors.
            //
            unsigned i;
            __kmp_avail_proc = 0;
            KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
                if (! KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
                    continue;
                }
                __kmp_avail_proc++;
            }
            if (__kmp_avail_proc > __kmp_xproc) {
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none))) {
                    KMP_WARNING(ErrorInitializeAffinity);
                }
                __kmp_affinity_type = affinity_none;
                KMP_AFFINITY_DISABLE();
                return;
            }
        }
        else {
            __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
            __kmp_avail_proc = __kmp_xproc;
        }
    }
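    // For example, a process started under "taskset -c 0-3" with respect
    // enabled sees a 4-proc machine model here; without respect, the full
    // mask covers every proc the OS reports.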

    int depth = -1;
    kmp_i18n_id_t msg_id = kmp_i18n_null;

    //
    // For backward compatibility, setting KMP_CPUINFO_FILE =>
    // KMP_TOPOLOGY_METHOD=cpuinfo
    //
    if ((__kmp_cpuinfo_file != NULL) &&
      (__kmp_affinity_top_method == affinity_top_method_all)) {
        __kmp_affinity_top_method = affinity_top_method_cpuinfo;
    }

    if (__kmp_affinity_top_method == affinity_top_method_all) {
        //
        // In the default code path, errors are not fatal - we just try using
        // another method. We only emit a warning message if affinity is on,
        // or the verbose flag is set, and the nowarnings flag was not set.
        //
        const char *file_name = NULL;
        int line = 0;
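        // The discovery methods below are tried in order until one succeeds:
        // hwloc (when built in), the x2apic cpuid leaves, the legacy apic id,
        // /proc/cpuinfo, Windows processor groups, and finally a flat
        // OS-proc enumeration.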
# if KMP_USE_HWLOC
        if (depth < 0) {
            if (__kmp_affinity_verbose) {
                KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
            }
            if (!__kmp_hwloc_error) {
                depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
                if (depth == 0) {
                    KMP_ASSERT(__kmp_affinity_type == affinity_none);
                    KMP_ASSERT(address2os == NULL);
                    return;
                } else if (depth < 0 && __kmp_affinity_verbose) {
                    KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
                }
            } else if (__kmp_affinity_verbose) {
                KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
            }
        }
# endif

# if KMP_ARCH_X86 || KMP_ARCH_X86_64

        if (depth < 0) {
            if (__kmp_affinity_verbose) {
                KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
            }

            file_name = NULL;
            depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
            if (depth == 0) {
                KMP_ASSERT(__kmp_affinity_type == affinity_none);
                KMP_ASSERT(address2os == NULL);
                return;
            }

            if (depth < 0) {
                if (__kmp_affinity_verbose) {
                    if (msg_id != kmp_i18n_null) {
                        KMP_INFORM(AffInfoStrStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id),
                          KMP_I18N_STR(DecodingLegacyAPIC));
                    }
                    else {
                        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
                    }
                }

                file_name = NULL;
                depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
                if (depth == 0) {
                    KMP_ASSERT(__kmp_affinity_type == affinity_none);
                    KMP_ASSERT(address2os == NULL);
                    return;
                }
            }
        }

# endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

# if KMP_OS_LINUX

        if (depth < 0) {
            if (__kmp_affinity_verbose) {
                if (msg_id != kmp_i18n_null) {
                    KMP_INFORM(AffStrParseFilename, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id), "/proc/cpuinfo");
                }
                else {
                    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "/proc/cpuinfo");
                }
            }

            FILE *f = fopen("/proc/cpuinfo", "r");
            if (f == NULL) {
                msg_id = kmp_i18n_str_CantOpenCpuinfo;
            }
            else {
                file_name = "/proc/cpuinfo";
                depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
                fclose(f);
                if (depth == 0) {
                    KMP_ASSERT(__kmp_affinity_type == affinity_none);
                    KMP_ASSERT(address2os == NULL);
                    return;
                }
            }
        }

# endif /* KMP_OS_LINUX */

# if KMP_GROUP_AFFINITY

        if ((depth < 0) && (__kmp_num_proc_groups > 1)) {
            if (__kmp_affinity_verbose) {
                KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
            }

            depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
            KMP_ASSERT(depth != 0);
        }

# endif /* KMP_GROUP_AFFINITY */

        if (depth < 0) {
            if (__kmp_affinity_verbose && (msg_id != kmp_i18n_null)) {
                if (file_name == NULL) {
                    KMP_INFORM(UsingFlatOS, __kmp_i18n_catgets(msg_id));
                }
                else if (line == 0) {
                    KMP_INFORM(UsingFlatOSFile, file_name, __kmp_i18n_catgets(msg_id));
                }
                else {
                    KMP_INFORM(UsingFlatOSFileLine, file_name, line, __kmp_i18n_catgets(msg_id));
                }
            }
            // FIXME - print msg if msg_id = kmp_i18n_null ???

            file_name = "";
            depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
            if (depth == 0) {
                KMP_ASSERT(__kmp_affinity_type == affinity_none);
                KMP_ASSERT(address2os == NULL);
                return;
            }
            KMP_ASSERT(depth > 0);
            KMP_ASSERT(address2os != NULL);
        }
    }

    //
    // If the user has specified that a particular topology discovery method
    // is to be used, then we abort if that method fails. The exception is
    // group affinity, which might have been implicitly set.
    //

# if KMP_ARCH_X86 || KMP_ARCH_X86_64

    else if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffInfoStr, "KMP_AFFINITY",
              KMP_I18N_STR(Decodingx2APIC));
        }

        depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
        if (depth == 0) {
            KMP_ASSERT(__kmp_affinity_type == affinity_none);
            KMP_ASSERT(address2os == NULL);
            return;
        }
        if (depth < 0) {
            KMP_ASSERT(msg_id != kmp_i18n_null);
            KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
        }
    }
    else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffInfoStr, "KMP_AFFINITY",
              KMP_I18N_STR(DecodingLegacyAPIC));
        }

        depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
        if (depth == 0) {
            KMP_ASSERT(__kmp_affinity_type == affinity_none);
            KMP_ASSERT(address2os == NULL);
            return;
        }
        if (depth < 0) {
            KMP_ASSERT(msg_id != kmp_i18n_null);
            KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
        }
    }

# endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

    else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
        const char *filename;
        if (__kmp_cpuinfo_file != NULL) {
            filename = __kmp_cpuinfo_file;
        }
        else {
            filename = "/proc/cpuinfo";
        }

        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
        }

        FILE *f = fopen(filename, "r");
        if (f == NULL) {
            int code = errno;
            if (__kmp_cpuinfo_file != NULL) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG(CantOpenFileForReading, filename),
                    KMP_ERR(code),
                    KMP_HNT(NameComesFrom_CPUINFO_FILE),
                    __kmp_msg_null
                );
            }
            else {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG(CantOpenFileForReading, filename),
                    KMP_ERR(code),
                    __kmp_msg_null
                );
            }
        }
        int line = 0;
        depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
        fclose(f);
        if (depth < 0) {
            KMP_ASSERT(msg_id != kmp_i18n_null);
            if (line > 0) {
                KMP_FATAL(FileLineMsgExiting, filename, line, __kmp_i18n_catgets(msg_id));
            }
            else {
                KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
            }
        }
        if (__kmp_affinity_type == affinity_none) {
            KMP_ASSERT(depth == 0);
            KMP_ASSERT(address2os == NULL);
            return;
        }
    }

# if KMP_GROUP_AFFINITY

    else if (__kmp_affinity_top_method == affinity_top_method_group) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
        }

        depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
        KMP_ASSERT(depth != 0);
        if (depth < 0) {
            KMP_ASSERT(msg_id != kmp_i18n_null);
            KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
        }
    }

# endif /* KMP_GROUP_AFFINITY */

    else if (__kmp_affinity_top_method == affinity_top_method_flat) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffUsingFlatOS, "KMP_AFFINITY");
        }

        depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
        if (depth == 0) {
            KMP_ASSERT(__kmp_affinity_type == affinity_none);
            KMP_ASSERT(address2os == NULL);
            return;
        }
        // should not fail
        KMP_ASSERT(depth > 0);
        KMP_ASSERT(address2os != NULL);
    }

# if KMP_USE_HWLOC
    else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
        }
        depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
        if (depth == 0) {
            KMP_ASSERT(__kmp_affinity_type == affinity_none);
            KMP_ASSERT(address2os == NULL);
            return;
        }
    }
# endif // KMP_USE_HWLOC

    if (address2os == NULL) {
        if (KMP_AFFINITY_CAPABLE()
          && (__kmp_affinity_verbose || (__kmp_affinity_warnings
          && (__kmp_affinity_type != affinity_none)))) {
            KMP_WARNING(ErrorInitializeAffinity);
        }
        __kmp_affinity_type = affinity_none;
        KMP_AFFINITY_DISABLE();
        return;
    }

    __kmp_apply_thread_places(&address2os, depth);

    //
    // Create the table of masks, indexed by thread Id.
    //
    unsigned maxIndex;
    unsigned numUnique;
    kmp_affin_mask_t *osId2Mask = __kmp_create_masks(&maxIndex, &numUnique,
      address2os, __kmp_avail_proc);
    if (__kmp_affinity_gran_levels == 0) {
        KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
    }

    //
    // Set the childNums vector in all Address objects. This must be done
    // before we can sort using __kmp_affinity_cmp_Address_child_num(),
    // which takes into account the setting of __kmp_affinity_compact.
    //
    __kmp_affinity_assign_child_nums(address2os, __kmp_avail_proc);

    switch (__kmp_affinity_type) {

    case affinity_explicit:
        KMP_DEBUG_ASSERT(__kmp_affinity_proclist != NULL);
# if OMP_40_ENABLED
        if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel)
# endif
        {
            __kmp_affinity_process_proclist(&__kmp_affinity_masks,
              &__kmp_affinity_num_masks, __kmp_affinity_proclist, osId2Mask,
              maxIndex);
        }
# if OMP_40_ENABLED
        else {
            __kmp_affinity_process_placelist(&__kmp_affinity_masks,
              &__kmp_affinity_num_masks, __kmp_affinity_proclist, osId2Mask,
              maxIndex);
        }
# endif
        if (__kmp_affinity_num_masks == 0) {
            if (__kmp_affinity_verbose || (__kmp_affinity_warnings
              && (__kmp_affinity_type != affinity_none))) {
                KMP_WARNING(AffNoValidProcID);
            }
            __kmp_affinity_type = affinity_none;
            return;
        }
        break;

    //
    // The other affinity types rely on sorting the Addresses according
    // to some permutation of the machine topology tree. Set
    // __kmp_affinity_compact and __kmp_affinity_offset appropriately,
    // then jump to a common code fragment to do the sort and create
    // the array of affinity masks.
    //

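    // For intuition (illustrative only): on a depth-3 machine whose
    // Addresses are (package, core, thread), "compact" keeps topological
    // neighbors in adjacent masks, while "scatter" inverts the significance
    // of the levels so consecutive masks land on different packages first.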
    case affinity_logical:
        __kmp_affinity_compact = 0;
        if (__kmp_affinity_offset) {
            __kmp_affinity_offset = __kmp_nThreadsPerCore * __kmp_affinity_offset
              % __kmp_avail_proc;
        }
        goto sortAddresses;

    case affinity_physical:
        if (__kmp_nThreadsPerCore > 1) {
            __kmp_affinity_compact = 1;
            if (__kmp_affinity_compact >= depth) {
                __kmp_affinity_compact = 0;
            }
        } else {
            __kmp_affinity_compact = 0;
        }
        if (__kmp_affinity_offset) {
            __kmp_affinity_offset = __kmp_nThreadsPerCore * __kmp_affinity_offset
              % __kmp_avail_proc;
        }
        goto sortAddresses;

    case affinity_scatter:
        if (__kmp_affinity_compact >= depth) {
            __kmp_affinity_compact = 0;
        }
        else {
            __kmp_affinity_compact = depth - 1 - __kmp_affinity_compact;
        }
        goto sortAddresses;

    case affinity_compact:
        if (__kmp_affinity_compact >= depth) {
            __kmp_affinity_compact = depth - 1;
        }
        goto sortAddresses;

    case affinity_balanced:
        // Balanced works only for the case of a single package
        if( nPackages > 1 ) {
            if( __kmp_affinity_verbose || __kmp_affinity_warnings ) {
                KMP_WARNING( AffBalancedNotAvail, "KMP_AFFINITY" );
            }
            __kmp_affinity_type = affinity_none;
            return;
        } else if( __kmp_affinity_uniform_topology() ) {
            break;
        } else { // Non-uniform topology

            // Save the depth for further usage
            __kmp_aff_depth = depth;

            // Number of hyper threads per core in HT machine
            int nth_per_core = __kmp_nThreadsPerCore;

            int core_level;
            if( nth_per_core > 1 ) {
                core_level = depth - 2;
            } else {
                core_level = depth - 1;
            }
            int ncores = address2os[ __kmp_avail_proc - 1 ].first.labels[ core_level ] + 1;
            int nproc = nth_per_core * ncores;

            procarr = ( int * )__kmp_allocate( sizeof( int ) * nproc );
            for( int i = 0; i < nproc; i++ ) {
                procarr[ i ] = -1;
            }

            for( int i = 0; i < __kmp_avail_proc; i++ ) {
                int proc = address2os[ i ].second;
                // If depth == 3 then level=0 - package, level=1 - core, level=2 - thread.
                // If there is only one thread per core then depth == 2: level 0 - package,
                // level 1 - core.
                int level = depth - 1;

                // __kmp_nth_per_core == 1
                int thread = 0;
                int core = address2os[ i ].first.labels[ level ];
                // If the thread level exists, that is we have more than one thread context per core
                if( nth_per_core > 1 ) {
                    thread = address2os[ i ].first.labels[ level ] % nth_per_core;
                    core = address2os[ i ].first.labels[ level - 1 ];
                }
                procarr[ core * nth_per_core + thread ] = proc;
            }
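
            // At this point procarr maps each (core, hw-thread) slot to an
            // OS proc id, with -1 marking slots that have no usable proc on
            // this non-uniform machine (e.g. a core exposing fewer active
            // hardware threads than nth_per_core).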

            break;
        }

    sortAddresses:
        //
        // Allocate the gtid->affinity mask table.
        //
        if (__kmp_affinity_dups) {
            __kmp_affinity_num_masks = __kmp_avail_proc;
        }
        else {
            __kmp_affinity_num_masks = numUnique;
        }

# if OMP_40_ENABLED
        if ( ( __kmp_nested_proc_bind.bind_types[0] != proc_bind_intel )
          && ( __kmp_affinity_num_places > 0 )
          && ( (unsigned)__kmp_affinity_num_places < __kmp_affinity_num_masks ) ) {
            __kmp_affinity_num_masks = __kmp_affinity_num_places;
        }
# endif

        KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);

        //
        // Sort the address2os table according to the current setting of
        // __kmp_affinity_compact, then fill out __kmp_affinity_masks.
        //
        qsort(address2os, __kmp_avail_proc, sizeof(*address2os),
          __kmp_affinity_cmp_Address_child_num);
        {
            int i;
            unsigned j;
            for (i = 0, j = 0; i < __kmp_avail_proc; i++) {
                if ((! __kmp_affinity_dups) && (! address2os[i].first.leader)) {
                    continue;
                }
                unsigned osId = address2os[i].second;
                kmp_affin_mask_t *src = KMP_CPU_INDEX(osId2Mask, osId);
                kmp_affin_mask_t *dest
                  = KMP_CPU_INDEX(__kmp_affinity_masks, j);
                KMP_ASSERT(KMP_CPU_ISSET(osId, src));
                KMP_CPU_COPY(dest, src);
                if (++j >= __kmp_affinity_num_masks) {
                    break;
                }
            }
            KMP_DEBUG_ASSERT(j == __kmp_affinity_num_masks);
        }
        break;

    default:
        KMP_ASSERT2(0, "Unexpected affinity setting");
    }

    __kmp_free(osId2Mask);
    machine_hierarchy.init(address2os, __kmp_avail_proc);
}


void
__kmp_affinity_initialize(void)
{
    //
    // Much of the code above was written assuming that if a machine was not
    // affinity capable, then __kmp_affinity_type == affinity_none. We now
    // explicitly represent this as __kmp_affinity_type == affinity_disabled.
    //
    // There are too many checks for __kmp_affinity_type == affinity_none
    // in this code. Instead of trying to change them all, check if
    // __kmp_affinity_type == affinity_disabled, and if so, slam it with
    // affinity_none, call the real initialization routine, then restore
    // __kmp_affinity_type to affinity_disabled.
    //
    int disabled = (__kmp_affinity_type == affinity_disabled);
    if (! KMP_AFFINITY_CAPABLE()) {
        KMP_ASSERT(disabled);
    }
    if (disabled) {
        __kmp_affinity_type = affinity_none;
    }
    __kmp_aux_affinity_initialize();
    if (disabled) {
        __kmp_affinity_type = affinity_disabled;
    }
}


void
__kmp_affinity_uninitialize(void)
{
    if (__kmp_affinity_masks != NULL) {
        KMP_CPU_FREE_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
        __kmp_affinity_masks = NULL;
    }
    if (__kmp_affin_fullMask != NULL) {
        KMP_CPU_FREE(__kmp_affin_fullMask);
        __kmp_affin_fullMask = NULL;
    }
    __kmp_affinity_num_masks = 0;
# if OMP_40_ENABLED
    __kmp_affinity_num_places = 0;
# endif
    if (__kmp_affinity_proclist != NULL) {
        __kmp_free(__kmp_affinity_proclist);
        __kmp_affinity_proclist = NULL;
    }
    if( address2os != NULL ) {
        __kmp_free( address2os );
        address2os = NULL;
    }
    if( procarr != NULL ) {
        __kmp_free( procarr );
        procarr = NULL;
    }
# if KMP_USE_HWLOC
    if (__kmp_hwloc_topology != NULL) {
        hwloc_topology_destroy(__kmp_hwloc_topology);
        __kmp_hwloc_topology = NULL;
    }
# endif
}


void
__kmp_affinity_set_init_mask(int gtid, int isa_root)
{
    if (! KMP_AFFINITY_CAPABLE()) {
        return;
    }

    kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
    if (th->th.th_affin_mask == NULL) {
        KMP_CPU_ALLOC(th->th.th_affin_mask);
    }
    else {
        KMP_CPU_ZERO(th->th.th_affin_mask);
    }

    //
    // Copy the thread mask to the kmp_info_t structure.
    // If __kmp_affinity_type == affinity_none, copy the "full" mask, i.e. one
    // that has all of the OS proc ids set, or if __kmp_affinity_respect_mask
    // is set, then the full mask is the same as the mask of the initialization
    // thread.
    //
    kmp_affin_mask_t *mask;
    int i;

# if OMP_40_ENABLED
    if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel)
# endif
    {
        if ((__kmp_affinity_type == affinity_none) || (__kmp_affinity_type == affinity_balanced)
          ) {
# if KMP_GROUP_AFFINITY
            if (__kmp_num_proc_groups > 1) {
                return;
            }
# endif
            KMP_ASSERT(__kmp_affin_fullMask != NULL);
            i = KMP_PLACE_ALL;
            mask = __kmp_affin_fullMask;
        }
        else {
            KMP_DEBUG_ASSERT( __kmp_affinity_num_masks > 0 );
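            // Round-robin assignment: thread gtid gets mask number
            // (gtid + offset) % num_masks, so with 4 masks and offset 1,
            // gtids 0,1,2,3,4 map to masks 1,2,3,0,1.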
            i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
            mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
        }
    }
# if OMP_40_ENABLED
    else {
        if ((! isa_root)
          || (__kmp_nested_proc_bind.bind_types[0] == proc_bind_false)) {
# if KMP_GROUP_AFFINITY
            if (__kmp_num_proc_groups > 1) {
                return;
            }
# endif
            KMP_ASSERT(__kmp_affin_fullMask != NULL);
            i = KMP_PLACE_ALL;
            mask = __kmp_affin_fullMask;
        }
        else {
            //
            // int i = some hash function or just a counter that doesn't
            // always start at 0. Use gtid for now.
            //
            KMP_DEBUG_ASSERT( __kmp_affinity_num_masks > 0 );
            i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
            mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
        }
    }
# endif

# if OMP_40_ENABLED
    th->th.th_current_place = i;
    if (isa_root) {
        th->th.th_new_place = i;
        th->th.th_first_place = 0;
        th->th.th_last_place = __kmp_affinity_num_masks - 1;
    }

    if (i == KMP_PLACE_ALL) {
        KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
          gtid));
    }
    else {
        KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
          gtid, i));
    }
# else
    if (i == -1) {
        KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to __kmp_affin_fullMask\n",
          gtid));
    }
    else {
        KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to mask %d\n",
          gtid, i));
    }
# endif /* OMP_40_ENABLED */

    KMP_CPU_COPY(th->th.th_affin_mask, mask);

    if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          th->th.th_affin_mask);
        KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(), gtid,
          buf);
    }

# if KMP_OS_WINDOWS
    //
    // On Windows* OS, the process affinity mask might have changed.
    // If the user didn't request affinity and this call fails,
    // just continue silently. See CQ171393.
    //
    if ( __kmp_affinity_type == affinity_none ) {
        __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
    }
    else
# endif
        __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}

# if OMP_40_ENABLED

void
__kmp_affinity_set_place(int gtid)
{
    int retval;

    if (! KMP_AFFINITY_CAPABLE()) {
        return;
    }

    kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

    KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current place = %d)\n",
      gtid, th->th.th_new_place, th->th.th_current_place));

    //
    // Check that the new place is within this thread's partition.
    //
    KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
    KMP_ASSERT(th->th.th_new_place >= 0);
    KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
    if (th->th.th_first_place <= th->th.th_last_place) {
        KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place)
          && (th->th.th_new_place <= th->th.th_last_place));
    }
    else {
        KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place)
          || (th->th.th_new_place >= th->th.th_last_place));
    }

    //
    // Copy the thread mask to the kmp_info_t structure,
    // and set this thread's affinity.
    //
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(__kmp_affinity_masks,
      th->th.th_new_place);
    KMP_CPU_COPY(th->th.th_affin_mask, mask);
    th->th.th_current_place = th->th.th_new_place;

    if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          th->th.th_affin_mask);
        KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
          gtid, buf);
    }
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}

# endif /* OMP_40_ENABLED */

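//
// A minimal usage sketch of the user-level API that lands in the routine
// below (illustrative only; the entry points are the ones exported via
// kmp.h):
//
//     kmp_affinity_mask_t mask;
//     kmp_create_affinity_mask(&mask);
//     kmp_set_affinity_mask_proc(0, &mask);
//     if (kmp_set_affinity(&mask) != 0) {
//         // rejected: e.g. an empty mask, or procs outside the full mask
//     }
//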
int
__kmp_aux_set_affinity(void **mask)
{
    int gtid;
    kmp_info_t *th;
    int retval;

    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    gtid = __kmp_entry_gtid();
    KA_TRACE(1000, ;{
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity: setting affinity mask for thread %d = %s\n",
          gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
        }
        else {
            unsigned proc;
            int num_procs = 0;

            KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t*)(*mask))) {
                if (! KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
                    KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
                }
                if (! KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
                    continue;
                }
                num_procs++;
            }
            if (num_procs == 0) {
                KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
            }

# if KMP_GROUP_AFFINITY
            if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
                KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
            }
# endif /* KMP_GROUP_AFFINITY */

        }
    }

    th = __kmp_threads[gtid];
    KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
    retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
    if (retval == 0) {
        KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
    }

# if OMP_40_ENABLED
    th->th.th_current_place = KMP_PLACE_UNDEFINED;
    th->th.th_new_place = KMP_PLACE_UNDEFINED;
    th->th.th_first_place = 0;
    th->th.th_last_place = __kmp_affinity_num_masks - 1;

    //
    // Turn off 4.0 affinity for the current thread at this parallel level.
    //
    th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;
# endif

    return retval;
}


int
__kmp_aux_get_affinity(void **mask)
{
    int gtid;
    int retval;
    kmp_info_t *th;

    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    gtid = __kmp_entry_gtid();
    th = __kmp_threads[gtid];
    KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);

    KA_TRACE(1000, ;{
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          th->th.th_affin_mask);
        __kmp_printf("kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
        }
    }

# if !KMP_OS_WINDOWS

    retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
    KA_TRACE(1000, ;{
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_printf("kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid, buf);
    });
    return retval;

# else

    KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
    return 0;

# endif /* KMP_OS_WINDOWS */

}

int
__kmp_aux_set_affinity_mask_proc(int proc, void **mask)
{
    int retval;

    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    KA_TRACE(1000, ;{
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in affinity mask for thread %d = %s\n",
          proc, gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
        }
    }

    if ((proc < 0)
# if !KMP_USE_HWLOC
      || ((unsigned)proc >= KMP_CPU_SETSIZE)
# endif
      ) {
        return -1;
    }
    if (! KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
        return -2;
    }

    KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
    return 0;
}


int
__kmp_aux_unset_affinity_mask_proc(int proc, void **mask)
{
    int retval;

    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    KA_TRACE(1000, ;{
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in affinity mask for thread %d = %s\n",
          proc, gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
        }
    }

    if ((proc < 0)
# if !KMP_USE_HWLOC
      || ((unsigned)proc >= KMP_CPU_SETSIZE)
# endif
      ) {
        return -1;
    }
    if (! KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
        return -2;
    }

    KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
    return 0;
}


int
__kmp_aux_get_affinity_mask_proc(int proc, void **mask)
{
    int retval;

    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    KA_TRACE(1000, ;{
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in affinity mask for thread %d = %s\n",
          proc, gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
        }
    }

    if ((proc < 0)
# if !KMP_USE_HWLOC
      || ((unsigned)proc >= KMP_CPU_SETSIZE)
# endif
      ) {
        return -1;
    }
    if (! KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
        return 0;
    }

    return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}


// Dynamic affinity settings - Affinity balanced
void __kmp_balanced_affinity( int tid, int nthreads )
{
    if( __kmp_affinity_uniform_topology() ) {
        int coreID;
        int threadID;
        // Number of hyper threads per core in HT machine
        int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
        // Number of cores
        int ncores = __kmp_ncores;
        // How many threads will be bound to each core
        int chunk = nthreads / ncores;
        // How many cores will have an additional thread bound to them - "big cores"
        int big_cores = nthreads % ncores;
        // Number of threads on the big cores
        int big_nth = ( chunk + 1 ) * big_cores;
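        // Worked example: nthreads = 10 on 4 cores gives chunk = 2,
        // big_cores = 2, big_nth = 6; tids 0-5 go three per core onto
        // cores 0-1, and tids 6-9 go two per core onto cores 2-3.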
        if( tid < big_nth ) {
            coreID = tid / ( chunk + 1 );
            threadID = ( tid % ( chunk + 1 ) ) % __kmp_nth_per_core;
        } else { //tid >= big_nth
            coreID = ( tid - big_cores ) / chunk;
            threadID = ( ( tid - big_cores ) % chunk ) % __kmp_nth_per_core;
        }

        KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
          "Illegal set affinity operation when not capable");

        kmp_affin_mask_t *mask;
        KMP_CPU_ALLOC_ON_STACK(mask);
        KMP_CPU_ZERO(mask);

        // Granularity == thread
        if( __kmp_affinity_gran == affinity_gran_fine || __kmp_affinity_gran == affinity_gran_thread) {
            int osID = address2os[ coreID * __kmp_nth_per_core + threadID ].second;
            KMP_CPU_SET( osID, mask);
        } else if( __kmp_affinity_gran == affinity_gran_core ) { // Granularity == core
            for( int i = 0; i < __kmp_nth_per_core; i++ ) {
                int osID;
                osID = address2os[ coreID * __kmp_nth_per_core + i ].second;
                KMP_CPU_SET( osID, mask);
            }
        }
        if (__kmp_affinity_verbose) {
            char buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
            KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
              tid, buf);
        }
        __kmp_set_system_affinity( mask, TRUE );
        KMP_CPU_FREE_FROM_STACK(mask);
    } else { // Non-uniform topology

        kmp_affin_mask_t *mask;
        KMP_CPU_ALLOC_ON_STACK(mask);
        KMP_CPU_ZERO(mask);

        // Number of hyper threads per core in HT machine
        int nth_per_core = __kmp_nThreadsPerCore;
        int core_level;
        if( nth_per_core > 1 ) {
            core_level = __kmp_aff_depth - 2;
        } else {
            core_level = __kmp_aff_depth - 1;
        }

        // Number of cores - maximum value; it does not count trailing cores with 0 processors
        int ncores = address2os[ __kmp_avail_proc - 1 ].first.labels[ core_level ] + 1;

        // For performance gain consider the special case nthreads == __kmp_avail_proc
        if( nthreads == __kmp_avail_proc ) {
            if( __kmp_affinity_gran == affinity_gran_fine || __kmp_affinity_gran == affinity_gran_thread) {
                int osID = address2os[ tid ].second;
                KMP_CPU_SET( osID, mask);
            } else if( __kmp_affinity_gran == affinity_gran_core ) { // Granularity == core
                int coreID = address2os[ tid ].first.labels[ core_level ];
                // We'll count the osIDs found for the current core; there can be
                // at most nth_per_core of them, and since address2os is sorted
                // we can stop as soon as cnt == nth_per_core.
                int cnt = 0;
                for( int i = 0; i < __kmp_avail_proc; i++ ) {
                    int osID = address2os[ i ].second;
                    int core = address2os[ i ].first.labels[ core_level ];
                    if( core == coreID ) {
                        KMP_CPU_SET( osID, mask);
                        cnt++;
                        if( cnt == nth_per_core ) {
                            break;
                        }
                    }
                }
            }
        } else if( nthreads <= __kmp_ncores ) {

            int core = 0;
            for( int i = 0; i < ncores; i++ ) {
                // Check if this core from procarr[] is in the mask
                int in_mask = 0;
                for( int j = 0; j < nth_per_core; j++ ) {
                    if( procarr[ i * nth_per_core + j ] != -1 ) {
                        in_mask = 1;
                        break;
                    }
                }
                if( in_mask ) {
                    if( tid == core ) {
                        for( int j = 0; j < nth_per_core; j++ ) {
                            int osID = procarr[ i * nth_per_core + j ];
                            if( osID != -1 ) {
                                KMP_CPU_SET( osID, mask );
                                // For granularity=thread it is enough to set the first available osID for this core
                                if( __kmp_affinity_gran == affinity_gran_fine || __kmp_affinity_gran == affinity_gran_thread) {
                                    break;
                                }
                            }
                        }
                        break;
                    } else {
                        core++;
                    }
                }
            }

        } else { // nthreads > __kmp_ncores

            // Array to save the number of processors at each core
            int* nproc_at_core = (int*)KMP_ALLOCA(sizeof(int)*ncores);
            // Array to save the number of cores with "x" available processors;
            int* ncores_with_x_procs = (int*)KMP_ALLOCA(sizeof(int)*(nth_per_core+1));
            // Array to save the number of cores with # procs from x to nth_per_core
            int* ncores_with_x_to_max_procs = (int*)KMP_ALLOCA(sizeof(int)*(nth_per_core+1));

            for( int i = 0; i <= nth_per_core; i++ ) {
                ncores_with_x_procs[ i ] = 0;
                ncores_with_x_to_max_procs[ i ] = 0;
            }

            for( int i = 0; i < ncores; i++ ) {
                int cnt = 0;
                for( int j = 0; j < nth_per_core; j++ ) {
                    if( procarr[ i * nth_per_core + j ] != -1 ) {
                        cnt++;
                    }
                }
                nproc_at_core[ i ] = cnt;
                ncores_with_x_procs[ cnt ]++;
            }

            for( int i = 0; i <= nth_per_core; i++ ) {
                for( int j = i; j <= nth_per_core; j++ ) {
                    ncores_with_x_to_max_procs[ i ] += ncores_with_x_procs[ j ];
                }
            }
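
            // For intuition: with nth_per_core = 2 and three cores holding
            // { 2, 1, 2 } usable procs, ncores_with_x_procs = [ 0, 1, 2 ]
            // and ncores_with_x_to_max_procs = [ 3, 3, 2 ] (cores with at
            // least x procs).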

            // Max number of processors
            int nproc = nth_per_core * ncores;
            // An array to keep number of threads per each context
            int * newarr = ( int * )__kmp_allocate( sizeof( int ) * nproc );
            for( int i = 0; i < nproc; i++ ) {
                newarr[ i ] = 0;
            }

            int nth = nthreads;
            int flag = 0;
            while( nth > 0 ) {
                for( int j = 1; j <= nth_per_core; j++ ) {
                    int cnt = ncores_with_x_to_max_procs[ j ];
                    for( int i = 0; i < ncores; i++ ) {
                        // Skip the core with 0 processors
                        if( nproc_at_core[ i ] == 0 ) {
                            continue;
                        }
                        for( int k = 0; k < nth_per_core; k++ ) {
                            if( procarr[ i * nth_per_core + k ] != -1 ) {
                                if( newarr[ i * nth_per_core + k ] == 0 ) {
                                    newarr[ i * nth_per_core + k ] = 1;
                                    cnt--;
                                    nth--;
                                    break;
                                } else {
                                    if( flag != 0 ) {
                                        newarr[ i * nth_per_core + k ] ++;
                                        cnt--;
                                        nth--;
                                        break;
                                    }
                                }
                            }
                        }
                        if( cnt == 0 || nth == 0 ) {
                            break;
                        }
                    }
                    if( nth == 0 ) {
                        break;
                    }
                }
                flag = 1;
            }
            int sum = 0;
            for( int i = 0; i < nproc; i++ ) {
                sum += newarr[ i ];
                if( sum > tid ) {
                    // Granularity == thread
                    if( __kmp_affinity_gran == affinity_gran_fine || __kmp_affinity_gran == affinity_gran_thread) {
                        int osID = procarr[ i ];
                        KMP_CPU_SET( osID, mask);
                    } else if( __kmp_affinity_gran == affinity_gran_core ) { // Granularity == core
                        int coreID = i / nth_per_core;
                        for( int ii = 0; ii < nth_per_core; ii++ ) {
                            int osID = procarr[ coreID * nth_per_core + ii ];
                            if( osID != -1 ) {
                                KMP_CPU_SET( osID, mask);
                            }
                        }
                    }
                    break;
                }
            }
            __kmp_free( newarr );
        }

        if (__kmp_affinity_verbose) {
            char buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
            KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
              tid, buf);
        }
        __kmp_set_system_affinity( mask, TRUE );
        KMP_CPU_FREE_FROM_STACK(mask);
    }
}

#if KMP_OS_LINUX
// We don't need this entry for Windows because
// there is GetProcessAffinityMask() api
//
// The intended usage is indicated by these steps:
// 1) The user gets the current affinity mask
// 2) Then sets the affinity by calling this function
// 3) Error check the return value
// 4) Use non-OpenMP parallelization
// 5) Reset the affinity to what was stored in step 1)
#ifdef __cplusplus
extern "C"
#endif
int
kmp_set_thread_affinity_mask_initial()
// the function returns 0 on success,
//   -1 if we cannot bind thread
//   >0 (errno) if an error happened during binding
{
    int gtid = __kmp_get_gtid();
    if (gtid < 0) {
        // Do not touch non-omp threads
        KA_TRACE(30, ( "kmp_set_thread_affinity_mask_initial: "
            "non-omp thread, returning\n"));
        return -1;
    }
    if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
        KA_TRACE(30, ( "kmp_set_thread_affinity_mask_initial: "
            "affinity not initialized, returning\n"));
        return -1;
    }
    KA_TRACE(30, ( "kmp_set_thread_affinity_mask_initial: "
        "set full mask for thread %d\n", gtid));
    KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
    return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
}
#endif

#endif // KMP_AFFINITY_SUPPORTED