#if KMP_AFFINITY_SUPPORTED
#if KMP_USE_HWLOC
class KMPHwlocAffinity : public KMPAffinity {
public:
  class Mask : public KMPAffinity::Mask {
    hwloc_cpuset_t mask;

  public:
    Mask() {
      mask = hwloc_bitmap_alloc();
      this->zero();
    }
    ~Mask() { hwloc_bitmap_free(mask); }
    void set(int i) override { hwloc_bitmap_set(mask, i); }
    bool is_set(int i) const override { return hwloc_bitmap_isset(mask, i); }
    void clear(int i) override { hwloc_bitmap_clr(mask, i); }
    void zero() override { hwloc_bitmap_zero(mask); }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      hwloc_bitmap_copy(mask, convert->mask);
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_and(mask, mask, convert->mask);
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_or(mask, mask, convert->mask);
    }
    void bitwise_not() override { hwloc_bitmap_not(mask, mask); }
    int begin() const override { return hwloc_bitmap_first(mask); }
    int end() const override { return -1; }
    int next(int previous) const override {
      return hwloc_bitmap_next(mask, previous);
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
      int retval =
          hwloc_get_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
      int retval =
          hwloc_set_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
    int get_proc_group() const override {
      int group = -1;
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        // On Windows, the long type is always 32 bits.
        unsigned long first_32_bits = hwloc_bitmap_to_ith_ulong(mask, i * 2);
        unsigned long second_32_bits =
            hwloc_bitmap_to_ith_ulong(mask, i * 2 + 1);
        if (first_32_bits == 0 && second_32_bits == 0) {
          continue;
        }
        if (group >= 0) {
          return -1;
        }
        group = i;
      }
#endif /* KMP_OS_WINDOWS */
      return group;
    }
  };
  void determine_capable(const char *var) override {
    const hwloc_topology_support *topology_support;
    if (__kmp_hwloc_topology == NULL) {
      if (hwloc_topology_init(&__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_init()");
      }
      if (hwloc_topology_load(__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_load()");
      }
    }
    topology_support = hwloc_topology_get_support(__kmp_hwloc_topology);
    // Is the system capable of setting/getting this thread's affinity?
    // Is topology discovery possible (pu = processing units discoverable)?
    // And did all hwloc_* calls succeed?
    if (topology_support && topology_support->cpubind->set_thisthread_cpubind &&
        topology_support->cpubind->get_thisthread_cpubind &&
        topology_support->discovery->pu && !__kmp_hwloc_error) {
      // Enables affinity according to the KMP_AFFINITY_CAPABLE() macro.
      KMP_AFFINITY_ENABLE(TRUE);
    } else {
      // Indicate that hwloc didn't work and disable affinity.
      __kmp_hwloc_error = TRUE;
      KMP_AFFINITY_DISABLE();
    }
  }
  void bind_thread(int which) override {
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                "Illegal set affinity operation when not capable");
    KMPAffinity::Mask *mask;
    KMP_CPU_ALLOC_ON_STACK(mask);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE_FROM_STACK(mask);
  }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    delete[] hwloc_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    return &(hwloc_array[index]);
  }
  api_type get_api_type() const override { return HWLOC; }
};
#endif /* KMP_USE_HWLOC */
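// Illustrative sketch (not part of the runtime sources): how the abstract
// KMPAffinity interface above is typically driven. Only names defined in this
// header are used; error handling is omitted for brevity.
//
//   KMPAffinity *affinity = new KMPHwlocAffinity();
//   affinity->determine_capable("KMP_AFFINITY"); // probe hwloc support
//   KMPAffinity::Mask *m = affinity->allocate_mask();
//   m->zero();
//   m->set(3);                      // request logical CPU 3
//   m->set_system_affinity(TRUE);   // bind the calling thread
//   affinity->deallocate_mask(m);
//   delete affinity;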
#if KMP_OS_LINUX || KMP_OS_FREEBSD
#if KMP_OS_LINUX
/* On some of the older OS's that we build on, these constants aren't present
   in <asm/unistd.h> #included from <sys/syscall.h>. They must be the same on
   all systems of the same arch where they are defined, and they cannot
   change. */
#include <sys/syscall.h>
#if KMP_ARCH_X86 || KMP_ARCH_ARM
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 241
#elif __NR_sched_setaffinity != 241
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 242
#elif __NR_sched_getaffinity != 242
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_AARCH64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_X86_64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 203
#elif __NR_sched_setaffinity != 203
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 204
#elif __NR_sched_getaffinity != 204
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_PPC64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 222
#elif __NR_sched_setaffinity != 222
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 223
#elif __NR_sched_getaffinity != 223
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 4239
#elif __NR_sched_setaffinity != 4239
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 4240
#elif __NR_sched_getaffinity != 4240
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 5195
#elif __NR_sched_setaffinity != 5195
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 5196
#elif __NR_sched_getaffinity != 5196
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#else
#error Unknown or unsupported architecture
#endif /* KMP_ARCH_* */
#elif KMP_OS_FREEBSD
#include <pthread.h>
#include <pthread_np.h>
#endif
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef unsigned char mask_t; // scalar type used to hold the bit mask
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;

  public:
    mask_t *mask;
    Mask() { mask = (mask_t *)__kmp_allocate(__kmp_affin_mask_size); }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override { return __kmp_affin_mask_size * BITS_PER_MASK_T; }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
#if KMP_OS_LINUX
      int retval =
          syscall(__NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_getaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
#if KMP_OS_LINUX
      int retval =
          syscall(__NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_setaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override {
    KMPNativeAffinity::Mask *retval = new Mask();
    return retval;
  }
  void deallocate_mask(KMPAffinity::Mask *m) override {
    KMPNativeAffinity::Mask *native_mask =
        static_cast<KMPNativeAffinity::Mask *>(m);
    delete native_mask;
  }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *linux_array = static_cast<Mask *>(array);
    delete[] linux_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *linux_array = static_cast<Mask *>(array);
    return &(linux_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD */
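// Worked example (illustrative, not part of the original sources): with
// mask_t = unsigned char, BITS_PER_MASK_T is 8, so set(19) touches
// mask[19 / 8] == mask[2] and bit 19 % 8 == 3, i.e. mask[2] |= 0x08;
// is_set(19) tests the same byte/bit pair.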
#if KMP_OS_WINDOWS
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef ULONG_PTR mask_t; // scalar type used to hold the bit mask
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    mask_t *mask;

  public:
    Mask() {
      mask = (mask_t *)__kmp_allocate(sizeof(mask_t) * __kmp_num_proc_groups);
    }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override { return __kmp_num_proc_groups * BITS_PER_MASK_T; }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int set_system_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups > 1) {
        // Check for a valid mask.
        GROUP_AFFINITY ga;
        int group = get_proc_group();
        if (group < 0) {
          if (abort_on_error) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
          }
          return -1;
        }
        // Transform the bit vector into a GROUP_AFFINITY struct
        // and make the system call to set affinity.
        ga.Group = group;
        ga.Mask = mask[group];
        ga.Reserved[0] = ga.Reserved[1] = ga.Reserved[2] = 0;

        KMP_DEBUG_ASSERT(__kmp_SetThreadGroupAffinity != NULL);
        if (__kmp_SetThreadGroupAffinity(GetCurrentThread(), &ga, NULL) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      } else {
        if (!SetThreadAffinityMask(GetCurrentThread(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int get_system_affinity(bool abort_on_error) override {
      if (__kmp_num_proc_groups > 1) {
        this->zero();
        GROUP_AFFINITY ga;
        KMP_DEBUG_ASSERT(__kmp_GetThreadGroupAffinity != NULL);
        if (__kmp_GetThreadGroupAffinity(GetCurrentThread(), &ga) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetThreadGroupAffinity()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        if ((ga.Group < 0) || (ga.Group > __kmp_num_proc_groups) ||
            (ga.Mask == 0)) {
          return -1;
        }
        mask[ga.Group] = ga.Mask;
      } else {
        mask_t newMask, sysMask, retval;
        if (!GetProcessAffinityMask(GetCurrentProcess(), &newMask, &sysMask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetProcessAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        retval = SetThreadAffinityMask(GetCurrentThread(), newMask);
        if (!retval) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        newMask = SetThreadAffinityMask(GetCurrentThread(), retval);
        if (!newMask) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
        }
        *mask = retval;
      }
      return 0;
    }
    int get_proc_group() const override {
      int group = -1;
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        if (mask[i] == 0)
          continue;
        if (group >= 0)
          return -1;
        group = i;
      }
      return group;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *windows_array = static_cast<Mask *>(array);
    delete[] windows_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *windows_array = static_cast<Mask *>(array);
    return &(windows_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_WINDOWS */
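// Worked example (illustrative, not part of the original sources): on Windows
// with more than one processor group, mask_t is ULONG_PTR (64 bits on x64),
// so a global processor id maps to (group, bit) as id / 64 and id % 64.
// Processor 70 therefore lives in mask[1], bit 6, and get_proc_group()
// returns 1 when that is the only non-zero group word.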
class Address {
public:
  static const unsigned maxDepth = 32;
  unsigned labels[maxDepth];
  unsigned childNums[maxDepth];
  unsigned depth;
  unsigned leader;
  Address(unsigned _depth) : depth(_depth), leader(FALSE) {}
  Address &operator=(const Address &b) {
    depth = b.depth;
    for (unsigned i = 0; i < depth; i++) {
      labels[i] = b.labels[i];
      childNums[i] = b.childNums[i];
    }
    leader = FALSE;
    return *this;
  }
  bool operator==(const Address &b) const {
    if (depth != b.depth)
      return false;
    for (unsigned i = 0; i < depth; i++)
      if (labels[i] != b.labels[i])
        return false;
    return true;
  }
  bool isClose(const Address &b, int level) const {
    if (depth != b.depth)
      return false;
    if ((unsigned)level >= depth)
      return true;
    for (unsigned i = 0; i < (depth - level); i++)
      if (labels[i] != b.labels[i])
        return false;
    return true;
  }
  bool operator!=(const Address &b) const { return !operator==(b); }
  void print() const {
    unsigned i;
    printf("Depth: %u --- ", depth);
    for (i = 0; i < depth; i++) {
      printf("%u ", labels[i]);
    }
  }
};

class AddrUnsPair {
public:
  Address first;
  unsigned second;
  AddrUnsPair(Address _first, unsigned _second)
      : first(_first), second(_second) {}
  AddrUnsPair &operator=(const AddrUnsPair &b) {
    first = b.first;
    second = b.second;
    return *this;
  }
  void print() const {
    printf("first = ");
    first.print();
    printf(" --- second = %u", second);
  }
  bool operator==(const AddrUnsPair &b) const {
    if (first != b.first)
      return false;
    if (second != b.second)
      return false;
    return true;
  }
  bool operator!=(const AddrUnsPair &b) const { return !operator==(b); }
};

static int __kmp_affinity_cmp_Address_labels(const void *a, const void *b) {
  const Address *aa = &(((const AddrUnsPair *)a)->first);
  const Address *bb = &(((const AddrUnsPair *)b)->first);
  unsigned depth = aa->depth;
  unsigned i;
  KMP_DEBUG_ASSERT(depth == bb->depth);
  for (i = 0; i < depth; i++) {
    if (aa->labels[i] < bb->labels[i])
      return -1;
    if (aa->labels[i] > bb->labels[i])
      return 1;
  }
  return 0;
}
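// Illustrative example (not part of the original sources): qsort with
// __kmp_affinity_cmp_Address_labels orders AddrUnsPair records
// lexicographically by their Address labels. For two pairs whose labels are
// {0, 1, 3} and {0, 2, 0}, the first compares less because the labels differ
// first at index 1 (1 < 2), regardless of the trailing values.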
/* Machine-specific hierarchy info, computed once at init. Maps threads onto
   the machine hierarchy (or our best guess at it) for efficient barriers. */
class hierarchy_info {
public:
  // Defaults used when no affinity information is available.
  static const kmp_uint32 maxLeaves = 4;
  static const kmp_uint32 minBranch = 4;
  /* Number of levels in the hierarchy. When the machine is oversubscribed,
     levels are added, doubling the thread capacity each time. */
  kmp_uint32 maxLevels;
  /* Depth of the machine configuration hierarchy: the number of levels along
     the longest path from root to any leaf. */
  kmp_uint32 depth;
  kmp_uint32 base_num_threads;
  enum init_status { initialized = 0, not_initialized = 1, initializing = 2 };
  volatile kmp_int8 uninitialized; // initialization state (see init_status)
  volatile kmp_int8 resizing; // 0 = not resizing, 1 = resizing

  /* Level 0 corresponds to leaves. numPerLevel[i] is the number of children
     the parent of a node at level i has; empty levels are set to 1. */
  kmp_uint32 *numPerLevel;
  kmp_uint32 *skipPerLevel;

  void deriveLevels(AddrUnsPair *adr2os, int num_addrs) {
    int hier_depth = adr2os[0].first.depth;
    int level = 0;
    for (int i = hier_depth - 1; i >= 0; --i) {
      int max = -1;
      for (int j = 0; j < num_addrs; ++j) {
        int next = adr2os[j].first.childNums[i];
        if (next > max)
          max = next;
      }
      numPerLevel[level] = max + 1;
      ++level;
    }
  }

  hierarchy_info()
      : maxLevels(7), depth(1), uninitialized(not_initialized), resizing(0) {}

  void fini() {
    if (!uninitialized && numPerLevel) {
      __kmp_free(numPerLevel);
      numPerLevel = NULL;
      uninitialized = not_initialized;
    }
  }

  void init(AddrUnsPair *adr2os, int num_addrs) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(
        &uninitialized, not_initialized, initializing);
    if (bool_result == 0) { // Wait for initialization
      while (TCR_1(uninitialized) != initialized)
        KMP_CPU_PAUSE();
      return;
    }
    KMP_DEBUG_ASSERT(bool_result == 1);

    /* Re-initialize the fields explicitly in case the library is initialized
       multiple times. */
    depth = 1;
    resizing = 0;
    maxLevels = 7;
    numPerLevel =
        (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
    skipPerLevel = &(numPerLevel[maxLevels]);
    for (kmp_uint32 i = 0; i < maxLevels; ++i) { // init levels to 1 item each
      numPerLevel[i] = 1;
      skipPerLevel[i] = 1;
    }

    // Sort table by physical ID
    if (adr2os) {
      qsort(adr2os, num_addrs, sizeof(*adr2os),
            __kmp_affinity_cmp_Address_labels);
      deriveLevels(adr2os, num_addrs);
    } else {
      numPerLevel[0] = maxLeaves;
      numPerLevel[1] = num_addrs / maxLeaves;
      if (num_addrs % maxLeaves)
        numPerLevel[1]++;
    }

    base_num_threads = num_addrs;
    for (int i = maxLevels - 1; i >= 0;
         --i) // count non-empty levels to get depth
      if (numPerLevel[i] != 1 || depth > 1) // only count one top-level '1'
        depth++;

    kmp_uint32 branch = minBranch;
    if (numPerLevel[0] == 1)
      branch = num_addrs / maxLeaves;
    if (branch < minBranch)
      branch = minBranch;
    for (kmp_uint32 d = 0; d < depth - 1; ++d) { // optimize hierarchy width
      while (numPerLevel[d] > branch ||
             (d == 0 && numPerLevel[d] > maxLeaves)) { // max 4 on level 0!
        if (numPerLevel[d] & 1)
          numPerLevel[d]++;
        numPerLevel[d] = numPerLevel[d] >> 1;
        if (numPerLevel[d + 1] == 1)
          depth++;
        numPerLevel[d + 1] = numPerLevel[d + 1] << 1;
      }
      if (numPerLevel[0] == 1) {
        branch = branch >> 1;
        if (branch < 4)
          branch = minBranch;
      }
    }

    for (kmp_uint32 i = 1; i < depth; ++i)
      skipPerLevel[i] = numPerLevel[i - 1] * skipPerLevel[i - 1];
    // Fill in hierarchy in the case of oversubscription
    for (kmp_uint32 i = depth; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    uninitialized = initialized; // One writer
  }
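  // Worked example (illustrative, not part of the original sources): init()
  // with num_addrs = 16 and no address table (adr2os == NULL) yields
  // numPerLevel = {4, 4, 1, 1, 1, 1, 1}, depth = 3, and
  // skipPerLevel = {1, 4, 16, 32, 64, 128, 256}; levels past depth keep
  // doubling to leave room for oversubscription.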
  // Resize the hierarchy if nproc changes to something larger than before
  void resize(kmp_uint32 nproc) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    while (bool_result == 0) { // someone else is trying to resize
      KMP_CPU_PAUSE();
      if (nproc <= base_num_threads) // happy with other thread's resize
        return;
      else // try to resize
        bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    }
    KMP_DEBUG_ASSERT(bool_result != 0);
    if (nproc <= base_num_threads)
      return; // happy with other thread's resize

    kmp_uint32 old_sz = skipPerLevel[depth - 1];
    kmp_uint32 incs = 0, old_maxLevels = maxLevels;
    // First see if old maxLevels is enough to contain new size
    for (kmp_uint32 i = depth; i < maxLevels && nproc > old_sz; ++i) {
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];
      numPerLevel[i - 1] *= 2;
      old_sz *= 2;
      depth++;
    }
    if (nproc > old_sz) { // Not enough space, need to expand hierarchy
      while (nproc > old_sz) {
        old_sz *= 2;
        incs++;
        depth++;
      }
      maxLevels += incs;

      // Resize arrays
      kmp_uint32 *old_numPerLevel = numPerLevel;
      kmp_uint32 *old_skipPerLevel = skipPerLevel;
      numPerLevel = skipPerLevel = NULL;
      numPerLevel =
          (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
      skipPerLevel = &(numPerLevel[maxLevels]);

      // Copy old elements from old arrays
      for (kmp_uint32 i = 0; i < old_maxLevels; ++i) {
        numPerLevel[i] = old_numPerLevel[i];
        skipPerLevel[i] = old_skipPerLevel[i];
      }
      // Init new elements in arrays to 1
      for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i) {
        numPerLevel[i] = 1;
        skipPerLevel[i] = 1;
      }
      // Free old arrays
      __kmp_free(old_numPerLevel);
    }

    // Fill in oversubscription levels of hierarchy
    for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    base_num_threads = nproc;