x86: Move x86 processor cache info to cpu_features

1. Move x86 processor cache info to _dl_x86_cpu_features in ld.so.
2. Update tunable bounds with TUNABLE_SET_WITH_BOUNDS.
3. Move x86 cache info initialization to dl-cacheinfo.h and initialize
x86 cache info in init_cpu_features ().
4. Put x86 cache info for libc in cacheinfo.h, which is included in
libc-start.c in libc.a and is included in cacheinfo.c in libc.so.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>

Author: H.J. Lu
Date: 2020-09-18 07:55:14 -07:00
commit 2d651eb926
parent d18f59bf92

5 changed files with 550 additions and 411 deletions
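For orientation (editor's note, not part of the patch): after this change the cache geometry is detected once, stored in _dl_x86_cpu_features in ld.so, and only copied out by libc. The user-visible way to read these values is unchanged; they still come out through sysconf, which the first file below reduces to a plain field lookup:

#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  /* Each of these now returns the corresponding field of
     _dl_x86_cpu_features via __cache_sysconf (first file below).  */
  printf ("L1d cache size:     %ld\n", sysconf (_SC_LEVEL1_DCACHE_SIZE));
  printf ("L1d cache linesize: %ld\n", sysconf (_SC_LEVEL1_DCACHE_LINESIZE));
  printf ("L2 cache size:      %ld\n", sysconf (_SC_LEVEL2_CACHE_SIZE));
  printf ("L3 cache size:      %ld\n", sysconf (_SC_LEVEL3_CACHE_SIZE));
  return 0;
}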

diff --git a/sysdeps/x86/cacheinfo.c b/sysdeps/x86/cacheinfo.c

@@ -18,11 +18,8 @@
 
 #if IS_IN (libc)
 
-#include <assert.h>
 #include <unistd.h>
-#include <cpuid.h>
 #include <ldsodefs.h>
-#include <dl-cacheinfo.h>
 
 /* Get the value of the system variable NAME.  */
 long int
@@ -30,20 +27,45 @@ attribute_hidden
 __cache_sysconf (int name)
 {
   const struct cpu_features *cpu_features = __get_cpu_features ();
 
-  if (cpu_features->basic.kind == arch_kind_intel)
-    return handle_intel (name, cpu_features);
-
-  if (cpu_features->basic.kind == arch_kind_amd)
-    return handle_amd (name);
-
-  if (cpu_features->basic.kind == arch_kind_zhaoxin)
-    return handle_zhaoxin (name);
-
-  // XXX Fill in more vendors.
-
-  /* CPU not known, we have no information.  */
-  return 0;
+  switch (name)
+    {
+    case _SC_LEVEL1_ICACHE_SIZE:
+      return cpu_features->level1_icache_size;
+
+    case _SC_LEVEL1_DCACHE_SIZE:
+      return cpu_features->level1_dcache_size;
+
+    case _SC_LEVEL1_DCACHE_ASSOC:
+      return cpu_features->level1_dcache_assoc;
+
+    case _SC_LEVEL1_DCACHE_LINESIZE:
+      return cpu_features->level1_dcache_linesize;
+
+    case _SC_LEVEL2_CACHE_SIZE:
+      return cpu_features->level2_cache_size;
+
+    case _SC_LEVEL2_CACHE_ASSOC:
+      return cpu_features->level2_cache_assoc;
+
+    case _SC_LEVEL2_CACHE_LINESIZE:
+      return cpu_features->level2_cache_linesize;
+
+    case _SC_LEVEL3_CACHE_SIZE:
+      return cpu_features->level3_cache_size;
+
+    case _SC_LEVEL3_CACHE_ASSOC:
+      return cpu_features->level3_cache_assoc;
+
+    case _SC_LEVEL3_CACHE_LINESIZE:
+      return cpu_features->level3_cache_linesize;
+
+    case _SC_LEVEL4_CACHE_SIZE:
+      return cpu_features->level4_cache_size;
+
+    default:
+      break;
+    }
+
+  return -1;
 }
 
 # ifdef SHARED

diff --git a/sysdeps/x86/cacheinfo.h b/sysdeps/x86/cacheinfo.h

@@ -18,7 +18,16 @@
 
 #include <assert.h>
 #include <unistd.h>
 #include <cpuid.h>
+#include <cpu-features.h>
+
+#if HAVE_TUNABLES
+# define TUNABLE_NAMESPACE cpu
+# include <unistd.h>		/* Get STDOUT_FILENO for _dl_printf.  */
+# include <elf/dl-tunables.h>
+#endif
+
+#if IS_IN (libc)
 
 /* Data cache size for use in memory and string routines, typically
    L1 size, rounded to multiple of 256 bytes.  */
 long int __x86_data_cache_size_half attribute_hidden = 32 * 1024 / 2;
@@ -45,385 +54,30 @@ long int __x86_rep_movsb_threshold attribute_hidden = 2048;
 
 /* Threshold to use Enhanced REP STOSB.  */
 long int __x86_rep_stosb_threshold attribute_hidden = 2048;
 
-static void
-get_common_cache_info (long int *shared_ptr, unsigned int *threads_ptr,
-                       long int core)
-{
-  unsigned int eax;
-  unsigned int ebx;
-  unsigned int ecx;
-  unsigned int edx;
-
-  /* Number of logical processors sharing L2 cache.  */
-  int threads_l2;
-
-  /* Number of logical processors sharing L3 cache.  */
-  int threads_l3;
-
-  const struct cpu_features *cpu_features = __get_cpu_features ();
-  int max_cpuid = cpu_features->basic.max_cpuid;
-  unsigned int family = cpu_features->basic.family;
-  unsigned int model = cpu_features->basic.model;
-  long int shared = *shared_ptr;
-  unsigned int threads = *threads_ptr;
-  bool inclusive_cache = true;
-  bool support_count_mask = true;
-
-  /* Try L3 first.  */
-  unsigned int level = 3;
-
-  if (cpu_features->basic.kind == arch_kind_zhaoxin && family == 6)
-    support_count_mask = false;
-
-  if (shared <= 0)
-    {
-      /* Try L2 otherwise.  */
-      level = 2;
-      shared = core;
-      threads_l2 = 0;
-      threads_l3 = -1;
-    }
-  else
-    {
-      threads_l2 = 0;
-      threads_l3 = 0;
-    }
-
-  /* A value of 0 for the HTT bit indicates there is only a single
-     logical processor.  */
-  if (HAS_CPU_FEATURE (HTT))
-    {
-      /* Figure out the number of logical threads that share the
-         highest cache level.  */
-      if (max_cpuid >= 4)
-        {
-          int i = 0;
-
-          /* Query until cache level 2 and 3 are enumerated.  */
-          int check = 0x1 | (threads_l3 == 0) << 1;
-          do
-            {
-              __cpuid_count (4, i++, eax, ebx, ecx, edx);
-
-              /* There seems to be a bug in at least some Pentium Ds
-                 which sometimes fail to iterate all cache parameters.
-                 Do not loop indefinitely here, stop in this case and
-                 assume there is no such information.  */
-              if (cpu_features->basic.kind == arch_kind_intel
-                  && (eax & 0x1f) == 0)
-                goto intel_bug_no_cache_info;
-
-              switch ((eax >> 5) & 0x7)
-                {
-                default:
-                  break;
-                case 2:
-                  if ((check & 0x1))
-                    {
-                      /* Get maximum number of logical processors
-                         sharing L2 cache.  */
-                      threads_l2 = (eax >> 14) & 0x3ff;
-                      check &= ~0x1;
-                    }
-                  break;
-                case 3:
-                  if ((check & (0x1 << 1)))
-                    {
-                      /* Get maximum number of logical processors
-                         sharing L3 cache.  */
-                      threads_l3 = (eax >> 14) & 0x3ff;
-
-                      /* Check if L2 and L3 caches are inclusive.  */
-                      inclusive_cache = (edx & 0x2) != 0;
-                      check &= ~(0x1 << 1);
-                    }
-                  break;
-                }
-            }
-          while (check);
-
-          /* If max_cpuid >= 11, THREADS_L2/THREADS_L3 are the maximum
-             numbers of addressable IDs for logical processors sharing
-             the cache, instead of the maximum number of threads
-             sharing the cache.  */
-          if (max_cpuid >= 11 && support_count_mask)
-            {
-              /* Find the number of logical processors shipped in
-                 one core and apply count mask.  */
-              i = 0;
-
-              /* Count SMT only if there is L3 cache.  Always count
-                 core if there is no L3 cache.  */
-              int count = ((threads_l2 > 0 && level == 3)
-                           | ((threads_l3 > 0
-                               || (threads_l2 > 0 && level == 2)) << 1));
-
-              while (count)
-                {
-                  __cpuid_count (11, i++, eax, ebx, ecx, edx);
-
-                  int shipped = ebx & 0xff;
-                  int type = ecx & 0xff00;
-                  if (shipped == 0 || type == 0)
-                    break;
-                  else if (type == 0x100)
-                    {
-                      /* Count SMT.  */
-                      if ((count & 0x1))
-                        {
-                          int count_mask;
-
-                          /* Compute count mask.  */
-                          asm ("bsr %1, %0"
-                               : "=r" (count_mask) : "g" (threads_l2));
-                          count_mask = ~(-1 << (count_mask + 1));
-                          threads_l2 = (shipped - 1) & count_mask;
-                          count &= ~0x1;
-                        }
-                    }
-                  else if (type == 0x200)
-                    {
-                      /* Count core.  */
-                      if ((count & (0x1 << 1)))
-                        {
-                          int count_mask;
-                          int threads_core
-                            = (level == 2 ? threads_l2 : threads_l3);
-
-                          /* Compute count mask.  */
-                          asm ("bsr %1, %0"
-                               : "=r" (count_mask) : "g" (threads_core));
-                          count_mask = ~(-1 << (count_mask + 1));
-                          threads_core = (shipped - 1) & count_mask;
-                          if (level == 2)
-                            threads_l2 = threads_core;
-                          else
-                            threads_l3 = threads_core;
-                          count &= ~(0x1 << 1);
-                        }
-                    }
-                }
-            }
-          if (threads_l2 > 0)
-            threads_l2 += 1;
-          if (threads_l3 > 0)
-            threads_l3 += 1;
-
-          if (level == 2)
-            {
-              if (threads_l2)
-                {
-                  threads = threads_l2;
-                  if (cpu_features->basic.kind == arch_kind_intel
-                      && threads > 2
-                      && family == 6)
-                    switch (model)
-                      {
-                      case 0x37:
-                      case 0x4a:
-                      case 0x4d:
-                      case 0x5a:
-                      case 0x5d:
-                        /* Silvermont has L2 cache shared by 2 cores.  */
-                        threads = 2;
-                        break;
-                      default:
-                        break;
-                      }
-                }
-            }
-          else if (threads_l3)
-            threads = threads_l3;
-        }
-      else
-        {
-intel_bug_no_cache_info:
-          /* Assume that all logical threads share the highest cache
-             level.  */
-          threads
-            = ((cpu_features->features[COMMON_CPUID_INDEX_1].cpuid.ebx
-                >> 16) & 0xff);
-        }
-
-      /* Cap usage of highest cache level to the number of supported
-         threads.  */
-      if (shared > 0 && threads > 0)
-        shared /= threads;
-    }
-
-  /* Account for non-inclusive L2 and L3 caches.  */
-  if (!inclusive_cache)
-    {
-      if (threads_l2 > 0)
-        core /= threads_l2;
-      shared += core;
-    }
-
-  *shared_ptr = shared;
-  *threads_ptr = threads;
-}
 static void
 init_cacheinfo (void)
 {
-  /* Find out what brand of processor.  */
-  unsigned int ebx;
-  unsigned int ecx;
-  unsigned int edx;
-  int max_cpuid_ex;
-  long int data = -1;
-  long int shared = -1;
-  long int core;
-  unsigned int threads = 0;
-  const struct cpu_features *cpu_features = __get_cpu_features ();
-
-  if (cpu_features->basic.kind == arch_kind_intel)
-    {
-      data = handle_intel (_SC_LEVEL1_DCACHE_SIZE, cpu_features);
-      core = handle_intel (_SC_LEVEL2_CACHE_SIZE, cpu_features);
-      shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, cpu_features);
-      get_common_cache_info (&shared, &threads, core);
-    }
-  else if (cpu_features->basic.kind == arch_kind_zhaoxin)
-    {
-      data = handle_zhaoxin (_SC_LEVEL1_DCACHE_SIZE);
-      core = handle_zhaoxin (_SC_LEVEL2_CACHE_SIZE);
-      shared = handle_zhaoxin (_SC_LEVEL3_CACHE_SIZE);
-      get_common_cache_info (&shared, &threads, core);
-    }
-  else if (cpu_features->basic.kind == arch_kind_amd)
-    {
-      data = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
-      long int core = handle_amd (_SC_LEVEL2_CACHE_SIZE);
-      shared = handle_amd (_SC_LEVEL3_CACHE_SIZE);
-
-      /* Get maximum extended function.  */
-      __cpuid (0x80000000, max_cpuid_ex, ebx, ecx, edx);
-
-      if (shared <= 0)
-        /* No shared L3 cache.  All we have is the L2 cache.  */
-        shared = core;
-      else
-        {
-          /* Figure out the number of logical threads that share L3.  */
-          if (max_cpuid_ex >= 0x80000008)
-            {
-              /* Get width of APIC ID.  */
-              __cpuid (0x80000008, max_cpuid_ex, ebx, ecx, edx);
-              threads = 1 << ((ecx >> 12) & 0x0f);
-            }
-
-          if (threads == 0 || cpu_features->basic.family >= 0x17)
-            {
-              /* If APIC ID width is not available, use logical
-                 processor count.  */
-              __cpuid (0x00000001, max_cpuid_ex, ebx, ecx, edx);
-              if ((edx & (1 << 28)) != 0)
-                threads = (ebx >> 16) & 0xff;
-            }
-
-          /* Cap usage of highest cache level to the number of
-             supported threads.  */
-          if (threads > 0)
-            shared /= threads;
-
-          /* Get shared cache per ccx for Zen architectures.  */
-          if (cpu_features->basic.family >= 0x17)
-            {
-              unsigned int eax;
-
-              /* Get number of threads share the L3 cache in CCX.  */
-              __cpuid_count (0x8000001D, 0x3, eax, ebx, ecx, edx);
-
-              unsigned int threads_per_ccx = ((eax >> 14) & 0xfff) + 1;
-              shared *= threads_per_ccx;
-            }
-          else
-            {
-              /* Account for exclusive L2 and L3 caches.  */
-              shared += core;
-            }
-        }
-    }
-
-  /* Prefer cache size configure via tuning.  */
-  if (cpu_features->data_cache_size != 0)
-    data = cpu_features->data_cache_size;
-
-  if (data > 0)
-    {
-      __x86_raw_data_cache_size_half = data / 2;
-      __x86_raw_data_cache_size = data;
-      /* Round data cache size to multiple of 256 bytes.  */
-      data = data & ~255L;
-      __x86_data_cache_size_half = data / 2;
-      __x86_data_cache_size = data;
-    }
-
-  /* Prefer cache size configure via tuning.  */
-  if (cpu_features->shared_cache_size != 0)
-    shared = cpu_features->shared_cache_size;
-
-  if (shared > 0)
-    {
-      __x86_raw_shared_cache_size_half = shared / 2;
-      __x86_raw_shared_cache_size = shared;
-      /* Round shared cache size to multiple of 256 bytes.  */
-      shared = shared & ~255L;
-      __x86_shared_cache_size_half = shared / 2;
-      __x86_shared_cache_size = shared;
-    }
-
-  /* The default setting for the non_temporal threshold is 3/4 of one
-     thread's share of the chip's cache. For most Intel and AMD processors
-     with an initial release date between 2017 and 2020, a thread's typical
-     share of the cache is from 500 KBytes to 2 MBytes. Using the 3/4
-     threshold leaves 125 KBytes to 500 KBytes of the thread's data
-     in cache after a maximum temporal copy, which will maintain
-     in cache a reasonable portion of the thread's stack and other
-     active data. If the threshold is set higher than one thread's
-     share of the cache, it has a substantial risk of negatively
-     impacting the performance of other threads running on the chip. */
-  __x86_shared_non_temporal_threshold
-    = (cpu_features->non_temporal_threshold != 0
-       ? cpu_features->non_temporal_threshold
-       : __x86_shared_cache_size * 3 / 4);
-
-  /* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8.  */
-  unsigned int minimum_rep_movsb_threshold;
-  /* NB: The default REP MOVSB threshold is 2048 * (VEC_SIZE / 16).  */
-  unsigned int rep_movsb_threshold;
-  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
-      && !CPU_FEATURE_PREFERRED_P (cpu_features, Prefer_No_AVX512))
-    {
-      rep_movsb_threshold = 2048 * (64 / 16);
-      minimum_rep_movsb_threshold = 64 * 8;
-    }
-  else if (CPU_FEATURE_PREFERRED_P (cpu_features,
-                                    AVX_Fast_Unaligned_Load))
-    {
-      rep_movsb_threshold = 2048 * (32 / 16);
-      minimum_rep_movsb_threshold = 32 * 8;
-    }
-  else
-    {
-      rep_movsb_threshold = 2048 * (16 / 16);
-      minimum_rep_movsb_threshold = 16 * 8;
-    }
-  if (cpu_features->rep_movsb_threshold > minimum_rep_movsb_threshold)
-    __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
-  else
-    __x86_rep_movsb_threshold = rep_movsb_threshold;
+  /* NB: In libc.so, cpu_features is defined in ld.so and is initialized
+     by DL_PLATFORM_INIT or IFUNC relocation before init_cacheinfo is
+     called by IFUNC relocation.  In libc.a, init_cacheinfo is called
+     from init_cpu_features by ARCH_INIT_CPU_FEATURES.  */
+  const struct cpu_features *cpu_features = __get_cpu_features ();
+  assert (cpu_features->basic.kind != arch_kind_unknown);
+
+  long int data = cpu_features->data_cache_size;
+  __x86_raw_data_cache_size_half = data / 2;
+  __x86_raw_data_cache_size = data;
+  /* Round data cache size to multiple of 256 bytes.  */
+  data = data & ~255L;
+  __x86_data_cache_size_half = data / 2;
+  __x86_data_cache_size = data;
+
+  long int shared = cpu_features->shared_cache_size;
+  __x86_raw_shared_cache_size_half = shared / 2;
+  __x86_raw_shared_cache_size = shared;
+  /* Round shared cache size to multiple of 256 bytes.  */
+  shared = shared & ~255L;
+  __x86_shared_cache_size_half = shared / 2;
+  __x86_shared_cache_size = shared;
+
+  __x86_shared_non_temporal_threshold
+    = cpu_features->non_temporal_threshold;
+
+# if HAVE_TUNABLES
+  __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
+  __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;
+# endif
 }
#endif
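A sketch of why these globals exist (editor's illustration, not part of the patch): the real consumers are the multiarch memcpy/memset implementations, which pick a copy strategy by comparing the request size against these thresholds. The stand-in values below are just this file's static defaults; at runtime init_cacheinfo overwrites them from cpu_features.

#include <stddef.h>

/* Stand-ins for the hidden libc globals defined above.  */
static long int rep_movsb_threshold = 2048;
static long int non_temporal_threshold = (1024 * 1024 * 3) / 4;

enum copy_strategy { VECTOR_LOOP, REP_MOVSB, NON_TEMPORAL };

static enum copy_strategy
pick_strategy (size_t n)
{
  if (n >= (size_t) non_temporal_threshold)
    return NON_TEMPORAL;   /* bypass caches; a temporal copy would evict too much */
  if (n >= (size_t) rep_movsb_threshold)
    return REP_MOVSB;      /* ERMS microcode beats the vector loop here */
  return VECTOR_LOOP;      /* small sizes: plain vectorized copy */
}

int
main (void)
{
  return pick_strategy (4096) == REP_MOVSB ? 0 : 1;
}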

diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c

@@ -16,22 +16,13 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
 #include <cpuid.h>
 #include <dl-hwcap.h>
 #include <libc-pointer-arith.h>
 #include <get-isa-level.h>
-
-#if IS_IN (libc) && !defined SHARED
-# include <assert.h>
-# include <unistd.h>
-# include <dl-cacheinfo.h>
-# include <cacheinfo.h>
-#endif
+#include <cacheinfo.h>
+#include <dl-cacheinfo.h>
 
 #if HAVE_TUNABLES
 # define TUNABLE_NAMESPACE cpu
 # include <unistd.h>		/* Get STDOUT_FILENO for _dl_printf.  */
 # include <elf/dl-tunables.h>
 
 extern void TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *)
   attribute_hidden;
@@ -646,24 +637,14 @@ no_cpuid:
   cpu_features->basic.model = model;
   cpu_features->basic.stepping = stepping;
 
+  dl_init_cacheinfo (cpu_features);
+
 #if HAVE_TUNABLES
   TUNABLE_GET (hwcaps, tunable_val_t *, TUNABLE_CALLBACK (set_hwcaps));
-  cpu_features->non_temporal_threshold
-    = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
-  cpu_features->rep_movsb_threshold
-    = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
-  cpu_features->rep_stosb_threshold
-    = TUNABLE_GET (x86_rep_stosb_threshold, long int, NULL);
-  cpu_features->data_cache_size
-    = TUNABLE_GET (x86_data_cache_size, long int, NULL);
-  cpu_features->shared_cache_size
-    = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
-#endif
-
-  /* Reuse dl_platform, dl_hwcap and dl_hwcap_mask for x86.  */
-#if !HAVE_TUNABLES && defined SHARED
-  /* The glibc.cpu.hwcap_mask tunable is initialized already, so no need to do
-     this.  */
+#elif defined SHARED
+  /* Reuse dl_platform, dl_hwcap and dl_hwcap_mask for x86.  The
+     glibc.cpu.hwcap_mask tunable is initialized already, so no
+     need to do this.  */
   GLRO(dl_hwcap_mask) = HWCAP_IMPORTANT;
 #endif
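Although the per-threshold TUNABLE_GET calls move into dl_init_cacheinfo, the external knobs are unchanged: the glibc.cpu.* tunables are still read from the GLIBC_TUNABLES environment variable at process startup. A minimal launcher, for reference (the tunable name matches the x86_non_temporal_threshold tunable used above):

#include <stdlib.h>
#include <unistd.h>

int
main (int argc, char **argv)
{
  /* Tunables are read once at startup, so set the variable and re-exec;
     1048576 makes copies of roughly 1 MiB and up use non-temporal stores.  */
  setenv ("GLIBC_TUNABLES",
          "glibc.cpu.x86_non_temporal_threshold=1048576", 1);
  if (argc > 1)
    execvp (argv[1], &argv[1]);
  return 127;
}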

diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h

@@ -476,3 +476,463 @@ handle_zhaoxin (int name)
/* Nothing found. */
return 0;
}
static void
get_common_cache_info (long int *shared_ptr, unsigned int *threads_ptr,
long int core)
{
unsigned int eax;
unsigned int ebx;
unsigned int ecx;
unsigned int edx;
/* Number of logical processors sharing L2 cache. */
int threads_l2;
/* Number of logical processors sharing L3 cache. */
int threads_l3;
const struct cpu_features *cpu_features = __get_cpu_features ();
int max_cpuid = cpu_features->basic.max_cpuid;
unsigned int family = cpu_features->basic.family;
unsigned int model = cpu_features->basic.model;
long int shared = *shared_ptr;
unsigned int threads = *threads_ptr;
bool inclusive_cache = true;
bool support_count_mask = true;
/* Try L3 first. */
unsigned int level = 3;
if (cpu_features->basic.kind == arch_kind_zhaoxin && family == 6)
support_count_mask = false;
if (shared <= 0)
{
/* Try L2 otherwise. */
level = 2;
shared = core;
threads_l2 = 0;
threads_l3 = -1;
}
else
{
threads_l2 = 0;
threads_l3 = 0;
}
/* A value of 0 for the HTT bit indicates there is only a single
logical processor. */
if (HAS_CPU_FEATURE (HTT))
{
/* Figure out the number of logical threads that share the
highest cache level. */
if (max_cpuid >= 4)
{
int i = 0;
/* Query until cache level 2 and 3 are enumerated. */
int check = 0x1 | (threads_l3 == 0) << 1;
do
{
__cpuid_count (4, i++, eax, ebx, ecx, edx);
/* There seems to be a bug in at least some Pentium Ds
which sometimes fail to iterate all cache parameters.
Do not loop indefinitely here, stop in this case and
assume there is no such information. */
if (cpu_features->basic.kind == arch_kind_intel
&& (eax & 0x1f) == 0)
goto intel_bug_no_cache_info;
switch ((eax >> 5) & 0x7)
{
default:
break;
case 2:
if ((check & 0x1))
{
/* Get maximum number of logical processors
sharing L2 cache. */
threads_l2 = (eax >> 14) & 0x3ff;
check &= ~0x1;
}
break;
case 3:
if ((check & (0x1 << 1)))
{
/* Get maximum number of logical processors
sharing L3 cache. */
threads_l3 = (eax >> 14) & 0x3ff;
/* Check if L2 and L3 caches are inclusive. */
inclusive_cache = (edx & 0x2) != 0;
check &= ~(0x1 << 1);
}
break;
}
}
while (check);
/* If max_cpuid >= 11, THREADS_L2/THREADS_L3 are the maximum
numbers of addressable IDs for logical processors sharing
the cache, instead of the maximum number of threads
sharing the cache. */
if (max_cpuid >= 11 && support_count_mask)
{
/* Find the number of logical processors shipped in
one core and apply count mask. */
i = 0;
/* Count SMT only if there is L3 cache. Always count
core if there is no L3 cache. */
int count = ((threads_l2 > 0 && level == 3)
| ((threads_l3 > 0
|| (threads_l2 > 0 && level == 2)) << 1));
while (count)
{
__cpuid_count (11, i++, eax, ebx, ecx, edx);
int shipped = ebx & 0xff;
int type = ecx & 0xff00;
if (shipped == 0 || type == 0)
break;
else if (type == 0x100)
{
/* Count SMT. */
if ((count & 0x1))
{
int count_mask;
/* Compute count mask. */
asm ("bsr %1, %0"
: "=r" (count_mask) : "g" (threads_l2));
count_mask = ~(-1 << (count_mask + 1));
threads_l2 = (shipped - 1) & count_mask;
count &= ~0x1;
}
}
else if (type == 0x200)
{
/* Count core. */
if ((count & (0x1 << 1)))
{
int count_mask;
int threads_core
= (level == 2 ? threads_l2 : threads_l3);
/* Compute count mask. */
asm ("bsr %1, %0"
: "=r" (count_mask) : "g" (threads_core));
count_mask = ~(-1 << (count_mask + 1));
threads_core = (shipped - 1) & count_mask;
if (level == 2)
threads_l2 = threads_core;
else
threads_l3 = threads_core;
count &= ~(0x1 << 1);
}
}
}
}
if (threads_l2 > 0)
threads_l2 += 1;
if (threads_l3 > 0)
threads_l3 += 1;
if (level == 2)
{
if (threads_l2)
{
threads = threads_l2;
if (cpu_features->basic.kind == arch_kind_intel
&& threads > 2
&& family == 6)
switch (model)
{
case 0x37:
case 0x4a:
case 0x4d:
case 0x5a:
case 0x5d:
/* Silvermont has L2 cache shared by 2 cores. */
threads = 2;
break;
default:
break;
}
}
}
else if (threads_l3)
threads = threads_l3;
}
else
{
intel_bug_no_cache_info:
/* Assume that all logical threads share the highest cache
level. */
threads
= ((cpu_features->features[COMMON_CPUID_INDEX_1].cpuid.ebx
>> 16) & 0xff);
}
/* Cap usage of highest cache level to the number of supported
threads. */
if (shared > 0 && threads > 0)
shared /= threads;
}
/* Account for non-inclusive L2 and L3 caches. */
if (!inclusive_cache)
{
if (threads_l2 > 0)
core /= threads_l2;
shared += core;
}
*shared_ptr = shared;
*threads_ptr = threads;
}
static void
dl_init_cacheinfo (struct cpu_features *cpu_features)
{
/* Find out what brand of processor. */
unsigned int ebx;
unsigned int ecx;
unsigned int edx;
int max_cpuid_ex;
long int data = -1;
long int shared = -1;
long int core;
unsigned int threads = 0;
unsigned long int level1_icache_size = -1;
unsigned long int level1_dcache_size = -1;
unsigned long int level1_dcache_assoc = -1;
unsigned long int level1_dcache_linesize = -1;
unsigned long int level2_cache_size = -1;
unsigned long int level2_cache_assoc = -1;
unsigned long int level2_cache_linesize = -1;
unsigned long int level3_cache_size = -1;
unsigned long int level3_cache_assoc = -1;
unsigned long int level3_cache_linesize = -1;
unsigned long int level4_cache_size = -1;
if (cpu_features->basic.kind == arch_kind_intel)
{
data = handle_intel (_SC_LEVEL1_DCACHE_SIZE, cpu_features);
core = handle_intel (_SC_LEVEL2_CACHE_SIZE, cpu_features);
shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, cpu_features);
level1_icache_size
= handle_intel (_SC_LEVEL1_ICACHE_SIZE, cpu_features);
level1_dcache_size = data;
level1_dcache_assoc
= handle_intel (_SC_LEVEL1_DCACHE_ASSOC, cpu_features);
level1_dcache_linesize
= handle_intel (_SC_LEVEL1_DCACHE_LINESIZE, cpu_features);
level2_cache_size = core;
level2_cache_assoc
= handle_intel (_SC_LEVEL2_CACHE_ASSOC, cpu_features);
level2_cache_linesize
= handle_intel (_SC_LEVEL2_CACHE_LINESIZE, cpu_features);
level3_cache_size = shared;
level3_cache_assoc
= handle_intel (_SC_LEVEL3_CACHE_ASSOC, cpu_features);
level3_cache_linesize
= handle_intel (_SC_LEVEL3_CACHE_LINESIZE, cpu_features);
level4_cache_size
= handle_intel (_SC_LEVEL4_CACHE_SIZE, cpu_features);
get_common_cache_info (&shared, &threads, core);
}
else if (cpu_features->basic.kind == arch_kind_zhaoxin)
{
data = handle_zhaoxin (_SC_LEVEL1_DCACHE_SIZE);
core = handle_zhaoxin (_SC_LEVEL2_CACHE_SIZE);
shared = handle_zhaoxin (_SC_LEVEL3_CACHE_SIZE);
level1_icache_size = handle_zhaoxin (_SC_LEVEL1_ICACHE_SIZE);
level1_dcache_size = data;
level1_dcache_assoc = handle_zhaoxin (_SC_LEVEL1_DCACHE_ASSOC);
level1_dcache_linesize = handle_zhaoxin (_SC_LEVEL1_DCACHE_LINESIZE);
level2_cache_size = core;
level2_cache_assoc = handle_zhaoxin (_SC_LEVEL2_CACHE_ASSOC);
level2_cache_linesize = handle_zhaoxin (_SC_LEVEL2_CACHE_LINESIZE);
level3_cache_size = shared;
level3_cache_assoc = handle_zhaoxin (_SC_LEVEL3_CACHE_ASSOC);
level3_cache_linesize = handle_zhaoxin (_SC_LEVEL3_CACHE_LINESIZE);
get_common_cache_info (&shared, &threads, core);
}
else if (cpu_features->basic.kind == arch_kind_amd)
{
data = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
core = handle_amd (_SC_LEVEL2_CACHE_SIZE);
shared = handle_amd (_SC_LEVEL3_CACHE_SIZE);
level1_icache_size = handle_amd (_SC_LEVEL1_ICACHE_SIZE);
level1_dcache_size = data;
level1_dcache_assoc = handle_amd (_SC_LEVEL1_DCACHE_ASSOC);
level1_dcache_linesize = handle_amd (_SC_LEVEL1_DCACHE_LINESIZE);
level2_cache_size = core;
level2_cache_assoc = handle_amd (_SC_LEVEL2_CACHE_ASSOC);
level2_cache_linesize = handle_amd (_SC_LEVEL2_CACHE_LINESIZE);
level3_cache_size = shared;
level3_cache_assoc = handle_amd (_SC_LEVEL3_CACHE_ASSOC);
level3_cache_linesize = handle_amd (_SC_LEVEL3_CACHE_LINESIZE);
/* Get maximum extended function. */
__cpuid (0x80000000, max_cpuid_ex, ebx, ecx, edx);
if (shared <= 0)
/* No shared L3 cache. All we have is the L2 cache. */
shared = core;
else
{
/* Figure out the number of logical threads that share L3. */
if (max_cpuid_ex >= 0x80000008)
{
/* Get width of APIC ID. */
__cpuid (0x80000008, max_cpuid_ex, ebx, ecx, edx);
threads = 1 << ((ecx >> 12) & 0x0f);
}
if (threads == 0 || cpu_features->basic.family >= 0x17)
{
/* If APIC ID width is not available, use logical
processor count. */
__cpuid (0x00000001, max_cpuid_ex, ebx, ecx, edx);
if ((edx & (1 << 28)) != 0)
threads = (ebx >> 16) & 0xff;
}
/* Cap usage of highest cache level to the number of
supported threads. */
if (threads > 0)
shared /= threads;
/* Get shared cache per ccx for Zen architectures. */
if (cpu_features->basic.family >= 0x17)
{
unsigned int eax;
/* Get the number of threads sharing the L3 cache in a CCX.  */
__cpuid_count (0x8000001D, 0x3, eax, ebx, ecx, edx);
unsigned int threads_per_ccx = ((eax >> 14) & 0xfff) + 1;
shared *= threads_per_ccx;
}
else
{
/* Account for exclusive L2 and L3 caches. */
shared += core;
}
}
}
cpu_features->level1_icache_size = level1_icache_size;
cpu_features->level1_dcache_size = level1_dcache_size;
cpu_features->level1_dcache_assoc = level1_dcache_assoc;
cpu_features->level1_dcache_linesize = level1_dcache_linesize;
cpu_features->level2_cache_size = level2_cache_size;
cpu_features->level2_cache_assoc = level2_cache_assoc;
cpu_features->level2_cache_linesize = level2_cache_linesize;
cpu_features->level3_cache_size = level3_cache_size;
cpu_features->level3_cache_assoc = level3_cache_assoc;
cpu_features->level3_cache_linesize = level3_cache_linesize;
cpu_features->level4_cache_size = level4_cache_size;
/* The default setting for the non_temporal threshold is 3/4 of one
thread's share of the chip's cache. For most Intel and AMD processors
with an initial release date between 2017 and 2020, a thread's typical
share of the cache is from 500 KBytes to 2 MBytes. Using the 3/4
threshold leaves 125 KBytes to 500 KBytes of the thread's data
in cache after a maximum temporal copy, which will maintain
in cache a reasonable portion of the thread's stack and other
active data. If the threshold is set higher than one thread's
share of the cache, it has a substantial risk of negatively
impacting the performance of other threads running on the chip. */
unsigned long int non_temporal_threshold = shared * 3 / 4;
#if HAVE_TUNABLES
/* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8. */
unsigned int minimum_rep_movsb_threshold;
#endif
/* NB: The default REP MOVSB threshold is 2048 * (VEC_SIZE / 16). */
unsigned int rep_movsb_threshold;
if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
&& !CPU_FEATURE_PREFERRED_P (cpu_features, Prefer_No_AVX512))
{
rep_movsb_threshold = 2048 * (64 / 16);
#if HAVE_TUNABLES
minimum_rep_movsb_threshold = 64 * 8;
#endif
}
else if (CPU_FEATURE_PREFERRED_P (cpu_features,
AVX_Fast_Unaligned_Load))
{
rep_movsb_threshold = 2048 * (32 / 16);
#if HAVE_TUNABLES
minimum_rep_movsb_threshold = 32 * 8;
#endif
}
else
{
rep_movsb_threshold = 2048 * (16 / 16);
#if HAVE_TUNABLES
minimum_rep_movsb_threshold = 16 * 8;
#endif
}
/* The default threshold to use Enhanced REP STOSB. */
unsigned long int rep_stosb_threshold = 2048;
#if HAVE_TUNABLES
long int tunable_size;
tunable_size = TUNABLE_GET (x86_data_cache_size, long int, NULL);
/* NB: Ignore the default value 0. */
if (tunable_size != 0)
data = tunable_size;
tunable_size = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
/* NB: Ignore the default value 0. */
if (tunable_size != 0)
shared = tunable_size;
tunable_size = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
/* NB: Ignore the default value 0. */
if (tunable_size != 0)
non_temporal_threshold = tunable_size;
tunable_size = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
if (tunable_size > minimum_rep_movsb_threshold)
rep_movsb_threshold = tunable_size;
/* NB: The default value of the x86_rep_stosb_threshold tunable is the
same as the default value of __x86_rep_stosb_threshold and the
minimum value is fixed. */
rep_stosb_threshold = TUNABLE_GET (x86_rep_stosb_threshold,
long int, NULL);
TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, long int, data,
0, (long int) -1);
TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, long int, shared,
0, (long int) -1);
TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, long int,
non_temporal_threshold, 0, (long int) -1);
TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, long int,
rep_movsb_threshold,
minimum_rep_movsb_threshold, (long int) -1);
TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, long int,
rep_stosb_threshold, 1, (long int) -1);
#endif
cpu_features->data_cache_size = data;
cpu_features->shared_cache_size = shared;
cpu_features->non_temporal_threshold = non_temporal_threshold;
cpu_features->rep_movsb_threshold = rep_movsb_threshold;
cpu_features->rep_stosb_threshold = rep_stosb_threshold;
}
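The 3/4 default described in the comment above, with concrete (illustrative) numbers:

#include <stdio.h>

int
main (void)
{
  /* Suppose detection yields a 2 MiB per-thread share of the shared
     cache, the upper end of the 500 KB to 2 MB range cited above.  */
  long shared = 2 * 1024 * 1024;
  long non_temporal = shared * 3 / 4;   /* 1572864 bytes = 1.5 MiB */
  /* shared / 4 = 512 KiB of the thread's data can stay resident
     after a maximal temporal copy.  */
  printf ("default x86_non_temporal_threshold: %ld\n", non_temporal);
  return 0;
}

TUNABLE_SET_WITH_BOUNDS then publishes each computed value back to the tunables framework together with its valid range, so the tunable's stored value and its clamping bounds match what dl_init_cacheinfo decided.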

diff --git a/sysdeps/x86/include/cpu-features.h b/sysdeps/x86/include/cpu-features.h

@@ -149,6 +149,28 @@ struct cpu_features
   unsigned long int rep_movsb_threshold;
   /* Threshold to use "rep stosb".  */
   unsigned long int rep_stosb_threshold;
   /* _SC_LEVEL1_ICACHE_SIZE.  */
   unsigned long int level1_icache_size;
   /* _SC_LEVEL1_DCACHE_SIZE.  */
   unsigned long int level1_dcache_size;
   /* _SC_LEVEL1_DCACHE_ASSOC.  */
   unsigned long int level1_dcache_assoc;
   /* _SC_LEVEL1_DCACHE_LINESIZE.  */
   unsigned long int level1_dcache_linesize;
   /* _SC_LEVEL2_CACHE_SIZE.  */
   unsigned long int level2_cache_size;
   /* _SC_LEVEL2_CACHE_ASSOC.  */
   unsigned long int level2_cache_assoc;
   /* _SC_LEVEL2_CACHE_LINESIZE.  */
   unsigned long int level2_cache_linesize;
   /* _SC_LEVEL3_CACHE_SIZE.  */
   unsigned long int level3_cache_size;
   /* _SC_LEVEL3_CACHE_ASSOC.  */
   unsigned long int level3_cache_assoc;
   /* _SC_LEVEL3_CACHE_LINESIZE.  */
   unsigned long int level3_cache_linesize;
   /* _SC_LEVEL4_CACHE_SIZE.  */
   unsigned long int level4_cache_size;
 };
 
 #if defined (_LIBC) && !IS_IN (nonlib)
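One non-obvious idiom in get_common_cache_info above, sketched standalone (editor's illustration; the helper name is ours): CPUID leaf 4 reports maximum addressable IDs sharing a cache, a power-of-two allocation, while leaf 11 reports shipped logical-processor counts. The code therefore builds a mask covering the ID field with BSR and applies it to (shipped - 1). x86-only, mirroring the inline asm above:

#include <stdio.h>

static unsigned int
id_field_mask (unsigned int max_ids)
{
  unsigned int msb;
  /* Position of the highest set bit, as in the inline "bsr" above.  */
  asm ("bsr %1, %0" : "=r" (msb) : "g" (max_ids));
  return ~(-1 << (msb + 1));
}

int
main (void)
{
  /* max_ids = 3 (four addressable IDs minus 1): mask = 0x3.  */
  printf ("%#x\n", id_field_mask (3));
  /* max_ids = 5: highest bit is 2, so mask = 0x7.  */
  printf ("%#x\n", id_field_mask (5));
  return 0;
}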