From 06d79808f6faf6025c5a7d4e27d949a8216275cc Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Fri, 28 Apr 2017 10:04:15 -0700
Subject: x86: Use AVX2 memcpy/memset on Skylake server [BZ #21396]

On Skylake server, AVX512 load/store instructions in memcpy/memset may
lead to lower CPU turbo frequency in certain situations.  Use of AVX2
in memcpy/memset has been observed to improve overall performance in
many workloads because of the higher frequency.

Since AVX512ER is unique to Xeon Phi, this patch sets Prefer_No_AVX512
if AVX512ER isn't available so that AVX2 versions of memcpy/memset are
used on Skylake server.

	[BZ #21396]
	* sysdeps/x86/cpu-features.c (init_cpu_features): Set
	Prefer_No_AVX512 if AVX512ER isn't available.
	* sysdeps/x86/cpu-features.h (bit_arch_Prefer_No_AVX512): New.
	(index_arch_Prefer_No_AVX512): Likewise.
	* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Don't use
	AVX512 version if Prefer_No_AVX512 is set.
	* sysdeps/x86_64/multiarch/memcpy_chk.S (__memcpy_chk):
	Likewise.
	* sysdeps/x86_64/multiarch/memmove.S (__libc_memmove): Likewise.
	* sysdeps/x86_64/multiarch/memmove_chk.S (__memmove_chk):
	Likewise.
	* sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Likewise.
	* sysdeps/x86_64/multiarch/mempcpy_chk.S (__mempcpy_chk):
	Likewise.
	* sysdeps/x86_64/multiarch/memset.S (memset): Likewise.
	* sysdeps/x86_64/multiarch/memset_chk.S (__memset_chk):
	Likewise.

(cherry picked from commit 4cb334c4d6249686653137ec273d081371b3672d)

diff --git a/ChangeLog b/ChangeLog
index dc49c78b8c..adebc03b78 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,26 @@
 2017-04-28  H.J. Lu  <hjl.tools@gmail.com>
 
+	[BZ #21396]
+	* sysdeps/x86/cpu-features.c (init_cpu_features): Set
+	Prefer_No_AVX512 if AVX512ER isn't available.
+	* sysdeps/x86/cpu-features.h (bit_arch_Prefer_No_AVX512): New.
+	(index_arch_Prefer_No_AVX512): Likewise.
+	* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Don't use
+	AVX512 version if Prefer_No_AVX512 is set.
+	* sysdeps/x86_64/multiarch/memcpy_chk.S (__memcpy_chk):
+	Likewise.
+	* sysdeps/x86_64/multiarch/memmove.S (__libc_memmove): Likewise.
+	* sysdeps/x86_64/multiarch/memmove_chk.S (__memmove_chk):
+	Likewise.
+	* sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Likewise.
+	* sysdeps/x86_64/multiarch/mempcpy_chk.S (__mempcpy_chk):
+	Likewise.
+	* sysdeps/x86_64/multiarch/memset.S (memset): Likewise.
+	* sysdeps/x86_64/multiarch/memset_chk.S (__memset_chk):
+	Likewise.
+
+2017-04-28  H.J. Lu  <hjl.tools@gmail.com>
+
 	* sysdeps/x86/cpu-features.c (init_cpu_features): Set
 	Prefer_No_VZEROUPPER if AVX512ER is available.
 	* sysdeps/x86/cpu-features.h
diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index 41d0be2815..9afd74c42e 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -225,10 +225,14 @@ init_cpu_features (struct cpu_features *cpu_features)
 	    |= bit_arch_AVX_Fast_Unaligned_Load;
 
       /* Since AVX512ER is unique to Xeon Phi, set Prefer_No_VZEROUPPER
-	 if AVX512ER is available.  */
+	 if AVX512ER is available.  Don't use AVX512 to avoid lower CPU
+	 frequency if AVX512ER isn't available.  */
       if (CPU_FEATURES_CPU_P (cpu_features, AVX512ER))
 	cpu_features->feature[index_arch_Prefer_No_VZEROUPPER]
 	  |= bit_arch_Prefer_No_VZEROUPPER;
+      else
+	cpu_features->feature[index_arch_Prefer_No_AVX512]
+	  |= bit_arch_Prefer_No_AVX512;
 
       /* To avoid SSE transition penalty, use _dl_runtime_resolve_slow.
 	 If XGETBV suports ECX == 1, use _dl_runtime_resolve_opt.  */
diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
index 2ee8a0a350..a409db67d8 100644
--- a/sysdeps/x86/cpu-features.h
+++ b/sysdeps/x86/cpu-features.h
@@ -39,6 +39,7 @@
 #define bit_arch_Prefer_ERMS			(1 << 19)
 #define bit_arch_Use_dl_runtime_resolve_opt	(1 << 20)
 #define bit_arch_Use_dl_runtime_resolve_slow	(1 << 21)
+#define bit_arch_Prefer_No_AVX512		(1 << 22)
 
 /* CPUID Feature flags.  */
 
@@ -116,6 +117,7 @@
 # define index_arch_Prefer_ERMS		FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Use_dl_runtime_resolve_opt FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Use_dl_runtime_resolve_slow FEATURE_INDEX_1*FEATURE_SIZE
+# define index_arch_Prefer_No_AVX512	FEATURE_INDEX_1*FEATURE_SIZE
 
 # if defined (_LIBC) && !IS_IN (nonlib)
 
@@ -298,6 +300,7 @@ extern const struct cpu_features *__get_cpu_features (void)
 # define index_arch_Prefer_ERMS		FEATURE_INDEX_1
 # define index_arch_Use_dl_runtime_resolve_opt FEATURE_INDEX_1
 # define index_arch_Use_dl_runtime_resolve_slow FEATURE_INDEX_1
+# define index_arch_Prefer_No_AVX512	FEATURE_INDEX_1
 
 #endif	/* !__ASSEMBLER__ */
diff --git a/sysdeps/x86_64/multiarch/memcpy.S b/sysdeps/x86_64/multiarch/memcpy.S
index 1f83ee3e84..af2770397c 100644
--- a/sysdeps/x86_64/multiarch/memcpy.S
+++ b/sysdeps/x86_64/multiarch/memcpy.S
@@ -32,6 +32,8 @@ ENTRY(__new_memcpy)
 	lea	__memcpy_erms(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (Prefer_ERMS)
 	jnz	2f
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	1f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__memcpy_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/memcpy_chk.S b/sysdeps/x86_64/multiarch/memcpy_chk.S
index 54923420f1..8737fb9755 100644
--- a/sysdeps/x86_64/multiarch/memcpy_chk.S
+++ b/sysdeps/x86_64/multiarch/memcpy_chk.S
@@ -30,6 +30,8 @@
 ENTRY(__memcpy_chk)
 	.type	__memcpy_chk, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	1f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__memcpy_chk_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/memmove.S b/sysdeps/x86_64/multiarch/memmove.S
index 2021bfc30c..8c534e83e0 100644
--- a/sysdeps/x86_64/multiarch/memmove.S
+++ b/sysdeps/x86_64/multiarch/memmove.S
@@ -30,6 +30,8 @@ ENTRY(__libc_memmove)
 	lea	__memmove_erms(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (Prefer_ERMS)
 	jnz	2f
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	1f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__memmove_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/memmove_chk.S b/sysdeps/x86_64/multiarch/memmove_chk.S
index 8a252adcae..7870dd0247 100644
--- a/sysdeps/x86_64/multiarch/memmove_chk.S
+++ b/sysdeps/x86_64/multiarch/memmove_chk.S
@@ -29,6 +29,8 @@
 ENTRY(__memmove_chk)
 	.type	__memmove_chk, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	1f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__memmove_chk_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/mempcpy.S b/sysdeps/x86_64/multiarch/mempcpy.S
index 79c840d075..b8b2b28094 100644
--- a/sysdeps/x86_64/multiarch/mempcpy.S
+++ b/sysdeps/x86_64/multiarch/mempcpy.S
@@ -32,6 +32,8 @@ ENTRY(__mempcpy)
 	lea	__mempcpy_erms(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (Prefer_ERMS)
 	jnz	2f
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	1f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__mempcpy_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/mempcpy_chk.S b/sysdeps/x86_64/multiarch/mempcpy_chk.S
index 6927962e81..072b22c49f 100644
--- a/sysdeps/x86_64/multiarch/mempcpy_chk.S
+++ b/sysdeps/x86_64/multiarch/mempcpy_chk.S
@@ -30,6 +30,8 @@
 ENTRY(__mempcpy_chk)
 	.type	__mempcpy_chk, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	1f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__mempcpy_chk_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/memset.S b/sysdeps/x86_64/multiarch/memset.S
index c958b2f49f..9d33118cf8 100644
--- a/sysdeps/x86_64/multiarch/memset.S
+++ b/sysdeps/x86_64/multiarch/memset.S
@@ -41,6 +41,8 @@ ENTRY(memset)
 	jnz	L(AVX512F)
 	lea	__memset_avx2_unaligned(%rip), %RAX_LP
 L(AVX512F):
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	2f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	2f
 	lea	__memset_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/memset_chk.S b/sysdeps/x86_64/multiarch/memset_chk.S
index 79eaa37bb6..7e08311cdf 100644
--- a/sysdeps/x86_64/multiarch/memset_chk.S
+++ b/sysdeps/x86_64/multiarch/memset_chk.S
@@ -38,6 +38,8 @@ ENTRY(__memset_chk)
 	jnz	L(AVX512F)
 	lea	__memset_chk_avx2_unaligned(%rip), %RAX_LP
 L(AVX512F):
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	2f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	2f
 	lea	__memset_chk_avx512_no_vzeroupper(%rip), %RAX_LP
-- 
2.13.1
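
As a rough illustration of what the IFUNC resolvers patched above do once
Prefer_No_AVX512 is in place, here is a C sketch.  It is not part of the
patch: the struct and select_memcpy() are hypothetical stand-ins for glibc's
cpu_features data and the assembly resolvers, and the real selection also
checks AVX_Fast_Unaligned_Load and other bits omitted here.

/* Illustrative sketch only -- not part of the patch.  Names below are
   hypothetical stand-ins; the real resolvers are the assembly IFUNC
   selectors modified above.  */
#include <stddef.h>
#include <stdbool.h>

struct features
{
  bool prefer_erms;
  bool prefer_no_avx512;	/* New bit, set when AVX512ER is absent.  */
  bool avx512f_usable;
  bool avx2_usable;		/* Stand-in for AVX_Fast_Unaligned_Load.  */
};

typedef void *(*memcpy_fn) (void *, const void *, size_t);

extern void *memcpy_erms (void *, const void *, size_t);
extern void *memcpy_avx512_no_vzeroupper (void *, const void *, size_t);
extern void *memcpy_avx2_unaligned (void *, const void *, size_t);
extern void *memcpy_sse2_unaligned (void *, const void *, size_t);

static memcpy_fn
select_memcpy (const struct features *f)
{
  if (f->prefer_erms)
    return memcpy_erms;
  /* The patch adds this test: when Prefer_No_AVX512 is set, the AVX512
     implementation is skipped even though AVX512F is usable, and the
     resolver falls through to the AVX2 implementation.  */
  if (f->avx512f_usable && !f->prefer_no_avx512)
    return memcpy_avx512_no_vzeroupper;
  if (f->avx2_usable)
    return memcpy_avx2_unaligned;
  return memcpy_sse2_unaligned;
}

On Skylake server, avx512f_usable is true but prefer_no_avx512 is also set
(AVX512ER is absent), so the AVX2 variant is chosen; on Xeon Phi, AVX512ER is
present, prefer_no_avx512 stays clear, and the AVX512 variant is still used.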