From e989cc0f83949acf9127b83c6a3efc9f797fa78c Mon Sep 17 00:00:00 2001
From: zhangjian
Date: Thu, 6 Nov 2025 22:33:56 +0800
Subject: [PATCH] add back delete patch

Signed-off-by: zhangjian
(cherry picked from commit 58666705112203fab0b35385d2b1a27da7aa9819)
---
 glibc.spec                                 |  6 +++-
 strcmp-delete-align-for-loop_aligned.patch | 32 ++++++++++++++++++++++
 2 files changed, 37 insertions(+), 1 deletion(-)
 create mode 100644 strcmp-delete-align-for-loop_aligned.patch

diff --git a/glibc.spec b/glibc.spec
index e58260f1..5b986f17 100644
--- a/glibc.spec
+++ b/glibc.spec
@@ -67,7 +67,7 @@
 ##############################################################################
 Name: glibc
 Version: 2.38
-Release: 81
+Release: 82
 Summary: The GNU libc libraries
 License: %{all_license}
 URL: http://www.gnu.org/software/glibc/
@@ -418,6 +418,7 @@
 Patch9038: backport-Fix-UB-on__dl_map_object_from_fd.patch
 Patch9039: backport-Fix-handling-of-symbol-versions-which-hash-to-zero.patch
 Patch9040: 0024-Sw64-Change-libdir-from-lib-to-lib64.patch
 Patch9041: AArch64-modify_the_SVE_memcpy_implementation_for_32-byte_aligned_access.patch
+Patch9042: strcmp-delete-align-for-loop_aligned.patch
 
 Provides: ldconfig rtld(GNU_HASH) bundled(gnulib)
@@ -1603,6 +1604,9 @@
 fi
 %endif
 
 %changelog
+* Thu Nov 20 2025 zhangjian - 2.38-82
+- strcmp: delete align for loop_aligned
+
 * Wed Nov 19 2025 Qingqing Li - 2.38-81
 - AArch64: Optimise SVE scalar callbacks
diff --git a/strcmp-delete-align-for-loop_aligned.patch b/strcmp-delete-align-for-loop_aligned.patch
new file mode 100644
index 00000000..cf5b15a2
--- /dev/null
+++ b/strcmp-delete-align-for-loop_aligned.patch
@@ -0,0 +1,32 @@
+From 9bbffed83b93f633b272368fc536a4f24e9942e6 Mon Sep 17 00:00:00 2001
+From: Yang Yanchao
+Date: Mon, 21 Feb 2022 14:25:25 +0800
+Subject: [PATCH] strcmp: delete align for loop_aligned
+
+In Kunpeng-920, the performance of strcmp deteriorates only
+when the 16 to 23 characters are different. Or the string is
+only 16-23 characters. That shows 2 misses per iteration which
+means this is a branch predictor issue indeed.
+In the preceding scenario, strcmp performance is 300% worse than expected.
+
+Fortunately, this problem can be solved by modifying the alignment of the functions.
+---
+ sysdeps/aarch64/strcmp.S | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/sysdeps/aarch64/strcmp.S b/sysdeps/aarch64/strcmp.S
+index f225d718..7a048b66 100644
+--- a/sysdeps/aarch64/strcmp.S
++++ b/sysdeps/aarch64/strcmp.S
+@@ -71,8 +71,6 @@ ENTRY(strcmp)
+ 	b.ne	L(misaligned8)
+ 	cbnz	tmp, L(mutual_align)
+ 
+-	.p2align 4
+-
+ L(loop_aligned):
+ 	ldr	data2, [src1, off2]
+ 	ldr	data1, [src1], 8
+-- 
+2.33.0
+
-- 
Gitee