diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 87f568a94e558716c9ced5459803947bffd81adf..a45e49d5d857531fc22316cb96c2fbc84a9ffec8 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -16,6 +16,7 @@ __lse_atomic_##op(int i, atomic_t *v)				\
 {									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
+	"	prfm	pstl1strm, %[v]\n"				\
 	"	" #asm_op "	%w[i], %[v]\n"				\
 	: [v] "+Q" (v->counter)					\
 	: [i] "r" (i));							\
@@ -41,6 +42,7 @@ __lse_atomic_fetch_##op##name(int i, atomic_t *v)		\
 									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
+	"	prfm	pstl1strm, %[v]\n"				\
 	"	" #asm_op #mb "	%w[i], %w[old], %[v]"			\
 	: [v] "+Q" (v->counter),					\
 	  [old] "=r" (old)						\
@@ -123,6 +125,7 @@ __lse_atomic64_##op(s64 i, atomic64_t *v)			\
 {									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
+	"	prfm	pstl1strm, %[v]\n"				\
 	"	" #asm_op "	%[i], %[v]\n"				\
 	: [v] "+Q" (v->counter)					\
 	: [i] "r" (i));							\
@@ -148,6 +151,7 @@ __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)	\
 									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
+	"	prfm	pstl1strm, %[v]\n"				\
 	"	" #asm_op #mb "	%[i], %[old], %[v]"			\
 	: [v] "+Q" (v->counter),					\
 	  [old] "=r" (old)						\
@@ -230,6 +234,7 @@ static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 
 	asm volatile(
 	__LSE_PREAMBLE
+	"	prfm	pstl1strm, %[v]\n"
 	"1:	ldr	%x[tmp], %[v]\n"
 	"	subs	%[ret], %x[tmp], #1\n"
 	"	b.lt	2f\n"
@@ -253,6 +258,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr,		\
 {									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
+	"	prfm	pstl1strm, %[v]\n"				\
 	"	cas" #mb #sfx "	%" #w "[old], %" #w "[new], %[v]\n"	\
 	: [v] "+Q" (*(u##sz *)ptr),					\
 	  [old] "+r" (old)						\
@@ -295,6 +301,7 @@ __lse__cmpxchg128##name(volatile u128 *ptr, u128 old, u128 new)	\
 									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
+	"	prfm	pstl1strm, %[v]\n"				\
 	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
 	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
 	  [v] "+Q" (*(u128 *)ptr)					\
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index bb842dab5d0efe3f5e8dfdcb2c45087e675dca12..c50388453f187e2fbe62b81cddf4f0652fca1436 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -32,8 +32,9 @@ static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)	\
 	"	cbnz	%w1, 1b\n"					\
 	"	" #mb,							\
 	/* LSE atomics */						\
+	"	prfm	pstl1strm, %2\n"				\
 	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"	\
-	__nops(3)							\
+	__nops(2)							\
 	"	" #nop_lse)						\
 	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)		\
 	: "r" (x)							\
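
For readers without the surrounding macro context, below is a minimal, self-contained userspace sketch of the pattern the patch applies, not kernel code: the helper name prefetched_atomic_add is made up for illustration, and the snippet assumes an ARMv8.1+ CPU with a build flag such as -march=armv8.1-a+lse. A PRFM PSTL1STRM write-stream prefetch is issued on the target location immediately before the LSE atomic, so the cache line can be requested in a writable state before the STADD executes.

	#include <stdint.h>

	/*
	 * Sketch only: mirrors the prefetch-before-LSE-atomic sequence
	 * outside of the kernel's ATOMIC_OP()/__CMPXCHG_CASE() macros.
	 */
	static inline void prefetched_atomic_add(int i, int *counter)
	{
		asm volatile(
		"	.arch_extension lse\n"		/* rough equivalent of __LSE_PREAMBLE */
		"	prfm	pstl1strm, %[v]\n"	/* prefetch for store, L1, streaming */
		"	stadd	%w[i], %[v]\n"		/* LSE atomic add, no return value */
		: [v] "+Q" (*counter)
		: [i] "r" (i));
	}

	int main(void)
	{
		int counter = 0;

		prefetched_atomic_add(5, &counter);
		return counter == 5 ? 0 : 1;
	}

The constraint layout deliberately follows the kernel's relaxed ATOMIC_OP() form: the "+Q" operand on *counter covers the memory update, and the prefetch is a pure hint, so no extra clobbers are needed for this simple case.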