From c4f635c8c90edc2f536b19e3f27ba0de1402bd65 Mon Sep 17 00:00:00 2001
From: "yang.yang29@zte.com.cn"
Date: Sat, 3 Jun 2023 11:58:55 +0800
Subject: [PATCH 1/3] mm/highmem: Take kmap_high_get() properly into account

commit 7ad48f40fea82e1aa4e4a40772b5cd7144413827 upstream.

kunmap_local() warns when the virtual address to unmap is below
PAGE_OFFSET. This is correct except for the case that the mapping was
obtained via kmap_high_get() because the PKMAP addresses are right
below PAGE_OFFSET.

Cure it by skipping the WARN_ON() when the unmap was handled by
kunmap_high().

Fixes: 298fa1ad5571 ("highmem: Provide generic variant of kmap_atomic*")
Reported-by: vtolkm@googlemail.com
Reported-by: Marek Szyprowski
Signed-off-by: Thomas Gleixner
Tested-by: Marek Szyprowski
Tested-by: Sebastian Andrzej Siewior
Cc: Andrew Morton
Link: https://lore.kernel.org/r/87y2j6n8mj.fsf@nanos.tec.linutronix.de
Signed-off-by: Sebastian Andrzej Siewior
---
 mm/highmem.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/mm/highmem.c b/mm/highmem.c
index 8db577e5290c..72b9a2d95c72 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -422,12 +422,15 @@ static inline void *arch_kmap_local_high_get(struct page *page)
 #endif

 /* Unmap a local mapping which was obtained by kmap_high_get() */
-static inline void kmap_high_unmap_local(unsigned long vaddr)
+static inline bool kmap_high_unmap_local(unsigned long vaddr)
 {
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
-        if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP))
+        if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                 kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+                return true;
+        }
 #endif
+        return false;
 }

 static inline int kmap_local_calc_idx(int idx)
@@ -493,10 +496,14 @@ void kunmap_local_indexed(void *vaddr)

         if (addr < __fix_to_virt(FIX_KMAP_END) ||
             addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
-                WARN_ON_ONCE(addr < PAGE_OFFSET);
-
-                /* Handle mappings which were obtained by kmap_high_get() */
-                kmap_high_unmap_local(addr);
+                /*
+                 * Handle mappings which were obtained by kmap_high_get()
+                 * first as the virtual address of such mappings is below
+                 * PAGE_OFFSET. Warn for all other addresses which are in
+                 * the user space part of the virtual address space.
+                 */
+                if (!kmap_high_unmap_local(addr))
+                        WARN_ON_ONCE(addr < PAGE_OFFSET);
                 return;
         }

--
Gitee
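The ordering established above matters because the PKMAP window sits
immediately below PAGE_OFFSET: an address in that window is a valid
kernel mapping, yet the old code warned about it before giving
kmap_high_unmap_local() a chance to claim it. Here is a minimal
user-space sketch of the fixed dispatch; PAGE_OFFSET, LAST_PKMAP and
the 4 KiB page shift are made-up stand-ins for the kernel's values,
not kernel code:

/* pkmap_sketch.c - illustrative only, all constants are made up */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_OFFSET     0xc0000000UL
#define LAST_PKMAP      512UL
/* PKMAP window of LAST_PKMAP 4 KiB pages ending right at PAGE_OFFSET */
#define PKMAP_ADDR(nr)  (PAGE_OFFSET - (LAST_PKMAP << 12) + ((nr) << 12))

/* Stand-in for kmap_high_unmap_local(): true when it handled vaddr */
static bool kmap_high_unmap_local(unsigned long vaddr)
{
        if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                printf("%#lx: PKMAP mapping, handed to kunmap_high()\n", vaddr);
                return true;
        }
        return false;
}

static void kunmap_local_sketch(unsigned long addr)
{
        /* Try the PKMAP window first; warn only for addresses it declined */
        if (!kmap_high_unmap_local(addr) && addr < PAGE_OFFSET)
                printf("%#lx: WARN, user space address\n", addr);
}

int main(void)
{
        kunmap_local_sketch(PKMAP_ADDR(3));     /* below PAGE_OFFSET, no warning */
        kunmap_local_sketch(0x08048000UL);      /* genuine user space, warns */
        return 0;
}

With the pre-fix order the first call would have tripped WARN_ON_ONCE()
even though the address belongs to the PKMAP window; warning only when
the PKMAP path declines keeps the check meaningful for genuine
user-space addresses.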
From 3e7efff90cbd14654967aba6860f3bd6a499a912 Mon Sep 17 00:00:00 2001
From: "yang.yang29@zte.com.cn"
Date: Sat, 3 Jun 2023 11:59:26 +0800
Subject: [PATCH 2/3] highmem: Don't disable preemption on RT in kmap_atomic()

commit 72fbdc6e61846fd27233f93659b72469e702c7a5 upstream.

Disabling preemption makes it impossible to acquire sleeping locks
within a kmap_atomic() section. For PREEMPT_RT it is sufficient to
disable migration.

Signed-off-by: Sebastian Andrzej Siewior
---
 include/linux/highmem-internal.h | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index bd15bf9164c2..f9bc6acd3679 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -90,7 +90,10 @@ static inline void __kunmap_local(void *vaddr)

 static inline void *kmap_atomic(struct page *page)
 {
-        preempt_disable();
+        if (IS_ENABLED(CONFIG_PREEMPT_RT))
+                migrate_disable();
+        else
+                preempt_disable();
         pagefault_disable();
         return __kmap_local_page_prot(page, kmap_prot);
 }
@@ -99,7 +102,10 @@ static inline void __kunmap_atomic(void *addr)
 {
         kunmap_local_indexed(addr);
         pagefault_enable();
-        preempt_enable();
+        if (IS_ENABLED(CONFIG_PREEMPT_RT))
+                migrate_enable();
+        else
+                preempt_enable();
 }

 unsigned int __nr_free_highpages(void);
@@ -172,7 +178,10 @@ static inline void __kunmap_local(void *addr)

 static inline void *kmap_atomic(struct page *page)
 {
-        preempt_disable();
+        if (IS_ENABLED(CONFIG_PREEMPT_RT))
+                migrate_disable();
+        else
+                preempt_disable();
         pagefault_disable();
         return page_address(page);
 }
@@ -183,7 +192,10 @@ static inline void __kunmap_atomic(void *addr)
         kunmap_flush_on_unmap(addr);
 #endif
         pagefault_enable();
-        preempt_enable();
+        if (IS_ENABLED(CONFIG_PREEMPT_RT))
+                migrate_enable();
+        else
+                preempt_enable();
 }

 static inline unsigned int nr_free_highpages(void) { return 0; }
--
Gitee

From 5f8ef05d03316ad2c99105433cf4f657d16a4639 Mon Sep 17 00:00:00 2001
From: "yang.yang29@zte.com.cn"
Date: Sat, 3 Jun 2023 12:00:02 +0800
Subject: [PATCH 3/3] lib/test_lockup: Minimum fix to get it compiled on PREEMPT_RT

commit 5c5969e3fe5edf3c933a262648ebfbd9fab17b76 upstream.

On PREEMPT_RT the locks are quite different so they can't be tested as
it is done below. The alternative is to test for the waitlock within
rtmutex. This is the bare minimum to get it compiled.

Problems which exist on PREEMPT_RT:
- none of the locks (spinlock_t, rwlock_t, mutex_t, rw_semaphore) may
  be acquired with disabled preemption or interrupts.
  If I read the code correctly, it is possible to acquire a mutex with
  disabled interrupts.
  I don't know how to obtain a lock pointer. Technically they are not
  exported to userland.
- memory cannot be allocated with disabled preemption or interrupts
  even with GFP_ATOMIC.

Signed-off-by: Sebastian Andrzej Siewior
---
 lib/test_lockup.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/lib/test_lockup.c b/lib/test_lockup.c
index 78a630bbd03d..d27a80502204 100644
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -485,6 +485,21 @@ static int __init test_lockup_init(void)
                 return -EINVAL;

 #ifdef CONFIG_DEBUG_SPINLOCK
+#ifdef CONFIG_PREEMPT_RT
+        if (test_magic(lock_spinlock_ptr,
+                       offsetof(spinlock_t, lock.wait_lock.magic),
+                       SPINLOCK_MAGIC) ||
+            test_magic(lock_rwlock_ptr,
+                       offsetof(rwlock_t, rtmutex.wait_lock.magic),
+                       SPINLOCK_MAGIC) ||
+            test_magic(lock_mutex_ptr,
+                       offsetof(struct mutex, lock.wait_lock.magic),
+                       SPINLOCK_MAGIC) ||
+            test_magic(lock_rwsem_ptr,
+                       offsetof(struct rw_semaphore, rtmutex.wait_lock.magic),
+                       SPINLOCK_MAGIC))
+                return -EINVAL;
+#else
         if (test_magic(lock_spinlock_ptr,
                        offsetof(spinlock_t, rlock.magic),
                        SPINLOCK_MAGIC) ||
@@ -498,6 +513,7 @@ static int __init test_lockup_init(void)
                        offsetof(struct rw_semaphore, wait_lock.magic),
                        SPINLOCK_MAGIC))
                 return -EINVAL;
+#endif
 #endif

         if ((wait_state != TASK_RUNNING ||
--
Gitee
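The PREEMPT_RT branch above changes only the byte offset at which each
lock is probed for SPINLOCK_MAGIC, because on RT a spinlock_t wraps an
rtmutex and the debug magic lives in the rtmutex's internal wait_lock.
A minimal user-space sketch of that probe follows; the struct layouts
and this test_magic() are illustrative stand-ins for the kernel's
(which reads the magic from a user-supplied lock address with a
fault-safe copy), not its real definitions:

/* test_magic_sketch.c - illustrative layouts, not the kernel's */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define SPINLOCK_MAGIC  0xdead4ead

struct raw_spinlock { unsigned int magic; };
/* !PREEMPT_RT flavour: the magic sits in the embedded raw lock */
struct spinlock_nort { struct raw_spinlock rlock; };
/* PREEMPT_RT flavour: spinlock_t wraps an rtmutex, magic in its wait_lock */
struct rt_mutex { struct raw_spinlock wait_lock; };
struct spinlock_rt { struct rt_mutex lock; };

/* true means "bad magic": the pointer does not look like the expected lock */
static bool test_magic(const void *lock, size_t offset, unsigned int expected)
{
        unsigned int magic;

        if (!lock)
                return false;   /* no lock supplied: nothing to check */
        memcpy(&magic, (const char *)lock + offset, sizeof(magic));
        return magic != expected;
}

int main(void)
{
        struct spinlock_nort a = { .rlock.magic = SPINLOCK_MAGIC };
        struct spinlock_rt b = { .lock.wait_lock.magic = SPINLOCK_MAGIC };

        printf("nort bad magic: %d\n",
               test_magic(&a, offsetof(struct spinlock_nort, rlock.magic),
                          SPINLOCK_MAGIC));
        printf("rt   bad magic: %d\n",
               test_magic(&b, offsetof(struct spinlock_rt, lock.wait_lock.magic),
                          SPINLOCK_MAGIC));
        return 0;
}

Probing with the offsets of the wrong flavour makes every check report
bad magic, which is why test_lockup_init() bails out with -EINVAL when
a supplied pointer does not look like the expected lock type.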