diff --git a/Documentation/driver-api/io-mapping.rst b/Documentation/driver-api/io-mapping.rst
index a966239f04e489c58d2fc07f9f10922cf7f2010b..e33b88268554bf68f71227dc9a27edc856d3e5ee 100644
--- a/Documentation/driver-api/io-mapping.rst
+++ b/Documentation/driver-api/io-mapping.rst
@@ -73,25 +73,3 @@ for pages mapped with io_mapping_map_wc.
 At driver close time, the io_mapping object must be freed::
 
 	void io_mapping_free(struct io_mapping *mapping)
-
-Current Implementation
-======================
-
-The initial implementation of these functions uses existing mapping
-mechanisms and so provides only an abstraction layer and no new
-functionality.
-
-On 64-bit processors, io_mapping_create_wc calls ioremap_wc for the whole
-range, creating a permanent kernel-visible mapping to the resource. The
-map_atomic and map functions add the requested offset to the base of the
-virtual address returned by ioremap_wc.
-
-On 32-bit processors with HIGHMEM defined, io_mapping_map_atomic_wc uses
-kmap_atomic_pfn to map the specified page in an atomic fashion;
-kmap_atomic_pfn isn't really supposed to be used with device pages, but it
-provides an efficient mapping for this usage.
-
-On 32-bit processors without HIGHMEM defined, io_mapping_map_atomic_wc and
-io_mapping_map_wc both use ioremap_wc, a terribly inefficient function which
-performs an IPI to inform all processors about the new mapping. This results
-in a significant performance penalty.
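Note: with the "Current Implementation" section gone, the remaining document only
describes the API contract. For reference, a minimal usage sketch of that API; this
is not part of the patch, and the example_dev_* wrappers, BAR base and size are
illustrative placeholders:

#include <linux/io.h>
#include <linux/io-mapping.h>

static struct io_mapping *vram;		/* one WC mapping per resource */

static int example_dev_init(resource_size_t bar_base, unsigned long bar_size)
{
	vram = io_mapping_create_wc(bar_base, bar_size);
	return vram ? 0 : -ENOMEM;
}

static void example_dev_write32(unsigned long offset, u32 val)
{
	/* Disables pagefaults and preemption; must not sleep until unmap. */
	void __iomem *p = io_mapping_map_atomic_wc(vram, offset & PAGE_MASK);

	writel(val, p + (offset & ~PAGE_MASK));
	io_mapping_unmap_atomic(p);
}

static void example_dev_exit(void)
{
	io_mapping_free(vram);
}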
diff --git a/arch/alpha/include/asm/kmap_types.h b/arch/alpha/include/asm/kmap_types.h
deleted file mode 100644
index 651714b45729a1901d8418df9f19c37ab6e19124..0000000000000000000000000000000000000000
--- a/arch/alpha/include/asm/kmap_types.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-/* Dummy header just to define km_type. */
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define __WITH_KM_FENCE
-#endif
-
-#include <asm-generic/kmap_types.h>
-
-#undef __WITH_KM_FENCE
-
-#endif
diff --git a/arch/ia64/include/asm/kmap_types.h b/arch/ia64/include/asm/kmap_types.h
deleted file mode 100644
index 5c268cf7c2bd5c55b84451e45d9871a977ef75aa..0000000000000000000000000000000000000000
--- a/arch/ia64/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_IA64_KMAP_TYPES_H
-#define _ASM_IA64_KMAP_TYPES_H
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define __WITH_KM_FENCE
-#endif
-
-#include <asm-generic/kmap_types.h>
-
-#undef __WITH_KM_FENCE
-
-#endif /* _ASM_IA64_KMAP_TYPES_H */
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 5e88c351e6a45a52847c07936c4f8528eef87571..f3fa02b8838af34ccd441872501d995911bc2da0 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -33,7 +33,6 @@
 #include <asm/io.h>
 #include <asm/tlb.h>
 #include <asm/mmu_context.h>
-#include <asm/kmap_types.h>
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index a978590d802d0c3b90ae8a23b0d443fda2c0da17..5aed97a18bac934dd7e0abfb1e9b5d44c99dcfec 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -15,7 +15,6 @@
 #include <linux/io.h>
 #include <linux/pgtable.h>
 #include <asm/pgalloc.h>
-#include <asm/kmap_types.h>
 #include <asm/fixmap.h>
 #include <asm/bug.h>
 #include <linux/sched.h>
diff --git a/arch/parisc/include/asm/kmap_types.h b/arch/parisc/include/asm/kmap_types.h
deleted file mode 100644
index 3e70b5cd112317e741450b73c759c7ef81afe7ba..0000000000000000000000000000000000000000
--- a/arch/parisc/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define __WITH_KM_FENCE
-#endif
-
-#include <asm-generic/kmap_types.h>
-
-#undef __WITH_KM_FENCE
-
-#endif
diff --git a/arch/um/include/asm/fixmap.h b/arch/um/include/asm/fixmap.h
index 2c697a145ac1b3a9419b155f210ee4d6f1550479..2efac582718808201bbe54f732be42d0d97873cd 100644
--- a/arch/um/include/asm/fixmap.h
+++ b/arch/um/include/asm/fixmap.h
@@ -3,7 +3,6 @@
 #define __UM_FIXMAP_H
 
 #include <asm/processor.h>
-#include <asm/kmap_types.h>
 #include <asm/archparam.h>
 #include <asm/page.h>
 #include <linux/threads.h>
diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
deleted file mode 100644
index b0bd12de1d23cf47b8ae33c20990d0bcc7037ce6..0000000000000000000000000000000000000000
--- a/arch/um/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- */
-
-#ifndef __UM_KMAP_TYPES_H
-#define __UM_KMAP_TYPES_H
-
-/* No more #include "asm/arch/kmap_types.h" ! */
-
-#define KM_TYPE_NR 14
-
-#endif
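Note: every per-arch KM_TYPE_NR definition can go because the generic
CONFIG_KMAP_LOCAL code no longer hands out fixed per-use slots; it sizes a
per-CPU stack of mapping slots with a fixed depth (KM_MAX_IDX from kmap_size.h,
which replaces kmap_types.h) and allocates them strictly LIFO. A simplified
sketch of the idea; the real code in mm/highmem.c additionally tracks the
stack in task_struct so mappings survive scheduling:

/* Simplified illustration only; not the kernel implementation. */
static DEFINE_PER_CPU(int, example_kmap_idx);

static inline int example_kmap_idx_push(void)
{
	/* LIFO nesting means a plain per-CPU stack index suffices. */
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	return __this_cpu_inc_return(example_kmap_idx) - 1;
}

static inline void example_kmap_idx_pop(void)
{
	WARN_ON_ONCE(__this_cpu_dec_return(example_kmap_idx) < 0);
}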
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index 0be7a30fd6bc1601f0e0dd8f3b2f708ce148efaa..e2de092fc38cb5cc633061a9fd44972a72d46ae7 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -13,14 +13,7 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
-
-static inline void iounmap_atomic(void __iomem *vaddr)
-{
-	kunmap_local_indexed((void __force *)vaddr);
-	pagefault_enable();
-	preempt_enable();
-}
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
 
 int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
 
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index e0a40d7cc66c806b2d342af4db8e91c98abffba4..9aaa756ddf21942dbc62e5956eb74883e7358ba3 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -44,7 +44,7 @@ void iomap_free(resource_size_t base, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -60,8 +60,6 @@ void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	preempt_disable();
-	pagefault_disable();
 	return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
+EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot);
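Note: besides the rename, the preempt_disable()/pagefault_disable() pair moves
out of the x86 helper and into the io-mapping wrapper (see the
include/linux/io-mapping.h hunk below), so the helper itself no longer forces
atomic semantics. That makes a non-atomic, kmap_local style wrapper possible.
A hypothetical sketch, not part of this patch; mainline later grew
io_mapping_map_local_wc() along these lines:

static inline void __iomem *
example_io_mapping_map_local_wc(struct io_mapping *mapping,
				unsigned long offset)
{
	resource_size_t phys_addr = mapping->base + offset;

	/*
	 * Pagefaults and preemption stay enabled; pointer validity
	 * follows kmap_local rules (valid in this task only).
	 */
	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}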
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 3114a6da7e56136b863a2f11c9ed4ed44ba40fe9..267f6dfb8960b0c2131c6f91ee1bc482f8fb3993 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -30,7 +30,6 @@ mandatory-y += irq.h
 mandatory-y += irq_regs.h
 mandatory-y += irq_work.h
 mandatory-y += kdebug.h
-mandatory-y += kmap_types.h
 mandatory-y += kmap_size.h
 mandatory-y += kprobes.h
 mandatory-y += linkage.h
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
deleted file mode 100644
index 9f95b7b63d192817c2a94cc8f6a18d14a7550446..0000000000000000000000000000000000000000
--- a/include/asm-generic/kmap_types.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_KMAP_TYPES_H
-#define _ASM_GENERIC_KMAP_TYPES_H
-
-#ifdef __WITH_KM_FENCE
-# define KM_TYPE_NR 41
-#else
-# define KM_TYPE_NR 20
-#endif
-
-#endif
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
new file mode 100644
index 0000000000000000000000000000000000000000..6ceed907b14e2de9fc5d130a7884535677d39e44
--- /dev/null
+++ b/include/linux/highmem-internal.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HIGHMEM_INTERNAL_H
+#define _LINUX_HIGHMEM_INTERNAL_H
+
+/*
+ * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
+ */
+#ifdef CONFIG_KMAP_LOCAL
+void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+void kunmap_local_indexed(void *vaddr);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
+void __kmap_flush_unused(void);
+struct page *__kmap_to_page(void *addr);
+
+static inline void *kmap(struct page *page)
+{
+	void *addr;
+
+	might_sleep();
+	if (!PageHighMem(page))
+		addr = page_address(page);
+	else
+		addr = kmap_high(page);
+	kmap_flush_tlb((unsigned long)addr);
+	return addr;
+}
+
+static inline void kunmap(struct page *page)
+{
+	might_sleep();
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+
+static inline struct page *kmap_to_page(void *addr)
+{
+	return __kmap_to_page(addr);
+}
+
+static inline void kmap_flush_unused(void)
+{
+	__kmap_flush_unused();
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	preempt_disable();
+	pagefault_disable();
+	return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+	return kmap_atomic_prot(page, kmap_prot);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+	preempt_disable();
+	pagefault_disable();
+	return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_atomic(void *addr)
+{
+	kunmap_local_indexed(addr);
+	pagefault_enable();
+	preempt_enable();
+}
+
+unsigned int __nr_free_highpages(void);
+extern atomic_long_t _totalhigh_pages;
+
+static inline unsigned int nr_free_highpages(void)
+{
+	return __nr_free_highpages();
+}
+
+static inline unsigned long totalhigh_pages(void)
+{
+	return (unsigned long)atomic_long_read(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_inc(void)
+{
+	atomic_long_inc(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_add(long count)
+{
+	atomic_long_add(count, &_totalhigh_pages);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+static inline struct page *kmap_to_page(void *addr)
+{
+	return virt_to_page(addr);
+}
+
+static inline void *kmap(struct page *page)
+{
+	might_sleep();
+	return page_address(page);
+}
+
+static inline void kunmap_high(struct page *page) { }
+static inline void kmap_flush_unused(void) { }
+
+static inline void kunmap(struct page *page)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(page_address(page));
+#endif
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+	preempt_disable();
+	pagefault_disable();
+	return page_address(page);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_atomic(page);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+	return kmap_atomic(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_atomic(void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+	kunmap_flush_on_unmap(addr);
+#endif
+	pagefault_enable();
+	preempt_enable();
+}
+
+static inline unsigned int nr_free_highpages(void) { return 0; }
+static inline unsigned long totalhigh_pages(void) { return 0UL; }
+
+#endif /* CONFIG_HIGHMEM */
+
+/*
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ */
+#define kunmap_atomic(__addr)					\
+do {								\
+	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
+	__kunmap_atomic(__addr);				\
+} while (0)
+
+#endif
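Note: the kunmap_atomic() macro above keeps the BUILD_BUG_ON() type check, and
the map/unmap pairs it guards are stack based, so nested mappings must be
released in reverse order (see the kmap_atomic() kernel-doc added to
linux/highmem.h below). A short illustration, not part of the patch:

#include <linux/highmem.h>
#include <linux/string.h>

static void example_copy_highpage(struct page *dst, struct page *src)
{
	void *vsrc = kmap_atomic(src);
	void *vdst = kmap_atomic(dst);

	memcpy(vdst, vsrc, PAGE_SIZE);

	/* Strictly the reverse order of the maps: dst was mapped last. */
	kunmap_atomic(vdst);
	kunmap_atomic(vsrc);

	/*
	 * kunmap_atomic(dst) would trip the BUILD_BUG_ON(): the macro
	 * wants the pointer returned by kmap_atomic(), not the page.
	 */
}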
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 99054839d1ee3220baea5357f0808c2200603699..0660a2e5ae968ed5da829a2c321991bccc2db16c 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -11,256 +11,125 @@
 
 #include <asm/cacheflush.h>
 
-#ifndef ARCH_HAS_FLUSH_ANON_PAGE
-static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
-{
-}
-#endif
-
-#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-}
-#endif
+#include "highmem-internal.h"
 
-#include <asm/kmap_types.h>
-
-/*
- * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
+/**
+ * kmap - Map a page for long term usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can only be invoked from preemptible task context because on 32bit
+ * systems with CONFIG_HIGHMEM enabled this function might sleep.
+ *
+ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
+ * this returns the virtual address of the direct kernel mapping.
+ *
+ * The returned virtual address is globally visible and valid up to the
+ * point where it is unmapped via kunmap(). The pointer can be handed to
+ * other contexts.
+ *
+ * For highmem pages on 32bit systems this can be slow as the mapping space
+ * is limited and protected by a global lock. In case that there is no
+ * mapping slot available the function blocks until a slot is released via
+ * kunmap().
  */
-#ifdef CONFIG_KMAP_LOCAL
-void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
-void kunmap_local_indexed(void *vaddr);
-#endif
+static inline void *kmap(struct page *page);
 
-#ifdef CONFIG_HIGHMEM
-#include <asm/highmem.h>
-
-#ifndef ARCH_HAS_KMAP_FLUSH_TLB
-static inline void kmap_flush_tlb(unsigned long addr) { }
-#endif
-
-#ifndef kmap_prot
-#define kmap_prot PAGE_KERNEL
-#endif
-
-void *kmap_high(struct page *page);
-static inline void *kmap(struct page *page)
-{
-	void *addr;
-
-	might_sleep();
-	if (!PageHighMem(page))
-		addr = page_address(page);
-	else
-		addr = kmap_high(page);
-	kmap_flush_tlb((unsigned long)addr);
-	return addr;
-}
+/**
+ * kunmap - Unmap the virtual address mapped by kmap()
+ * @page: Pointer to the page which was mapped by kmap()
+ *
+ * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
+ * pages in the low memory area.
+ */
+static inline void kunmap(struct page *page);
 
-void kunmap_high(struct page *page);
+/**
+ * kmap_to_page - Get the page for a kmap'ed address
+ * @addr: The address to look up
+ *
+ * Returns: The page which is mapped to @addr.
+ */
+static inline struct page *kmap_to_page(void *addr);
 
-static inline void kunmap(struct page *page)
-{
-	might_sleep();
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
+/**
+ * kmap_flush_unused - Flush all unused kmap mappings in order to
+ *		       remove stray mappings
+ */
+static inline void kmap_flush_unused(void);
 
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
+/**
+ * kmap_atomic - Atomically map a page for temporary usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Side effect: On return pagefaults and preemption are disabled.
+ *
+ * Can be invoked from any context.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation:
+ *
+ * addr1 = kmap_atomic(page1);
+ * addr2 = kmap_atomic(page2);
+ * ...
+ * kunmap_atomic(addr2);
+ * kunmap_atomic(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
  *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
  *
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While it is significantly faster than kmap() it comes with restrictions
+ * about the pointer validity and the side effects of disabling page faults
+ * and preemption. Use it only when absolutely necessary, e.g. from non
+ * preemptible contexts.
  */
+static inline void *kmap_atomic(struct page *page);
 
-#ifndef CONFIG_KMAP_LOCAL
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
-void kunmap_atomic_high(void *kvaddr);
-
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-	return kmap_atomic_high_prot(page, prot);
-}
-
-static inline void __kunmap_atomic(void *vaddr)
-{
-	kunmap_atomic_high(vaddr);
-}
-#else /* !CONFIG_KMAP_LOCAL */
-
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-	preempt_disable();
-	pagefault_disable();
-	return __kmap_local_page_prot(page, prot);
-}
-
-static inline void *kmap_atomic_pfn(unsigned long pfn)
-{
-	preempt_disable();
-	pagefault_disable();
-	return __kmap_local_pfn_prot(pfn, kmap_prot);
-}
-
-static inline void __kunmap_atomic(void *addr)
-{
-	kunmap_local_indexed(addr);
-}
-
-#endif /* CONFIG_KMAP_LOCAL */
-
-static inline void *kmap_atomic(struct page *page)
-{
-	return kmap_atomic_prot(page, kmap_prot);
-}
-
-/* declarations for linux/mm/highmem.c */
-unsigned int nr_free_highpages(void);
-extern atomic_long_t _totalhigh_pages;
-static inline unsigned long totalhigh_pages(void)
-{
-	return (unsigned long)atomic_long_read(&_totalhigh_pages);
-}
-
-static inline void totalhigh_pages_inc(void)
-{
-	atomic_long_inc(&_totalhigh_pages);
-}
-
-static inline void totalhigh_pages_add(long count)
-{
-	atomic_long_add(count, &_totalhigh_pages);
-}
-
-void kmap_flush_unused(void);
-
-struct page *kmap_to_page(void *addr);
-
-#else /* CONFIG_HIGHMEM */
-
-static inline unsigned int nr_free_highpages(void) { return 0; }
-
-static inline struct page *kmap_to_page(void *addr)
-{
-	return virt_to_page(addr);
-}
-
-static inline unsigned long totalhigh_pages(void) { return 0UL; }
-
-static inline void *kmap(struct page *page)
-{
-	might_sleep();
-	return page_address(page);
-}
-
-static inline void kunmap_high(struct page *page)
-{
-}
-
-static inline void kunmap(struct page *page)
-{
-#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
-	kunmap_flush_on_unmap(page_address(page));
-#endif
-}
-
-static inline void *kmap_atomic(struct page *page)
-{
-	preempt_disable();
-	pagefault_disable();
-	return page_address(page);
-}
+/**
+ * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
+ * @addr: Virtual address to be unmapped
+ *
+ * Counterpart to kmap_atomic().
+ *
+ * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * preemption.
+ *
+ * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
+ * in the low memory area. For real highmem pages the mapping which was
+ * established with kmap_atomic() is destroyed.
+ */
 
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-	return kmap_atomic(page);
-}
+/* Highmem related interfaces for management code */
+static inline unsigned int nr_free_highpages(void);
+static inline unsigned long totalhigh_pages(void);
 
-static inline void *kmap_atomic_pfn(unsigned long pfn)
+#ifndef ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
-	return kmap_atomic(pfn_to_page(pfn));
 }
-
-static inline void __kunmap_atomic(void *addr)
-{
-	/*
-	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
-	 * handles re-enabling faults and preemption
-	 */
-#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
-	kunmap_flush_on_unmap(addr);
 #endif
-}
-
-#define kmap_flush_unused()	do {} while(0)
-
-#endif /* CONFIG_HIGHMEM */
-#if !defined(CONFIG_KMAP_LOCAL)
-#if defined(CONFIG_HIGHMEM)
-
-DECLARE_PER_CPU(int, __kmap_atomic_idx);
-
-static inline int kmap_atomic_idx_push(void)
+#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
 {
-	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-	WARN_ON_ONCE(in_irq() && !irqs_disabled());
-	BUG_ON(idx >= KM_TYPE_NR);
-#endif
-	return idx;
 }
-
-static inline int kmap_atomic_idx(void)
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
 {
-	return __this_cpu_read(__kmap_atomic_idx) - 1;
 }
-
-static inline void kmap_atomic_idx_pop(void)
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
-	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
-
-	BUG_ON(idx < 0);
-#else
-	__this_cpu_dec(__kmap_atomic_idx);
-#endif
 }
 #endif
-#endif
-
-/*
- * Prevent people trying to call kunmap_atomic() as if it were kunmap()
- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
- */
-#define kunmap_atomic(__addr)					\
-do {								\
-	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
-	__kunmap_atomic(__addr);				\
-	pagefault_enable();					\
-	preempt_enable();					\
-} while (0)
 
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 3b0940be72e96bc03b73a77d86ff6984cf0c8e9f..60e7c83e49047fb3687828aacbaa194c25820e66 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -69,13 +69,17 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 	BUG_ON(offset >= mapping->size);
 	phys_addr = mapping->base + offset;
 
-	return iomap_atomic_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+	preempt_disable();
+	pagefault_disable();
+	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
 }
 
 static inline void
 io_mapping_unmap_atomic(void __iomem *vaddr)
 {
-	iounmap_atomic(vaddr);
+	kunmap_local_indexed((void __force *)vaddr);
+	pagefault_enable();
+	preempt_enable();
 }
 
 static inline void __iomem *
diff --git a/mm/highmem.c b/mm/highmem.c
index 77677c6844f73aad498a4a6da42dd8ee4caa6d29..54bd233846c9009e105636c83c0cfccf5f8f44a5 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -31,12 +31,6 @@
 #include <asm/tlbflush.h>
 #include <linux/vmalloc.h>
 
-#ifndef CONFIG_KMAP_LOCAL
-#ifdef CONFIG_HIGHMEM
-DEFINE_PER_CPU(int, __kmap_atomic_idx);
-#endif
-#endif
-
 /*
  * Virtual_count is not a pure "count".
  *  0 means that it is not mapped, and has not been mapped
@@ -110,7 +104,7 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
 atomic_long_t _totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(_totalhigh_pages);
 
-unsigned int nr_free_highpages (void)
+unsigned int __nr_free_highpages (void)
 {
 	struct zone *zone;
 	unsigned int pages = 0;
@@ -147,7 +141,7 @@ pte_t * pkmap_page_table;
 	do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
 #endif
 
-struct page *kmap_to_page(void *vaddr)
+struct page *__kmap_to_page(void *vaddr)
 {
 	unsigned long addr = (unsigned long)vaddr;
 
@@ -158,7 +152,7 @@ struct page *kmap_to_page(void *vaddr)
 
 	return virt_to_page(addr);
 }
-EXPORT_SYMBOL(kmap_to_page);
+EXPORT_SYMBOL(__kmap_to_page);
 
 static void flush_all_zero_pkmaps(void)
 {
@@ -200,10 +194,7 @@ static void flush_all_zero_pkmaps(void)
 		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
-/**
- * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
- */
-void kmap_flush_unused(void)
+void __kmap_flush_unused(void)
 {
 	lock_kmap();
 	flush_all_zero_pkmaps();
@@ -410,6 +401,7 @@ static inline void kmap_local_idx_pop(void)
 #ifndef arch_kmap_local_post_map
 # define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
 #endif
+
 #ifndef arch_kmap_local_pre_unmap
 # define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
 #endif
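Note: the final hunk only adds a cosmetic blank line between the
arch_kmap_local_post_map()/arch_kmap_local_pre_unmap() default stubs. These
stubs are the override points for architectures whose kmap_local support needs
TLB or cache maintenance around temporary mappings; an arch supplies its own
definitions before the generic code is compiled. A hypothetical sketch, with
helper names invented purely for illustration:

/* In the arch's <asm/highmem.h>, ahead of the generic kmap_local code: */
#define arch_kmap_local_post_map(vaddr, pteval)	\
	example_arch_flush_tlb_one(vaddr)	/* hypothetical helper */

#define arch_kmap_local_pre_unmap(vaddr)	\
	example_arch_flush_dcache_line(vaddr)	/* hypothetical helper */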