diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 353ec955915ea19211c1e44ce9720f942ea0c35d..6fba755e5d6f9197f17bce482f88f89196a74f7f 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -15,6 +15,8 @@
 #include <linux/sched.h>
 #include <asm/cputype.h>
 #include <asm/mmu.h>
+#define __GENKSYMS__
+#include <linux/mmu_notifier.h>
 
 /*
  * Raw TLBI operations.
@@ -251,6 +253,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 	asid = __TLBI_VADDR(0, ASID(mm));
 	__tlbi(aside1is, asid);
 	__tlbi_user(aside1is, asid);
+	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 	dsb(ish);
 }
 
@@ -263,6 +266,8 @@ static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
 	addr = __TLBI_VADDR(uaddr, ASID(mm));
 	__tlbi(vale1is, addr);
 	__tlbi_user(vale1is, addr);
+	mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
+						(uaddr & PAGE_MASK) + PAGE_SIZE);
 }
 
 static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
@@ -324,6 +329,10 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	int num = 0;
 	int scale = 0;
 	unsigned long asid, addr, pages;
+	unsigned long ustart, uend;
+
+	ustart = start;
+	uend = end;
 
 	start = round_down(start, stride);
 	end = round_up(end, stride);
@@ -339,6 +348,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	    (end - start) >= (MAX_TLBI_OPS * stride)) ||
 	    pages >= MAX_TLBI_RANGE_PAGES) {
 		flush_tlb_mm(vma->vm_mm);
+		/*
+		 * The callback should see the accurate range. flush_tlb_mm()
+		 * above reports the range as 0 to -1, so invoke the callback
+		 * again here with the precise start and end addresses.
+		 */
+		mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, ustart, uend);
+		dsb(ish);
 		return;
 	}
 
@@ -395,6 +411,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 		}
 		scale++;
 	}
+	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, ustart, uend);
 	dsb(ish);
 }
 
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 857df3eea53f39a4dc95402ec6b36e2750bff74e..86e95ccd71a046dc2b3a175181a08db360b8bba1 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -204,6 +204,27 @@ struct mmu_notifier_ops {
 				 unsigned long start,
 				 unsigned long end);
+
+	/*
+	 * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
+	 * which shares page-tables with the CPU. The
+	 * invalidate_range_start()/end() callbacks should not be implemented as
+	 * arch_invalidate_secondary_tlbs() already catches the points in time when
+	 * an external TLB needs to be flushed.
+	 *
+	 * This requires arch_invalidate_secondary_tlbs() to be called while
+	 * holding the ptl spin-lock and therefore this callback is not allowed
+	 * to sleep.
+	 *
+	 * This is called by architecture code whenever invalidating a TLB
+	 * entry. It is assumed that any secondary TLB has the same rules for
+	 * when invalidations are required. If this is not the case architecture
+	 * code will need to call this explicitly when required for secondary
+	 * TLB invalidation.
+	 */
+	void (*arch_invalidate_secondary_tlbs)(struct mmu_notifier *mn, struct mm_struct *mm,
+					       unsigned long start, unsigned long end);
+
 	/*
 	 * These callbacks are used with the get/put interface to manage the
 	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
 	 * notifier for use with the mm.
@@ -403,6 +424,8 @@ extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
 				  bool only_end);
 extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 				  unsigned long start, unsigned long end);
+extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+				  unsigned long start, unsigned long end);
 extern bool
 mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
 
@@ -502,6 +525,13 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 		__mmu_notifier_invalidate_range(mm, start, end);
 }
 
+static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+				  unsigned long start, unsigned long end)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
+}
+
 static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
 	mm->notifier_subscriptions = NULL;
@@ -727,6 +757,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 {
 }
 
+static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+				  unsigned long start, unsigned long end)
+{
+}
+
 static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
 }
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 9165ca619c8cfdb282ac33db4b5d5e0246474d2c..44c0f37f9ac23a5ad50a53fd70af93dd87209371 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -621,6 +621,23 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 	srcu_read_unlock(&srcu, id);
 }
 
+void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+				  unsigned long start, unsigned long end)
+{
+	struct mmu_notifier *subscription;
+	int id;
+
+	id = srcu_read_lock(&srcu);
+	hlist_for_each_entry_rcu(subscription,
+				 &mm->notifier_subscriptions->list, hlist,
+				 srcu_read_lock_held(&srcu)) {
+		if (subscription->ops->arch_invalidate_secondary_tlbs)
+			subscription->ops->arch_invalidate_secondary_tlbs(
+				subscription, mm, start, end);
+	}
+	srcu_read_unlock(&srcu, id);
+}
+
 /*
  * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
  * write mode. A NULL mn signals the notifier is being registered for itree
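
For reviewers, a minimal sketch of how a driver managing a device TLB that
shares the CPU page tables could consume the new callback. This is not part
of the patch: my_dev, my_dev_flush_tlb() and my_dev_bind() are hypothetical
names; only mmu_notifier_register() and the callback signature added above
come from the existing API and this series.

/* Hypothetical consumer of arch_invalidate_secondary_tlbs(). */
#include <linux/kernel.h>
#include <linux/mmu_notifier.h>

struct my_dev {
	struct mmu_notifier mn;		/* embedded subscription */
	void __iomem *tlbi_reg;		/* hypothetical MMIO doorbell */
};

/* Hypothetical device-side invalidation; must not sleep. */
static void my_dev_flush_tlb(struct my_dev *dev, unsigned long start,
			     unsigned long end)
{
	/* e.g. write the range to an MMIO register and poll for completion */
}

static void my_dev_invalidate_secondary_tlbs(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct my_dev *dev = container_of(mn, struct my_dev, mn);

	/*
	 * Per the comment added to struct mmu_notifier_ops, this may run
	 * under the ptl spin-lock, so the device invalidation must be
	 * synchronous and non-sleeping.
	 */
	my_dev_flush_tlb(dev, start, end);
}

static const struct mmu_notifier_ops my_dev_mn_ops = {
	.arch_invalidate_secondary_tlbs = my_dev_invalidate_secondary_tlbs,
};

/* Attach the device TLB to an mm; caller must not hold mmap_lock. */
static int my_dev_bind(struct my_dev *dev, struct mm_struct *mm)
{
	dev->mn.ops = &my_dev_mn_ops;
	return mmu_notifier_register(&dev->mn, mm);
}

Note the ranges such a consumer sees: flush_tlb_mm() reports 0 to -1UL (a
full-ASID flush), while __flush_tlb_range() passes the precise ustart/uend
captured before rounding, so the device never invalidates a narrower range
than the CPU did.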