commit 285994a62c80f1d72c6924282bcb59608098d5ec
Author: Catalin Marinas <>
Date:   Wed Mar 11 12:20:39 2015 +0000

    arm64: Invalidate the TLB corresponding to intermediate page table levels

    The ARM architecture allows the caching of intermediate page table
    levels, and freeing a page table page therefore requires a sequence
    like the following (sketched in code below):

    	pmd_clear()
    	TLB invalidation
    	pte page freeing
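
    A minimal illustration of that ordering requirement (a hypothetical
    helper built from real kernel primitives; it is not code from this
    patch):

    	static void free_pte_page(struct mm_struct *mm, pmd_t *pmdp,
    				  pgtable_t pte)
    	{
    		pmd_clear(pmdp);	/* unhook the pte page */
    		flush_tlb_mm(mm);	/* invalidate cached walk entries */
    		pte_free(mm, pte);	/* only now safe to reuse the page */
    	}
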
    With commit 5e5f6dc10546 (arm64: mm: enable HAVE_RCU_TABLE_FREE logic),
    the page table freeing batching was moved from tlb_remove_page() to
    tlb_remove_table(). The former takes care of TLB invalidation as this is
    also shared with pte clearing and page cache page freeing. The latter,
    however, does not invalidate the TLBs for intermediate page table levels
    as it probably relies on the architecture code to do it if required.
    When mm->mm_users < 2, tlb_remove_table() does not do any batching:
    page table pages are freed immediately, before tlb_finish_mmu(), which
    is what performs the actual TLB invalidation.
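
    For context, a simplified sketch of that generic fast path
    (paraphrased from mm/memory.c of this era, not quoted verbatim):

    	void tlb_remove_table(struct mmu_gather *tlb, void *table)
    	{
    		/*
    		 * Fewer than two users of this mm: no concurrent
    		 * software walk (e.g. gup_fast()) is possible, so the
    		 * page is freed at once -- before tlb_finish_mmu() has
    		 * issued any TLB invalidation. Hardware walk caches may
    		 * still hold entries pointing at the freed page.
    		 */
    		if (atomic_read(&tlb->mm->mm_users) < 2) {
    			__tlb_remove_table(table);
    			return;
    		}
    		/* ... otherwise batch, flush the TLB, then free ... */
    	}
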
    This patch introduces __flush_tlb_pgtable() for arm64 and calls it
    from {pte,pmd,pud}_free_tlb() directly, without relying on the
    deferred page table freeing to do the invalidation.
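
    The helper (full definition in the diff below) invalidates by VA and
    ASID. Per the ARMv8 TLBI VAE1IS operand format (our reading of the
    architecture, not stated in the patch), bits [43:0] carry VA[55:12]
    and bits [63:48] carry the ASID, hence the operand computation:

    	/* page number of the user VA, ASID in the top sixteen bits */
    	unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
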
    Fixes: 5e5f6dc10546 ("arm64: mm: enable HAVE_RCU_TABLE_FREE logic")
    Reported-by: Jon Masters <>
    Tested-by: Jon Masters <>
    Tested-by: Steve Capper <>
    Signed-off-by: Catalin Marinas <>

diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index c028fe3..53d9c35 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	pgtable_page_dtor(pte);
 	tlb_remove_entry(tlb, pte);
 }
@@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
@@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 4abe9b9..c3bb05b 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -144,6 +144,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 }
 
 /*
+ * Used to invalidate the TLB (walk caches) corresponding to intermediate page
+ * table levels (pgd/pud/pmd).
+ */
+static inline void __flush_tlb_pgtable(struct mm_struct *mm,
+				       unsigned long uaddr)
+{
+	unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
+
+	dsb(ishst);
+	asm("tlbi	vae1is, %0" : : "r" (addr));
+	dsb(ish);
+}
+
+/*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
 static inline void update_mmu_cache(struct vm_area_struct *vma,