Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
63 changes: 37 additions & 26 deletions arch/arm64/include/asm/tlbflush.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,19 +31,11 @@
*/
#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE \
"tlbi " #op "\n" \
ALTERNATIVE("nop\n nop", \
"dsb ish\n tlbi " #op, \
ARM64_WORKAROUND_REPEAT_TLBI, \
CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
: : )

#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE \
"tlbi " #op ", %0\n" \
ALTERNATIVE("nop\n nop", \
"dsb ish\n tlbi " #op ", %0", \
ARM64_WORKAROUND_REPEAT_TLBI, \
CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
: : "r" (arg))
"tlbi " #op ", %x0\n" \
: : "rZ" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

Expand Down Expand Up @@ -181,6 +173,34 @@ static inline unsigned long get_trans_granule(void)
(__pages >> (5 * (scale) + 1)) - 1; \
})

/*
 * Repeat a just-issued TLBI after the caller's DSB on CPUs that have the
 * ARM64_WORKAROUND_REPEAT_TLBI capability, then order the repeat with a
 * second dsb(ish). On unaffected CPUs this expands to nothing
 * (alternative_has_cap_unlikely() patches the check out at boot).
 *
 * NOTE(review): callers pass a dummy operand of 0 for the repeated
 * invalidation; the workaround appears to require only that *some* TLBI
 * follow the DSB, not that it target the original address -- confirm
 * against the erratum documentation.
 */
#define __repeat_tlbi_sync(op, arg...) \
do { \
if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI)) \
break; \
__tlbi(op, ##arg); \
dsb(ish); \
} while (0)

/*
 * Complete broadcast TLB maintenance issued by the host which invalidates
 * stage 1 information in the host's own translation regime.
 *
 * Waits for prior TLBIs with dsb(ish); on CPUs with the
 * ARM64_WORKAROUND_REPEAT_TLBI capability, __repeat_tlbi_sync() then
 * issues a dummy EL1 invalidation (VALE1IS, operand 0) followed by a
 * second dsb(ish). Callers that need the new mappings visible to
 * subsequent instruction fetch/translation still add their own isb().
 */
static inline void __tlbi_sync_s1ish(void)
{
dsb(ish);
__repeat_tlbi_sync(vale1is, 0);
}

/*
 * Complete broadcast TLB maintenance issued by hyp code which invalidates
 * stage 1 translation information in any translation regime.
 *
 * Same shape as __tlbi_sync_s1ish(): dsb(ish) to complete outstanding
 * TLBIs, then -- only on ARM64_WORKAROUND_REPEAT_TLBI CPUs -- a dummy
 * repeated invalidation and second dsb(ish). Uses VALE2IS (EL2) rather
 * than VALE1IS since this runs in the hypervisor's translation regime.
 */
static inline void __tlbi_sync_s1ish_hyp(void)
{
dsb(ish);
__repeat_tlbi_sync(vale2is, 0);
}

/*
* TLB Invalidation
* ================
Expand Down Expand Up @@ -266,7 +286,7 @@ static inline void flush_tlb_all(void)
{
dsb(ishst);
__tlbi(vmalle1is);
dsb(ish);
__tlbi_sync_s1ish();
isb();
}

Expand All @@ -278,7 +298,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
asid = __TLBI_VADDR(0, ASID(mm));
__tlbi(aside1is, asid);
__tlbi_user(aside1is, asid);
dsb(ish);
__tlbi_sync_s1ish();
mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}

Expand All @@ -305,20 +325,11 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long uaddr)
{
flush_tlb_page_nosync(vma, uaddr);
dsb(ish);
__tlbi_sync_s1ish();
}

static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
/*
* TLB flush deferral is not required on systems which are affected by
* ARM64_WORKAROUND_REPEAT_TLBI, as __tlbi()/__tlbi_user() implementation
* will have two consecutive TLBI instructions with a dsb(ish) in between
* defeating the purpose (i.e save overall 'dsb ish' cost).
*/
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
return false;

return true;
}

Expand All @@ -334,7 +345,7 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
*/
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
dsb(ish);
__tlbi_sync_s1ish();
}

/*
Expand Down Expand Up @@ -469,7 +480,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
{
__flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
last_level, tlb_level);
dsb(ish);
__tlbi_sync_s1ish();
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
Expand Down Expand Up @@ -501,7 +512,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
dsb(ishst);
__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
dsb(ish);
__tlbi_sync_s1ish();
isb();
}

Expand All @@ -515,7 +526,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)

dsb(ishst);
__tlbi(vaae1is, addr);
dsb(ish);
__tlbi_sync_s1ish();
isb();
}

Expand Down
2 changes: 1 addition & 1 deletion arch/arm64/kernel/sys_compat.c
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
* We pick the reserved-ASID to minimise the impact.
*/
__tlbi(aside1is, __TLBI_VADDR(0, 0));
dsb(ish);
__tlbi_sync_s1ish();
}

ret = caches_clean_inval_user_pou(start, start + chunk);
Expand Down
2 changes: 1 addition & 1 deletion arch/arm64/kvm/hyp/nvhe/mm.c
Original file line number Diff line number Diff line change
Expand Up @@ -271,7 +271,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
*/
dsb(ishst);
__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
dsb(ish);
__tlbi_sync_s1ish_hyp();
isb();
}

Expand Down
8 changes: 4 additions & 4 deletions arch/arm64/kvm/hyp/nvhe/tlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
*/
dsb(ish);
__tlbi(vmalle1is);
dsb(ish);
__tlbi_sync_s1ish_hyp();
isb();

exit_vmid_context(&cxt);
Expand Down Expand Up @@ -226,7 +226,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,

dsb(ish);
__tlbi(vmalle1is);
dsb(ish);
__tlbi_sync_s1ish_hyp();
isb();

exit_vmid_context(&cxt);
Expand All @@ -240,7 +240,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
enter_vmid_context(mmu, &cxt, false);

__tlbi(vmalls12e1is);
dsb(ish);
__tlbi_sync_s1ish_hyp();
isb();

exit_vmid_context(&cxt);
Expand All @@ -266,5 +266,5 @@ void __kvm_flush_vm_context(void)
/* Same remark as in enter_vmid_context() */
dsb(ish);
__tlbi(alle1is);
dsb(ish);
__tlbi_sync_s1ish_hyp();
}
2 changes: 1 addition & 1 deletion arch/arm64/kvm/hyp/pgtable.c
Original file line number Diff line number Diff line change
Expand Up @@ -483,7 +483,7 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
*unmapped += granule;
}

dsb(ish);
__tlbi_sync_s1ish_hyp();
isb();
mm_ops->put_page(ctx->ptep);

Expand Down
10 changes: 5 additions & 5 deletions arch/arm64/kvm/hyp/vhe/tlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
*/
dsb(ish);
__tlbi(vmalle1is);
dsb(ish);
__tlbi_sync_s1ish_hyp();
isb();

exit_vmid_context(&cxt);
Expand Down Expand Up @@ -176,7 +176,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,

dsb(ish);
__tlbi(vmalle1is);
dsb(ish);
__tlbi_sync_s1ish_hyp();
isb();

exit_vmid_context(&cxt);
Expand All @@ -192,7 +192,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
enter_vmid_context(mmu, &cxt);

__tlbi(vmalls12e1is);
dsb(ish);
__tlbi_sync_s1ish_hyp();
isb();

exit_vmid_context(&cxt);
Expand All @@ -217,7 +217,7 @@ void __kvm_flush_vm_context(void)
{
dsb(ishst);
__tlbi(alle1is);
dsb(ish);
__tlbi_sync_s1ish_hyp();
}

/*
Expand Down Expand Up @@ -358,7 +358,7 @@ int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding)
default:
ret = -EINVAL;
}
dsb(ish);
__tlbi_sync_s1ish_hyp();
isb();

if (mmu)
Expand Down