// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

struct tlb_inv_context {
        unsigned long   flags;
        u64             tcr;
        u64             sctlr;
};

static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
                                                 struct tlb_inv_context *cxt)
{
        u64 val;

        local_irq_save(cxt->flags);

        if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
                /*
                 * For CPUs that are affected by ARM errata 1165522 or 1530923,
                 * we cannot trust stage-1 to be in a correct state at that
                 * point. Since we do not want to force a full load of the
                 * vcpu state, we prevent the EL1 page-table walker from
                 * allocating new TLB entries. This is done by setting the
                 * EPD bits in the TCR_EL1 register. We also need to prevent
                 * it from allocating IPA->PA walks, so we enable the
                 * S1 MMU...
                 */
                val = cxt->tcr = read_sysreg_el1(SYS_TCR);
                val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
                write_sysreg_el1(val, SYS_TCR);
                val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
                val |= SCTLR_ELx_M;
                write_sysreg_el1(val, SYS_SCTLR);
        }

        /*
         * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
         * most TLB operations target EL2/EL0. In order to affect the
         * guest TLBs (EL1/EL0), we need to change one of these two
         * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
         * let's flip TGE before executing the TLB operation.
         *
         * ARM erratum 1165522 requires some special handling (again),
         * as we need to make sure both stages of translation are in
         * place before clearing TGE. __load_guest_stage2() already
         * has an ISB in order to deal with this.
         */
        __load_guest_stage2(kvm);
        val = read_sysreg(hcr_el2);
        val &= ~HCR_TGE;
        write_sysreg(val, hcr_el2);
        isb();
}
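
/*
 * nVHE counterpart of the context switch above: there is no TGE to flip,
 * so installing the guest's stage-2 configuration (its VMID in VTTBR_EL2)
 * with __load_guest_stage2() is enough for the EL1 TLB maintenance issued
 * below to match the guest's VMID-tagged entries.
 */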
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
                                                  struct tlb_inv_context *cxt)
{
        if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                u64 val;

                /*
                 * For CPUs that are affected by ARM 1319367, we need to
                 * avoid a host Stage-1 walk while we have the guest's
                 * VMID set in the VTTBR in order to invalidate TLBs.
                 * We're guaranteed that the S1 MMU is enabled, so we can
                 * simply set the EPD bits to avoid any further TLB fill.
                 */
                val = cxt->tcr = read_sysreg_el1(SYS_TCR);
                val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
                write_sysreg_el1(val, SYS_TCR);
                isb();
        }

        __load_guest_stage2(kvm);
        isb();
}

static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
                                             struct tlb_inv_context *cxt)
{
        if (has_vhe())
                __tlb_switch_to_guest_vhe(kvm, cxt);
        else
                __tlb_switch_to_guest_nvhe(kvm, cxt);
}

static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
                                                struct tlb_inv_context *cxt)
{
        /*
         * We're done with the TLB operation, let's restore the host's
         * view of HCR_EL2.
         */
        write_sysreg(0, vttbr_el2);
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
        isb();

        if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
                /* Restore the registers to what they were */
                write_sysreg_el1(cxt->tcr, SYS_TCR);
                write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
        }

        local_irq_restore(cxt->flags);
}

static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
                                                 struct tlb_inv_context *cxt)
{
        write_sysreg(0, vttbr_el2);

        if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                /* Ensure write of the host VMID */
                isb();
                /* Restore the host's TCR_EL1 */
                write_sysreg_el1(cxt->tcr, SYS_TCR);
        }
}

static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
                                            struct tlb_inv_context *cxt)
{
        if (has_vhe())
                __tlb_switch_to_host_vhe(kvm, cxt);
        else
                __tlb_switch_to_host_nvhe(kvm, cxt);
}
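
/*
 * The functions below are the TLB maintenance operations exported to the
 * rest of KVM. The kernel proper does not call them directly; it reaches
 * them through the hyp call interface (typically something like
 * kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa) from the Stage-2 MMU
 * code). Each one switches to the guest's VMID, performs the relevant
 * TLBI, and switches back to the host context.
 */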
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
        struct tlb_inv_context cxt;

        dsb(ishst);

        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
        __tlb_switch_to_guest(kvm, &cxt);

        /*
         * We could do so much better if we had the VA as well.
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
        /* TLBI IPAS2E1IS takes the IPA shifted right by 12 bits */
        ipa >>= 12;
        __tlbi(ipas2e1is, ipa);

        /*
         * We have to ensure completion of the invalidation at Stage-2,
         * since a table walk on another CPU could refill a TLB with a
         * complete (S1 + S2) walk based on the old Stage-2 mapping if
         * the Stage-1 invalidation happened first.
         */
        dsb(ish);
        __tlbi(vmalle1is);
        dsb(ish);
        isb();

        /*
         * If the host is running at EL1 and we have a VPIPT I-cache,
         * then we must perform I-cache maintenance at EL2 in order for
         * it to have an effect on the guest. Since the guest cannot hit
         * I-cache lines allocated with a different VMID, we don't need
         * to worry about junk out of guest reset (we nuke the I-cache on
         * VMID rollover), but we do need to be careful when remapping
         * executable pages for the same guest. This can happen when KSM
         * takes a CoW fault on an executable page, copies the page into
         * a page that was previously mapped in the guest and then needs
         * to invalidate the guest view of the I-cache for that page
         * from EL1. To solve this, we invalidate the entire I-cache when
         * unmapping a page from a guest if we have a VPIPT I-cache but
         * the host is running at EL1. As above, we could do better if
         * we had the VA.
         *
         * The moral of this story is: if you have a VPIPT I-cache, then
         * you should be running with VHE enabled.
         */
        if (!has_vhe() && icache_is_vpipt())
                __flush_icache_all();

        __tlb_switch_to_host(kvm, &cxt);
}

void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
        struct tlb_inv_context cxt;

        dsb(ishst);

        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
        __tlb_switch_to_guest(kvm, &cxt);

        __tlbi(vmalls12e1is);
        dsb(ish);
        isb();

        __tlb_switch_to_host(kvm, &cxt);
}

void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
        struct tlb_inv_context cxt;

        /* Switch to requested VMID */
        __tlb_switch_to_guest(kvm, &cxt);

        __tlbi(vmalle1);
        dsb(nsh);
        isb();

        __tlb_switch_to_host(kvm, &cxt);
}

void __hyp_text __kvm_flush_vm_context(void)
{
        dsb(ishst);
        __tlbi(alle1is);

        /*
         * VIPT and PIPT caches are not affected by VMID, so no maintenance
         * is necessary across a VMID rollover.
         *
         * VPIPT caches constrain lookup and maintenance to the active VMID,
         * so we need to invalidate lines with a stale VMID to avoid an ABA
         * race after multiple rollovers.
         */
        if (icache_is_vpipt())
                asm volatile("ic ialluis");

        dsb(ish);
}