// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>

/*
 * The LSB of the HYP VA tag
 */
static u8 tag_lsb;
/*
 * The HYP VA tag value with the region bit
 */
static u64 tag_val;
static u64 va_mask;

/*
 * We want to generate a hyp VA with the following format (with V ==
 * vabits_actual):
 *
 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
 *  ---------------------------------------------------------
 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
 *           |--------- tag_val -----------|----- va_mask ---|
 *
 * which does not conflict with the idmap regions.
 */
__init void kvm_compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;

	/* Where is my RAM region? */
	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
	hyp_va_msb ^= BIT(vabits_actual - 1);

	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));

	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
	tag_val = hyp_va_msb;

	if (tag_lsb != (vabits_actual - 1)) {
		/* We have some free bits to insert a random tag. */
		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
	}
	tag_val >>= tag_lsb;
}

static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

	switch (n) {
	case 0:
		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
							  AARCH64_INSN_VARIANT_64BIT,
							  rn, rd, va_mask);
		break;

	case 1:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd,
					     tag_lsb);
		break;

	case 2:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(11, 0),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 3:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(23, 12),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 4:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, 64 - tag_lsb);
		break;
	}

	return insn;
}

void __init kvm_update_va_mask(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 5);

	for (i = 0; i < nr_inst; i++) {
		u32 rd, rn, insn, oinsn;

		/*
		 * VHE doesn't need any address translation, let's NOP
		 * everything.
		 *
		 * Alternatively, if the tag is zero (because the layout
		 * dictates it and we don't have any spare bits in the
		 * address), NOP everything after masking the kernel VA.
		 */
		if (has_vhe() || (!tag_val && i > 0)) {
			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
			continue;
		}

		oinsn = le32_to_cpu(origptr[i]);
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

		insn = compute_instruction(i, rd, rn);
		BUG_ON(insn == AARCH64_BREAK_FAULT);

		updptr[i] = cpu_to_le32(insn);
	}
}

void *__kvm_bp_vect_base;
int __kvm_harden_el2_vector_slot;

void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	BUG_ON(nr_inst != 5);

	if (has_vhe() || !cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		WARN_ON_ONCE(cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS));
		return;
	}

	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va()
	 */
	addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
	addr &= va_mask;
	addr |= tag_val << tag_lsb;

	/* Use PC[10:7] to branch to the same vector in KVM */
	addr |= ((u64)origptr & GENMASK_ULL(10, 7));

	/*
	 * Branch over the preamble in order to avoid the initial store on
	 * the stack (which we already perform in the hardening vectors).
	 */
	addr += KVM_VECTOR_PREAMBLE;

	/* stp x0, x1, [sp, #-16]! */
	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
						AARCH64_INSN_REG_1,
						AARCH64_INSN_REG_SP,
						-16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
	*updptr++ = cpu_to_le32(insn);

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)addr,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}
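
/*
 * Illustrative note (not part of the original file): once
 * kvm_compute_layout() has picked tag_lsb, tag_val and va_mask, the five
 * instructions rewritten by kvm_update_va_mask() compute the equivalent of
 * the C expression
 *
 *	hyp_va = (kern_va & va_mask) | (tag_val << tag_lsb);
 *
 * i.e. the AND keeps the kernel linear-map bits below tag_lsb, the first
 * ROR rotates them out of the way, the two ADDs insert the low and high
 * halves of tag_val, and the final ROR rotates everything back so the tag
 * lands at bit position tag_lsb. kvm_patch_vector_branch() above performs
 * the same computation directly in C when building the branch target for
 * __kvm_hyp_vector.
 */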