linux-xiaomi-chiron/arch/arm64/include/asm/kvm_asm.h


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/virt.h>

#define	VCPU_WORKAROUND_2_FLAG_SHIFT	0
#define	VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR
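/*
 * Illustrative decoding of the exit encoding above: a value of
 * ((1U << ARM_EXIT_WITH_SERROR_BIT) | ARM_EXCEPTION_TRAP) satisfies both
 * ARM_SERROR_PENDING() and ARM_EXCEPTION_IS_TRAP(), i.e. a trap exit
 * with an SError pending on top of it.
 */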

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)
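/* With AARCH64_INSN_SIZE == 4 bytes, the preamble above is 2 * 4 = 8 bytes. */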
#define __SMCCC_WORKAROUND_1_SMC_SZ 36

#ifndef __ASSEMBLY__

#include <linux/mm.h>

/*
 * Translate the name of a symbol defined in nVHE hyp to the name seen
 * by the kernel proper. All nVHE symbols are prefixed by the build system
 * to avoid clashes with the VHE variants.
 */
#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
/*
 * Define a pair of symbols sharing the same name, one defined in the
 * VHE and the other in the nVHE hyp implementation.
 */
#define DECLARE_KVM_HYP_SYM(sym)	\
	DECLARE_KVM_VHE_SYM(sym);	\
	DECLARE_KVM_NVHE_SYM(sym)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
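/*
 * Illustrative example; the symbol name "hyp_counter" is hypothetical:
 *
 *	DECLARE_KVM_HYP_SYM(hyp_counter);
 *
 * declares both "hyp_counter" (the VHE variant) and
 * "__kvm_nvhe_hyp_counter" (the nVHE variant, as renamed by the build
 * system); CHOOSE_VHE_SYM() and CHOOSE_NVHE_SYM() then name each
 * variant explicitly.
 */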
#ifndef __KVM_NVHE_HYPERVISOR__
/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
					   : CHOOSE_NVHE_SYM(sym))
#else
/* The nVHE hypervisor shouldn't even try to access anything */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_HYP_SYM(sym) __nvhe_undefined_symbol
#endif

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)					\
	({							\
		void *val = (ptr);				\
		if (!is_kernel_in_hyp_mode())			\
			val = lm_alias((ptr));			\
		val;						\
	 })

#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
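/*
 * Illustrative use: kvm_ksym_ref(__kvm_hyp_vector) evaluates to the
 * kernel image address when running VHE (the kernel itself runs at EL2)
 * and to the symbol's linear-map alias otherwise, matching how nVHE hyp
 * addresses are derived from the linear map.
 */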

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
#endif

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern void __kvm_enable_ssbs(void);
extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);
extern u32 __kvm_get_mdcr_el2(void);
extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

/*
 * Obtain the PC-relative address of a kernel symbol.
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
 * constant pool or something similar. This works well for HYP, as an
 * absolute VA is guaranteed to be wrong. Only use this if trying to
 * obtain the address of a symbol (i.e. not something you obtained by
 * following a pointer).
 */
#define hyp_symbol_addr(s)					\
	({							\
		typeof(s) *addr;				\
		asm("adrp	%0, %1\n"			\
		    "add	%0, %0, :lo12:%1\n"		\
		    : "=r" (addr) : "S" (&s));			\
		addr;						\
	})
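/*
 * Illustrative use: inside hyp code, hyp_symbol_addr(__kvm_hyp_vector)
 * computes the symbol's address relative to the current PC via adrp/add,
 * so the result is correct under the HYP mapping, whereas a plain
 * &__kvm_hyp_vector could be resolved through a literal pool holding the
 * kernel VA, which is wrong at EL2.
 */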

/*
 * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
 * provided that sym is really a *symbol* and not a pointer obtained from
 * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
 * sparse quiet.
 */
#define __hyp_this_cpu_ptr(sym)					\
	({							\
		void *__ptr;					\
		__verify_pcpu_ptr(&sym);			\
		__ptr = hyp_symbol_addr(sym);			\
		__ptr += read_sysreg(tpidr_el2);		\
		(typeof(sym) __kernel __force *)__ptr;		\
	})

#define __hyp_this_cpu_read(sym)				\
	({							\
		*__hyp_this_cpu_ptr(sym);			\
	})
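/*
 * Illustrative example; the per-CPU variable name is hypothetical:
 *
 *	DEFINE_PER_CPU(unsigned long, hyp_demo_counter);
 *	...
 *	unsigned long v = __hyp_this_cpu_read(hyp_demo_counter);
 *
 * At HYP, tpidr_el2 holds this CPU's per-CPU offset, so adding it to the
 * symbol's PC-relative address yields this CPU's copy of the variable.
 */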

#else /* __ASSEMBLY__ */

.macro hyp_adr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	add	\reg, \reg, \tmp
.endm

.macro hyp_ldr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	ldr	\reg, [\reg, \tmp]
.endm

.macro get_host_ctxt reg, tmp
	hyp_adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
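/*
 * Illustrative use from hyp assembly; the register choice is arbitrary:
 *
 *	get_vcpu_ptr	x1, x0	// x1 = running vcpu, x0 = scratch/host ctxt
 */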
#endif
#endif /* __ARM_KVM_ASM_H__ */