author     Paul Gortmaker <paul.gortmaker@windriver.com>   2018-08-23 13:18:21 -0400
committer  Paul Gortmaker <paul.gortmaker@windriver.com>   2018-08-23 13:18:21 -0400
commit     db151d40a09befbc96702236e1c842a8a90c3da5 (patch)
tree       81b2adb3900ec151ed59fb4de0f7ca071087dfdf
parent     1105556fc74d8793c0c129b40b3171fb04e97014 (diff)
download   longterm-queue-4.12-db151d40a09befbc96702236e1c842a8a90c3da5.tar.gz
queue: add some possible ARM patches
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--  queue/KVM-arm-arm64-Convert-kvm_host_cpu_state-to-a-static.patch    81
-rw-r--r--  queue/KVM-arm-arm64-Do-not-use-kern_hyp_va-with-kvm_vgic_g.patch    87
-rw-r--r--  queue/KVM-arm64-Avoid-storing-the-vcpu-pointer-on-the-stac.patch    249
-rw-r--r--  queue/KVM-arm64-Change-hyp_panic-s-dependency-on-tpidr_el2.patch    163
-rw-r--r--  queue/KVM-arm64-Stop-save-restoring-host-tpidr_el1-on-VHE.patch     120
-rw-r--r--  queue/KVM-arm64-Store-vcpu-on-the-stack-during-__guest_ent.patch    95
-rw-r--r--  queue/arm-arm64-smccc-Add-SMCCC-specific-return-codes.patch         37
-rw-r--r--  queue/arm64-Add-ARCH_WORKAROUND_2-probing.patch                     144
-rw-r--r--  queue/arm64-Add-per-cpu-infrastructure-to-call-ARCH_WORKAR.patch    75
-rw-r--r--  queue/arm64-Add-ssbd-command-line-option.patch                      212
-rw-r--r--  queue/arm64-Call-ARCH_WORKAROUND_2-on-transitions-between-.patch    130
-rw-r--r--  queue/arm64-KVM-Add-ARCH_WORKAROUND_2-discovery-through-AR.patch    128
-rw-r--r--  queue/arm64-KVM-Add-ARCH_WORKAROUND_2-support-for-guests.patch      205
-rw-r--r--  queue/arm64-KVM-Add-HYP-per-cpu-accessors.patch                     64
-rw-r--r--  queue/arm64-KVM-Handle-guest-s-ARCH_WORKAROUND_2-requests.patch     85
-rw-r--r--  queue/arm64-alternatives-Add-dynamic-patching-feature.patch         212
-rw-r--r--  queue/arm64-alternatives-use-tpidr_el2-on-VHE-hosts.patch           196
-rw-r--r--  queue/arm64-ssbd-Add-global-mitigation-state-accessor.patch         43
-rw-r--r--  queue/arm64-ssbd-Add-prctl-interface-for-per-thread-mitiga.patch    147
-rw-r--r--  queue/arm64-ssbd-Introduce-thread-flag-to-control-userspac.patch    50
-rw-r--r--  queue/arm64-ssbd-Restore-mitigation-status-on-CPU-resume.patch      97
-rw-r--r--  queue/arm64-ssbd-Skip-apply_ssbd-if-not-using-dynamic-miti.patch    62
-rw-r--r--  queue/series                                                       22
23 files changed, 2704 insertions, 0 deletions
diff --git a/queue/KVM-arm-arm64-Convert-kvm_host_cpu_state-to-a-static.patch b/queue/KVM-arm-arm64-Convert-kvm_host_cpu_state-to-a-static.patch
new file mode 100644
index 0000000..4663e96
--- /dev/null
+++ b/queue/KVM-arm-arm64-Convert-kvm_host_cpu_state-to-a-static.patch
@@ -0,0 +1,81 @@
+From 36989e7fd386a9a5822c48691473863f8fbb404d Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Mon, 8 Jan 2018 15:38:04 +0000
+Subject: [PATCH] KVM: arm/arm64: Convert kvm_host_cpu_state to a static
+ per-cpu allocation
+
+commit 36989e7fd386a9a5822c48691473863f8fbb404d upstream.
+
+kvm_host_cpu_state is a per-cpu allocation made from kvm_arch_init()
+used to store the host EL1 registers when KVM switches to a guest.
+
+Make it easier for ASM to generate pointers into this per-cpu memory
+by making it a static allocation.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Acked-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
+index 2df6a5c42f77..2fc6009a766c 100644
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -53,8 +53,8 @@
+ __asm__(".arch_extension virt");
+ #endif
+
++DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+ static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+-static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
+
+ /* Per-CPU variable containing the currently running vcpu. */
+ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
+@@ -354,7 +354,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ }
+
+ vcpu->cpu = cpu;
+- vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
++ vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
+
+ kvm_arm_set_running_vcpu(vcpu);
+ kvm_vgic_load(vcpu);
+@@ -1272,19 +1272,8 @@ static inline void hyp_cpu_pm_exit(void)
+ }
+ #endif
+
+-static void teardown_common_resources(void)
+-{
+- free_percpu(kvm_host_cpu_state);
+-}
+-
+ static int init_common_resources(void)
+ {
+- kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
+- if (!kvm_host_cpu_state) {
+- kvm_err("Cannot allocate host CPU state\n");
+- return -ENOMEM;
+- }
+-
+ /* set size of VMID supported by CPU */
+ kvm_vmid_bits = kvm_get_vmid_bits();
+ kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+@@ -1426,7 +1415,7 @@ static int init_hyp_mode(void)
+ for_each_possible_cpu(cpu) {
+ kvm_cpu_context_t *cpu_ctxt;
+
+- cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
++ cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
+ err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
+
+ if (err) {
+@@ -1550,7 +1539,6 @@ out_hyp:
+ if (!in_hyp_mode)
+ teardown_hyp_mode();
+ out_err:
+- teardown_common_resources();
+ return err;
+ }
+
+--
+2.15.0
+
diff --git a/queue/KVM-arm-arm64-Do-not-use-kern_hyp_va-with-kvm_vgic_g.patch b/queue/KVM-arm-arm64-Do-not-use-kern_hyp_va-with-kvm_vgic_g.patch
new file mode 100644
index 0000000..87cda9c
--- /dev/null
+++ b/queue/KVM-arm-arm64-Do-not-use-kern_hyp_va-with-kvm_vgic_g.patch
@@ -0,0 +1,87 @@
+From 44a497abd621a71c645f06d3d545ae2f46448830 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Sun, 3 Dec 2017 19:28:56 +0000
+Subject: [PATCH] KVM: arm/arm64: Do not use kern_hyp_va() with
+ kvm_vgic_global_state
+
+commit 44a497abd621a71c645f06d3d545ae2f46448830 upstream.
+
+kvm_vgic_global_state is part of the read-only section, and is
+usually accessed using a PC-relative address generation (adrp + add).
+
+It is thus useless to use kern_hyp_va() on it, and actively problematic
+if kern_hyp_va() becomes non-idempotent. On the other hand, there is
+no way that the compiler is going to guarantee that such access is
+always PC relative.
+
+So let's bite the bullet and provide our own accessor.
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: James Morse <james.morse@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index de1b919404e4..93395b7c2322 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -28,6 +28,13 @@
+ */
+ #define kern_hyp_va(kva) (kva)
+
++/* Contrary to arm64, there is no need to generate a PC-relative address */
++#define hyp_symbol_addr(s) \
++ ({ \
++ typeof(s) *addr = &(s); \
++ addr; \
++ })
++
+ /*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
+ */
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index 0656c79d968f..021d3a8117a8 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -110,6 +110,26 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
+
+ #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
+
++/*
++ * Obtain the PC-relative address of a kernel symbol
++ * s: symbol
++ *
++ * The goal of this macro is to return a symbol's address based on a
++ * PC-relative computation, as opposed to a loading the VA from a
++ * constant pool or something similar. This works well for HYP, as an
++ * absolute VA is guaranteed to be wrong. Only use this if trying to
++ * obtain the address of a symbol (i.e. not something you obtained by
++ * following a pointer).
++ */
++#define hyp_symbol_addr(s) \
++ ({ \
++ typeof(s) *addr; \
++ asm("adrp %0, %1\n" \
++ "add %0, %0, :lo12:%1\n" \
++ : "=r" (addr) : "S" (&s)); \
++ addr; \
++ })
++
+ /*
+ * We currently only support a 40bit IPA.
+ */
+diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+index 97f357ea9c72..10eb2e96b3e6 100644
+--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
++++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+@@ -60,7 +60,7 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
+ return -1;
+
+ rd = kvm_vcpu_dabt_get_rd(vcpu);
+- addr = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
++ addr = kern_hyp_va(hyp_symbol_addr(kvm_vgic_global_state)->vcpu_base_va);
+ addr += fault_ipa - vgic->vgic_cpu_base;
+
+ if (kvm_vcpu_dabt_iswrite(vcpu)) {
+--
+2.15.0
+
diff --git a/queue/KVM-arm64-Avoid-storing-the-vcpu-pointer-on-the-stac.patch b/queue/KVM-arm64-Avoid-storing-the-vcpu-pointer-on-the-stac.patch
new file mode 100644
index 0000000..f164441
--- /dev/null
+++ b/queue/KVM-arm64-Avoid-storing-the-vcpu-pointer-on-the-stac.patch
@@ -0,0 +1,249 @@
+From 4464e210de9e80e38de59df052fe09ea2ff80b1b Mon Sep 17 00:00:00 2001
+From: Christoffer Dall <christoffer.dall@linaro.org>
+Date: Sun, 8 Oct 2017 17:01:56 +0200
+Subject: [PATCH] KVM: arm64: Avoid storing the vcpu pointer on the stack
+
+commit 4464e210de9e80e38de59df052fe09ea2ff80b1b upstream.
+
+We already have the percpu area for the host cpu state, which points to
+the VCPU, so there's no need to store the VCPU pointer on the stack on
+every context switch. We can be a little more clever and just use
+tpidr_el2 for the percpu offset and load the VCPU pointer from the host
+context.
+
+This has the benefit of being able to retrieve the host context even
+when our stack is corrupted, and it has a potential performance benefit
+because we trade a store plus a load for an mrs and a load on a round
+trip to the guest.
+
+This does require us to calculate the percpu offset without including
+the offset from the kernel mapping of the percpu array to the linear
+mapping of the array (which is what we store in tpidr_el1), because a
+PC-relative generated address in EL2 is already giving us the hyp alias
+of the linear mapping of a kernel address. We do this in
+__cpu_init_hyp_mode() by using kvm_ksym_ref().
+
+The code that accesses ESR_EL2 was previously using an alternative to
+use the _EL1 accessor on VHE systems, but this was actually unnecessary
+as the _EL1 accessor aliases the ESR_EL2 register on VHE, and the _EL2
+accessor does the same thing on both systems.
+
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Andrew Jones <drjones@redhat.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+
+diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
+index 24961b732e65..7149f1520382 100644
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -33,6 +33,7 @@
+ #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
+ #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+
++/* Translate a kernel address of @sym into its equivalent linear mapping */
+ #define kvm_ksym_ref(sym) \
+ ({ \
+ void *val = &sym; \
+@@ -70,6 +71,20 @@ extern u32 __init_stage2_translation(void);
+
+ extern void __qcom_hyp_sanitize_btac_predictors(void);
+
++#else /* __ASSEMBLY__ */
++
++.macro get_host_ctxt reg, tmp
++ adr_l \reg, kvm_host_cpu_state
++ mrs \tmp, tpidr_el2
++ add \reg, \reg, \tmp
++.endm
++
++.macro get_vcpu_ptr vcpu, ctxt
++ get_host_ctxt \ctxt, \vcpu
++ ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
++ kern_hyp_va \vcpu
++.endm
++
+ #endif
+
+ #endif /* __ARM_KVM_ASM_H__ */
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 596f8e414a4c..618cfee7206a 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -358,10 +358,15 @@ int kvm_perf_teardown(void);
+
+ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+
++void __kvm_set_tpidr_el2(u64 tpidr_el2);
++DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
++
+ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
+ unsigned long hyp_stack_ptr,
+ unsigned long vector_ptr)
+ {
++ u64 tpidr_el2;
++
+ /*
+ * Call initialization code, and switch to the full blown HYP code.
+ * If the cpucaps haven't been finalized yet, something has gone very
+@@ -370,6 +375,16 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
+ */
+ BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+ __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
++
++ /*
++ * Calculate the raw per-cpu offset without a translation from the
++ * kernel's mapping to the linear mapping, and store it in tpidr_el2
++ * so that we can use adr_l to access per-cpu variables in EL2.
++ */
++ tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
++ - (u64)kvm_ksym_ref(kvm_host_cpu_state);
++
++ kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
+ }
+
+ static inline void kvm_arch_hardware_unsetup(void) {}
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index 1303e04110cd..78e1b0a70aaf 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -138,6 +138,7 @@ int main(void)
+ DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
+ DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
+ DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
++ DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
+ #endif
+ #ifdef CONFIG_CPU_PM
+ DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
+diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
+index fdd1068ee3a5..1f458f7c3b44 100644
+--- a/arch/arm64/kvm/hyp/entry.S
++++ b/arch/arm64/kvm/hyp/entry.S
+@@ -62,9 +62,6 @@ ENTRY(__guest_enter)
+ // Store the host regs
+ save_callee_saved_regs x1
+
+- // Store host_ctxt and vcpu for use at exit time
+- stp x1, x0, [sp, #-16]!
+-
+ add x18, x0, #VCPU_CONTEXT
+
+ // Restore guest regs x0-x17
+@@ -118,8 +115,7 @@ ENTRY(__guest_exit)
+ // Store the guest regs x19-x29, lr
+ save_callee_saved_regs x1
+
+- // Restore the host_ctxt from the stack
+- ldr x2, [sp], #16
++ get_host_ctxt x2, x3
+
+ // Now restore the host regs
+ restore_callee_saved_regs x2
+diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
+index f36464bd57c5..82fbc368f738 100644
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -57,13 +57,8 @@ ENDPROC(__vhe_hyp_call)
+ el1_sync: // Guest trapped into EL2
+ stp x0, x1, [sp, #-16]!
+
+-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+- mrs x1, esr_el2
+-alternative_else
+- mrs x1, esr_el1
+-alternative_endif
+- lsr x0, x1, #ESR_ELx_EC_SHIFT
+-
++ mrs x0, esr_el2
++ lsr x0, x0, #ESR_ELx_EC_SHIFT
+ cmp x0, #ESR_ELx_EC_HVC64
+ ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
+ b.ne el1_trap
+@@ -117,10 +112,14 @@ el1_hvc_guest:
+ eret
+
+ el1_trap:
++ get_vcpu_ptr x1, x0
++
++ mrs x0, esr_el2
++ lsr x0, x0, #ESR_ELx_EC_SHIFT
+ /*
+ * x0: ESR_EC
++ * x1: vcpu pointer
+ */
+- ldr x1, [sp, #16 + 8] // vcpu stored by __guest_enter
+
+ /*
+ * We trap the first access to the FP/SIMD to save the host context
+@@ -138,13 +137,13 @@ alternative_else_nop_endif
+
+ el1_irq:
+ stp x0, x1, [sp, #-16]!
+- ldr x1, [sp, #16 + 8]
++ get_vcpu_ptr x1, x0
+ mov x0, #ARM_EXCEPTION_IRQ
+ b __guest_exit
+
+ el1_error:
+ stp x0, x1, [sp, #-16]!
+- ldr x1, [sp, #16 + 8]
++ get_vcpu_ptr x1, x0
+ mov x0, #ARM_EXCEPTION_EL1_SERROR
+ b __guest_exit
+
+@@ -180,14 +179,7 @@ ENTRY(__hyp_do_panic)
+ ENDPROC(__hyp_do_panic)
+
+ ENTRY(__hyp_panic)
+- /*
+- * '=kvm_host_cpu_state' is a host VA from the constant pool, it may
+- * not be accessible by this address from EL2, hyp_panic() converts
+- * it with kern_hyp_va() before use.
+- */
+- ldr x0, =kvm_host_cpu_state
+- mrs x1, tpidr_el2
+- add x0, x0, x1
++ get_host_ctxt x0, x1
+ b hyp_panic
+ ENDPROC(__hyp_panic)
+
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index 24f52fedfb9e..46717da75643 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -469,7 +469,7 @@ static hyp_alternate_select(__hyp_call_panic,
+ __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
+-void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *__host_ctxt)
++void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
+ {
+ struct kvm_vcpu *vcpu = NULL;
+
+@@ -478,9 +478,6 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *__host_ctxt)
+ u64 par = read_sysreg(par_el1);
+
+ if (read_sysreg(vttbr_el2)) {
+- struct kvm_cpu_context *host_ctxt;
+-
+- host_ctxt = kern_hyp_va(__host_ctxt);
+ vcpu = host_ctxt->__hyp_running_vcpu;
+ __timer_disable_traps(vcpu);
+ __deactivate_traps(vcpu);
+diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
+index 2c17afd2be96..43b7dd65e3e6 100644
+--- a/arch/arm64/kvm/hyp/sysreg-sr.c
++++ b/arch/arm64/kvm/hyp/sysreg-sr.c
+@@ -189,3 +189,8 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
+ if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+ write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
+ }
++
++void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
++{
++ asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
++}
+--
+2.15.0
+
diff --git a/queue/KVM-arm64-Change-hyp_panic-s-dependency-on-tpidr_el2.patch b/queue/KVM-arm64-Change-hyp_panic-s-dependency-on-tpidr_el2.patch
new file mode 100644
index 0000000..36028f0
--- /dev/null
+++ b/queue/KVM-arm64-Change-hyp_panic-s-dependency-on-tpidr_el2.patch
@@ -0,0 +1,163 @@
+From c97e166e54b662717d20ec2e36761758d2b6a7c2 Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Mon, 8 Jan 2018 15:38:05 +0000
+Subject: [PATCH] KVM: arm64: Change hyp_panic()s dependency on tpidr_el2
+
+commit c97e166e54b662717d20ec2e36761758d2b6a7c2 upstream.
+
+Make tpidr_el2 a cpu-offset for per-cpu variables in the same way the
+host uses tpidr_el1. This lets tpidr_el{1,2} have the same value, and
+on VHE they can be the same register.
+
+KVM calls hyp_panic() when anything unexpected happens. This may occur
+while a guest owns the EL1 registers. KVM stashes the vcpu pointer in
+tpidr_el2, which it uses to find the host context in order to restore
+the host EL1 registers before parachuting into the host's panic().
+
+The host context is a struct kvm_cpu_context allocated in the per-cpu
+area, and mapped to hyp. Given the per-cpu offset for this CPU, this is
+easy to find. Change hyp_panic() to take a pointer to the
+struct kvm_cpu_context. Wrap these calls with an asm function that
+retrieves the struct kvm_cpu_context from the host's per-cpu area.
+
+Copy the per-cpu offset from the hosts tpidr_el1 into tpidr_el2 during
+kvm init. (Later patches will make this unnecessary for VHE hosts)
+
+We print out the vcpu pointer as part of the panic message. Add a back
+reference to the 'running vcpu' in the host cpu context to preserve this.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index ea6cb5b24258..7ee72b402907 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -192,6 +192,8 @@ struct kvm_cpu_context {
+ u64 sys_regs[NR_SYS_REGS];
+ u32 copro[NR_COPRO_REGS];
+ };
++
++ struct kvm_vcpu *__hyp_running_vcpu;
+ };
+
+ typedef struct kvm_cpu_context kvm_cpu_context_t;
+diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
+index fce7cc507e0a..e4f37b9dd47c 100644
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -163,6 +163,18 @@ ENTRY(__hyp_do_panic)
+ eret
+ ENDPROC(__hyp_do_panic)
+
++ENTRY(__hyp_panic)
++ /*
++ * '=kvm_host_cpu_state' is a host VA from the constant pool, it may
++ * not be accessible by this address from EL2, hyp_panic() converts
++ * it with kern_hyp_va() before use.
++ */
++ ldr x0, =kvm_host_cpu_state
++ mrs x1, tpidr_el2
++ add x0, x0, x1
++ b hyp_panic
++ENDPROC(__hyp_panic)
++
+ .macro invalid_vector label, target = __hyp_panic
+ .align 2
+ \label:
+diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
+index 603e1ee83e89..74306c33a6de 100644
+--- a/arch/arm64/kvm/hyp/s2-setup.c
++++ b/arch/arm64/kvm/hyp/s2-setup.c
+@@ -86,5 +86,8 @@ u32 __hyp_text __init_stage2_translation(void)
+
+ write_sysreg(val, vtcr_el2);
+
++ /* copy tpidr_el1 into tpidr_el2 for use by HYP */
++ write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
++
+ return parange;
+ }
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index 170e1917f83c..324f4202cdd5 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -306,9 +306,9 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+ u64 exit_code;
+
+ vcpu = kern_hyp_va(vcpu);
+- write_sysreg(vcpu, tpidr_el2);
+
+ host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
++ host_ctxt->__hyp_running_vcpu = vcpu;
+ guest_ctxt = &vcpu->arch.ctxt;
+
+ __sysreg_save_host_state(host_ctxt);
+@@ -443,7 +443,8 @@ again:
+
+ static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
+
+-static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
++static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
++ struct kvm_vcpu *vcpu)
+ {
+ unsigned long str_va;
+
+@@ -457,35 +458,35 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
+ __hyp_do_panic(str_va,
+ spsr, elr,
+ read_sysreg(esr_el2), read_sysreg_el2(far),
+- read_sysreg(hpfar_el2), par,
+- (void *)read_sysreg(tpidr_el2));
++ read_sysreg(hpfar_el2), par, vcpu);
+ }
+
+-static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
++static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
++ struct kvm_vcpu *vcpu)
+ {
+ panic(__hyp_panic_string,
+ spsr, elr,
+ read_sysreg_el2(esr), read_sysreg_el2(far),
+- read_sysreg(hpfar_el2), par,
+- (void *)read_sysreg(tpidr_el2));
++ read_sysreg(hpfar_el2), par, vcpu);
+ }
+
+ static hyp_alternate_select(__hyp_call_panic,
+ __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
+-void __hyp_text __noreturn __hyp_panic(void)
++void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *__host_ctxt)
+ {
++ struct kvm_vcpu *vcpu = NULL;
++
+ u64 spsr = read_sysreg_el2(spsr);
+ u64 elr = read_sysreg_el2(elr);
+ u64 par = read_sysreg(par_el1);
+
+ if (read_sysreg(vttbr_el2)) {
+- struct kvm_vcpu *vcpu;
+ struct kvm_cpu_context *host_ctxt;
+
+- vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
+- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
++ host_ctxt = kern_hyp_va(__host_ctxt);
++ vcpu = host_ctxt->__hyp_running_vcpu;
+ __timer_disable_traps(vcpu);
+ __deactivate_traps(vcpu);
+ __deactivate_vm(vcpu);
+@@ -493,7 +494,7 @@ void __hyp_text __noreturn __hyp_panic(void)
+ }
+
+ /* Call panic for real */
+- __hyp_call_panic()(spsr, elr, par);
++ __hyp_call_panic()(spsr, elr, par, vcpu);
+
+ unreachable();
+ }
+--
+2.15.0
+
diff --git a/queue/KVM-arm64-Stop-save-restoring-host-tpidr_el1-on-VHE.patch b/queue/KVM-arm64-Stop-save-restoring-host-tpidr_el1-on-VHE.patch
new file mode 100644
index 0000000..198e347
--- /dev/null
+++ b/queue/KVM-arm64-Stop-save-restoring-host-tpidr_el1-on-VHE.patch
@@ -0,0 +1,120 @@
+From 1f742679c33bc083722cb0b442a95d458c491b56 Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Mon, 8 Jan 2018 15:38:07 +0000
+Subject: [PATCH] KVM: arm64: Stop save/restoring host tpidr_el1 on VHE
+
+commit 1f742679c33bc083722cb0b442a95d458c491b56 upstream.
+
+Now that a VHE host uses tpidr_el2 for the cpu offset we no longer
+need KVM to save/restore tpidr_el1. Move this from the 'common' code
+into the non-vhe code. While we're at it, on VHE we don't need to
+save the ELR or SPSR as kernel_entry in entry.S will have pushed these
+onto the kernel stack, and will restore them from there. Move these
+to the non-vhe code as we need them to get back to the host.
+
+Finally remove the always-copy-tpidr we hid in the stage2 setup
+code, cpufeature's enable callback will do this for VHE, we only
+need KVM to do it for non-vhe. Add the copy into kvm-init instead.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
+index 33c40b3eea01..8a00de187e56 100644
+--- a/arch/arm64/kvm/hyp-init.S
++++ b/arch/arm64/kvm/hyp-init.S
+@@ -120,6 +120,10 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
+ kern_hyp_va x2
+ msr vbar_el2, x2
+
++ /* copy tpidr_el1 into tpidr_el2 for use by HYP */
++ mrs x1, tpidr_el1
++ msr tpidr_el2, x1
++
+ /* Hello, World! */
+ eret
+ ENDPROC(__kvm_hyp_init)
+diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
+index 74306c33a6de..603e1ee83e89 100644
+--- a/arch/arm64/kvm/hyp/s2-setup.c
++++ b/arch/arm64/kvm/hyp/s2-setup.c
+@@ -86,8 +86,5 @@ u32 __hyp_text __init_stage2_translation(void)
+
+ write_sysreg(val, vtcr_el2);
+
+- /* copy tpidr_el1 into tpidr_el2 for use by HYP */
+- write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+-
+ return parange;
+ }
+diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
+index 934137647837..c54cc2afb92b 100644
+--- a/arch/arm64/kvm/hyp/sysreg-sr.c
++++ b/arch/arm64/kvm/hyp/sysreg-sr.c
+@@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
+ /*
+ * Non-VHE: Both host and guest must save everything.
+ *
+- * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
+- * pstate, and guest must save everything.
++ * VHE: Host must save tpidr*_el0, actlr_el1, mdscr_el1, sp_el0,
++ * and guest must save everything.
+ */
+
+ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
+@@ -36,11 +36,8 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
+ ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
+ ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
+ ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
+- ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
+ ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
+ ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
+- ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
+- ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
+ }
+
+ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+@@ -62,10 +59,13 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+ ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
+ ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
+ ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
++ ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
+
+ ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
+ ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
+ ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
++ ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
++ ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
+ }
+
+ static hyp_alternate_select(__sysreg_call_save_host_state,
+@@ -89,11 +89,8 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx
+ write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
+ write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
+ write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
+- write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
+ write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
+ write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
+- write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
+- write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+ }
+
+ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+@@ -115,10 +112,13 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+ write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
+ write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
+ write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
++ write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
+
+ write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
+ write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
+ write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
++ write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
++ write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+ }
+
+ static hyp_alternate_select(__sysreg_call_restore_host_state,
+--
+2.15.0
+
diff --git a/queue/KVM-arm64-Store-vcpu-on-the-stack-during-__guest_ent.patch b/queue/KVM-arm64-Store-vcpu-on-the-stack-during-__guest_ent.patch
new file mode 100644
index 0000000..539b218
--- /dev/null
+++ b/queue/KVM-arm64-Store-vcpu-on-the-stack-during-__guest_ent.patch
@@ -0,0 +1,95 @@
+From 32b03d1059667a39e089c45ee38ec9c16332430f Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Mon, 8 Jan 2018 15:38:03 +0000
+Subject: [PATCH] KVM: arm64: Store vcpu on the stack during __guest_enter()
+
+commit 32b03d1059667a39e089c45ee38ec9c16332430f upstream.
+
+KVM uses tpidr_el2 as its private vcpu register, which makes sense for
+non-vhe world switch as only KVM can access this register. This means
+vhe Linux has to use tpidr_el1, which KVM has to save/restore as part
+of the host context.
+
+If the SDEI handler code runs behind KVMs back, it mustn't access any
+per-cpu variables. To allow this on systems with vhe we need to make
+the host use tpidr_el2, saving KVM from save/restoring it.
+
+__guest_enter() stores the host_ctxt on the stack, do the same with
+the vcpu.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
+index 9c45c6af1f58..fe4678f20a85 100644
+--- a/arch/arm64/kvm/hyp/entry.S
++++ b/arch/arm64/kvm/hyp/entry.S
+@@ -62,8 +62,8 @@ ENTRY(__guest_enter)
+ // Store the host regs
+ save_callee_saved_regs x1
+
+- // Store the host_ctxt for use at exit time
+- str x1, [sp, #-16]!
++ // Store host_ctxt and vcpu for use at exit time
++ stp x1, x0, [sp, #-16]!
+
+ add x18, x0, #VCPU_CONTEXT
+
+@@ -159,6 +159,10 @@ abort_guest_exit_end:
+ ENDPROC(__guest_exit)
+
+ ENTRY(__fpsimd_guest_restore)
++ // x0: esr
++ // x1: vcpu
++ // x2-x29,lr: vcpu regs
++ // vcpu x0-x1 on the stack
+ stp x2, x3, [sp, #-16]!
+ stp x4, lr, [sp, #-16]!
+
+@@ -173,7 +177,7 @@ alternative_else
+ alternative_endif
+ isb
+
+- mrs x3, tpidr_el2
++ mov x3, x1
+
+ ldr x0, [x3, #VCPU_HOST_CONTEXT]
+ kern_hyp_va x0
+diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
+index 5170ce1021da..fce7cc507e0a 100644
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -104,6 +104,7 @@ el1_trap:
+ /*
+ * x0: ESR_EC
+ */
++ ldr x1, [sp, #16 + 8] // vcpu stored by __guest_enter
+
+ /*
+ * We trap the first access to the FP/SIMD to save the host context
+@@ -116,19 +117,18 @@ alternative_if_not ARM64_HAS_NO_FPSIMD
+ b.eq __fpsimd_guest_restore
+ alternative_else_nop_endif
+
+- mrs x1, tpidr_el2
+ mov x0, #ARM_EXCEPTION_TRAP
+ b __guest_exit
+
+ el1_irq:
+ stp x0, x1, [sp, #-16]!
+- mrs x1, tpidr_el2
++ ldr x1, [sp, #16 + 8]
+ mov x0, #ARM_EXCEPTION_IRQ
+ b __guest_exit
+
+ el1_error:
+ stp x0, x1, [sp, #-16]!
+- mrs x1, tpidr_el2
++ ldr x1, [sp, #16 + 8]
+ mov x0, #ARM_EXCEPTION_EL1_SERROR
+ b __guest_exit
+
+--
+2.15.0
+
diff --git a/queue/arm-arm64-smccc-Add-SMCCC-specific-return-codes.patch b/queue/arm-arm64-smccc-Add-SMCCC-specific-return-codes.patch
new file mode 100644
index 0000000..57eadd2
--- /dev/null
+++ b/queue/arm-arm64-smccc-Add-SMCCC-specific-return-codes.patch
@@ -0,0 +1,37 @@
+From eff0e9e1078ea7dc1d794dc50e31baef984c46d7 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:05 +0100
+Subject: [PATCH] arm/arm64: smccc: Add SMCCC-specific return codes
+
+commit eff0e9e1078ea7dc1d794dc50e31baef984c46d7 upstream.
+
+We've so far used the PSCI return codes for SMCCC because they
+were extremely similar. But with the new ARM DEN 0070A specification,
+"NOT_REQUIRED" (-2) is clashing with PSCI's "PSCI_RET_INVALID_PARAMS".
+
+Let's bite the bullet and add SMCCC specific return codes. Users
+can be repainted as and when required.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
+index a031897fca76..c89da86de99f 100644
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -291,5 +291,10 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
+ */
+ #define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
++/* Return codes defined in ARM DEN 0070A */
++#define SMCCC_RET_SUCCESS 0
++#define SMCCC_RET_NOT_SUPPORTED -1
++#define SMCCC_RET_NOT_REQUIRED -2
++
+ #endif /*__ASSEMBLY__*/
+ #endif /*__LINUX_ARM_SMCCC_H*/
+--
+2.15.0
+
diff --git a/queue/arm64-Add-ARCH_WORKAROUND_2-probing.patch b/queue/arm64-Add-ARCH_WORKAROUND_2-probing.patch
new file mode 100644
index 0000000..1a7f029
--- /dev/null
+++ b/queue/arm64-Add-ARCH_WORKAROUND_2-probing.patch
@@ -0,0 +1,144 @@
+From a725e3dda1813ed306734823ac4c65ca04e38500 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:08 +0100
+Subject: [PATCH] arm64: Add ARCH_WORKAROUND_2 probing
+
+commit a725e3dda1813ed306734823ac4c65ca04e38500 upstream.
+
+As for Spectre variant-2, we rely on SMCCC 1.1 to provide the
+discovery mechanism for detecting the SSBD mitigation.
+
+A new capability is also allocated for that purpose, and a
+config option.
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 3aed13626fd7..0b98a6c42454 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -940,6 +940,15 @@ config HARDEN_EL2_VECTORS
+
+ If unsure, say Y.
+
++config ARM64_SSBD
++ bool "Speculative Store Bypass Disable" if EXPERT
++ default y
++ help
++ This enables mitigation of the bypassing of previous stores
++ by speculative loads.
++
++ If unsure, say Y.
++
+ menuconfig ARMV8_DEPRECATED
+ bool "Emulate deprecated/obsolete ARMv8 instructions"
+ depends on COMPAT
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index bc51b72fafd4..8a699c708fc9 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -48,7 +48,8 @@
+ #define ARM64_HAS_CACHE_IDC 27
+ #define ARM64_HAS_CACHE_DIC 28
+ #define ARM64_HW_DBM 29
++#define ARM64_SSBD 30
+
+-#define ARM64_NCAPS 30
++#define ARM64_NCAPS 31
+
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index cd91ca0250f1..7e8f12d85d99 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -256,6 +256,67 @@ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+
+ *updptr = cpu_to_le32(insn);
+ }
++
++static void arm64_set_ssbd_mitigation(bool state)
++{
++ switch (psci_ops.conduit) {
++ case PSCI_CONDUIT_HVC:
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
++ break;
++
++ case PSCI_CONDUIT_SMC:
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
++ break;
++
++ default:
++ WARN_ON_ONCE(1);
++ break;
++ }
++}
++
++static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
++ int scope)
++{
++ struct arm_smccc_res res;
++ bool supported = true;
++
++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++
++ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++ return false;
++
++ /*
++ * The probe function return value is either negative
++ * (unsupported or mitigated), positive (unaffected), or zero
++ * (requires mitigation). We only need to do anything in the
++ * last case.
++ */
++ switch (psci_ops.conduit) {
++ case PSCI_CONDUIT_HVC:
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
++ if ((int)res.a0 != 0)
++ supported = false;
++ break;
++
++ case PSCI_CONDUIT_SMC:
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
++ if ((int)res.a0 != 0)
++ supported = false;
++ break;
++
++ default:
++ supported = false;
++ }
++
++ if (supported) {
++ __this_cpu_write(arm64_ssbd_callback_required, 1);
++ arm64_set_ssbd_mitigation(true);
++ }
++
++ return supported;
++}
+ #endif /* CONFIG_ARM64_SSBD */
+
+ #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
+@@ -512,6 +573,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
+ },
++#endif
++#ifdef CONFIG_ARM64_SSBD
++ {
++ .desc = "Speculative Store Bypass Disable",
++ .capability = ARM64_SSBD,
++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++ .matches = has_ssbd_mitigation,
++ },
+ #endif
+ {
+ }
+--
+2.15.0
+
diff --git a/queue/arm64-Add-per-cpu-infrastructure-to-call-ARCH_WORKAR.patch b/queue/arm64-Add-per-cpu-infrastructure-to-call-ARCH_WORKAR.patch
new file mode 100644
index 0000000..1b8a5a5
--- /dev/null
+++ b/queue/arm64-Add-per-cpu-infrastructure-to-call-ARCH_WORKAR.patch
@@ -0,0 +1,75 @@
+From 5cf9ce6e5ea50f805c6188c04ed0daaec7b6887d Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:07 +0100
+Subject: [PATCH] arm64: Add per-cpu infrastructure to call ARCH_WORKAROUND_2
+
+commit 5cf9ce6e5ea50f805c6188c04ed0daaec7b6887d upstream.
+
+In a heterogeneous system, we can end up with both affected and
+unaffected CPUs. Let's check their status before calling into the
+firmware.
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index c1eda6be7758..cd91ca0250f1 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -233,6 +233,8 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+ #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
+ #ifdef CONFIG_ARM64_SSBD
++DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
++
+ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr,
+ int nr_inst)
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index f33e6aed3037..29ad672a6abd 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -140,8 +140,10 @@ alternative_else_nop_endif
+
+ // This macro corrupts x0-x3. It is the caller's duty
+ // to save/restore them if required.
+- .macro apply_ssbd, state
++ .macro apply_ssbd, state, targ, tmp1, tmp2
+ #ifdef CONFIG_ARM64_SSBD
++ ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
++ cbz \tmp2, \targ
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+ mov w1, #\state
+ alternative_cb arm64_update_smccc_conduit
+@@ -176,12 +178,13 @@ alternative_cb_end
+ ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
+ disable_step_tsk x19, x20 // exceptions when scheduling.
+
+- apply_ssbd 1
++ apply_ssbd 1, 1f, x22, x23
+
+ #ifdef CONFIG_ARM64_SSBD
+ ldp x0, x1, [sp, #16 * 0]
+ ldp x2, x3, [sp, #16 * 1]
+ #endif
++1:
+
+ mov x29, xzr // fp pointed to user-space
+ .else
+@@ -323,8 +326,8 @@ alternative_if ARM64_WORKAROUND_845719
+ alternative_else_nop_endif
+ #endif
+ 3:
+- apply_ssbd 0
+-
++ apply_ssbd 0, 5f, x0, x1
++5:
+ .endif
+
+ msr elr_el1, x21 // set up the return data
+--
+2.15.0
+
diff --git a/queue/arm64-Add-ssbd-command-line-option.patch b/queue/arm64-Add-ssbd-command-line-option.patch
new file mode 100644
index 0000000..0715e83
--- /dev/null
+++ b/queue/arm64-Add-ssbd-command-line-option.patch
@@ -0,0 +1,212 @@
+From a43ae4dfe56a01f5b98ba0cb2f784b6a43bafcc6 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:09 +0100
+Subject: [PATCH] arm64: Add 'ssbd' command-line option
+
+commit a43ae4dfe56a01f5b98ba0cb2f784b6a43bafcc6 upstream.
+
+On a system where the firmware implements ARCH_WORKAROUND_2,
+it may be useful to either permanently enable or disable the
+workaround for cases where the user decides that they'd rather
+not get a trap overhead, and keep the mitigation permanently
+on or off instead of switching it on exception entry/exit.
+
+In any case, default to the mitigation being enabled.
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 11fc28ecdb6d..7db8868fabab 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4047,6 +4047,23 @@
+ expediting. Set to zero to disable automatic
+ expediting.
+
++ ssbd= [ARM64,HW]
++ Speculative Store Bypass Disable control
++
++ On CPUs that are vulnerable to the Speculative
++ Store Bypass vulnerability and offer a
++ firmware based mitigation, this parameter
++ indicates how the mitigation should be used:
++
++ force-on: Unconditionally enable mitigation for
++ for both kernel and userspace
++ force-off: Unconditionally disable mitigation for
++ for both kernel and userspace
++ kernel: Always enable mitigation in the
++ kernel, and offer a prctl interface
++ to allow userspace to register its
++ interest in being mitigated too.
++
+ stack_guard_gap= [MM]
+ override the default stack gap protection. The value
+ is in page units and it defines how many pages prior
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index 09b0f2a80c8f..b50650f3e496 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -537,6 +537,12 @@ static inline u64 read_zcr_features(void)
+ return zcr;
+ }
+
++#define ARM64_SSBD_UNKNOWN -1
++#define ARM64_SSBD_FORCE_DISABLE 0
++#define ARM64_SSBD_KERNEL 1
++#define ARM64_SSBD_FORCE_ENABLE 2
++#define ARM64_SSBD_MITIGATED 3
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 7e8f12d85d99..1075f90fdd8c 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -235,6 +235,38 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+ #ifdef CONFIG_ARM64_SSBD
+ DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
++int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
++
++static const struct ssbd_options {
++ const char *str;
++ int state;
++} ssbd_options[] = {
++ { "force-on", ARM64_SSBD_FORCE_ENABLE, },
++ { "force-off", ARM64_SSBD_FORCE_DISABLE, },
++ { "kernel", ARM64_SSBD_KERNEL, },
++};
++
++static int __init ssbd_cfg(char *buf)
++{
++ int i;
++
++ if (!buf || !buf[0])
++ return -EINVAL;
++
++ for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
++ int len = strlen(ssbd_options[i].str);
++
++ if (strncmp(buf, ssbd_options[i].str, len))
++ continue;
++
++ ssbd_state = ssbd_options[i].state;
++ return 0;
++ }
++
++ return -EINVAL;
++}
++early_param("ssbd", ssbd_cfg);
++
+ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr,
+ int nr_inst)
+@@ -278,44 +310,83 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
+ int scope)
+ {
+ struct arm_smccc_res res;
+- bool supported = true;
++ bool required = true;
++ s32 val;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+- if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++ if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
++ ssbd_state = ARM64_SSBD_UNKNOWN;
+ return false;
++ }
+
+- /*
+- * The probe function return value is either negative
+- * (unsupported or mitigated), positive (unaffected), or zero
+- * (requires mitigation). We only need to do anything in the
+- * last case.
+- */
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+- if ((int)res.a0 != 0)
+- supported = false;
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+- if ((int)res.a0 != 0)
+- supported = false;
+ break;
+
+ default:
+- supported = false;
++ ssbd_state = ARM64_SSBD_UNKNOWN;
++ return false;
+ }
+
+- if (supported) {
+- __this_cpu_write(arm64_ssbd_callback_required, 1);
++ val = (s32)res.a0;
++
++ switch (val) {
++ case SMCCC_RET_NOT_SUPPORTED:
++ ssbd_state = ARM64_SSBD_UNKNOWN;
++ return false;
++
++ case SMCCC_RET_NOT_REQUIRED:
++ pr_info_once("%s mitigation not required\n", entry->desc);
++ ssbd_state = ARM64_SSBD_MITIGATED;
++ return false;
++
++ case SMCCC_RET_SUCCESS:
++ required = true;
++ break;
++
++ case 1: /* Mitigation not required on this CPU */
++ required = false;
++ break;
++
++ default:
++ WARN_ON(1);
++ return false;
++ }
++
++ switch (ssbd_state) {
++ case ARM64_SSBD_FORCE_DISABLE:
++ pr_info_once("%s disabled from command-line\n", entry->desc);
++ arm64_set_ssbd_mitigation(false);
++ required = false;
++ break;
++
++ case ARM64_SSBD_KERNEL:
++ if (required) {
++ __this_cpu_write(arm64_ssbd_callback_required, 1);
++ arm64_set_ssbd_mitigation(true);
++ }
++ break;
++
++ case ARM64_SSBD_FORCE_ENABLE:
++ pr_info_once("%s forced from command-line\n", entry->desc);
+ arm64_set_ssbd_mitigation(true);
++ required = true;
++ break;
++
++ default:
++ WARN_ON(1);
++ break;
+ }
+
+- return supported;
++ return required;
+ }
+ #endif /* CONFIG_ARM64_SSBD */
+
+--
+2.15.0
+
diff --git a/queue/arm64-Call-ARCH_WORKAROUND_2-on-transitions-between-.patch b/queue/arm64-Call-ARCH_WORKAROUND_2-on-transitions-between-.patch
new file mode 100644
index 0000000..1214779
--- /dev/null
+++ b/queue/arm64-Call-ARCH_WORKAROUND_2-on-transitions-between-.patch
@@ -0,0 +1,130 @@
+From 8e2906245f1e3b0d027169d9f2e55ce0548cb96e Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:06 +0100
+Subject: [PATCH] arm64: Call ARCH_WORKAROUND_2 on transitions between EL0 and
+ EL1
+
+commit 8e2906245f1e3b0d027169d9f2e55ce0548cb96e upstream.
+
+In order for the kernel to protect itself, let's call the SSBD mitigation
+implemented by the higher exception level (either hypervisor or firmware)
+on each transition between userspace and kernel.
+
+We must take the PSCI conduit into account in order to target the
+right exception level, hence the introduction of a runtime patching
+callback.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index a900befadfe8..c1eda6be7758 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -232,6 +232,30 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+ }
+ #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
++#ifdef CONFIG_ARM64_SSBD
++void __init arm64_update_smccc_conduit(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr,
++ int nr_inst)
++{
++ u32 insn;
++
++ BUG_ON(nr_inst != 1);
++
++ switch (psci_ops.conduit) {
++ case PSCI_CONDUIT_HVC:
++ insn = aarch64_insn_get_hvc_value();
++ break;
++ case PSCI_CONDUIT_SMC:
++ insn = aarch64_insn_get_smc_value();
++ break;
++ default:
++ return;
++ }
++
++ *updptr = cpu_to_le32(insn);
++}
++#endif /* CONFIG_ARM64_SSBD */
++
+ #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
+ .matches = is_affected_midr_range, \
+ .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index ec2ee720e33e..f33e6aed3037 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -18,6 +18,7 @@
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <linux/arm-smccc.h>
+ #include <linux/init.h>
+ #include <linux/linkage.h>
+
+@@ -137,6 +138,18 @@ alternative_else_nop_endif
+ add \dst, \dst, #(\sym - .entry.tramp.text)
+ .endm
+
++ // This macro corrupts x0-x3. It is the caller's duty
++ // to save/restore them if required.
++ .macro apply_ssbd, state
++#ifdef CONFIG_ARM64_SSBD
++ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
++ mov w1, #\state
++alternative_cb arm64_update_smccc_conduit
++ nop // Patched to SMC/HVC #0
++alternative_cb_end
++#endif
++ .endm
++
+ .macro kernel_entry, el, regsize = 64
+ .if \regsize == 32
+ mov w0, w0 // zero upper 32 bits of x0
+@@ -163,6 +176,13 @@ alternative_else_nop_endif
+ ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
+ disable_step_tsk x19, x20 // exceptions when scheduling.
+
++ apply_ssbd 1
++
++#ifdef CONFIG_ARM64_SSBD
++ ldp x0, x1, [sp, #16 * 0]
++ ldp x2, x3, [sp, #16 * 1]
++#endif
++
+ mov x29, xzr // fp pointed to user-space
+ .else
+ add x21, sp, #S_FRAME_SIZE
+@@ -303,6 +323,8 @@ alternative_if ARM64_WORKAROUND_845719
+ alternative_else_nop_endif
+ #endif
+ 3:
++ apply_ssbd 0
++
+ .endif
+
+ msr elr_el1, x21 // set up the return data
+diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
+index c89da86de99f..ca1d2cc2cdfa 100644
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -80,6 +80,11 @@
+ ARM_SMCCC_SMC_32, \
+ 0, 0x8000)
+
++#define ARM_SMCCC_ARCH_WORKAROUND_2 \
++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
++ ARM_SMCCC_SMC_32, \
++ 0, 0x7fff)
++
+ #ifndef __ASSEMBLY__
+
+ #include <linux/linkage.h>
+--
+2.15.0
+
diff --git a/queue/arm64-KVM-Add-ARCH_WORKAROUND_2-discovery-through-AR.patch b/queue/arm64-KVM-Add-ARCH_WORKAROUND_2-discovery-through-AR.patch
new file mode 100644
index 0000000..4c747d9
--- /dev/null
+++ b/queue/arm64-KVM-Add-ARCH_WORKAROUND_2-discovery-through-AR.patch
@@ -0,0 +1,128 @@
+From 5d81f7dc9bca4f4963092433e27b508cbe524a32 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:18 +0100
+Subject: [PATCH] arm64: KVM: Add ARCH_WORKAROUND_2 discovery through
+ ARCH_FEATURES_FUNC_ID
+
+commit 5d81f7dc9bca4f4963092433e27b508cbe524a32 upstream.
+
+Now that all our infrastructure is in place, let's expose the
+availability of ARCH_WORKAROUND_2 to guests. We take this opportunity
+to tidy up a couple of SMCCC constants.
+
+Acked-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
+index c7c28c885a19..7001fb871429 100644
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -315,6 +315,18 @@ static inline bool kvm_arm_harden_branch_predictor(void)
+ return false;
+ }
+
++#define KVM_SSBD_UNKNOWN -1
++#define KVM_SSBD_FORCE_DISABLE 0
++#define KVM_SSBD_KERNEL 1
++#define KVM_SSBD_FORCE_ENABLE 2
++#define KVM_SSBD_MITIGATED 3
++
++static inline int kvm_arm_have_ssbd(void)
++{
++ /* No way to detect it yet, pretend it is not there. */
++ return KVM_SSBD_UNKNOWN;
++}
++
+ static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
+ static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
+
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 9bef3f69bdcd..95d8a0e15b5f 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -455,6 +455,29 @@ static inline bool kvm_arm_harden_branch_predictor(void)
+ return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+ }
+
++#define KVM_SSBD_UNKNOWN -1
++#define KVM_SSBD_FORCE_DISABLE 0
++#define KVM_SSBD_KERNEL 1
++#define KVM_SSBD_FORCE_ENABLE 2
++#define KVM_SSBD_MITIGATED 3
++
++static inline int kvm_arm_have_ssbd(void)
++{
++ switch (arm64_get_ssbd_state()) {
++ case ARM64_SSBD_FORCE_DISABLE:
++ return KVM_SSBD_FORCE_DISABLE;
++ case ARM64_SSBD_KERNEL:
++ return KVM_SSBD_KERNEL;
++ case ARM64_SSBD_FORCE_ENABLE:
++ return KVM_SSBD_FORCE_ENABLE;
++ case ARM64_SSBD_MITIGATED:
++ return KVM_SSBD_MITIGATED;
++ case ARM64_SSBD_UNKNOWN:
++ default:
++ return KVM_SSBD_UNKNOWN;
++ }
++}
++
+ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
+ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
+
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index 3256b9228e75..a74311beda35 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -122,6 +122,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ /* Reset PMU */
+ kvm_pmu_vcpu_reset(vcpu);
+
++ /* Default workaround setup is enabled (if supported) */
++ if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
++ vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
++
+ /* Reset timer */
+ return kvm_timer_vcpu_reset(vcpu);
+ }
+diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
+index c4762bef13c6..c95ab4c5a475 100644
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -405,7 +405,7 @@ static int kvm_psci_call(struct kvm_vcpu *vcpu)
+ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+ {
+ u32 func_id = smccc_get_function(vcpu);
+- u32 val = PSCI_RET_NOT_SUPPORTED;
++ u32 val = SMCCC_RET_NOT_SUPPORTED;
+ u32 feature;
+
+ switch (func_id) {
+@@ -417,7 +417,21 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+ switch(feature) {
+ case ARM_SMCCC_ARCH_WORKAROUND_1:
+ if (kvm_arm_harden_branch_predictor())
+- val = 0;
++ val = SMCCC_RET_SUCCESS;
++ break;
++ case ARM_SMCCC_ARCH_WORKAROUND_2:
++ switch (kvm_arm_have_ssbd()) {
++ case KVM_SSBD_FORCE_DISABLE:
++ case KVM_SSBD_UNKNOWN:
++ break;
++ case KVM_SSBD_KERNEL:
++ val = SMCCC_RET_SUCCESS;
++ break;
++ case KVM_SSBD_FORCE_ENABLE:
++ case KVM_SSBD_MITIGATED:
++ val = SMCCC_RET_NOT_REQUIRED;
++ break;
++ }
+ break;
+ }
+ break;
+--
+2.15.0
+
diff --git a/queue/arm64-KVM-Add-ARCH_WORKAROUND_2-support-for-guests.patch b/queue/arm64-KVM-Add-ARCH_WORKAROUND_2-support-for-guests.patch
new file mode 100644
index 0000000..310e4a9
--- /dev/null
+++ b/queue/arm64-KVM-Add-ARCH_WORKAROUND_2-support-for-guests.patch
@@ -0,0 +1,205 @@
+From 55e3748e8902ff641e334226bdcb432f9a5d78d3 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:16 +0100
+Subject: [PATCH] arm64: KVM: Add ARCH_WORKAROUND_2 support for guests
+
+commit 55e3748e8902ff641e334226bdcb432f9a5d78d3 upstream.
+
+In order to offer ARCH_WORKAROUND_2 support to guests, we need
+a bit of infrastructure.
+
+Let's add a flag indicating whether or not the guest uses
+SSBD mitigation. Depending on the state of this flag, allow
+KVM to disable ARCH_WORKAROUND_2 before entering the guest,
+and enable it when exiting it.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index 707a1f06dc5d..b0c17d88ed40 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -319,6 +319,11 @@ static inline int kvm_map_vectors(void)
+ return 0;
+ }
+
++static inline int hyp_map_aux_data(void)
++{
++ return 0;
++}
++
+ #define kvm_phys_to_vttbr(addr) (addr)
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
+index fefd8cf42c35..d4fbb1356c4c 100644
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -33,6 +33,9 @@
+ #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
+ #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+
++#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
++#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
++
+ /* Translate a kernel address of @sym into its equivalent linear mapping */
+ #define kvm_ksym_ref(sym) \
+ ({ \
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 469de8acd06f..9bef3f69bdcd 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -216,6 +216,9 @@ struct kvm_vcpu_arch {
+ /* Exception Information */
+ struct kvm_vcpu_fault_info fault;
+
++ /* State of various workarounds, see kvm_asm.h for bit assignment */
++ u64 workaround_flags;
++
+ /* Guest debug state */
+ u64 debug_flags;
+
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index f74987b76d91..fbe4ddd9da09 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -456,6 +456,30 @@ static inline int kvm_map_vectors(void)
+ }
+ #endif
+
++#ifdef CONFIG_ARM64_SSBD
++DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
++
++static inline int hyp_map_aux_data(void)
++{
++ int cpu, err;
++
++ for_each_possible_cpu(cpu) {
++ u64 *ptr;
++
++ ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
++ err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
++ if (err)
++ return err;
++ }
++ return 0;
++}
++#else
++static inline int hyp_map_aux_data(void)
++{
++ return 0;
++}
++#endif
++
+ #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
+
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index d9645236e474..c50cedc447f1 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -15,6 +15,7 @@
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <linux/arm-smccc.h>
+ #include <linux/types.h>
+ #include <linux/jump_label.h>
+ #include <uapi/linux/psci.h>
+@@ -389,6 +390,39 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ return false;
+ }
+
++static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
++{
++ if (!cpus_have_const_cap(ARM64_SSBD))
++ return false;
++
++ return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
++}
++
++static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
++{
++#ifdef CONFIG_ARM64_SSBD
++ /*
++ * The host runs with the workaround always present. If the
++ * guest wants it disabled, so be it...
++ */
++ if (__needs_ssbd_off(vcpu) &&
++ __hyp_this_cpu_read(arm64_ssbd_callback_required))
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
++#endif
++}
++
++static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
++{
++#ifdef CONFIG_ARM64_SSBD
++ /*
++ * If the guest has disabled the workaround, bring it back on.
++ */
++ if (__needs_ssbd_off(vcpu) &&
++ __hyp_this_cpu_read(arm64_ssbd_callback_required))
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
++#endif
++}
++
+ /* Switch to the guest for VHE systems running in EL2 */
+ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+ {
+@@ -409,6 +443,8 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+ sysreg_restore_guest_state_vhe(guest_ctxt);
+ __debug_switch_to_guest(vcpu);
+
++ __set_guest_arch_workaround_state(vcpu);
++
+ do {
+ /* Jump in the fire! */
+ exit_code = __guest_enter(vcpu, host_ctxt);
+@@ -416,6 +452,8 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+ /* And we're baaack! */
+ } while (fixup_guest_exit(vcpu, &exit_code));
+
++ __set_host_arch_workaround_state(vcpu);
++
+ fp_enabled = fpsimd_enabled_vhe();
+
+ sysreg_save_guest_state_vhe(guest_ctxt);
+@@ -465,6 +503,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
+ __sysreg_restore_state_nvhe(guest_ctxt);
+ __debug_switch_to_guest(vcpu);
+
++ __set_guest_arch_workaround_state(vcpu);
++
+ do {
+ /* Jump in the fire! */
+ exit_code = __guest_enter(vcpu, host_ctxt);
+@@ -472,6 +512,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
+ /* And we're baaack! */
+ } while (fixup_guest_exit(vcpu, &exit_code));
+
++ __set_host_arch_workaround_state(vcpu);
++
+ fp_enabled = __fpsimd_enabled_nvhe();
+
+ __sysreg_save_state_nvhe(guest_ctxt);
+diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
+index a4c1b76240df..2d9b4795edb2 100644
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -1490,6 +1490,10 @@ static int init_hyp_mode(void)
+ }
+ }
+
++ err = hyp_map_aux_data();
++ if (err)
++ kvm_err("Cannot map host auxilary data: %d\n", err);
++
+ return 0;
+
+ out_err:
+--
+2.15.0
+
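Conceptually, the switch.c hunks wrap guest entry and exit with a conditional
firmware call: the workaround is only flipped when the host uses the dynamic
mitigation on this CPU and the guest has asked for the opposite state. A minimal
userspace sketch of that logic follows; a printf stands in for the SMC to EL3 and a
plain boolean stands in for the per-CPU arm64_ssbd_callback_required value, so the
program is illustrative only.

    /* Userspace model of __set_{guest,host}_arch_workaround_state() (illustration only). */
    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define VCPU_WORKAROUND_2_FLAG (1ULL << 0)

    struct vcpu { uint64_t workaround_flags; };

    static bool host_callback_required = true; /* per-CPU callback_required stand-in */

    static bool needs_ssbd_off(const struct vcpu *v)
    {
        return !(v->workaround_flags & VCPU_WORKAROUND_2_FLAG);
    }

    static void firmware_wa2(int enable)       /* stands in for the SMC to EL3 */
    {
        printf("ARCH_WORKAROUND_2(%d)\n", enable);
    }

    static void set_guest_state(const struct vcpu *v)
    {
        if (needs_ssbd_off(v) && host_callback_required)
            firmware_wa2(0);                   /* guest runs with mitigation off */
    }

    static void set_host_state(const struct vcpu *v)
    {
        if (needs_ssbd_off(v) && host_callback_required)
            firmware_wa2(1);                   /* bring it back on for the host */
    }

    int main(void)
    {
        struct vcpu v = { .workaround_flags = 0 }; /* guest asked for mitigation off */

        set_guest_state(&v);  /* before __guest_enter() */
        set_host_state(&v);   /* after the guest exits */
        return 0;
    }
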
diff --git a/queue/arm64-KVM-Add-HYP-per-cpu-accessors.patch b/queue/arm64-KVM-Add-HYP-per-cpu-accessors.patch
new file mode 100644
index 0000000..5856d43
--- /dev/null
+++ b/queue/arm64-KVM-Add-HYP-per-cpu-accessors.patch
@@ -0,0 +1,64 @@
+From 85478bab409171de501b719971fd25a3d5d639f9 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:15 +0100
+Subject: [PATCH] arm64: KVM: Add HYP per-cpu accessors
+
+commit 85478bab409171de501b719971fd25a3d5d639f9 upstream.
+
+As we're going to require to access per-cpu variables at EL2,
+let's craft the minimum set of accessors required to implement
+reading a per-cpu variable, relying on tpidr_el2 to contain the
+per-cpu offset.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
+index f6648a3e4152..fefd8cf42c35 100644
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -71,14 +71,37 @@ extern u32 __kvm_get_mdcr_el2(void);
+
+ extern u32 __init_stage2_translation(void);
+
++/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
++#define __hyp_this_cpu_ptr(sym) \
++ ({ \
++ void *__ptr = hyp_symbol_addr(sym); \
++ __ptr += read_sysreg(tpidr_el2); \
++ (typeof(&sym))__ptr; \
++ })
++
++#define __hyp_this_cpu_read(sym) \
++ ({ \
++ *__hyp_this_cpu_ptr(sym); \
++ })
++
+ #else /* __ASSEMBLY__ */
+
+-.macro get_host_ctxt reg, tmp
+- adr_l \reg, kvm_host_cpu_state
++.macro hyp_adr_this_cpu reg, sym, tmp
++ adr_l \reg, \sym
+ mrs \tmp, tpidr_el2
+ add \reg, \reg, \tmp
+ .endm
+
++.macro hyp_ldr_this_cpu reg, sym, tmp
++ adr_l \reg, \sym
++ mrs \tmp, tpidr_el2
++ ldr \reg, [\reg, \tmp]
++.endm
++
++.macro get_host_ctxt reg, tmp
++ hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
++.endm
++
+ .macro get_vcpu_ptr vcpu, ctxt
+ get_host_ctxt \ctxt, \vcpu
+ ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+--
+2.15.0
+
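The accessors above amount to "address of the symbol plus the per-CPU offset held in
tpidr_el2". A userspace sketch of that arithmetic is shown below, with an array
standing in for the static per-CPU area and a plain variable standing in for
tpidr_el2; it is illustrative only, not kernel code.

    /* Userspace model of __hyp_this_cpu_ptr()/__hyp_this_cpu_read() (illustration only). */
    #include <stdio.h>
    #include <stdint.h>

    #define NR_CPUS 4

    /* One u64 per CPU, laid out back to back like a static per-CPU area. */
    static uint64_t arm64_ssbd_callback_required[NR_CPUS];

    /* Stand-in for tpidr_el2: the byte offset of this CPU's slot. */
    static uintptr_t tpidr_el2;

    static uint64_t *hyp_this_cpu_ptr(uint64_t *sym)
    {
        return (uint64_t *)((char *)sym + tpidr_el2);
    }

    static uint64_t hyp_this_cpu_read(uint64_t *sym)
    {
        return *hyp_this_cpu_ptr(sym);
    }

    int main(void)
    {
        tpidr_el2 = 2 * sizeof(uint64_t);       /* pretend we are CPU 2 */
        arm64_ssbd_callback_required[2] = 1;

        printf("required on this cpu: %llu\n",
               (unsigned long long)hyp_this_cpu_read(arm64_ssbd_callback_required));
        return 0;
    }
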
diff --git a/queue/arm64-KVM-Handle-guest-s-ARCH_WORKAROUND_2-requests.patch b/queue/arm64-KVM-Handle-guest-s-ARCH_WORKAROUND_2-requests.patch
new file mode 100644
index 0000000..05848a2
--- /dev/null
+++ b/queue/arm64-KVM-Handle-guest-s-ARCH_WORKAROUND_2-requests.patch
@@ -0,0 +1,85 @@
+From b4f18c063a13dfb33e3a63fe1844823e19c2265e Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:17 +0100
+Subject: [PATCH] arm64: KVM: Handle guest's ARCH_WORKAROUND_2 requests
+
+commit b4f18c063a13dfb33e3a63fe1844823e19c2265e upstream.
+
+In order to forward the guest's ARCH_WORKAROUND_2 calls to EL3,
+add a small(-ish) sequence to handle it at EL2. Special care must
+be taken to track the state of the guest itself by updating the
+workaround flags. We also rely on patching to enable calls into
+the firmware.
+
+Note that since we need to execute branches, this always executes
+after the Spectre-v2 mitigation has been applied.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index 5bdda651bd05..323aeb5f2fe6 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -136,6 +136,7 @@ int main(void)
+ #ifdef CONFIG_KVM_ARM_HOST
+ DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
+ DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
++ DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
+ DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+ DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
+ DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
+diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
+index bffece27b5c1..05d836979032 100644
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -106,8 +106,44 @@ el1_hvc_guest:
+ */
+ ldr x1, [sp] // Guest's x0
+ eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
++ cbz w1, wa_epilogue
++
++ /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
++ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
++ ARM_SMCCC_ARCH_WORKAROUND_2)
+ cbnz w1, el1_trap
+- mov x0, x1
++
++#ifdef CONFIG_ARM64_SSBD
++alternative_cb arm64_enable_wa2_handling
++ b wa2_end
++alternative_cb_end
++ get_vcpu_ptr x2, x0
++ ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
++
++ // Sanitize the argument and update the guest flags
++ ldr x1, [sp, #8] // Guest's x1
++ clz w1, w1 // Murphy's device:
++ lsr w1, w1, #5 // w1 = !!w1 without using
++ eor w1, w1, #1 // the flags...
++ bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
++ str x0, [x2, #VCPU_WORKAROUND_FLAGS]
++
++ /* Check that we actually need to perform the call */
++ hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
++ cbz x0, wa2_end
++
++ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
++ smc #0
++
++ /* Don't leak data from the SMC call */
++ mov x3, xzr
++wa2_end:
++ mov x2, xzr
++ mov x1, xzr
++#endif
++
++wa_epilogue:
++ mov x0, xzr
+ add sp, sp, #16
+ eret
+
+--
+2.15.0
+
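The clz/lsr/eor sequence above computes !!w1 without touching the condition flags,
and the bfi folds that single bit into the vcpu's workaround_flags. A C rendering of
the same update is sketched below; the names mirror the patch, but the function is
only an illustration of the flag arithmetic.

    /* C model of the workaround_flags update done in el1_hvc_guest (illustration only). */
    #include <stdio.h>
    #include <stdint.h>

    #define VCPU_WORKAROUND_2_FLAG_SHIFT 0
    #define VCPU_WORKAROUND_2_FLAG       (1ULL << VCPU_WORKAROUND_2_FLAG_SHIFT)

    static uint64_t update_wa2_flag(uint64_t workaround_flags, uint64_t guest_x1)
    {
        /* Only w1 (the low 32 bits) is examined; the asm derives !!w1 via clz/lsr/eor. */
        uint64_t enable = !!(uint32_t)guest_x1;

        workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
        workaround_flags |= enable << VCPU_WORKAROUND_2_FLAG_SHIFT;  /* the bfi */
        return workaround_flags;
    }

    int main(void)
    {
        printf("enable  -> 0x%llx\n", (unsigned long long)update_wa2_flag(0, 1));
        printf("disable -> 0x%llx\n", (unsigned long long)update_wa2_flag(1, 0));
        return 0;
    }
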
diff --git a/queue/arm64-alternatives-Add-dynamic-patching-feature.patch b/queue/arm64-alternatives-Add-dynamic-patching-feature.patch
new file mode 100644
index 0000000..8abedea
--- /dev/null
+++ b/queue/arm64-alternatives-Add-dynamic-patching-feature.patch
@@ -0,0 +1,212 @@
+From dea5e2a4c5bcf196f879a66cebdcca07793e8ba4 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Sun, 3 Dec 2017 12:02:14 +0000
+Subject: [PATCH] arm64: alternatives: Add dynamic patching feature
+
+commit dea5e2a4c5bcf196f879a66cebdcca07793e8ba4 upstream.
+
+We've so far relied on a patching infrastructure that only gave us
+a single alternative, without any way to provide a range of potential
+replacement instructions. For a single feature, this is an all or
+nothing thing.
+
+It would be interesting to have a more flexible, fine-grained way of patching
+the kernel though, where we could dynamically tune the code that gets
+injected.
+
+In order to achieve this, let's introduce a new form of dynamic patching,
+associating a callback with a patching site. This callback gets source and
+target locations of the patching request, as well as the number of
+instructions to be patched.
+
+Dynamic patching is declared with the new ALTERNATIVE_CB and alternative_cb
+directives:
+
+ asm volatile(ALTERNATIVE_CB("mov %0, #0\n", callback)
+ : "r" (v));
+or
+ alternative_cb callback
+ mov x0, #0
+ alternative_cb_end
+
+where callback is the C function computing the alternative.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+
+diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
+index 669028172fd6..a91933b1e2e6 100644
+--- a/arch/arm64/include/asm/alternative.h
++++ b/arch/arm64/include/asm/alternative.h
+@@ -5,6 +5,8 @@
+ #include <asm/cpucaps.h>
+ #include <asm/insn.h>
+
++#define ARM64_CB_PATCH ARM64_NCAPS
++
+ #ifndef __ASSEMBLY__
+
+ #include <linux/init.h>
+@@ -22,12 +24,19 @@ struct alt_instr {
+ u8 alt_len; /* size of new instruction(s), <= orig_len */
+ };
+
++typedef void (*alternative_cb_t)(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr, int nr_inst);
++
+ void __init apply_alternatives_all(void);
+ void apply_alternatives(void *start, size_t length);
+
+-#define ALTINSTR_ENTRY(feature) \
++#define ALTINSTR_ENTRY(feature,cb) \
+ " .word 661b - .\n" /* label */ \
++ " .if " __stringify(cb) " == 0\n" \
+ " .word 663f - .\n" /* new instruction */ \
++ " .else\n" \
++ " .word " __stringify(cb) "- .\n" /* callback */ \
++ " .endif\n" \
+ " .hword " __stringify(feature) "\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* source len */ \
+ " .byte 664f-663f\n" /* replacement len */
+@@ -45,15 +54,18 @@ void apply_alternatives(void *start, size_t length);
+ * but most assemblers die if insn1 or insn2 have a .inst. This should
+ * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
+ * containing commit 4e4d08cf7399b606 or c1baaddf8861).
++ *
++ * Alternatives with callbacks do not generate replacement instructions.
+ */
+-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
++#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \
+ ".if "__stringify(cfg_enabled)" == 1\n" \
+ "661:\n\t" \
+ oldinstr "\n" \
+ "662:\n" \
+ ".pushsection .altinstructions,\"a\"\n" \
+- ALTINSTR_ENTRY(feature) \
++ ALTINSTR_ENTRY(feature,cb) \
+ ".popsection\n" \
++ " .if " __stringify(cb) " == 0\n" \
+ ".pushsection .altinstr_replacement, \"a\"\n" \
+ "663:\n\t" \
+ newinstr "\n" \
+@@ -61,11 +73,17 @@ void apply_alternatives(void *start, size_t length);
+ ".popsection\n\t" \
+ ".org . - (664b-663b) + (662b-661b)\n\t" \
+ ".org . - (662b-661b) + (664b-663b)\n" \
++ ".else\n\t" \
++ "663:\n\t" \
++ "664:\n\t" \
++ ".endif\n" \
+ ".endif\n"
+
+ #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
+- __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
++ __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
+
++#define ALTERNATIVE_CB(oldinstr, cb) \
++ __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
+ #else
+
+ #include <asm/assembler.h>
+@@ -132,6 +150,14 @@ void apply_alternatives(void *start, size_t length);
+ 661:
+ .endm
+
++.macro alternative_cb cb
++ .set .Lasm_alt_mode, 0
++ .pushsection .altinstructions, "a"
++ altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
++ .popsection
++661:
++.endm
++
+ /*
+ * Provide the other half of the alternative code sequence.
+ */
+@@ -157,6 +183,13 @@ void apply_alternatives(void *start, size_t length);
+ .org . - (662b-661b) + (664b-663b)
+ .endm
+
++/*
++ * Callback-based alternative epilogue
++ */
++.macro alternative_cb_end
++662:
++.endm
++
+ /*
+ * Provides a trivial alternative or default sequence consisting solely
+ * of NOPs. The number of NOPs is chosen automatically to match the
+diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
+index 414288a558c8..5c4bce4ac381 100644
+--- a/arch/arm64/kernel/alternative.c
++++ b/arch/arm64/kernel/alternative.c
+@@ -107,32 +107,53 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp
+ return insn;
+ }
+
++static void patch_alternative(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr, int nr_inst)
++{
++ __le32 *replptr;
++ int i;
++
++ replptr = ALT_REPL_PTR(alt);
++ for (i = 0; i < nr_inst; i++) {
++ u32 insn;
++
++ insn = get_alt_insn(alt, origptr + i, replptr + i);
++ updptr[i] = cpu_to_le32(insn);
++ }
++}
++
+ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
+ {
+ struct alt_instr *alt;
+ struct alt_region *region = alt_region;
+- __le32 *origptr, *replptr, *updptr;
++ __le32 *origptr, *updptr;
++ alternative_cb_t alt_cb;
+
+ for (alt = region->begin; alt < region->end; alt++) {
+- u32 insn;
+- int i, nr_inst;
++ int nr_inst;
+
+- if (!cpus_have_cap(alt->cpufeature))
++ /* Use ARM64_CB_PATCH as an unconditional patch */
++ if (alt->cpufeature < ARM64_CB_PATCH &&
++ !cpus_have_cap(alt->cpufeature))
+ continue;
+
+- BUG_ON(alt->alt_len != alt->orig_len);
++ if (alt->cpufeature == ARM64_CB_PATCH)
++ BUG_ON(alt->alt_len != 0);
++ else
++ BUG_ON(alt->alt_len != alt->orig_len);
+
+ pr_info_once("patching kernel code\n");
+
+ origptr = ALT_ORIG_PTR(alt);
+- replptr = ALT_REPL_PTR(alt);
+ updptr = use_linear_alias ? lm_alias(origptr) : origptr;
+- nr_inst = alt->alt_len / sizeof(insn);
++ nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
+
+- for (i = 0; i < nr_inst; i++) {
+- insn = get_alt_insn(alt, origptr + i, replptr + i);
+- updptr[i] = cpu_to_le32(insn);
+- }
++ if (alt->cpufeature < ARM64_CB_PATCH)
++ alt_cb = patch_alternative;
++ else
++ alt_cb = ALT_REPL_PTR(alt);
++
++ alt_cb(alt, origptr, updptr, nr_inst);
+
+ flush_icache_range((uintptr_t)origptr,
+ (uintptr_t)(origptr + nr_inst));
+--
+2.15.0
+
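The interesting part of the alternative.c hunk is the dispatch: ordinary
alternatives still go through the default instruction patcher, while ARM64_CB_PATCH
entries reinterpret the stored "replacement" pointer as a callback that generates
the instructions at patch time. The sketch below models that control flow in
userspace with toy instruction values; it is illustrative only.

    /* Userspace model of the callback dispatch in __apply_alternatives() (illustration only). */
    #include <stdio.h>
    #include <stdint.h>

    #define ARM64_CB_PATCH 64          /* stands in for ARM64_NCAPS: "not a real feature" */
    #define NOP            0xd503201fu /* AArch64 NOP encoding */

    typedef void (*alternative_cb_t)(uint32_t *origptr, uint32_t *updptr, int nr_inst);

    struct alt_instr {
        int cpufeature;                /* feature bit, or ARM64_CB_PATCH */
        alternative_cb_t cb;           /* callback when cpufeature == ARM64_CB_PATCH */
    };

    static void patch_alternative(uint32_t *origptr, uint32_t *updptr, int nr_inst)
    {
        for (int i = 0; i < nr_inst; i++)
            updptr[i] = NOP;           /* toy "replacement": NOP the site */
    }

    static void my_callback(uint32_t *origptr, uint32_t *updptr, int nr_inst)
    {
        updptr[0] = 0x11111111;        /* a callback computes the instructions itself */
    }

    static void apply_one(struct alt_instr *alt, uint32_t *origptr, int nr_inst)
    {
        alternative_cb_t alt_cb;

        alt_cb = (alt->cpufeature < ARM64_CB_PATCH) ? patch_alternative : alt->cb;
        alt_cb(origptr, origptr, nr_inst);
    }

    int main(void)
    {
        uint32_t site[1] = { 0 };
        struct alt_instr alt = { .cpufeature = ARM64_CB_PATCH, .cb = my_callback };

        apply_one(&alt, site, 1);
        printf("patched to 0x%08x\n", site[0]);
        return 0;
    }
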
diff --git a/queue/arm64-alternatives-use-tpidr_el2-on-VHE-hosts.patch b/queue/arm64-alternatives-use-tpidr_el2-on-VHE-hosts.patch
new file mode 100644
index 0000000..188fa7c
--- /dev/null
+++ b/queue/arm64-alternatives-use-tpidr_el2-on-VHE-hosts.patch
@@ -0,0 +1,196 @@
+From 6d99b68933fbcf51f84fcbba49246ce1209ec193 Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Mon, 8 Jan 2018 15:38:06 +0000
+Subject: [PATCH] arm64: alternatives: use tpidr_el2 on VHE hosts
+
+commit 6d99b68933fbcf51f84fcbba49246ce1209ec193 upstream.
+
+Now that KVM uses tpidr_el2 in the same way as Linux's cpu_offset in
+tpidr_el1, merge the two. This saves KVM from save/restoring tpidr_el1
+on VHE hosts, and allows future code to blindly access per-cpu variables
+without triggering world-switch.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
+index 4a85c6952a22..669028172fd6 100644
+--- a/arch/arm64/include/asm/alternative.h
++++ b/arch/arm64/include/asm/alternative.h
+@@ -12,6 +12,8 @@
+ #include <linux/stddef.h>
+ #include <linux/stringify.h>
+
++extern int alternatives_applied;
++
+ struct alt_instr {
+ s32 orig_offset; /* offset to original instruction */
+ s32 alt_offset; /* offset to replacement instruction */
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index a6f90b648655..5dc4856f3bb9 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -254,7 +254,11 @@ lr .req x30 // link register
+ #else
+ adr_l \dst, \sym
+ #endif
++alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ mrs \tmp, tpidr_el1
++alternative_else
++ mrs \tmp, tpidr_el2
++alternative_endif
+ add \dst, \dst, \tmp
+ .endm
+
+@@ -265,7 +269,11 @@ lr .req x30 // link register
+ */
+ .macro ldr_this_cpu dst, sym, tmp
+ adr_l \dst, \sym
++alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ mrs \tmp, tpidr_el1
++alternative_else
++ mrs \tmp, tpidr_el2
++alternative_endif
+ ldr \dst, [\dst, \tmp]
+ .endm
+
+diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
+index 3bd498e4de4c..43393208229e 100644
+--- a/arch/arm64/include/asm/percpu.h
++++ b/arch/arm64/include/asm/percpu.h
+@@ -16,11 +16,15 @@
+ #ifndef __ASM_PERCPU_H
+ #define __ASM_PERCPU_H
+
++#include <asm/alternative.h>
+ #include <asm/stack_pointer.h>
+
+ static inline void set_my_cpu_offset(unsigned long off)
+ {
+- asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
++ asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
++ "msr tpidr_el2, %0",
++ ARM64_HAS_VIRT_HOST_EXTN)
++ :: "r" (off) : "memory");
+ }
+
+ static inline unsigned long __my_cpu_offset(void)
+@@ -31,7 +35,10 @@ static inline unsigned long __my_cpu_offset(void)
+ * We want to allow caching the value, so avoid using volatile and
+ * instead use a fake stack read to hazard against barrier().
+ */
+- asm("mrs %0, tpidr_el1" : "=r" (off) :
++ asm(ALTERNATIVE("mrs %0, tpidr_el1",
++ "mrs %0, tpidr_el2",
++ ARM64_HAS_VIRT_HOST_EXTN)
++ : "=r" (off) :
+ "Q" (*(const unsigned long *)current_stack_pointer));
+
+ return off;
+diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
+index 6dd0a3a3e5c9..414288a558c8 100644
+--- a/arch/arm64/kernel/alternative.c
++++ b/arch/arm64/kernel/alternative.c
+@@ -32,6 +32,8 @@
+ #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
+ #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
+
++int alternatives_applied;
++
+ struct alt_region {
+ struct alt_instr *begin;
+ struct alt_instr *end;
+@@ -143,7 +145,6 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
+ */
+ static int __apply_alternatives_multi_stop(void *unused)
+ {
+- static int patched = 0;
+ struct alt_region region = {
+ .begin = (struct alt_instr *)__alt_instructions,
+ .end = (struct alt_instr *)__alt_instructions_end,
+@@ -151,14 +152,14 @@ static int __apply_alternatives_multi_stop(void *unused)
+
+ /* We always have a CPU 0 at this point (__init) */
+ if (smp_processor_id()) {
+- while (!READ_ONCE(patched))
++ while (!READ_ONCE(alternatives_applied))
+ cpu_relax();
+ isb();
+ } else {
+- BUG_ON(patched);
++ BUG_ON(alternatives_applied);
+ __apply_alternatives(&region, true);
+ /* Barriers provided by the cache flushing */
+- WRITE_ONCE(patched, 1);
++ WRITE_ONCE(alternatives_applied, 1);
+ }
+
+ return 0;
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index da6722db50b0..9ef84d0def9a 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -886,6 +886,22 @@ static int __init parse_kpti(char *str)
+ __setup("kpti=", parse_kpti);
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
++static int cpu_copy_el2regs(void *__unused)
++{
++ /*
++ * Copy register values that aren't redirected by hardware.
++ *
++ * Before code patching, we only set tpidr_el1, all CPUs need to copy
++ * this value to tpidr_el2 before we patch the code. Once we've done
++ * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
++ * do anything here.
++ */
++ if (!alternatives_applied)
++ write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
++
++ return 0;
++}
++
+ static const struct arm64_cpu_capabilities arm64_features[] = {
+ {
+ .desc = "GIC system register CPU interface",
+@@ -955,6 +971,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
+ .capability = ARM64_HAS_VIRT_HOST_EXTN,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = runs_at_el2,
++ .enable = cpu_copy_el2regs,
+ },
+ {
+ .desc = "32-bit EL0 Support",
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index bc86f7ef8620..5a59eea49395 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -70,7 +70,11 @@ ENTRY(cpu_do_suspend)
+ mrs x8, mdscr_el1
+ mrs x9, oslsr_el1
+ mrs x10, sctlr_el1
++alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ mrs x11, tpidr_el1
++alternative_else
++ mrs x11, tpidr_el2
++alternative_endif
+ mrs x12, sp_el0
+ stp x2, x3, [x0]
+ stp x4, xzr, [x0, #16]
+@@ -116,7 +120,11 @@ ENTRY(cpu_do_resume)
+ msr mdscr_el1, x10
+
+ msr sctlr_el1, x12
++alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ msr tpidr_el1, x13
++alternative_else
++ msr tpidr_el2, x13
++alternative_endif
+ msr sp_el0, x14
+ /*
+ * Restore oslsr_el1 by writing oslar_el1
+--
+2.15.0
+
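The subtle part of cpu_copy_el2regs() is ordering: CPUs brought up before the
alternatives are applied still read tpidr_el1, so they must mirror its value into
tpidr_el2, while CPUs onlined after patching already write tpidr_el2 directly and
need no copy. A toy model of that guard is sketched below; the plain variables stand
in for the system registers and are invented for illustration.

    /* Toy model of the cpu_copy_el2regs() ordering guard (illustration only). */
    #include <stdio.h>

    static int alternatives_applied;              /* set once the kernel has been patched */
    static unsigned long tpidr_el1_val = 0x1000;  /* pretend per-CPU offset */
    static unsigned long tpidr_el2_val;

    static int cpu_copy_el2regs(void)
    {
        /* Before patching, the offset only lives in tpidr_el1: mirror it. */
        if (!alternatives_applied)
            tpidr_el2_val = tpidr_el1_val;
        return 0;
    }

    int main(void)
    {
        cpu_copy_el2regs();                       /* early CPU: copy happens */
        printf("tpidr_el2 = 0x%lx\n", tpidr_el2_val);

        alternatives_applied = 1;
        tpidr_el1_val = 0;                        /* later CPUs set tpidr_el2 directly */
        cpu_copy_el2regs();                       /* no copy this time */
        printf("tpidr_el2 = 0x%lx\n", tpidr_el2_val);
        return 0;
    }
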
diff --git a/queue/arm64-ssbd-Add-global-mitigation-state-accessor.patch b/queue/arm64-ssbd-Add-global-mitigation-state-accessor.patch
new file mode 100644
index 0000000..eb9553f
--- /dev/null
+++ b/queue/arm64-ssbd-Add-global-mitigation-state-accessor.patch
@@ -0,0 +1,43 @@
+From c32e1736ca03904c03de0e4459a673be194f56fd Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:10 +0100
+Subject: [PATCH] arm64: ssbd: Add global mitigation state accessor
+
+commit c32e1736ca03904c03de0e4459a673be194f56fd upstream.
+
+We're about to need the mitigation state in various parts of the
+kernel in order to do the right thing for userspace and guests.
+
+Let's expose an accessor that will let other subsystems know
+about the state.
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index b50650f3e496..b0fc3224ce8a 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -543,6 +543,16 @@ static inline u64 read_zcr_features(void)
+ #define ARM64_SSBD_FORCE_ENABLE 2
+ #define ARM64_SSBD_MITIGATED 3
+
++static inline int arm64_get_ssbd_state(void)
++{
++#ifdef CONFIG_ARM64_SSBD
++ extern int ssbd_state;
++ return ssbd_state;
++#else
++ return ARM64_SSBD_UNKNOWN;
++#endif
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
+--
+2.15.0
+
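A consumer of the new accessor simply branches on the returned state. The
hypothetical helper below shows the shape of such a caller as a standalone program;
the numeric values of the states not visible in the hunk are assumed from the
upstream header, and the describe_ssbd() helper itself is made up for illustration.

    /* Hypothetical consumer of arm64_get_ssbd_state() (illustration only). */
    #include <stdio.h>

    #define ARM64_SSBD_UNKNOWN       -1  /* assumed; FORCE_ENABLE/MITIGATED match the hunk */
    #define ARM64_SSBD_FORCE_DISABLE  0
    #define ARM64_SSBD_KERNEL         1
    #define ARM64_SSBD_FORCE_ENABLE   2
    #define ARM64_SSBD_MITIGATED      3

    static int ssbd_state = ARM64_SSBD_KERNEL;   /* stand-in for the real global */

    static int arm64_get_ssbd_state(void)
    {
        return ssbd_state;
    }

    static const char *describe_ssbd(void)
    {
        switch (arm64_get_ssbd_state()) {
        case ARM64_SSBD_FORCE_DISABLE: return "mitigation forced off";
        case ARM64_SSBD_KERNEL:        return "dynamic, per-task mitigation";
        case ARM64_SSBD_FORCE_ENABLE:  return "mitigation forced on";
        case ARM64_SSBD_MITIGATED:     return "not needed on this CPU";
        default:                       return "unknown";
        }
    }

    int main(void)
    {
        printf("%s\n", describe_ssbd());
        return 0;
    }
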
diff --git a/queue/arm64-ssbd-Add-prctl-interface-for-per-thread-mitiga.patch b/queue/arm64-ssbd-Add-prctl-interface-for-per-thread-mitiga.patch
new file mode 100644
index 0000000..fbf6d39
--- /dev/null
+++ b/queue/arm64-ssbd-Add-prctl-interface-for-per-thread-mitiga.patch
@@ -0,0 +1,147 @@
+From 9cdc0108baa8ef87c76ed834619886a46bd70cbe Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:14 +0100
+Subject: [PATCH] arm64: ssbd: Add prctl interface for per-thread mitigation
+
+commit 9cdc0108baa8ef87c76ed834619886a46bd70cbe upstream.
+
+If running on a system that performs dynamic SSBD mitigation, allow
+userspace to request the mitigation for itself. This is implemented
+as a prctl call, allowing the mitigation to be enabled or disabled at
+will for this particular thread.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
+index bf825f38d206..0025f8691046 100644
+--- a/arch/arm64/kernel/Makefile
++++ b/arch/arm64/kernel/Makefile
+@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
+ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
+ arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+ arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
++arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
+
+ obj-y += $(arm64-obj-y) vdso/ probes/
+ obj-m += $(arm64-obj-m)
+diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
+new file mode 100644
+index 000000000000..3432e5ef9f41
+--- /dev/null
++++ b/arch/arm64/kernel/ssbd.c
+@@ -0,0 +1,110 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/thread_info.h>
++
++#include <asm/cpufeature.h>
++
++/*
++ * prctl interface for SSBD
++ * FIXME: Drop the below ifdefery once merged in 4.18.
++ */
++#ifdef PR_SPEC_STORE_BYPASS
++static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
++{
++ int state = arm64_get_ssbd_state();
++
++ /* Unsupported */
++ if (state == ARM64_SSBD_UNKNOWN)
++ return -EINVAL;
++
++ /* Treat the unaffected/mitigated state separately */
++ if (state == ARM64_SSBD_MITIGATED) {
++ switch (ctrl) {
++ case PR_SPEC_ENABLE:
++ return -EPERM;
++ case PR_SPEC_DISABLE:
++ case PR_SPEC_FORCE_DISABLE:
++ return 0;
++ }
++ }
++
++ /*
++ * Things are a bit backward here: the arm64 internal API
++ * *enables the mitigation* when the userspace API *disables
++ * speculation*. So much fun.
++ */
++ switch (ctrl) {
++ case PR_SPEC_ENABLE:
++ /* If speculation is force disabled, enable is not allowed */
++ if (state == ARM64_SSBD_FORCE_ENABLE ||
++ task_spec_ssb_force_disable(task))
++ return -EPERM;
++ task_clear_spec_ssb_disable(task);
++ clear_tsk_thread_flag(task, TIF_SSBD);
++ break;
++ case PR_SPEC_DISABLE:
++ if (state == ARM64_SSBD_FORCE_DISABLE)
++ return -EPERM;
++ task_set_spec_ssb_disable(task);
++ set_tsk_thread_flag(task, TIF_SSBD);
++ break;
++ case PR_SPEC_FORCE_DISABLE:
++ if (state == ARM64_SSBD_FORCE_DISABLE)
++ return -EPERM;
++ task_set_spec_ssb_disable(task);
++ task_set_spec_ssb_force_disable(task);
++ set_tsk_thread_flag(task, TIF_SSBD);
++ break;
++ default:
++ return -ERANGE;
++ }
++
++ return 0;
++}
++
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++ unsigned long ctrl)
++{
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssbd_prctl_set(task, ctrl);
++ default:
++ return -ENODEV;
++ }
++}
++
++static int ssbd_prctl_get(struct task_struct *task)
++{
++ switch (arm64_get_ssbd_state()) {
++ case ARM64_SSBD_UNKNOWN:
++ return -EINVAL;
++ case ARM64_SSBD_FORCE_ENABLE:
++ return PR_SPEC_DISABLE;
++ case ARM64_SSBD_KERNEL:
++ if (task_spec_ssb_force_disable(task))
++ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
++ if (task_spec_ssb_disable(task))
++ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
++ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
++ case ARM64_SSBD_FORCE_DISABLE:
++ return PR_SPEC_ENABLE;
++ default:
++ return PR_SPEC_NOT_AFFECTED;
++ }
++}
++
++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
++{
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssbd_prctl_get(task);
++ default:
++ return -ENODEV;
++ }
++}
++#endif /* PR_SPEC_STORE_BYPASS */
+--
+2.15.0
+
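From userspace, the interface added above is driven through the generic
speculation-control prctl pair. The example below requests the SSB mitigation for
the calling thread and reads the state back; the PR_* fallback values are assumed
from the upstream uapi header in case the installed libc headers predate them.

    /* Userspace example: request per-thread SSB mitigation via prctl (illustration only). */
    #include <stdio.h>
    #include <errno.h>
    #include <string.h>
    #include <sys/prctl.h>

    #ifndef PR_GET_SPECULATION_CTRL     /* fallbacks for older headers (assumed values) */
    #define PR_GET_SPECULATION_CTRL 52
    #define PR_SET_SPECULATION_CTRL 53
    #define PR_SPEC_STORE_BYPASS    0
    #define PR_SPEC_ENABLE          (1UL << 1)
    #define PR_SPEC_DISABLE         (1UL << 2)
    #endif

    int main(void)
    {
        /* "Disable speculation" == "enable the SSBD mitigation" for this thread. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0))
            fprintf(stderr, "PR_SET_SPECULATION_CTRL: %s\n", strerror(errno));

        long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
        printf("speculation control state: %ld\n", state);
        return 0;
    }
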
diff --git a/queue/arm64-ssbd-Introduce-thread-flag-to-control-userspac.patch b/queue/arm64-ssbd-Introduce-thread-flag-to-control-userspac.patch
new file mode 100644
index 0000000..f71f8b6
--- /dev/null
+++ b/queue/arm64-ssbd-Introduce-thread-flag-to-control-userspac.patch
@@ -0,0 +1,50 @@
+From 9dd9614f5476687abbff8d4b12cd08ae70d7c2ad Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:13 +0100
+Subject: [PATCH] arm64: ssbd: Introduce thread flag to control userspace
+ mitigation
+
+commit 9dd9614f5476687abbff8d4b12cd08ae70d7c2ad upstream.
+
+In order to allow userspace to be mitigated on demand, let's
+introduce a new thread flag that prevents the mitigation from
+being turned off when exiting to userspace, and doesn't turn
+it on on entry into the kernel (with the assumption that the
+mitigation is always enabled in the kernel itself).
+
+This will be used by a prctl interface introduced in a later
+patch.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
+index 740aa03c5f0d..cbcf11b5e637 100644
+--- a/arch/arm64/include/asm/thread_info.h
++++ b/arch/arm64/include/asm/thread_info.h
+@@ -94,6 +94,7 @@ void arch_release_task_struct(struct task_struct *tsk);
+ #define TIF_32BIT 22 /* 32bit process */
+ #define TIF_SVE 23 /* Scalable Vector Extension in use */
+ #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
++#define TIF_SSBD 25 /* Wants SSB mitigation */
+
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index e6f6e2339b22..28ad8799406f 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -147,6 +147,8 @@ alternative_cb arm64_enable_wa2_handling
+ alternative_cb_end
+ ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
+ cbz \tmp2, \targ
++ ldr \tmp2, [tsk, #TSK_TI_FLAGS]
++ tbnz \tmp2, #TIF_SSBD, \targ
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+ mov w1, #\state
+ alternative_cb arm64_update_smccc_conduit
+--
+2.15.0
+
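Taken together with the previous entry.S change, apply_ssbd now has two early exits:
the per-CPU "callback required" value gates the firmware call entirely, and TIF_SSBD
keeps the mitigation on across the userspace transition for threads that asked for
it. A C rendering of that decision is sketched below; the function name and the
boolean stand-ins are invented for illustration.

    /* C model of the apply_ssbd decision with TIF_SSBD (illustration only). */
    #include <stdio.h>
    #include <stdbool.h>

    static bool callback_required = true;  /* per-CPU arm64_ssbd_callback_required stand-in */

    /* Returns the ARCH_WORKAROUND_2 argument to pass to firmware, or -1 to skip the call. */
    static int apply_ssbd(bool tif_ssbd, int state)
    {
        if (!callback_required)
            return -1;        /* mitigation is static on this system */
        if (tif_ssbd)
            return -1;        /* thread wants it always on: never flip it */
        return state;         /* 1 on kernel entry, 0 on exit to userspace */
    }

    int main(void)
    {
        printf("plain thread, exit to user: %d\n", apply_ssbd(false, 0));
        printf("TIF_SSBD thread, exit     : %d\n", apply_ssbd(true, 0));
        return 0;
    }
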
diff --git a/queue/arm64-ssbd-Restore-mitigation-status-on-CPU-resume.patch b/queue/arm64-ssbd-Restore-mitigation-status-on-CPU-resume.patch
new file mode 100644
index 0000000..3905146
--- /dev/null
+++ b/queue/arm64-ssbd-Restore-mitigation-status-on-CPU-resume.patch
@@ -0,0 +1,97 @@
+From 647d0519b53f440a55df163de21c52a8205431cc Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:12 +0100
+Subject: [PATCH] arm64: ssbd: Restore mitigation status on CPU resume
+
+commit 647d0519b53f440a55df163de21c52a8205431cc upstream.
+
+On a system where firmware can dynamically change the state of the
+mitigation, the CPU will always come up with the mitigation enabled,
+including when coming back from suspend.
+
+If the user has requested "no mitigation" via a command line option,
+let's enforce it by calling into the firmware again to disable it.
+
+Similarly, for a resume from hibernate, the mitigation could have
+been disabled by the boot kernel. Let's ensure that it is set
+back on in that case.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index b0fc3224ce8a..55bc1f073bfb 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -553,6 +553,12 @@ static inline int arm64_get_ssbd_state(void)
+ #endif
+ }
+
++#ifdef CONFIG_ARM64_SSBD
++void arm64_set_ssbd_mitigation(bool state);
++#else
++static inline void arm64_set_ssbd_mitigation(bool state) {}
++#endif
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 2797bc2c8c6a..cf37ca6fa5f2 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -303,7 +303,7 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
+ *updptr = cpu_to_le32(aarch64_insn_gen_nop());
+ }
+
+-static void arm64_set_ssbd_mitigation(bool state)
++void arm64_set_ssbd_mitigation(bool state)
+ {
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
+index 1ec5f28c39fc..6b2686d54411 100644
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -313,6 +313,17 @@ int swsusp_arch_suspend(void)
+
+ sleep_cpu = -EINVAL;
+ __cpu_suspend_exit();
++
++ /*
++ * Just in case the boot kernel did turn the SSBD
++ * mitigation off behind our back, let's set the state
++ * to what we expect it to be.
++ */
++ switch (arm64_get_ssbd_state()) {
++ case ARM64_SSBD_FORCE_ENABLE:
++ case ARM64_SSBD_KERNEL:
++ arm64_set_ssbd_mitigation(true);
++ }
+ }
+
+ local_daif_restore(flags);
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index a307b9e13392..70c283368b64 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -62,6 +62,14 @@ void notrace __cpu_suspend_exit(void)
+ */
+ if (hw_breakpoint_restore)
+ hw_breakpoint_restore(cpu);
++
++ /*
++ * On resume, firmware implementing dynamic mitigation will
++ * have turned the mitigation on. If the user has forcefully
++ * disabled it, make sure their wishes are obeyed.
++ */
++ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
++ arm64_set_ssbd_mitigation(false);
+ }
+
+ /*
+--
+2.15.0
+
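The two hunks apply the same rule in opposite directions: a CPU waking from suspend
comes back with the mitigation enabled by firmware, so it is turned back off only in
the force-disabled case, while a kernel resuming from hibernation may inherit a
disabled mitigation from the boot kernel and re-enables it in the force-enabled and
dynamic cases. A compact model of both rules follows; set_mitigation() merely prints
and stands in for arm64_set_ssbd_mitigation(), so treat it as illustration only.

    /* Model of the SSBD restore rules on suspend/hibernate resume (illustration only). */
    #include <stdio.h>

    enum ssbd_state {
        ARM64_SSBD_UNKNOWN = -1,
        ARM64_SSBD_FORCE_DISABLE,
        ARM64_SSBD_KERNEL,
        ARM64_SSBD_FORCE_ENABLE,
        ARM64_SSBD_MITIGATED,
    };

    static void set_mitigation(int on)     /* stands in for arm64_set_ssbd_mitigation() */
    {
        printf("mitigation -> %s\n", on ? "on" : "off");
    }

    static void cpu_suspend_exit(enum ssbd_state s)
    {
        /* Firmware re-enabled the mitigation; honour a forced "off" request. */
        if (s == ARM64_SSBD_FORCE_DISABLE)
            set_mitigation(0);
    }

    static void hibernate_resume(enum ssbd_state s)
    {
        /* The boot kernel may have turned it off; put it back where we expect. */
        if (s == ARM64_SSBD_FORCE_ENABLE || s == ARM64_SSBD_KERNEL)
            set_mitigation(1);
    }

    int main(void)
    {
        cpu_suspend_exit(ARM64_SSBD_FORCE_DISABLE);
        hibernate_resume(ARM64_SSBD_KERNEL);
        return 0;
    }
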
diff --git a/queue/arm64-ssbd-Skip-apply_ssbd-if-not-using-dynamic-miti.patch b/queue/arm64-ssbd-Skip-apply_ssbd-if-not-using-dynamic-miti.patch
new file mode 100644
index 0000000..a3e19a2
--- /dev/null
+++ b/queue/arm64-ssbd-Skip-apply_ssbd-if-not-using-dynamic-miti.patch
@@ -0,0 +1,62 @@
+From 986372c4367f46b34a3c0f6918d7fb95cbdf39d6 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 29 May 2018 13:11:11 +0100
+Subject: [PATCH] arm64: ssbd: Skip apply_ssbd if not using dynamic mitigation
+
+commit 986372c4367f46b34a3c0f6918d7fb95cbdf39d6 upstream.
+
+In order to avoid checking arm64_ssbd_callback_required on each
+kernel entry/exit even if no mitigation is required, let's
+add yet another alternative that by default jumps over the mitigation,
+and that gets nop'ed out if we're doing dynamic mitigation.
+
+Think of it as a poor man's static key...
+
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 1075f90fdd8c..2797bc2c8c6a 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -289,6 +289,20 @@ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+ *updptr = cpu_to_le32(insn);
+ }
+
++void __init arm64_enable_wa2_handling(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr,
++ int nr_inst)
++{
++ BUG_ON(nr_inst != 1);
++ /*
++ * Only allow mitigation on EL1 entry/exit and guest
++ * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
++ * be flipped.
++ */
++ if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
++ *updptr = cpu_to_le32(aarch64_insn_gen_nop());
++}
++
+ static void arm64_set_ssbd_mitigation(bool state)
+ {
+ switch (psci_ops.conduit) {
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 29ad672a6abd..e6f6e2339b22 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -142,6 +142,9 @@ alternative_else_nop_endif
+ // to save/restore them if required.
+ .macro apply_ssbd, state, targ, tmp1, tmp2
+ #ifdef CONFIG_ARM64_SSBD
++alternative_cb arm64_enable_wa2_handling
++ b \targ
++alternative_cb_end
+ ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
+ cbz \tmp2, \targ
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+--
+2.15.0
+
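The callback registered above acts as a patch-time static branch: by default the
"b \targ" stays in place and the whole apply_ssbd sequence is skipped, and only when
dynamic mitigation is in use is that branch replaced by a NOP. A toy model of the
decision is sketched below; the skip-branch encoding is a placeholder and the whole
program is illustrative only.

    /* Toy model of arm64_enable_wa2_handling() as a patch-time decision (illustration only). */
    #include <stdio.h>
    #include <stdint.h>

    #define INSN_BRANCH_SKIP 0x14000000u  /* placeholder for "b <skip label>" */
    #define INSN_NOP         0xd503201fu  /* AArch64 NOP encoding */

    #define ARM64_SSBD_KERNEL 1           /* dynamic mitigation in use */

    static void enable_wa2_handling(uint32_t *updptr, int nr_inst, int ssbd_state)
    {
        /* Default is the skip branch; NOP it out only for dynamic mitigation. */
        if (nr_inst == 1 && ssbd_state == ARM64_SSBD_KERNEL)
            updptr[0] = INSN_NOP;
    }

    int main(void)
    {
        uint32_t site = INSN_BRANCH_SKIP;

        enable_wa2_handling(&site, 1, ARM64_SSBD_KERNEL);
        printf("patched site: 0x%08x (%s)\n", site,
               site == INSN_NOP ? "mitigation path enabled" : "skipped");
        return 0;
    }
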
diff --git a/queue/series b/queue/series
new file mode 100644
index 0000000..df825c6
--- /dev/null
+++ b/queue/series
@@ -0,0 +1,22 @@
+KVM-arm64-Store-vcpu-on-the-stack-during-__guest_ent.patch
+KVM-arm-arm64-Convert-kvm_host_cpu_state-to-a-static.patch
+KVM-arm64-Change-hyp_panic-s-dependency-on-tpidr_el2.patch
+arm64-alternatives-use-tpidr_el2-on-VHE-hosts.patch
+KVM-arm64-Stop-save-restoring-host-tpidr_el1-on-VHE.patch
+arm64-alternatives-Add-dynamic-patching-feature.patch
+KVM-arm-arm64-Do-not-use-kern_hyp_va-with-kvm_vgic_g.patch
+KVM-arm64-Avoid-storing-the-vcpu-pointer-on-the-stac.patch
+arm-arm64-smccc-Add-SMCCC-specific-return-codes.patch
+arm64-Call-ARCH_WORKAROUND_2-on-transitions-between-.patch
+arm64-Add-per-cpu-infrastructure-to-call-ARCH_WORKAR.patch
+arm64-Add-ARCH_WORKAROUND_2-probing.patch
+arm64-Add-ssbd-command-line-option.patch
+arm64-ssbd-Add-global-mitigation-state-accessor.patch
+arm64-ssbd-Skip-apply_ssbd-if-not-using-dynamic-miti.patch
+arm64-ssbd-Restore-mitigation-status-on-CPU-resume.patch
+arm64-ssbd-Introduce-thread-flag-to-control-userspac.patch
+arm64-ssbd-Add-prctl-interface-for-per-thread-mitiga.patch
+arm64-KVM-Add-HYP-per-cpu-accessors.patch
+arm64-KVM-Add-ARCH_WORKAROUND_2-support-for-guests.patch
+arm64-KVM-Handle-guest-s-ARCH_WORKAROUND_2-requests.patch
+arm64-KVM-Add-ARCH_WORKAROUND_2-discovery-through-AR.patch