author    Paul Gortmaker <paul.gortmaker@windriver.com>  2019-09-18 08:41:09 -0400
committer Paul Gortmaker <paul.gortmaker@windriver.com>  2019-09-18 08:41:09 -0400
commit    9ba5087776d9b1ea61b0775518931a6c1e29b397 (patch)
tree      98705920e047a87ce848f71debe70df4ad998a75
parent    bc9ee7ee61a323ab3193a498fb03f5257da70408 (diff)
download  longterm-queue-4.18-9ba5087776d9b1ea61b0775518931a6c1e29b397.tar.gz
x86: add swapgs commits
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--  queue/Documentation-Add-swapgs-description-to-the-Spectre-.patch  166
-rw-r--r--  queue/series                                                       7
-rw-r--r--  queue/x86-cpufeatures-Carve-out-CQM-features-retrieval.patch      105
-rw-r--r--  queue/x86-cpufeatures-Combine-word-11-and-12-into-a-new-sc.patch  201
-rw-r--r--  queue/x86-entry-64-Use-JMP-instead-of-JMPQ.patch                   35
-rw-r--r--  queue/x86-speculation-Enable-Spectre-v1-swapgs-mitigations.patch  263
-rw-r--r--  queue/x86-speculation-Prepare-entry-code-for-Spectre-v1-sw.patch  194
-rw-r--r--  queue/x86-speculation-swapgs-Exclude-ATOMs-from-speculatio.patch  155
8 files changed, 1126 insertions, 0 deletions
diff --git a/queue/Documentation-Add-swapgs-description-to-the-Spectre-.patch b/queue/Documentation-Add-swapgs-description-to-the-Spectre-.patch
new file mode 100644
index 0000000..b116e27
--- /dev/null
+++ b/queue/Documentation-Add-swapgs-description-to-the-Spectre-.patch
@@ -0,0 +1,166 @@
+From 4c92057661a3412f547ede95715641d7ee16ddac Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Sat, 3 Aug 2019 21:21:54 +0200
+Subject: [PATCH] Documentation: Add swapgs description to the Spectre v1
+ documentation
+
+commit 4c92057661a3412f547ede95715641d7ee16ddac upstream.
+
+Add documentation to the Spectre document about the new swapgs variant of
+Spectre v1.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+index 25f3b2532198..e05e581af5cf 100644
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -41,10 +41,11 @@ Related CVEs
+
+ The following CVE entries describe Spectre variants:
+
+- ============= ======================= =================
++ ============= ======================= ==========================
+ CVE-2017-5753 Bounds check bypass Spectre variant 1
+ CVE-2017-5715 Branch target injection Spectre variant 2
+- ============= ======================= =================
++ CVE-2019-1125 Spectre v1 swapgs Spectre variant 1 (swapgs)
++ ============= ======================= ==========================
+
+ Problem
+ -------
+@@ -78,6 +79,13 @@ There are some extensions of Spectre variant 1 attacks for reading data
+ over the network, see :ref:`[12] <spec_ref12>`. However such attacks
+ are difficult, low bandwidth, fragile, and are considered low risk.
+
++Note that, despite the "Bounds Check Bypass" name, Spectre variant 1 is not
++only about user-controlled array bounds checks. It can affect any
++conditional checks. The kernel entry code interrupt, exception, and NMI
++handlers all have conditional swapgs checks. Those may be problematic
++in the context of Spectre v1, as kernel code can speculatively run with
++a user GS.
++
+ Spectre variant 2 (Branch Target Injection)
+ -------------------------------------------
+
+@@ -132,6 +140,9 @@ not cover all possible attack vectors.
+ 1. A user process attacking the kernel
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
++Spectre variant 1
++~~~~~~~~~~~~~~~~~
++
+ The attacker passes a parameter to the kernel via a register or
+ via a known address in memory during a syscall. Such parameter may
+ be used later by the kernel as an index to an array or to derive
+@@ -144,7 +155,40 @@ not cover all possible attack vectors.
+ potentially be influenced for Spectre attacks, new "nospec" accessor
+ macros are used to prevent speculative loading of data.
+
+- Spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
++Spectre variant 1 (swapgs)
++~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++ An attacker can train the branch predictor to speculatively skip the
++ swapgs path for an interrupt or exception. If they initialize
++ the GS register to a user-space value and the swapgs is speculatively
++ skipped, subsequent GS-related percpu accesses in the speculation
++ window will be done with the attacker-controlled GS value. This
++ could cause privileged memory to be accessed and leaked.
++
++ For example:
++
++ ::
++
++ if (coming from user space)
++ swapgs
++ mov %gs:<percpu_offset>, %reg
++ mov (%reg), %reg1
++
++ When coming from user space, the CPU can speculatively skip the
++ swapgs, and then do a speculative percpu load using the user GS
++ value. So the user can speculatively force a read of any kernel
++ value. If a gadget exists which uses the percpu value as an address
++ in another load/store, then the contents of the kernel value may
++ become visible via an L1 side channel attack.
++
++ A similar attack exists when coming from kernel space. The CPU can
++ speculatively do the swapgs, causing the user GS to get used for the
++ rest of the speculative window.
++
++Spectre variant 2
++~~~~~~~~~~~~~~~~~
++
++ A Spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
+ target buffer (BTB) before issuing syscall to launch an attack.
+ After entering the kernel, the kernel could use the poisoned branch
+ target buffer on indirect jump and jump to gadget code in speculative
+@@ -280,11 +324,18 @@ The sysfs file showing Spectre variant 1 mitigation status is:
+
+ The possible values in this file are:
+
+- ======================================= =================================
+- 'Mitigation: __user pointer sanitation' Protection in kernel on a case by
+- case base with explicit pointer
+- sanitation.
+- ======================================= =================================
++ .. list-table::
++
++ * - 'Not affected'
++ - The processor is not vulnerable.
++ * - 'Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers'
++ - The swapgs protections are disabled; otherwise it has
++ protection in the kernel on a case by case basis with explicit
++ pointer sanitization and usercopy LFENCE barriers.
++ * - 'Mitigation: usercopy/swapgs barriers and __user pointer sanitization'
++ - Protection in the kernel on a case by case basis with explicit
++ pointer sanitization, usercopy LFENCE barriers, and swapgs LFENCE
++ barriers.
+
+ However, the protections are put in place on a case by case basis,
+ and there is no guarantee that all possible attack vectors for Spectre
+@@ -366,12 +417,27 @@ Turning on mitigation for Spectre variant 1 and Spectre variant 2
+ 1. Kernel mitigation
+ ^^^^^^^^^^^^^^^^^^^^
+
++Spectre variant 1
++~~~~~~~~~~~~~~~~~
++
+ For the Spectre variant 1, vulnerable kernel code (as determined
+ by code audit or scanning tools) is annotated on a case by case
+ basis to use nospec accessor macros for bounds clipping :ref:`[2]
+ <spec_ref2>` to avoid any usable disclosure gadgets. However, it may
+ not cover all attack vectors for Spectre variant 1.
+
++ Copy-from-user code has an LFENCE barrier to prevent the access_ok()
++ check from being mis-speculated. The barrier is done by the
++ barrier_nospec() macro.
++
++ For the swapgs variant of Spectre variant 1, LFENCE barriers are
++ added to interrupt, exception and NMI entry where needed. These
++ barriers are done by the FENCE_SWAPGS_KERNEL_ENTRY and
++ FENCE_SWAPGS_USER_ENTRY macros.
++
++Spectre variant 2
++~~~~~~~~~~~~~~~~~
++
+ For Spectre variant 2 mitigation, the compiler turns indirect calls or
+ jumps in the kernel into equivalent return trampolines (retpolines)
+ :ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` to go to the target
+@@ -473,6 +539,12 @@ Mitigation control on the kernel command line
+ Spectre variant 2 mitigation can be disabled or force enabled at the
+ kernel command line.
+
++ nospectre_v1
++
++ [X86,PPC] Disable mitigations for Spectre Variant 1
++ (bounds check bypass). With this option data leaks are
++ possible in the system.
++
+ nospectre_v2
+
+ [X86] Disable all mitigations for the Spectre variant 2
+--
+2.7.4
+
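The "nospec" accessor macros the documentation above refers to clamp a
user-supplied index before it can be used under speculation. A minimal
sketch of the pattern, assuming kernel context and the
array_index_nospec() helper from <linux/nospec.h> (illustrative only,
not part of this patch):

    #include <linux/types.h>
    #include <linux/nospec.h>

    static u8 kernel_array[256];

    u8 read_element(unsigned long user_index, unsigned long size)
    {
            /* The architectural bounds check -- the branch Spectre v1 abuses. */
            if (user_index >= size)
                    return 0;

            /*
             * Clamp the index so the load below cannot be speculatively
             * executed with an out-of-bounds value while the branch above
             * is still unresolved.
             */
            user_index = array_index_nospec(user_index, size);

            return kernel_array[user_index];
    }
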
diff --git a/queue/series b/queue/series
new file mode 100644
index 0000000..f0429e3
--- /dev/null
+++ b/queue/series
@@ -0,0 +1,7 @@
+x86-cpufeatures-Carve-out-CQM-features-retrieval.patch
+x86-cpufeatures-Combine-word-11-and-12-into-a-new-sc.patch
+x86-speculation-Prepare-entry-code-for-Spectre-v1-sw.patch
+x86-speculation-Enable-Spectre-v1-swapgs-mitigations.patch
+x86-entry-64-Use-JMP-instead-of-JMPQ.patch
+x86-speculation-swapgs-Exclude-ATOMs-from-speculatio.patch
+Documentation-Add-swapgs-description-to-the-Spectre-.patch
diff --git a/queue/x86-cpufeatures-Carve-out-CQM-features-retrieval.patch b/queue/x86-cpufeatures-Carve-out-CQM-features-retrieval.patch
new file mode 100644
index 0000000..7c3869e
--- /dev/null
+++ b/queue/x86-cpufeatures-Carve-out-CQM-features-retrieval.patch
@@ -0,0 +1,105 @@
+From 45fc56e629caa451467e7664fbd4c797c434a6c4 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 19 Jun 2019 17:24:34 +0200
+Subject: [PATCH] x86/cpufeatures: Carve out CQM features retrieval
+
+commit 45fc56e629caa451467e7664fbd4c797c434a6c4 upstream.
+
+... into a separate function for better readability. Split out from a
+patch from Fenghua Yu <fenghua.yu@intel.com> to keep the purely
+mechanical code movement separate for easy review.
+
+No functional changes.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: x86@kernel.org
+
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 2c57fffebf9b..fe6ed9696467 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -801,6 +801,38 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
+ }
+ }
+
++static void init_cqm(struct cpuinfo_x86 *c)
++{
++ u32 eax, ebx, ecx, edx;
++
++ /* Additional Intel-defined flags: level 0x0000000F */
++ if (c->cpuid_level >= 0x0000000F) {
++
++ /* QoS sub-leaf, EAX=0Fh, ECX=0 */
++ cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
++ c->x86_capability[CPUID_F_0_EDX] = edx;
++
++ if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
++ /* will be overridden if occupancy monitoring exists */
++ c->x86_cache_max_rmid = ebx;
++
++ /* QoS sub-leaf, EAX=0Fh, ECX=1 */
++ cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
++ c->x86_capability[CPUID_F_1_EDX] = edx;
++
++ if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
++ ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
++ (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
++ c->x86_cache_max_rmid = ecx;
++ c->x86_cache_occ_scale = ebx;
++ }
++ } else {
++ c->x86_cache_max_rmid = -1;
++ c->x86_cache_occ_scale = -1;
++ }
++ }
++}
++
+ void get_cpu_cap(struct cpuinfo_x86 *c)
+ {
+ u32 eax, ebx, ecx, edx;
+@@ -832,33 +864,6 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
+ c->x86_capability[CPUID_D_1_EAX] = eax;
+ }
+
+- /* Additional Intel-defined flags: level 0x0000000F */
+- if (c->cpuid_level >= 0x0000000F) {
+-
+- /* QoS sub-leaf, EAX=0Fh, ECX=0 */
+- cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
+- c->x86_capability[CPUID_F_0_EDX] = edx;
+-
+- if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
+- /* will be overridden if occupancy monitoring exists */
+- c->x86_cache_max_rmid = ebx;
+-
+- /* QoS sub-leaf, EAX=0Fh, ECX=1 */
+- cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
+- c->x86_capability[CPUID_F_1_EDX] = edx;
+-
+- if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
+- ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
+- (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
+- c->x86_cache_max_rmid = ecx;
+- c->x86_cache_occ_scale = ebx;
+- }
+- } else {
+- c->x86_cache_max_rmid = -1;
+- c->x86_cache_occ_scale = -1;
+- }
+- }
+-
+ /* AMD-defined flags: level 0x80000001 */
+ eax = cpuid_eax(0x80000000);
+ c->extended_cpuid_level = eax;
+@@ -889,6 +894,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
+
+ init_scattered_cpuid_features(c);
+ init_speculation_control(c);
++ init_cqm(c);
+
+ /*
+ * Clear/Set all flags overridden by options, after probe.
+--
+2.7.4
+
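For reference, the CPUID leaves that init_cqm() walks can also be
inspected from userspace with the compiler's <cpuid.h> helpers. A rough
sketch, assuming GCC/Clang on x86 and the same leaf/sub-leaf layout the
kernel code above uses (EDX bit 1 of leaf 0xF/0 is the LLC QoS bit, per
the cpufeatures table):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* QoS sub-leaf, EAX=0Fh, ECX=0: EDX bit 1 => LLC QoS monitoring */
            __cpuid_count(0x0f, 0, eax, ebx, ecx, edx);
            if (!(edx & (1u << 1))) {
                    puts("no LLC QoS monitoring");
                    return 0;
            }

            /* QoS sub-leaf, EAX=0Fh, ECX=1: max RMID (ECX), occupancy scale (EBX) */
            __cpuid_count(0x0f, 1, eax, ebx, ecx, edx);
            printf("max RMID: %u, occupancy scale: %u\n", ecx, ebx);
            return 0;
    }
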
diff --git a/queue/x86-cpufeatures-Combine-word-11-and-12-into-a-new-sc.patch b/queue/x86-cpufeatures-Combine-word-11-and-12-into-a-new-sc.patch
new file mode 100644
index 0000000..af99443
--- /dev/null
+++ b/queue/x86-cpufeatures-Combine-word-11-and-12-into-a-new-sc.patch
@@ -0,0 +1,201 @@
+From acec0ce081de0c36459eea91647faf99296445a3 Mon Sep 17 00:00:00 2001
+From: Fenghua Yu <fenghua.yu@intel.com>
+Date: Wed, 19 Jun 2019 18:51:09 +0200
+Subject: [PATCH] x86/cpufeatures: Combine word 11 and 12 into a new scattered
+ features word
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+commit acec0ce081de0c36459eea91647faf99296445a3 upstream.
+
+It's a waste for the four X86_FEATURE_CQM_* feature bits to occupy two
+whole feature words. To better utilize feature words, re-define
+word 11 to host scattered features and move the four X86_FEATURE_CQM_*
+features into Linux defined word 11. More scattered features can be
+added in word 11 in the future.
+
+Rename leaf 11 in cpuid_leafs to CPUID_LNX_4 to reflect it's a
+Linux-defined leaf.
+
+Rename leaf 12 as CPUID_DUMMY which will be replaced by a meaningful
+name in the next patch when CPUID.7.1:EAX occupies word 12.
+
+The maximum number of RMIDs and the cache occupancy scale are retrieved from
+CPUID.0xf.1 after scattered CQM features are enumerated. Carve out the
+code into a separate function.
+
+KVM doesn't support resctrl now. So it's safe to move the
+X86_FEATURE_CQM_* features to scattered features word 11 for KVM.
+
+Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Aaron Lewis <aaronlewis@google.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Babu Moger <babu.moger@amd.com>
+Cc: "Chang S. Bae" <chang.seok.bae@intel.com>
+Cc: "Sean J Christopherson" <sean.j.christopherson@intel.com>
+Cc: Frederic Weisbecker <frederic@kernel.org>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: kvm ML <kvm@vger.kernel.org>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Nadav Amit <namit@vmware.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Pavel Tatashin <pasha.tatashin@oracle.com>
+Cc: Peter Feiner <pfeiner@google.com>
+Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Cc: Ravi V Shankar <ravi.v.shankar@intel.com>
+Cc: Sherry Hurwitz <sherry.hurwitz@amd.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Thomas Lendacky <Thomas.Lendacky@amd.com>
+Cc: x86 <x86@kernel.org>
+Link: https://lkml.kernel.org/r/1560794416-217638-2-git-send-email-fenghua.yu@intel.com
+
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 1d337c51f7e6..403f70c2e431 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -22,8 +22,8 @@ enum cpuid_leafs
+ CPUID_LNX_3,
+ CPUID_7_0_EBX,
+ CPUID_D_1_EAX,
+- CPUID_F_0_EDX,
+- CPUID_F_1_EDX,
++ CPUID_LNX_4,
++ CPUID_DUMMY,
+ CPUID_8000_0008_EBX,
+ CPUID_6_EAX,
+ CPUID_8000_000A_EDX,
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 1017b9c7dfe0..be858b86023a 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -271,13 +271,16 @@
+ #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
+ #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
+
+-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
+-#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
+-
+-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
+-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
+-#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
+-#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
++/*
++ * Extended auxiliary flags: Linux defined - for features scattered in various
++ * CPUID levels like 0xf, etc.
++ *
++ * Reuse free bits when adding new feature flags!
++ */
++#define X86_FEATURE_CQM_LLC (11*32+ 0) /* LLC QoS if 1 */
++#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */
++#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */
++#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
+
+ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
+ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index fe6ed9696467..efb114298cfb 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -803,33 +803,25 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
+
+ static void init_cqm(struct cpuinfo_x86 *c)
+ {
+- u32 eax, ebx, ecx, edx;
+-
+- /* Additional Intel-defined flags: level 0x0000000F */
+- if (c->cpuid_level >= 0x0000000F) {
++ if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
++ c->x86_cache_max_rmid = -1;
++ c->x86_cache_occ_scale = -1;
++ return;
++ }
+
+- /* QoS sub-leaf, EAX=0Fh, ECX=0 */
+- cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
+- c->x86_capability[CPUID_F_0_EDX] = edx;
++ /* will be overridden if occupancy monitoring exists */
++ c->x86_cache_max_rmid = cpuid_ebx(0xf);
+
+- if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
+- /* will be overridden if occupancy monitoring exists */
+- c->x86_cache_max_rmid = ebx;
++ if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
++ cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
++ cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
++ u32 eax, ebx, ecx, edx;
+
+- /* QoS sub-leaf, EAX=0Fh, ECX=1 */
+- cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
+- c->x86_capability[CPUID_F_1_EDX] = edx;
++ /* QoS sub-leaf, EAX=0Fh, ECX=1 */
++ cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
+
+- if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
+- ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
+- (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
+- c->x86_cache_max_rmid = ecx;
+- c->x86_cache_occ_scale = ebx;
+- }
+- } else {
+- c->x86_cache_max_rmid = -1;
+- c->x86_cache_occ_scale = -1;
+- }
++ c->x86_cache_max_rmid = ecx;
++ c->x86_cache_occ_scale = ebx;
+ }
+ }
+
+diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
+index 2c0bd38a44ab..fa07a224e7b9 100644
+--- a/arch/x86/kernel/cpu/cpuid-deps.c
++++ b/arch/x86/kernel/cpu/cpuid-deps.c
+@@ -59,6 +59,9 @@ static const struct cpuid_dep cpuid_deps[] = {
+ { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F },
++ { X86_FEATURE_CQM_OCCUP_LLC, X86_FEATURE_CQM_LLC },
++ { X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC },
++ { X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC },
+ {}
+ };
+
+diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
+index 94aa1c72ca98..adf9b71386ef 100644
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -26,6 +26,10 @@ struct cpuid_bit {
+ static const struct cpuid_bit cpuid_bits[] = {
+ { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
+ { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
++ { X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
++ { X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 },
++ { X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 },
++ { X86_FEATURE_CQM_MBM_LOCAL, CPUID_EDX, 2, 0x0000000f, 1 },
+ { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
+ { X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 },
+ { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index 9a327d5b6d1f..d78a61408243 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -47,8 +47,6 @@ static const struct cpuid_reg reverse_cpuid[] = {
+ [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
+ [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX},
+ [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX},
+- [CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX},
+- [CPUID_F_1_EDX] = { 0xf, 1, CPUID_EDX},
+ [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
+ [CPUID_6_EAX] = { 6, 0, CPUID_EAX},
+ [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
+--
+2.7.4
+
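The X86_FEATURE_* constants above encode (word * 32 + bit), which is
what makes the move from the leaf-mirroring words 11/12 into a scattered
word painless: a word is just a slot in x86_capability[], and
scattered.c fills word 11 by hand from the cpuid_bits[] table. A toy
model of the lookup (names are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    #define NCAPINTS        19                  /* number of capability words (illustrative) */
    #define FEATURE_CQM_LLC (11 * 32 + 0)       /* word 11, bit 0 after this patch */

    struct cpuinfo {
            uint32_t x86_capability[NCAPINTS];
    };

    static bool has_feature(const struct cpuinfo *c, unsigned int nr)
    {
            return c->x86_capability[nr / 32] & (1u << (nr % 32));
    }
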
diff --git a/queue/x86-entry-64-Use-JMP-instead-of-JMPQ.patch b/queue/x86-entry-64-Use-JMP-instead-of-JMPQ.patch
new file mode 100644
index 0000000..7862bff
--- /dev/null
+++ b/queue/x86-entry-64-Use-JMP-instead-of-JMPQ.patch
@@ -0,0 +1,35 @@
+From 64dbc122b20f75183d8822618c24f85144a5a94d Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Mon, 15 Jul 2019 11:51:39 -0500
+Subject: [PATCH] x86/entry/64: Use JMP instead of JMPQ
+
+commit 64dbc122b20f75183d8822618c24f85144a5a94d upstream.
+
+Somehow the swapgs mitigation entry code patch ended up with a JMPQ
+instruction instead of JMP, where only the short jump is needed. Some
+assembler versions apparently fail to optimize JMPQ into a two-byte JMP
+when possible, instead always using a 7-byte JMP with relocation. For
+some reason that makes the entry code explode with a #GP during boot.
+
+Change it back to "JMP" as originally intended.
+
+Fixes: 18ec54fdd6d1 ("x86/speculation: Prepare entry code for Spectre v1 swapgs mitigations")
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 57a0d96d6beb..b043c754d978 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -549,7 +549,7 @@ ENTRY(interrupt_entry)
+ UNWIND_HINT_FUNC
+
+ movq (%rdi), %rdi
+- jmpq 2f
++ jmp 2f
+ 1:
+ FENCE_SWAPGS_KERNEL_ENTRY
+ 2:
+--
+2.7.4
+
diff --git a/queue/x86-speculation-Enable-Spectre-v1-swapgs-mitigations.patch b/queue/x86-speculation-Enable-Spectre-v1-swapgs-mitigations.patch
new file mode 100644
index 0000000..8382bd3
--- /dev/null
+++ b/queue/x86-speculation-Enable-Spectre-v1-swapgs-mitigations.patch
@@ -0,0 +1,263 @@
+From a2059825986a1c8143fd6698774fa9d83733bb11 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Mon, 8 Jul 2019 11:52:26 -0500
+Subject: [PATCH] x86/speculation: Enable Spectre v1 swapgs mitigations
+
+commit a2059825986a1c8143fd6698774fa9d83733bb11 upstream.
+
+The previous commit added macro calls in the entry code which mitigate the
+Spectre v1 swapgs issue if the X86_FEATURE_FENCE_SWAPGS_* features are
+enabled. Enable those features where applicable.
+
+The mitigations may be disabled with "nospectre_v1" or "mitigations=off".
+
+There are different features which can affect the risk of attack:
+
+- When FSGSBASE is enabled, unprivileged users are able to place any
+ value in GS, using the wrgsbase instruction. This means they can
+ write a GS value which points to any value in kernel space, which can
+ be useful with the following gadget in an interrupt/exception/NMI
+ handler:
+
+ if (coming from user space)
+ swapgs
+ mov %gs:<percpu_offset>, %reg1
+ // dependent load or store based on the value of %reg1
+ // for example: mov (%reg1), %reg2
+
+ If an interrupt is coming from user space, and the entry code
+ speculatively skips the swapgs (due to user branch mistraining), it
+ may speculatively execute the GS-based load and a subsequent dependent
+ load or store, exposing the kernel data to an L1 side channel leak.
+
+ Note that, on Intel, a similar attack exists in the above gadget when
+ coming from kernel space, if the swapgs gets speculatively executed to
+ switch back to the user GS. On AMD, this variant isn't possible
+ because swapgs is serializing with respect to future GS-based
+ accesses.
+
+ NOTE: The FSGSBASE patch set hasn't been merged yet, so the above case
+ doesn't exist quite yet.
+
+- When FSGSBASE is disabled, the issue is mitigated somewhat because
+ unprivileged users must use prctl(ARCH_SET_GS) to set GS, which
+ restricts GS values to user space addresses only. That means the
+ gadget would need an additional step, since the target kernel address
+ needs to be read from user space first. Something like:
+
+ if (coming from user space)
+ swapgs
+ mov %gs:<percpu_offset>, %reg1
+ mov (%reg1), %reg2
+ // dependent load or store based on the value of %reg2
+ // for example: mov (%reg2), %reg3
+
+ It's difficult to audit for this gadget in all the handlers, so while
+ there are no known instances of it, it's entirely possible that it
+ exists somewhere (or could be introduced in the future). Without
+ tooling to analyze all such code paths, consider it vulnerable.
+
+ Effects of SMAP on the !FSGSBASE case:
+
+ - If SMAP is enabled, and the CPU reports RDCL_NO (i.e., not
+ susceptible to Meltdown), the kernel is prevented from speculatively
+ reading user space memory, even L1 cached values. This effectively
+ disables the !FSGSBASE attack vector.
+
+ - If SMAP is enabled, but the CPU *is* susceptible to Meltdown, SMAP
+ still prevents the kernel from speculatively reading user space
+ memory. But it does *not* prevent the kernel from reading the
+ user value from L1, if it has already been cached. This is probably
+ only a small hurdle for an attacker to overcome.
+
+Thanks to Dave Hansen for contributing the speculative_smap() function.
+
+Thanks to Andrew Cooper for providing the inside scoop on whether swapgs
+is serializing on AMD.
+
+[ tglx: Fixed the USER fence decision and polished the comment as suggested
+ by Dave Hansen ]
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Dave Hansen <dave.hansen@intel.com>
+
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index e6e806285703..01d7ad250e98 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2587,7 +2587,7 @@
+ expose users to several CPU vulnerabilities.
+ Equivalent to: nopti [X86,PPC]
+ kpti=0 [ARM64]
+- nospectre_v1 [PPC]
++ nospectre_v1 [X86,PPC]
+ nobp=0 [S390]
+ nospectre_v2 [X86,PPC,S390,ARM64]
+ spectre_v2_user=off [X86]
+@@ -2936,9 +2936,9 @@
+ nosmt=force: Force disable SMT, cannot be undone
+ via the sysfs control file.
+
+- nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds
+- check bypass). With this option data leaks are possible
+- in the system.
++ nospectre_v1 [X86,PPC] Disable mitigations for Spectre Variant 1
++ (bounds check bypass). With this option data leaks are
++ possible in the system.
+
+ nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
+ the Spectre variant 2 (indirect branch prediction)
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 66ca906aa790..992f832c447b 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -34,6 +34,7 @@
+
+ #include "cpu.h"
+
++static void __init spectre_v1_select_mitigation(void);
+ static void __init spectre_v2_select_mitigation(void);
+ static void __init ssb_select_mitigation(void);
+ static void __init l1tf_select_mitigation(void);
+@@ -98,17 +99,11 @@ void __init check_bugs(void)
+ if (boot_cpu_has(X86_FEATURE_STIBP))
+ x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
+- /* Select the proper spectre mitigation before patching alternatives */
++ /* Select the proper CPU mitigations before patching alternatives: */
++ spectre_v1_select_mitigation();
+ spectre_v2_select_mitigation();
+-
+- /*
+- * Select proper mitigation for any exposure to the Speculative Store
+- * Bypass vulnerability.
+- */
+ ssb_select_mitigation();
+-
+ l1tf_select_mitigation();
+-
+ mds_select_mitigation();
+
+ arch_smt_update();
+@@ -274,6 +269,108 @@ static int __init mds_cmdline(char *str)
+ early_param("mds", mds_cmdline);
+
+ #undef pr_fmt
++#define pr_fmt(fmt) "Spectre V1 : " fmt
++
++enum spectre_v1_mitigation {
++ SPECTRE_V1_MITIGATION_NONE,
++ SPECTRE_V1_MITIGATION_AUTO,
++};
++
++static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
++ SPECTRE_V1_MITIGATION_AUTO;
++
++static const char * const spectre_v1_strings[] = {
++ [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
++ [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
++};
++
++static bool is_swapgs_serializing(void)
++{
++ /*
++ * Technically, swapgs isn't serializing on AMD (despite it previously
++ * being documented as such in the APM). But according to AMD, %gs is
++ * updated non-speculatively, and the issuing of %gs-relative memory
++ * operands will be blocked until the %gs update completes, which is
++ * good enough for our purposes.
++ */
++ return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
++}
++
++/*
++ * Does SMAP provide full mitigation against speculative kernel access to
++ * userspace?
++ */
++static bool smap_works_speculatively(void)
++{
++ if (!boot_cpu_has(X86_FEATURE_SMAP))
++ return false;
++
++ /*
++ * On CPUs which are vulnerable to Meltdown, SMAP does not
++ * prevent speculative access to user data in the L1 cache.
++ * Consider SMAP to be non-functional as a mitigation on these
++ * CPUs.
++ */
++ if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
++ return false;
++
++ return true;
++}
++
++static void __init spectre_v1_select_mitigation(void)
++{
++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
++ spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
++ return;
++ }
++
++ if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
++ /*
++ * With Spectre v1, a user can speculatively control either
++ * path of a conditional swapgs with a user-controlled GS
++ * value. The mitigation is to add lfences to both code paths.
++ *
++ * If FSGSBASE is enabled, the user can put a kernel address in
++ * GS, in which case SMAP provides no protection.
++ *
++ * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
++ * FSGSBASE enablement patches have been merged. ]
++ *
++ * If FSGSBASE is disabled, the user can only put a user space
++ * address in GS. That makes an attack harder, but still
++ * possible if there's no SMAP protection.
++ */
++ if (!smap_works_speculatively()) {
++ /*
++ * Mitigation can be provided from SWAPGS itself or
++ * PTI as the CR3 write in the Meltdown mitigation
++ * is serializing.
++ *
++ * If neither is there, mitigate with an LFENCE.
++ */
++ if (!is_swapgs_serializing() && !boot_cpu_has(X86_FEATURE_PTI))
++ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
++
++ /*
++ * Enable lfences in the kernel entry (non-swapgs)
++ * paths, to prevent user entry from speculatively
++ * skipping swapgs.
++ */
++ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
++ }
++ }
++
++ pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
++}
++
++static int __init nospectre_v1_cmdline(char *str)
++{
++ spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
++ return 0;
++}
++early_param("nospectre_v1", nospectre_v1_cmdline);
++
++#undef pr_fmt
+ #define pr_fmt(fmt) "Spectre V2 : " fmt
+
+ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+@@ -1290,7 +1387,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+ break;
+
+ case X86_BUG_SPECTRE_V1:
+- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++ return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+
+ case X86_BUG_SPECTRE_V2:
+ return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+--
+2.7.4
+
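After this change, the selected spectre_v1_strings[] entry is what
cpu_show_common() returns through sysfs. A minimal userspace reader,
assuming the standard vulnerabilities directory:

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v1", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(line, sizeof(line), f))
                    fputs(line, stdout);    /* e.g. "Mitigation: usercopy/swapgs barriers ..." */
            fclose(f);
            return 0;
    }
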
diff --git a/queue/x86-speculation-Prepare-entry-code-for-Spectre-v1-sw.patch b/queue/x86-speculation-Prepare-entry-code-for-Spectre-v1-sw.patch
new file mode 100644
index 0000000..fc7b4b1
--- /dev/null
+++ b/queue/x86-speculation-Prepare-entry-code-for-Spectre-v1-sw.patch
@@ -0,0 +1,194 @@
+From 18ec54fdd6d18d92025af097cd042a75cf0ea24c Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Mon, 8 Jul 2019 11:52:25 -0500
+Subject: [PATCH] x86/speculation: Prepare entry code for Spectre v1 swapgs
+ mitigations
+
+commit 18ec54fdd6d18d92025af097cd042a75cf0ea24c upstream.
+
+Spectre v1 isn't only about array bounds checks. It can affect any
+conditional checks. The kernel entry code interrupt, exception, and NMI
+handlers all have conditional swapgs checks. Those may be problematic in
+the context of Spectre v1, as kernel code can speculatively run with a user
+GS.
+
+For example:
+
+ if (coming from user space)
+ swapgs
+ mov %gs:<percpu_offset>, %reg
+ mov (%reg), %reg1
+
+When coming from user space, the CPU can speculatively skip the swapgs, and
+then do a speculative percpu load using the user GS value. So the user can
+speculatively force a read of any kernel value. If a gadget exists which
+uses the percpu value as an address in another load/store, then the
+contents of the kernel value may become visible via an L1 side channel
+attack.
+
+A similar attack exists when coming from kernel space. The CPU can
+speculatively do the swapgs, causing the user GS to get used for the rest
+of the speculative window.
+
+The mitigation is similar to a traditional Spectre v1 mitigation, except:
+
+ a) index masking isn't possible because the index (percpu offset)
+ isn't user-controlled; and
+
+ b) an lfence is needed in both the "from user" swapgs path and the
+ "from kernel" non-swapgs path (because of the two attacks described
+ above).
+
+The user entry swapgs paths already have SWITCH_TO_KERNEL_CR3, which has a
+CR3 write when PTI is enabled. Since CR3 writes are serializing, the
+lfences can be skipped in those cases.
+
+On the other hand, the kernel entry swapgs paths don't depend on PTI.
+
+To avoid unnecessary lfences for the user entry case, create two separate
+features for alternative patching:
+
+ X86_FEATURE_FENCE_SWAPGS_USER
+ X86_FEATURE_FENCE_SWAPGS_KERNEL
+
+Use these features in entry code to patch in lfences where needed.
+
+The features aren't enabled yet, so there's no functional change.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Dave Hansen <dave.hansen@intel.com>
+
+diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
+index 9f1f9e3b8230..7ce7ac9d9d3f 100644
+--- a/arch/x86/entry/calling.h
++++ b/arch/x86/entry/calling.h
+@@ -314,6 +314,23 @@ For 32-bit we have the following conventions - kernel is built with
+
+ #endif
+
++/*
++ * Mitigate Spectre v1 for conditional swapgs code paths.
++ *
++ * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
++ * prevent a speculative swapgs when coming from kernel space.
++ *
++ * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
++ * to prevent the swapgs from getting speculatively skipped when coming from
++ * user space.
++ */
++.macro FENCE_SWAPGS_USER_ENTRY
++ ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
++.endm
++.macro FENCE_SWAPGS_KERNEL_ENTRY
++ ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
++.endm
++
+ .macro STACKLEAK_ERASE_NOCLOBBER
+ #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ PUSH_AND_CLEAR_REGS
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index a829dd3117d0..57a0d96d6beb 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -519,7 +519,7 @@ ENTRY(interrupt_entry)
+ testb $3, CS-ORIG_RAX+8(%rsp)
+ jz 1f
+ SWAPGS
+-
++ FENCE_SWAPGS_USER_ENTRY
+ /*
+ * Switch to the thread stack. The IRET frame and orig_ax are
+ * on the stack, as well as the return address. RDI..R12 are
+@@ -549,8 +549,10 @@ ENTRY(interrupt_entry)
+ UNWIND_HINT_FUNC
+
+ movq (%rdi), %rdi
++ jmpq 2f
+ 1:
+-
++ FENCE_SWAPGS_KERNEL_ENTRY
++2:
+ PUSH_AND_CLEAR_REGS save_ret=1
+ ENCODE_FRAME_POINTER 8
+
+@@ -1221,6 +1223,13 @@ ENTRY(paranoid_entry)
+ */
+ SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
+
++ /*
++ * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
++ * unconditional CR3 write, even in the PTI case. So do an lfence
++ * to prevent GS speculation, regardless of whether PTI is enabled.
++ */
++ FENCE_SWAPGS_KERNEL_ENTRY
++
+ ret
+ END(paranoid_entry)
+
+@@ -1271,6 +1280,7 @@ ENTRY(error_entry)
+ * from user mode due to an IRET fault.
+ */
+ SWAPGS
++ FENCE_SWAPGS_USER_ENTRY
+ /* We have user CR3. Change to kernel CR3. */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+
+@@ -1292,6 +1302,8 @@ ENTRY(error_entry)
+ CALL_enter_from_user_mode
+ ret
+
++.Lerror_entry_done_lfence:
++ FENCE_SWAPGS_KERNEL_ENTRY
+ .Lerror_entry_done:
+ TRACE_IRQS_OFF
+ ret
+@@ -1310,7 +1322,7 @@ ENTRY(error_entry)
+ cmpq %rax, RIP+8(%rsp)
+ je .Lbstep_iret
+ cmpq $.Lgs_change, RIP+8(%rsp)
+- jne .Lerror_entry_done
++ jne .Lerror_entry_done_lfence
+
+ /*
+ * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
+@@ -1318,6 +1330,7 @@ ENTRY(error_entry)
+ * .Lgs_change's error handler with kernel gsbase.
+ */
+ SWAPGS
++ FENCE_SWAPGS_USER_ENTRY
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+ jmp .Lerror_entry_done
+
+@@ -1332,6 +1345,7 @@ ENTRY(error_entry)
+ * gsbase and CR3. Switch to kernel gsbase and CR3:
+ */
+ SWAPGS
++ FENCE_SWAPGS_USER_ENTRY
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+
+ /*
+@@ -1423,6 +1437,7 @@ ENTRY(nmi)
+
+ swapgs
+ cld
++ FENCE_SWAPGS_USER_ENTRY
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
+ movq %rsp, %rdx
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 998c2cc08363..4393278666d9 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -281,6 +281,8 @@
+ #define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */
+ #define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */
+ #define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
++#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
++#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+--
+2.7.4
+
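The rule the two macros implement is: fence whichever arm of the
conditional could be reached speculatively with the wrong GS. A
simplified C model of the entry paths (do_swapgs() stands in for the
SWAPGS instruction; the real code patches the lfences in via
ALTERNATIVE so unaffected CPUs pay nothing):

    extern void do_swapgs(void);    /* stand-in for the SWAPGS instruction */

    static inline void fence(void)
    {
            asm volatile("lfence" ::: "memory");    /* speculation barrier */
    }

    void entry(int from_user)
    {
            if (from_user) {
                    do_swapgs();
                    /*
                     * FENCE_SWAPGS_USER_ENTRY: a kernel-origin entry must not
                     * speculatively execute this arm and continue with user GS.
                     */
                    fence();
            } else {
                    /*
                     * FENCE_SWAPGS_KERNEL_ENTRY: a user-origin entry must not
                     * speculatively skip the swapgs above.
                     */
                    fence();
            }
            /* GS-relative percpu accesses are safe past this point. */
    }
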
diff --git a/queue/x86-speculation-swapgs-Exclude-ATOMs-from-speculatio.patch b/queue/x86-speculation-swapgs-Exclude-ATOMs-from-speculatio.patch
new file mode 100644
index 0000000..1869a59
--- /dev/null
+++ b/queue/x86-speculation-swapgs-Exclude-ATOMs-from-speculatio.patch
@@ -0,0 +1,155 @@
+From f36cf386e3fec258a341d446915862eded3e13d8 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 17 Jul 2019 21:18:59 +0200
+Subject: [PATCH] x86/speculation/swapgs: Exclude ATOMs from speculation
+ through SWAPGS
+
+commit f36cf386e3fec258a341d446915862eded3e13d8 upstream.
+
+Intel provided the following information:
+
+ On all current Atom processors, instructions that use a segment register
+ value (e.g. a load or store) will not speculatively execute before the
+ last writer of that segment retires. Thus they will not use a
+ speculatively written segment value.
+
+That means on ATOMs there is no speculation through SWAPGS, so the SWAPGS
+entry paths can be excluded from the extra LFENCE if PTI is disabled.
+
+Create a separate bug flag for speculation through SWAPGS and mark all
+out-of-order ATOMs and AMD/HYGON CPUs as not affected. The in-order ATOMs
+are excluded from the whole mitigation mess anyway.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 4393278666d9..e880f2408e29 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -396,5 +396,6 @@
+ #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+ #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
+ #define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
++#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 992f832c447b..6383f0db098c 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -284,18 +284,6 @@ static const char * const spectre_v1_strings[] = {
+ [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
+ };
+
+-static bool is_swapgs_serializing(void)
+-{
+- /*
+- * Technically, swapgs isn't serializing on AMD (despite it previously
+- * being documented as such in the APM). But according to AMD, %gs is
+- * updated non-speculatively, and the issuing of %gs-relative memory
+- * operands will be blocked until the %gs update completes, which is
+- * good enough for our purposes.
+- */
+- return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
+-}
+-
+ /*
+ * Does SMAP provide full mitigation against speculative kernel access to
+ * userspace?
+@@ -346,9 +334,11 @@ static void __init spectre_v1_select_mitigation(void)
+ * PTI as the CR3 write in the Meltdown mitigation
+ * is serializing.
+ *
+- * If neither is there, mitigate with an LFENCE.
++ * If neither is there, mitigate with an LFENCE to
++ * stop speculation through swapgs.
+ */
+- if (!is_swapgs_serializing() && !boot_cpu_has(X86_FEATURE_PTI))
++ if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
++ !boot_cpu_has(X86_FEATURE_PTI))
+ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
+
+ /*
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 309b6b9b49d4..300dcf00d287 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -970,6 +970,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+ #define NO_L1TF BIT(3)
+ #define NO_MDS BIT(4)
+ #define MSBDS_ONLY BIT(5)
++#define NO_SWAPGS BIT(6)
+
+ #define VULNWL(_vendor, _family, _model, _whitelist) \
+ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+@@ -996,30 +997,38 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
+
+- VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
+- VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY),
+- VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY),
+- VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
+- VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY),
+- VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
++ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
++ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
++ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
++ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
++ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+
+ VULNWL_INTEL(CORE_YONAH, NO_SSB),
+
+- VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+
+- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF),
+- VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF),
+- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF),
++ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS),
++ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS),
++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS),
++
++ /*
++ * Technically, swapgs isn't serializing on AMD (despite it previously
++ * being documented as such in the APM). But according to AMD, %gs is
++ * updated non-speculatively, and the issuing of %gs-relative memory
++ * operands will be blocked until the %gs update completes, which is
++ * good enough for our purposes.
++ */
+
+ /* AMD Family 0xf - 0x12 */
+- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+
+ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
+- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
++ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
+ {}
+ };
+
+@@ -1056,6 +1065,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+ }
+
++ if (!cpu_matches(NO_SWAPGS))
++ setup_force_cpu_bug(X86_BUG_SWAPGS);
++
+ if (cpu_matches(NO_MELTDOWN))
+ return;
+
+--
+2.7.4
+
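Taken together with the previous patches, the decision is now:
X86_BUG_SWAPGS is set unless the CPU matches a whitelist entry carrying
NO_SWAPGS, and spectre_v1_select_mitigation() only forces the user-path
fence when that bug bit is present and PTI is off. A condensed model of
the resulting logic (flag names follow the patches; the types are
illustrative):

    #include <stdbool.h>

    struct cpu {
            bool bug_swapgs;    /* set when !cpu_matches(NO_SWAPGS) */
            bool has_pti;       /* X86_FEATURE_PTI: the CR3 write serializes */
            bool smap_works;    /* smap_works_speculatively() */
    };

    static void select_swapgs_fences(const struct cpu *c,
                                     bool *fence_user, bool *fence_kernel)
    {
            *fence_user = *fence_kernel = false;

            if (c->smap_works)
                    return;     /* SMAP already blocks the speculative access */

            if (c->bug_swapgs && !c->has_pti)
                    *fence_user = true;     /* X86_FEATURE_FENCE_SWAPGS_USER */

            *fence_kernel = true;           /* X86_FEATURE_FENCE_SWAPGS_KERNEL */
    }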