author	Dave Hansen <dave.hansen@linux.intel.com>	2020-01-23 10:41:20 -0800
committer	Dave Hansen <dave.hansen@intel.com>	2020-01-23 10:41:20 -0800
commit	45fc24e89b7cc2e227b2f03d99dda0a2204bf383 (patch)
tree	913d1b90a3a490d3290e93d38e28ed8ddae2896a /arch/x86/include/asm/mmu_context.h
parent	42222eae17f7c930833dfda7896ef280879de94a (diff)
x86/mpx: remove MPX from arch/x86
From: Dave Hansen <dave.hansen@linux.intel.com>

MPX is being removed from the kernel due to a lack of toolchain (gcc) support going forward. This removes all of the now-dead MPX handling code remaining in the tree. The only code left behind is the XSAVE support for MPX state, which is currently needed for KVM to handle VMs that might use MPX.

Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: x86@kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Diffstat (limited to 'arch/x86/include/asm/mmu_context.h')
-rw-r--r--	arch/x86/include/asm/mmu_context.h	20
1 file changed, 0 insertions, 20 deletions
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index b7135e92e571..a3e1c72a3ab9 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -12,7 +12,6 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
-#include <asm/mpx.h>
#include <asm/debugreg.h>
extern atomic64_t last_mm_ctx_id;
@@ -275,25 +274,6 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
- /*
- * mpx_notify_unmap() goes and reads a rarely-hot
- * cacheline in the mm_struct. That can be expensive
- * enough to be seen in profiles.
- *
- * The mpx_notify_unmap() call and its contents have been
- * observed to affect munmap() performance on hardware
- * where MPX is not present.
- *
- * The unlikely() optimizes for the fast case: no MPX
- * in the CPU, or no MPX use in the process. Even if
- * we get this wrong (in the unlikely event that MPX
- * is widely enabled on some system) the overhead of
- * MPX itself (reading bounds tables) is expected to
- * overwhelm the overhead of getting this unlikely()
- * consistently wrong.
- */
-	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
-		mpx_notify_unmap(mm, start, end);
}
/*
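The comment removed above explains the rationale for the unlikely() hint: keep the common no-MPX case a cheap, straight-line path and avoid touching a rarely-hot cacheline. Below is a minimal, self-contained userspace sketch of that same fast-path pattern, using the GCC/Clang __builtin_expect() builtin; feature_enabled(), notify_unmap() and do_unmap() are hypothetical stand-ins, not the kernel's cpu_feature_enabled()/mpx_notify_unmap()/arch_unmap().

#include <stdbool.h>
#include <stdio.h>

/* Same idea as the kernel's unlikely(): tell the compiler the condition
 * is almost always false, so the not-taken branch stays on the fast path. */
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical stand-in for cpu_feature_enabled(X86_FEATURE_MPX). */
static bool feature_enabled(void)
{
	return false;	/* the common case the removed comment optimizes for */
}

/* Hypothetical stand-in for the expensive notification call. */
static void notify_unmap(unsigned long start, unsigned long end)
{
	printf("slow path: 0x%lx-0x%lx\n", start, end);
}

static void do_unmap(unsigned long start, unsigned long end)
{
	/* Fast case: feature absent or unused, branch not taken,
	 * and the expensive work is skipped entirely. */
	if (unlikely(feature_enabled()))
		notify_unmap(start, end);
}

int main(void)
{
	do_unmap(0x1000, 0x2000);
	return 0;
}

Even if the hint is occasionally wrong, the cost of the slow path itself (in the kernel case, reading bounds tables) dwarfs the cost of one mispredicted branch, which is the trade-off the removed comment describes.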