author     Linus Torvalds <torvalds@linux-foundation.org>  2015-02-11 18:03:54 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-02-11 18:03:54 -0800
commit     6b00f7efb5303418c231994c91fb8239f5ada260 (patch)
tree       1daba87ccda34e632ea39dedc5055391c7e94bdc
parent     b3d6524ff7956c5a898d51a18eaecb62a60a2b84 (diff)
parent     d476d94f180af3f0fca77394651d4a98f4df1c54 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
 "arm64 updates for 3.20:

  - reimplementation of the virtual remapping of UEFI Runtime Services
    in a way that is stable across kexec
  - emulation of the "setend" instruction for 32-bit tasks (user
    endianness switching trapped in the kernel, SCTLR_EL1.E0E bit set
    accordingly)
  - compat_sys_call_table implemented in C (from asm) and made it a
    constant array together with sys_call_table
  - export CPU cache information via /sys (like other architectures)
  - DMA API implementation clean-up in preparation for IOMMU support
  - macros clean-up for KVM
  - dropped some unnecessary cache+tlb maintenance
  - CONFIG_ARM64_CPU_SUSPEND clean-up
  - defconfig update (CPU_IDLE)

  The EFI changes going via the arm64 tree have been acked by Matt
  Fleming. There is also a patch adding sys_*stat64 prototypes to
  include/linux/syscalls.h, acked by Andrew Morton"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (47 commits)
  arm64: compat: Remove incorrect comment in compat_siginfo
  arm64: Fix section mismatch on alloc_init_p[mu]d()
  arm64: Avoid breakage caused by .altmacro in fpsimd save/restore macros
  arm64: mm: use *_sect to check for section maps
  arm64: drop unnecessary cache+tlb maintenance
  arm64:mm: free the useless initial page table
  arm64: Enable CPU_IDLE in defconfig
  arm64: kernel: remove ARM64_CPU_SUSPEND config option
  arm64: make sys_call_table const
  arm64: Remove asm/syscalls.h
  arm64: Implement the compat_sys_call_table in C
  syscalls: Declare sys_*stat64 prototypes if __ARCH_WANT_(COMPAT_)STAT64
  compat: Declare compat_sys_sigpending and compat_sys_sigprocmask prototypes
  arm64: uapi: expose our struct ucontext to the uapi headers
  smp, ARM64: Kill SMP single function call interrupt
  arm64: Emulate SETEND for AArch32 tasks
  arm64: Consolidate hotplug notifier for instruction emulation
  arm64: Track system support for mixed endian EL0
  arm64: implement generic IOMMU configuration
  arm64: Combine coherent and non-coherent swiotlb dma_ops
  ...
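One of the user-visible items above is the CPU cache information now exported
via /sys. As a hedged illustration only, here is a small userspace reader
against the generic cacheinfo layout (/sys/devices/system/cpu/cpuN/cache/indexM/);
the attribute names are assumed from the common sysfs ABI, not taken from this
merge.

#include <stdio.h>

/* Hypothetical helper: dump one attribute of one cache index, if present. */
static void print_cache_attr(int cpu, int index, const char *attr)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/cache/index%d/%s",
		 cpu, index, attr);
	f = fopen(path, "r");
	if (!f)
		return;			/* attribute not exposed on this kernel */
	if (fgets(buf, sizeof(buf), f))
		printf("cpu%d/index%d %s: %s", cpu, index, attr, buf);
	fclose(f);
}

int main(void)
{
	const char *attrs[] = { "level", "type", "size" };

	for (int i = 0; i < 3; i++)
		print_cache_attr(0, 0, attrs[i]);
	return 0;
}
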
-rw-r--r--  Documentation/arm64/legacy_instructions.txt | 12
-rw-r--r--  arch/arm64/Kconfig | 18
-rw-r--r--  arch/arm64/Kconfig.debug | 23
-rw-r--r--  arch/arm64/Makefile | 3
-rw-r--r--  arch/arm64/configs/defconfig | 2
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 5
-rw-r--r--  arch/arm64/include/asm/cachetype.h | 29
-rw-r--r--  arch/arm64/include/asm/compat.h | 1
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h | 8
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 2
-rw-r--r--  arch/arm64/include/asm/cpuidle.h | 6
-rw-r--r--  arch/arm64/include/asm/cputype.h | 17
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 11
-rw-r--r--  arch/arm64/include/asm/efi.h | 30
-rw-r--r--  arch/arm64/include/asm/esr.h | 117
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 1
-rw-r--r--  arch/arm64/include/asm/fpsimdmacros.h | 43
-rw-r--r--  arch/arm64/include/asm/hardirq.h | 2
-rw-r--r--  arch/arm64/include/asm/io.h | 5
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 73
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 28
-rw-r--r--  arch/arm64/include/asm/memory.h | 10
-rw-r--r--  arch/arm64/include/asm/mmu.h | 5
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 8
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 7
-rw-r--r--  arch/arm64/include/asm/suspend.h | 2
-rw-r--r--  arch/arm64/include/asm/syscalls.h | 30
-rw-r--r--  arch/arm64/include/asm/unistd.h | 3
-rw-r--r--  arch/arm64/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/uapi/asm/ucontext.h (renamed from arch/arm64/include/asm/ucontext.h) | 8
-rw-r--r--  arch/arm64/kernel/Makefile | 6
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 205
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 2
-rw-r--r--  arch/arm64/kernel/cacheinfo.c | 128
-rw-r--r--  arch/arm64/kernel/cpuidle.c | 20
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 34
-rw-r--r--  arch/arm64/kernel/efi.c | 356
-rw-r--r--  arch/arm64/kernel/entry.S | 66
-rw-r--r--  arch/arm64/kernel/entry32.S (renamed from arch/arm64/kernel/sys32.S) | 34
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 2
-rw-r--r--  arch/arm64/kernel/insn.c | 47
-rw-r--r--  arch/arm64/kernel/psci.c | 2
-rw-r--r--  arch/arm64/kernel/setup.c | 22
-rw-r--r--  arch/arm64/kernel/signal32.c | 7
-rw-r--r--  arch/arm64/kernel/smp.c | 10
-rw-r--r--  arch/arm64/kernel/suspend.c | 21
-rw-r--r--  arch/arm64/kernel/sys.c | 5
-rw-r--r--  arch/arm64/kernel/sys32.c | 51
-rw-r--r--  arch/arm64/kernel/traps.c | 50
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 17
-rw-r--r--  arch/arm64/kvm/emulate.c | 5
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 39
-rw-r--r--  arch/arm64/kvm/hyp.S | 17
-rw-r--r--  arch/arm64/kvm/inject_fault.c | 14
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 23
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 116
-rw-r--r--  arch/arm64/mm/dump.c | 30
-rw-r--r--  arch/arm64/mm/fault.c | 2
-rw-r--r--  arch/arm64/mm/init.c | 25
-rw-r--r--  arch/arm64/mm/ioremap.c | 1
-rw-r--r--  arch/arm64/mm/mm.h | 2
-rw-r--r--  arch/arm64/mm/mmu.c | 342
-rw-r--r--  arch/arm64/mm/proc.S | 14
-rw-r--r--  drivers/cpuidle/Kconfig.arm64 | 1
-rw-r--r--  drivers/cpuidle/cpuidle-arm64.c | 1
-rw-r--r--  drivers/firmware/efi/efi.c | 56
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 59
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 25
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 4
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 62
-rw-r--r--  include/linux/compat.h | 9
-rw-r--r--  include/linux/efi.h | 2
-rw-r--r--  include/linux/syscalls.h | 8
73 files changed, 1533 insertions, 919 deletions
diff --git a/Documentation/arm64/legacy_instructions.txt b/Documentation/arm64/legacy_instructions.txt
index a3b3da2ec6ed..01bf3d9fac85 100644
--- a/Documentation/arm64/legacy_instructions.txt
+++ b/Documentation/arm64/legacy_instructions.txt
@@ -32,6 +32,9 @@ The default mode depends on the status of the instruction in the
architecture. Deprecated instructions should default to emulation
while obsolete instructions must be undefined by default.
+Note: Instruction emulation may not be possible in all cases. See
+individual instruction notes for further information.
+
Supported legacy instructions
-----------------------------
* SWP{B}
@@ -43,3 +46,12 @@ Default: Undef (0)
Node: /proc/sys/abi/cp15_barrier
Status: Deprecated
Default: Emulate (1)
+
+* SETEND
+Node: /proc/sys/abi/setend
+Status: Deprecated
+Default: Emulate (1)*
+Note: All the cpus on the system must have mixed endian support at EL0
+for this feature to be enabled. If a new CPU - which doesn't support mixed
+endian - is hotplugged in after this feature has been enabled, there could
+be unexpected results in the application.
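
A minimal sketch of driving the sysctl node documented above from userspace; it
assumes only the /proc/sys/abi/setend path and the numeric modes named in this
file (0 = Undef, 1 = Emulate), and writing requires root.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/abi/setend", "r+");
	int mode;

	if (!f)
		return 1;	/* kernel built without SETEND_EMULATION, or no permission */
	if (fscanf(f, "%d", &mode) == 1)
		printf("current SETEND handling: %d\n", mode);
	rewind(f);
	fprintf(f, "1\n");	/* ask for emulation, the documented default */
	fclose(f);
	return 0;
}
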
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1f9a20a3677..d3f7e4941231 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -540,6 +540,21 @@ config CP15_BARRIER_EMULATION
If unsure, say Y
+config SETEND_EMULATION
+ bool "Emulate SETEND instruction"
+ help
+ The SETEND instruction alters the data-endianness of the
+ AArch32 EL0, and is deprecated in ARMv8.
+
+ Say Y here to enable software emulation of the instruction
+ for AArch32 userspace code.
+
+ Note: All the cpus on the system must have mixed endian support at EL0
+ for this feature to be enabled. If a new CPU - which doesn't support mixed
+ endian - is hotplugged in after this feature has been enabled, there could
+ be unexpected results in the applications.
+
+ If unsure, say Y
endif
endmenu
@@ -627,9 +642,6 @@ source "kernel/power/Kconfig"
config ARCH_SUSPEND_POSSIBLE
def_bool y
-config ARM64_CPU_SUSPEND
- def_bool PM_SLEEP
-
endmenu
menu "CPU Power Management"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 5fdd6dce8061..4a8741073c90 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -66,4 +66,27 @@ config DEBUG_SET_MODULE_RONX
against certain classes of kernel exploits.
If in doubt, say "N".
+config DEBUG_RODATA
+ bool "Make kernel text and rodata read-only"
+ help
+ If this is set, kernel text and rodata will be made read-only. This
+ is to help catch accidental or malicious attempts to change the
+ kernel's executable code. Additionally splits rodata from kernel
+ text so it can be made explicitly non-executable.
+
+ If in doubt, say Y
+
+config DEBUG_ALIGN_RODATA
+ depends on DEBUG_RODATA && !ARM64_64K_PAGES
+ bool "Align linker sections up to SECTION_SIZE"
+ help
+ If this option is enabled, sections that may potentially be marked as
+ read only or non-executable will be aligned up to the section size of
+ the kernel. This prevents sections from being split into pages and
+ avoids a potential TLB penalty. The downside is an increase in
+ alignment and potentially wasted space. Turn on this option if
+ performance is more important than memory pressure.
+
+ If in doubt, say N
+
endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 066688863920..69ceedc982a5 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,8 +15,6 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-
KBUILD_DEFCONFIG := defconfig
KBUILD_CFLAGS += -mgeneral-regs-only
@@ -50,7 +48,6 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
libs-y := arch/arm64/lib/ $(libs-y)
-libs-y += $(LIBGCC)
libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
# Default target when executing plain make
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 5376d908eabe..66b6cacc3251 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -45,6 +45,8 @@ CONFIG_CMA=y
CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM64_CPUIDLE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 7ae31a2cc6c0..67d309cc3b6b 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -152,4 +152,9 @@ int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
#endif
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index 4c631a0a3609..da2fc9e3cedd 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -39,24 +39,41 @@
extern unsigned long __icache_flags;
+/*
+ * NumSets, bits[27:13] - (Number of sets in cache) - 1
+ * Associativity, bits[12:3] - (Associativity of cache) - 1
+ * LineSize, bits[2:0] - (Log2(Number of words in cache line)) - 2
+ */
+#define CCSIDR_EL1_WRITE_THROUGH BIT(31)
+#define CCSIDR_EL1_WRITE_BACK BIT(30)
+#define CCSIDR_EL1_READ_ALLOCATE BIT(29)
+#define CCSIDR_EL1_WRITE_ALLOCATE BIT(28)
#define CCSIDR_EL1_LINESIZE_MASK 0x7
#define CCSIDR_EL1_LINESIZE(x) ((x) & CCSIDR_EL1_LINESIZE_MASK)
-
+#define CCSIDR_EL1_ASSOCIATIVITY_SHIFT 3
+#define CCSIDR_EL1_ASSOCIATIVITY_MASK 0x3ff
+#define CCSIDR_EL1_ASSOCIATIVITY(x) \
+ (((x) >> CCSIDR_EL1_ASSOCIATIVITY_SHIFT) & CCSIDR_EL1_ASSOCIATIVITY_MASK)
#define CCSIDR_EL1_NUMSETS_SHIFT 13
-#define CCSIDR_EL1_NUMSETS_MASK (0x7fff << CCSIDR_EL1_NUMSETS_SHIFT)
+#define CCSIDR_EL1_NUMSETS_MASK 0x7fff
#define CCSIDR_EL1_NUMSETS(x) \
- (((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT)
+ (((x) >> CCSIDR_EL1_NUMSETS_SHIFT) & CCSIDR_EL1_NUMSETS_MASK)
+
+#define CACHE_LINESIZE(x) (16 << CCSIDR_EL1_LINESIZE(x))
+#define CACHE_NUMSETS(x) (CCSIDR_EL1_NUMSETS(x) + 1)
+#define CACHE_ASSOCIATIVITY(x) (CCSIDR_EL1_ASSOCIATIVITY(x) + 1)
-extern u64 __attribute_const__ icache_get_ccsidr(void);
+extern u64 __attribute_const__ cache_get_ccsidr(u64 csselr);
+/* Helpers for Level 1 Instruction cache csselr = 1L */
static inline int icache_get_linesize(void)
{
- return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr());
+ return CACHE_LINESIZE(cache_get_ccsidr(1L));
}
static inline int icache_get_numsets(void)
{
- return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr());
+ return CACHE_NUMSETS(cache_get_ccsidr(1L));
}
/*
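
The reworked CCSIDR_EL1 accessors above are plain shift-and-mask arithmetic;
the fragment below redoes the same decoding as standalone C so the field layout
in the new comment can be checked by eye. The register value is made up purely
for illustration.

#include <stdio.h>
#include <stdint.h>

/* Field layout as in the header above: LineSize [2:0], Associativity [12:3], NumSets [27:13] */
#define LINE_BYTES(x)	(16u << ((x) & 0x7))		/* 4-byte words, log2 minus 2 encoding */
#define WAYS(x)		((((x) >> 3) & 0x3ff) + 1)
#define SETS(x)		((((x) >> 13) & 0x7fff) + 1)

int main(void)
{
	uint32_t ccsidr = 0x701fe01a;	/* hypothetical CCSIDR_EL1 value, not from real hardware */

	printf("line size: %u bytes, ways: %u, sets: %u, total: %u bytes\n",
	       (unsigned)LINE_BYTES(ccsidr), (unsigned)WAYS(ccsidr),
	       (unsigned)SETS(ccsidr),
	       (unsigned)(LINE_BYTES(ccsidr) * WAYS(ccsidr) * SETS(ccsidr)));
	return 0;
}
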
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 3fb053fa6e98..7fbed6919b54 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -161,7 +161,6 @@ typedef struct compat_siginfo {
int si_code;
union {
- /* The padding is the same size as AArch64. */
int _pad[128/sizeof(int) - 3];
/* kill() */
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index 6f8e2ef9094a..da301ee7395c 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -28,8 +28,6 @@ struct device_node;
* enable-method property.
* @cpu_init: Reads any data necessary for a specific enable-method from the
* devicetree, for a given cpu node and proposed logical id.
- * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
- * devicetree, for a given cpu node and proposed logical id.
* @cpu_prepare: Early one-time preparation step for a cpu. If there is a
* mechanism for doing so, tests whether it is possible to boot
* the given CPU.
@@ -42,6 +40,8 @@ struct device_node;
* @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
* cpu being killed.
* @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
+ * devicetree, for a given cpu node and proposed logical id.
* @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
* to wrong parameters or error conditions. Called from the
* CPU being suspended. Must be called with IRQs disabled.
@@ -49,7 +49,6 @@ struct device_node;
struct cpu_operations {
const char *name;
int (*cpu_init)(struct device_node *, unsigned int);
- int (*cpu_init_idle)(struct device_node *, unsigned int);
int (*cpu_prepare)(unsigned int);
int (*cpu_boot)(unsigned int);
void (*cpu_postboot)(void);
@@ -58,7 +57,8 @@ struct cpu_operations {
void (*cpu_die)(unsigned int cpu);
int (*cpu_kill)(unsigned int cpu);
#endif
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_IDLE
+ int (*cpu_init_idle)(struct device_node *, unsigned int);
int (*cpu_suspend)(unsigned long);
#endif
};
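
For orientation, a hedged sketch of what a cpu_operations provider looks like
after the #ifdef shuffle above, with both idle callbacks guarded by
CONFIG_CPU_IDLE instead of the removed ARM64_CPU_SUSPEND; the names are
illustrative, not quoted from this diff.

/* Illustrative provider, loosely in the style of arch/arm64/kernel/psci.c */
const struct cpu_operations example_cpu_ops = {
	.name		= "example",
	.cpu_init	= example_cpu_init,
	.cpu_prepare	= example_cpu_prepare,
	.cpu_boot	= example_cpu_boot,
#ifdef CONFIG_CPU_IDLE
	.cpu_init_idle	= example_cpu_init_idle,	/* moved under CPU_IDLE by this series */
	.cpu_suspend	= example_cpu_suspend,
#endif
};
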
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 07547ccc1f2b..b6c16d5f622f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -52,6 +52,8 @@ static inline void cpus_set_cap(unsigned int num)
}
void check_local_cpu_errata(void);
+bool cpu_supports_mixed_endian_el0(void);
+bool system_supports_mixed_endian_el0(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index b52a9932e2b1..0710654631e7 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -3,11 +3,17 @@
#ifdef CONFIG_CPU_IDLE
extern int cpu_init_idle(unsigned int cpu);
+extern int cpu_suspend(unsigned long arg);
#else
static inline int cpu_init_idle(unsigned int cpu)
{
return -EOPNOTSUPP;
}
+
+static inline int cpu_suspend(unsigned long arg)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 8adb986a3086..a84ec605bed8 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -72,6 +72,18 @@
#define APM_CPU_PART_POTENZA 0x000
+#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
+#define ID_AA64MMFR0_BIGENDEL0_MASK (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT)
+#define ID_AA64MMFR0_BIGENDEL0(mmfr0) \
+ (((mmfr0) & ID_AA64MMFR0_BIGENDEL0_MASK) >> ID_AA64MMFR0_BIGENDEL0_SHIFT)
+#define ID_AA64MMFR0_BIGEND_SHIFT 8
+#define ID_AA64MMFR0_BIGEND_MASK (0xf << ID_AA64MMFR0_BIGEND_SHIFT)
+#define ID_AA64MMFR0_BIGEND(mmfr0) \
+ (((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)
+
+#define SCTLR_EL1_CP15BEN (0x1 << 5)
+#define SCTLR_EL1_SED (0x1 << 8)
+
#ifndef __ASSEMBLY__
/*
@@ -104,6 +116,11 @@ static inline u32 __attribute_const__ read_cpuid_cachetype(void)
return read_cpuid(CTR_EL0);
}
+static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
+{
+ return (ID_AA64MMFR0_BIGEND(mmfr0) == 0x1) ||
+ (ID_AA64MMFR0_BIGENDEL0(mmfr0) == 0x1);
+}
#endif /* __ASSEMBLY__ */
#endif
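
A hedged sketch of how the new helper above gets its input: the relevant ID
register is read with mrs in kernel context and fed through the same two field
checks. The function names here are illustrative, not the ones added elsewhere
in this series.

#include <stdbool.h>
#include <stdint.h>

/* Kernel-context only: ID_AA64MMFR0_EL1 is not generally readable from EL0. */
static inline uint64_t read_id_aa64mmfr0(void)
{
	uint64_t val;

	asm volatile("mrs %0, ID_AA64MMFR0_EL1" : "=r" (val));
	return val;
}

static bool this_cpu_mixed_endian_el0(void)
{
	uint64_t mmfr0 = read_id_aa64mmfr0();

	/* BigEnd (bits [11:8]) == 1: mixed endian at all ELs; BigEndEL0 (bits [19:16]) == 1: EL0 only */
	return ((mmfr0 >> 8) & 0xf) == 0x1 || ((mmfr0 >> 16) & 0xf) == 0x1;
}
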
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 9ce3e680ae1c..6932bb57dba0 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -28,8 +28,6 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
-extern struct dma_map_ops coherent_swiotlb_dma_ops;
-extern struct dma_map_ops noncoherent_swiotlb_dma_ops;
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
@@ -47,23 +45,18 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return __generic_dma_ops(dev);
}
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
- dev->archdata.dma_ops = ops;
-}
-
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct iommu_ops *iommu, bool coherent)
{
dev->archdata.dma_coherent = coherent;
- if (coherent)
- set_dma_ops(dev, &coherent_swiotlb_dma_ops);
}
#define arch_setup_dma_ops arch_setup_dma_ops
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
{
+ if (!dev)
+ return false;
return dev->archdata.dma_coherent;
}
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index a34fd3b12e2b..ef572206f1c3 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -6,29 +6,33 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
-extern void efi_idmap_init(void);
#else
#define efi_init()
-#define efi_idmap_init()
#endif
#define efi_call_virt(f, ...) \
({ \
- efi_##f##_t *__f = efi.systab->runtime->f; \
+ efi_##f##_t *__f; \
efi_status_t __s; \
\
kernel_neon_begin(); \
+ efi_virtmap_load(); \
+ __f = efi.systab->runtime->f; \
__s = __f(__VA_ARGS__); \
+ efi_virtmap_unload(); \
kernel_neon_end(); \
__s; \
})
#define __efi_call_virt(f, ...) \
({ \
- efi_##f##_t *__f = efi.systab->runtime->f; \
+ efi_##f##_t *__f; \
\
kernel_neon_begin(); \
+ efi_virtmap_load(); \
+ __f = efi.systab->runtime->f; \
__f(__VA_ARGS__); \
+ efi_virtmap_unload(); \
kernel_neon_end(); \
})
@@ -44,4 +48,22 @@ extern void efi_idmap_init(void);
#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+#define EFI_ALLOC_ALIGN SZ_64K
+
+/*
+ * On ARM systems, virtually remapped UEFI runtime services are set up in two
+ * distinct stages:
+ * - The stub retrieves the final version of the memory map from UEFI, populates
+ * the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime
+ * service to communicate the new mapping to the firmware (Note that the new
+ * mapping is not live at this time)
+ * - During an early initcall(), the EFI system table is permanently remapped
+ * and the virtual remapping of the UEFI Runtime Services regions is loaded
+ * into a private set of page tables. If this all succeeds, the Runtime
+ * Services are enabled and the EFI_RUNTIME_SERVICES bit set.
+ */
+
+void efi_virtmap_load(void);
+void efi_virtmap_unload(void);
+
#endif /* _ASM_EFI_H */
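
To show what the macro rework above means at a call site, here is a hedged
illustration in the style of the generic runtime wrappers; get_time is used
only as a representative runtime service and the wrapper name is assumed
rather than copied from this patch.

#include <linux/efi.h>

/* Illustrative wrapper: the interesting part is the ordering inside efi_call_virt(). */
static efi_status_t example_virt_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	/*
	 * efi_call_virt() now brackets the firmware call with
	 * efi_virtmap_load()/efi_virtmap_unload(), so the private UEFI
	 * runtime mappings are live only for the duration of the call,
	 * and the function pointer is looked up from efi.systab after
	 * that mapping has been switched in.
	 */
	return efi_call_virt(get_time, tm, tc);
}
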
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 72674f4c3871..62167090937d 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -18,40 +18,89 @@
#ifndef __ASM_ESR_H
#define __ASM_ESR_H
-#define ESR_EL1_WRITE (1 << 6)
-#define ESR_EL1_CM (1 << 8)
-#define ESR_EL1_IL (1 << 25)
+#define ESR_ELx_EC_UNKNOWN (0x00)
+#define ESR_ELx_EC_WFx (0x01)
+/* Unallocated EC: 0x02 */
+#define ESR_ELx_EC_CP15_32 (0x03)
+#define ESR_ELx_EC_CP15_64 (0x04)
+#define ESR_ELx_EC_CP14_MR (0x05)
+#define ESR_ELx_EC_CP14_LS (0x06)
+#define ESR_ELx_EC_FP_ASIMD (0x07)
+#define E