Diffstat (limited to 'arch/arm64/kernel')
 arch/arm64/kernel/Makefile                  |   7
 arch/arm64/kernel/alternative.c             |  85
 arch/arm64/kernel/armv8_deprecated.c        | 553
 arch/arm64/kernel/cpu_errata.c              | 111
 arch/arm64/kernel/cpuinfo.c                 |  23
 arch/arm64/kernel/efi-entry.S               |   3
 arch/arm64/kernel/efi.c                     |  37
 arch/arm64/kernel/entry-ftrace.S            |  21
 arch/arm64/kernel/entry.S                   | 138
 arch/arm64/kernel/head.S                    | 434
 arch/arm64/kernel/insn.c                    |  26
 arch/arm64/kernel/io.c                      |  66
 arch/arm64/kernel/irq.c                     |   2
 arch/arm64/kernel/jump_label.c              |  23
 arch/arm64/kernel/module.c                  |  18
 arch/arm64/kernel/perf_event.c              |  10
 arch/arm64/kernel/ptrace.c                  |  40
 arch/arm64/kernel/setup.c                   | 108
 arch/arm64/kernel/signal32.c                |   6
 arch/arm64/kernel/sleep.S                   |  36
 arch/arm64/kernel/smp.c                     |   2
 arch/arm64/kernel/suspend.c                 |   4
 arch/arm64/kernel/sys_compat.c              |  49
 arch/arm64/kernel/topology.c                |   7
 arch/arm64/kernel/trace-events-emulation.h  |  35
 arch/arm64/kernel/traps.c                   |  66
 arch/arm64/kernel/vmlinux.lds.S             |  33
 27 files changed, 1539 insertions(+), 404 deletions(-)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5bd029b43644..eaa77ed7766a 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -5,6 +5,7 @@
CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+CFLAGS_armv8_deprecated.o := -I$(src)
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_insn.o = -pg
@@ -15,10 +16,11 @@ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \
- cpuinfo.o
+ cpuinfo.o cpu_errata.o alternative.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
- sys_compat.o
+ sys_compat.o \
+ ../../arm/kernel/opcodes.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
@@ -31,6 +33,7 @@ arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
arm64-obj-$(CONFIG_PCI) += pci.o
+arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
new file mode 100644
index 000000000000..ad7821d64a1d
--- /dev/null
+++ b/arch/arm64/kernel/alternative.c
@@ -0,0 +1,85 @@
+/*
+ * alternative runtime patching
+ * inspired by the x86 version
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "alternatives: " fmt
+
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <asm/cacheflush.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+#include <linux/stop_machine.h>
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+
+struct alt_region {
+ struct alt_instr *begin;
+ struct alt_instr *end;
+};
+
+static int __apply_alternatives(void *alt_region)
+{
+ struct alt_instr *alt;
+ struct alt_region *region = alt_region;
+ u8 *origptr, *replptr;
+
+ for (alt = region->begin; alt < region->end; alt++) {
+ if (!cpus_have_cap(alt->cpufeature))
+ continue;
+
+ BUG_ON(alt->alt_len > alt->orig_len);
+
+ pr_info_once("patching kernel code\n");
+
+ origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
+ replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
+ memcpy(origptr, replptr, alt->alt_len);
+ flush_icache_range((uintptr_t)origptr,
+ (uintptr_t)(origptr + alt->alt_len));
+ }
+
+ return 0;
+}
+
+void apply_alternatives_all(void)
+{
+ struct alt_region region = {
+ .begin = __alt_instructions,
+ .end = __alt_instructions_end,
+ };
+
+ /* better not try code patching on a live SMP system */
+ stop_machine(__apply_alternatives, &region, NULL);
+}
+
+void apply_alternatives(void *start, size_t length)
+{
+ struct alt_region region = {
+ .begin = start,
+ .end = start + length,
+ };
+
+ __apply_alternatives(&region);
+}
+
+void free_alternatives_memory(void)
+{
+ free_reserved_area(__alt_instructions, __alt_instructions_end,
+ 0, "alternatives");
+}
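
The origptr/replptr arithmetic in __apply_alternatives() relies on self-relative offsets: each field of an alt_instr entry stores the distance from its own address to the code it refers to, so the table needs no relocation no matter where the image is loaded. Below is a minimal standalone sketch of that scheme; struct alt_instr itself is defined in asm/alternative.h (not part of this diff), and the s32 field layout here is an assumption for illustration only.

    /* Sketch: recovering absolute pointers from self-relative offsets,
     * as __apply_alternatives() does. Hypothetical layout, not the
     * real struct alt_instr. */
    #include <stdint.h>
    #include <stdio.h>

    struct alt_entry {
        int32_t orig_offset;    /* distance from this field to the original insns */
        int32_t alt_offset;     /* distance from this field to the replacement */
    };

    static uint8_t *resolve(int32_t *field)
    {
        /* pointer = address of the offset field + the value it stores */
        return (uint8_t *)field + *field;
    }

    int main(void)
    {
        static uint8_t orig[4], repl[4];
        struct alt_entry e;

        e.orig_offset = (int32_t)((intptr_t)orig - (intptr_t)&e.orig_offset);
        e.alt_offset  = (int32_t)((intptr_t)repl - (intptr_t)&e.alt_offset);

        printf("orig ok: %d, repl ok: %d\n",
               resolve(&e.orig_offset) == orig,
               resolve(&e.alt_offset) == repl);
        return 0;
    }

Because every pointer is reconstructed as "address of field + stored offset", the .altinstructions section stays valid without any fixups at load time.
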
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
new file mode 100644
index 000000000000..c363671d7509
--- /dev/null
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright (C) 2014 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/perf_event.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+
+#include <asm/insn.h>
+#include <asm/opcodes.h>
+#include <asm/system_misc.h>
+#include <asm/traps.h>
+#include <asm/uaccess.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace-events-emulation.h"
+
+/*
+ * The runtime support for deprecated instructions can be in one of the
+ * following three states:
+ *
+ * 0 = undef
+ * 1 = emulate (software emulation)
+ * 2 = hw (supported in hardware)
+ */
+enum insn_emulation_mode {
+ INSN_UNDEF,
+ INSN_EMULATE,
+ INSN_HW,
+};
+
+enum legacy_insn_status {
+ INSN_DEPRECATED,
+ INSN_OBSOLETE,
+};
+
+struct insn_emulation_ops {
+ const char *name;
+ enum legacy_insn_status status;
+ struct undef_hook *hooks;
+ int (*set_hw_mode)(bool enable);
+};
+
+struct insn_emulation {
+ struct list_head node;
+ struct insn_emulation_ops *ops;
+ int current_mode;
+ int min;
+ int max;
+};
+
+static LIST_HEAD(insn_emulation);
+static int nr_insn_emulated;
+static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
+
+static void register_emulation_hooks(struct insn_emulation_ops *ops)
+{
+ struct undef_hook *hook;
+
+ BUG_ON(!ops->hooks);
+
+ for (hook = ops->hooks; hook->instr_mask; hook++)
+ register_undef_hook(hook);
+
+ pr_notice("Registered %s emulation handler\n", ops->name);
+}
+
+static void remove_emulation_hooks(struct insn_emulation_ops *ops)
+{
+ struct undef_hook *hook;
+
+ BUG_ON(!ops->hooks);
+
+ for (hook = ops->hooks; hook->instr_mask; hook++)
+ unregister_undef_hook(hook);
+
+ pr_notice("Removed %s emulation handler\n", ops->name);
+}
+
+static int update_insn_emulation_mode(struct insn_emulation *insn,
+ enum insn_emulation_mode prev)
+{
+ int ret = 0;
+
+ switch (prev) {
+ case INSN_UNDEF: /* Nothing to be done */
+ break;
+ case INSN_EMULATE:
+ remove_emulation_hooks(insn->ops);
+ break;
+ case INSN_HW:
+ if (insn->ops->set_hw_mode) {
+ insn->ops->set_hw_mode(false);
+ pr_notice("Disabled %s support\n", insn->ops->name);
+ }
+ break;
+ }
+
+ switch (insn->current_mode) {
+ case INSN_UNDEF:
+ break;
+ case INSN_EMULATE:
+ register_emulation_hooks(insn->ops);
+ break;
+ case INSN_HW:
+ if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(true))
+ pr_notice("Enabled %s support\n", insn->ops->name);
+ else
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static void register_insn_emulation(struct insn_emulation_ops *ops)
+{
+ unsigned long flags;
+ struct insn_emulation *insn;
+
+ insn = kzalloc(sizeof(*insn), GFP_KERNEL);
+ insn->ops = ops;
+ insn->min = INSN_UNDEF;
+
+ switch (ops->status) {
+ case INSN_DEPRECATED:
+ insn->current_mode = INSN_EMULATE;
+ insn->max = INSN_HW;
+ break;
+ case INSN_OBSOLETE:
+ insn->current_mode = INSN_UNDEF;
+ insn->max = INSN_EMULATE;
+ break;
+ }
+
+ raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+ list_add(&insn->node, &insn_emulation);
+ nr_insn_emulated++;
+ raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+
+ /* Register any handlers if required */
+ update_insn_emulation_mode(insn, INSN_UNDEF);
+}
+
+static int emulation_proc_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret = 0;
+ struct insn_emulation *insn = (struct insn_emulation *) table->data;
+ enum insn_emulation_mode prev_mode = insn->current_mode;
+
+ table->data = &insn->current_mode;
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (ret || !write || prev_mode == insn->current_mode)
+ goto ret;
+
+ ret = update_insn_emulation_mode(insn, prev_mode);
+ if (ret) {
+ /* Mode change failed, revert to previous mode. */
+ insn->current_mode = prev_mode;
+ update_insn_emulation_mode(insn, INSN_UNDEF);
+ }
+ret:
+ table->data = insn;
+ return ret;
+}
+
+static struct ctl_table ctl_abi[] = {
+ {
+ .procname = "abi",
+ .mode = 0555,
+ },
+ { }
+};
+
+static void register_insn_emulation_sysctl(struct ctl_table *table)
+{
+ unsigned long flags;
+ int i = 0;
+ struct insn_emulation *insn;
+ struct ctl_table *insns_sysctl, *sysctl;
+
+ insns_sysctl = kzalloc(sizeof(*sysctl) * (nr_insn_emulated + 1),
+ GFP_KERNEL);
+
+ raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+ list_for_each_entry(insn, &insn_emulation, node) {
+ sysctl = &insns_sysctl[i];
+
+ sysctl->mode = 0644;
+ sysctl->maxlen = sizeof(int);
+
+ sysctl->procname = insn->ops->name;
+ sysctl->data = insn;
+ sysctl->extra1 = &insn->min;
+ sysctl->extra2 = &insn->max;
+ sysctl->proc_handler = emulation_proc_handler;
+ i++;
+ }
+ raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+
+ table->child = insns_sysctl;
+ register_sysctl_table(table);
+}
+
+/*
+ * Implement emulation of the SWP/SWPB instructions using load-exclusive and
+ * store-exclusive.
+ *
+ * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
+ * Where: Rt = destination
+ * Rt2 = source
+ * Rn = address
+ */
+
+/*
+ * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
+ */
+#define __user_swpX_asm(data, addr, res, temp, B) \
+ __asm__ __volatile__( \
+ " mov %w2, %w1\n" \
+ "0: ldxr"B" %w1, [%3]\n" \
+ "1: stxr"B" %w0, %w2, [%3]\n" \
+ " cbz %w0, 2f\n" \
+ " mov %w0, %w4\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %w0, %w5\n" \
+ " b 2b\n" \
+ " .popsection" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .quad 0b, 3b\n" \
+ " .quad 1b, 3b\n" \
+ " .popsection" \
+ : "=&r" (res), "+r" (data), "=&r" (temp) \
+ : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
+ : "memory")
+
+#define __user_swp_asm(data, addr, res, temp) \
+ __user_swpX_asm(data, addr, res, temp, "")
+#define __user_swpb_asm(data, addr, res, temp) \
+ __user_swpX_asm(data, addr, res, temp, "b")
+
+/*
+ * Bit 22 of the instruction encoding distinguishes between
+ * the SWP and SWPB variants (bit set means SWPB).
+ */
+#define TYPE_SWPB (1 << 22)
+
+/*
+ * Set up process info to signal segmentation fault - called on access error.
+ */
+static void set_segfault(struct pt_regs *regs, unsigned long addr)
+{
+ siginfo_t info;
+
+ down_read(&current->mm->mmap_sem);
+ if (find_vma(current->mm, addr) == NULL)
+ info.si_code = SEGV_MAPERR;
+ else
+ info.si_code = SEGV_ACCERR;
+ up_read(&current->mm->mmap_sem);
+
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_addr = (void *) instruction_pointer(regs);
+
+ pr_debug("SWP{B} emulation: access caused memory abort!\n");
+ arm64_notify_die("Illegal memory access", regs, &info, 0);
+}
+
+static int emulate_swpX(unsigned int address, unsigned int *data,
+ unsigned int type)
+{
+ unsigned int res = 0;
+
+ if ((type != TYPE_SWPB) && (address & 0x3)) {
+ /* SWP to unaligned address not permitted */
+ pr_debug("SWP instruction on unaligned pointer!\n");
+ return -EFAULT;
+ }
+
+ while (1) {
+ unsigned long temp;
+
+ if (type == TYPE_SWPB)
+ __user_swpb_asm(*data, address, res, temp);
+ else
+ __user_swp_asm(*data, address, res, temp);
+
+ if (likely(res != -EAGAIN) || signal_pending(current))
+ break;
+
+ cond_resched();
+ }
+
+ return res;
+}
+
+/*
+ * swp_handler logs the ID of the calling process, dissects the instruction,
+ * sanity-checks the memory location, calls emulate_swpX for the actual
+ * operation, and deals with fixup/error handling before returning.
+ */
+static int swp_handler(struct pt_regs *regs, u32 instr)
+{
+ u32 destreg, data, type, address = 0;
+ int rn, rt2, res = 0;
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+ type = instr & TYPE_SWPB;
+
+ switch (arm_check_condition(instr, regs->pstate)) {
+ case ARM_OPCODE_CONDTEST_PASS:
+ break;
+ case ARM_OPCODE_CONDTEST_FAIL:
+ /* Condition failed - return to next instruction */
+ goto ret;
+ case ARM_OPCODE_CONDTEST_UNCOND:
+ /* If unconditional encoding - not a SWP, undef */
+ return -EFAULT;
+ default:
+ return -EINVAL;
+ }
+
+ rn = aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET);
+ rt2 = aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET);
+
+ address = (u32)regs->user_regs.regs[rn];
+ data = (u32)regs->user_regs.regs[rt2];
+ destreg = aarch32_insn_extract_reg_num(instr, A32_RT_OFFSET);
+
+ pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
+ rn, address, destreg,
+ aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);
+
+ /* Check access in reasonable access range for both SWP and SWPB */
+ if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+ pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
+ address);
+ goto fault;
+ }
+
+ res = emulate_swpX(address, &data, type);
+ if (res == -EFAULT)
+ goto fault;
+ else if (res == 0)
+ regs->user_regs.regs[destreg] = data;
+
+ret:
+ if (type == TYPE_SWPB)
+ trace_instruction_emulation("swpb", regs->pc);
+ else
+ trace_instruction_emulation("swp", regs->pc);
+
+ pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
+ current->comm, (unsigned long)current->pid, regs->pc);
+
+ regs->pc += 4;
+ return 0;
+
+fault:
+ set_segfault(regs, address);
+
+ return 0;
+}
+
+/*
+ * Only emulate SWP/SWPB executed in ARM state/User mode.
+ * The kernel must be SWP free and SWP{B} does not exist in Thumb.
+ */
+static struct undef_hook swp_hooks[] = {
+ {
+ .instr_mask = 0x0fb00ff0,
+ .instr_val = 0x01000090,
+ .pstate_mask = COMPAT_PSR_MODE_MASK,
+ .pstate_val = COMPAT_PSR_MODE_USR,
+ .fn = swp_handler
+ },
+ { }
+};
+
+static struct insn_emulation_ops swp_ops = {
+ .name = "swp",
+ .status = INSN_OBSOLETE,
+ .hooks = swp_hooks,
+ .set_hw_mode = NULL,
+};
+
+static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
+{
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+ switch (arm_check_condition(instr, regs->pstate)) {
+ case ARM_OPCODE_CONDTEST_PASS:
+ break;
+ case ARM_OPCODE_CONDTEST_FAIL:
+ /* Condition failed - return to next instruction */
+ goto ret;
+ case ARM_OPCODE_CONDTEST_UNCOND:
+ /* If unconditional encoding - not a barrier instruction */
+ return -EFAULT;
+ default:
+ return -EINVAL;
+ }
+
+ switch (aarch32_insn_mcr_extract_crm(instr)) {
+ case 10:
+ /*
+ * dmb - mcr p15, 0, Rt, c7, c10, 5
+ * dsb - mcr p15, 0, Rt, c7, c10, 4
+ */
+ if (aarch32_insn_mcr_extract_opc2(instr) == 5) {
+ dmb(sy);
+ trace_instruction_emulation(
+ "mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs->pc);
+ } else {
+ dsb(sy);
+ trace_instruction_emulation(
+ "mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs->pc);
+ }
+ break;
+ case 5:
+ /*
+ * isb - mcr p15, 0, Rt, c7, c5, 4
+ *
+ * Taking an exception or returning from one acts as an
+ * instruction barrier. So no explicit barrier needed here.
+ */
+ trace_instruction_emulation(
+ "mcr p15, 0, Rt, c7, c5, 4 ; isb", regs->pc);
+ break;
+ }
+
+ret:
+ pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
+ current->comm, (unsigned long)current->pid, regs->pc);
+
+ regs->pc += 4;
+ return 0;
+}
+
+#define SCTLR_EL1_CP15BEN (1 << 5)
+
+static inline void config_sctlr_el1(u32 clear, u32 set)
+{
+ u32 val;
+
+ asm volatile("mrs %0, sctlr_el1" : "=r" (val));
+ val &= ~clear;
+ val |= set;
+ asm volatile("msr sctlr_el1, %0" : : "r" (val));
+}
+
+static void enable_cp15_ben(void *info)
+{
+ config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
+}
+
+static void disable_cp15_ben(void *info)
+{
+ config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
+}
+
+static int cpu_hotplug_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ switch (action) {
+ case CPU_STARTING:
+ case CPU_STARTING_FROZEN:
+ enable_cp15_ben(NULL);
+ return NOTIFY_DONE;
+ case CPU_DYING:
+ case CPU_DYING_FROZEN:
+ disable_cp15_ben(NULL);
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_hotplug_notifier = {
+ .notifier_call = cpu_hotplug_notify,
+};
+
+static int cp15_barrier_set_hw_mode(bool enable)
+{
+ if (enable) {
+ register_cpu_notifier(&cpu_hotplug_notifier);
+ on_each_cpu(enable_cp15_ben, NULL, true);
+ } else {
+ unregister_cpu_notifier(&cpu_hotplug_notifier);
+ on_each_cpu(disable_cp15_ben, NULL, true);
+ }
+
+ return true;
+}
+
+static struct undef_hook cp15_barrier_hooks[] = {
+ {
+ .instr_mask = 0x0fff0fdf,
+ .instr_val = 0x0e070f9a,
+ .pstate_mask = COMPAT_PSR_MODE_MASK,
+ .pstate_val = COMPAT_PSR_MODE_USR,
+ .fn = cp15barrier_handler,
+ },
+ {
+ .instr_mask = 0x0fff0fff,
+ .instr_val = 0x0e070f95,
+ .pstate_mask = COMPAT_PSR_MODE_MASK,
+ .pstate_val = COMPAT_PSR_MODE_USR,
+ .fn = cp15barrier_handler,
+ },
+ { }
+};
+
+static struct insn_emulation_ops cp15_barrier_ops = {
+ .name = "cp15_barrier",
+ .status = INSN_DEPRECATED,
+ .hooks = cp15_barrier_hooks,
+ .set_hw_mode = cp15_barrier_set_hw_mode,
+};
+
+/*
+ * Invoked as a late_initcall, since this is not needed before init is spawned.
+ */
+static int __init armv8_deprecated_init(void)
+{
+ if (IS_ENABLED(CONFIG_SWP_EMULATION))
+ register_insn_emulation(&swp_ops);
+
+ if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
+ register_insn_emulation(&cp15_barrier_ops);
+
+ register_insn_emulation_sysctl(ctl_abi);
+
+ return 0;
+}
+
+late_initcall(armv8_deprecated_init);
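
Each emulation registered above gets one sysctl file, named after its ops->name, under the "abi" directory from ctl_abi — i.e. /proc/sys/abi/swp and /proc/sys/abi/cp15_barrier — holding the current insn_emulation_mode. A hedged userspace sketch of switching modes follows; set_insn_mode() is a hypothetical helper, not a kernel or libc API.

    /* Sketch: selecting an emulation mode through the sysctl files that
     * register_insn_emulation_sysctl() creates. 0=undef, 1=emulate, 2=hw. */
    #include <stdio.h>

    static int set_insn_mode(const char *insn, int mode)
    {
        char path[64];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/abi/%s", insn);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fprintf(f, "%d\n", mode);
        return fclose(f);
    }

    int main(void)
    {
        /* SWP is INSN_OBSOLETE, so insn->max is INSN_EMULATE: writing
         * 2 (hw) is rejected by proc_dointvec_minmax, while 0 and 1
         * are accepted. */
        return set_insn_mode("swp", 1) ? 1 : 0;
    }
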
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
new file mode 100644
index 000000000000..fa62637e63a8
--- /dev/null
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -0,0 +1,111 @@
+/*
+ * Contains CPU specific errata definitions
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "alternatives: " fmt
+
+#include <linux/types.h>
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+#include <asm/cpufeature.h>
+
+#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+
+/*
+ * Add a struct or another datatype to the union below if you need
+ * different means to detect an affected CPU.
+ */
+struct arm64_cpu_capabilities {
+ const char *desc;
+ u16 capability;
+ bool (*is_affected)(struct arm64_cpu_capabilities *);
+ union {
+ struct {
+ u32 midr_model;
+ u32 midr_range_min, midr_range_max;
+ };
+ };
+};
+
+#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+ MIDR_ARCHITECTURE_MASK)
+
+static bool __maybe_unused
+is_affected_midr_range(struct arm64_cpu_capabilities *entry)
+{
+ u32 midr = read_cpuid_id();
+
+ if ((midr & CPU_MODEL_MASK) != entry->midr_model)
+ return false;
+
+ midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
+
+ return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
+}
+
+#define MIDR_RANGE(model, min, max) \
+ .is_affected = is_affected_midr_range, \
+ .midr_model = model, \
+ .midr_range_min = min, \
+ .midr_range_max = max
+
+struct arm64_cpu_capabilities arm64_errata[] = {
+#if defined(CONFIG_ARM64_ERRATUM_826319) || \
+ defined(CONFIG_ARM64_ERRATUM_827319) || \
+ defined(CONFIG_ARM64_ERRATUM_824069)
+ {
+ /* Cortex-A53 r0p[012] */
+ .desc = "ARM errata 826319, 827319, 824069",
+ .capability = ARM64_WORKAROUND_CLEAN_CACHE,
+ MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_819472
+ {
+ /* Cortex-A53 r0p[01] */
+ .desc = "ARM errata 819472",
+ .capability = ARM64_WORKAROUND_CLEAN_CACHE,
+ MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_832075
+ {
+ /* Cortex-A57 r0p0 - r1p2 */
+ .desc = "ARM erratum 832075",
+ .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
+ MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
+ },
+#endif
+ {
+ }
+};
+
+void check_local_cpu_errata(void)
+{
+ struct arm64_cpu_capabilities *cpus = arm64_errata;
+ int i;
+
+ for (i = 0; cpus[i].desc; i++) {
+ if (!cpus[i].is_affected(&cpus[i]))
+ continue;
+
+ if (!cpus_have_cap(cpus[i].capability))
+ pr_info("enabling workaround for %s\n", cpus[i].desc);
+ cpus_set_cap(cpus[i].capability);
+ }
+}
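
is_affected_midr_range() first matches the CPU model (implementer, part number, architecture), then reduces the MIDR to its variant and revision fields and does a bounded compare. A worked standalone example of that second step; the field placement and masks below are illustrative stand-ins, and the real MIDR_* masks live in asm/cputype.h.

    /* Sketch of the variant/revision window check in is_affected_midr_range().
     * Assumed layout for illustration: revision in bits [3:0], variant in
     * bits [7:4]; the genuine bit positions come from asm/cputype.h. */
    #include <stdint.h>
    #include <stdio.h>

    #define REV_MASK    0x0fu
    #define VAR_MASK    0xf0u

    static int in_range(uint32_t rev_var, uint32_t min, uint32_t max)
    {
        return rev_var >= min && rev_var <= max;
    }

    int main(void)
    {
        /* Cortex-A53 r0p1 -> variant 0, revision 1 -> 0x01, which falls
         * inside the r0p[012] window used for errata 826319/827319/824069 */
        uint32_t rev_var = 0x01 & (REV_MASK | VAR_MASK);

        printf("r0p1 in [0x00, 0x02]: %d\n", in_range(rev_var, 0x00, 0x02));
        return 0;
    }
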
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 504fdaa8367e..57b641747534 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -18,6 +18,7 @@
#include <asm/cachetype.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
+#include <asm/cpufeature.h>
#include <linux/bitops.h>
#include <linux/bug.h>
@@ -111,6 +112,15 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
diff |= CHECK(cntfrq, boot, cur, cpu);
/*
+ * The kernel uses self-hosted debug features and expects CPUs to
+ * support identical debug features. We presently need CTX_CMPs, WRPs,
+ * and BRPs to be identical.
+ * ID_AA64DFR1 is currently RES0.
+ */
+ diff |= CHECK(id_aa64dfr0, boot, cur, cpu);
+ diff |= CHECK(id_aa64dfr1, boot, cur, cpu);
+
+ /*
* Even in big.LITTLE, processors should be identical instruction-set
* wise.
*/
@@ -143,7 +153,12 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
diff |= CHECK(id_isar3, boot, cur, cpu);
diff |= CHECK(id_isar4, boot, cur, cpu);
diff |= CHECK(id_isar5, boot, cur, cpu);
- diff |= CHECK(id_mmfr0, boot, cur, cpu);
+ /*
+ * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
+ * ACTLR formats could differ across CPUs and therefore would have to
+ * be trapped for virtualization anyway.
+ */
+ diff |= CHECK_MASK(id_mmfr0, 0xff0fffff, boot, cur, cpu);
diff |= CHECK(id_mmfr1, boot, cur, cpu);
diff |= CHECK(id_mmfr2, boot, cur, cpu);
diff |= CHECK(id_mmfr3, boot, cur, cpu);
@@ -155,7 +170,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
* pretend to support them.
*/
WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC,
- "Unsupported CPU feature variation.");
+ "Unsupported CPU feature variation.\n");
}
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
@@ -165,6 +180,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_dczid = read_cpuid(DCZID_EL0);
info->reg_midr = read_cpuid_id();
+ info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
+ info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
@@ -186,6 +203,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
cpuinfo_detect_icache_policy(info);
+
+ check_local_cpu_errata();
}
void cpuinfo_store_cpu(void)
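
The change from CHECK(id_mmfr0, ...) to CHECK_MASK(id_mmfr0, 0xff0fffff, ...) excludes only the AuxReg field (bits [23:20]) from the cross-CPU sanity check, for the reason given in the comment. A small sketch of the masked-compare idea; CHECK/CHECK_MASK are local macros in cpuinfo.c (not visible in this hunk), and regs_differ() below is a hypothetical stand-in.

    /* Sketch: comparing a feature register across CPUs while masking
     * out fields that are allowed to differ, as CHECK_MASK does. */
    #include <stdint.h>
    #include <stdio.h>

    static int regs_differ(uint32_t boot, uint32_t cur, uint32_t mask)
    {
        return (boot & mask) != (cur & mask);
    }

    int main(void)
    {
        uint32_t boot = 0x00101105;
        uint32_t cur  = 0x00701105;    /* differs only in AuxReg, bits [23:20] */

        /* Prints 0: the AuxReg difference is masked out, so no mismatch
         * is reported and the CPUs are treated as compatible. */
        printf("diff = %d\n", regs_differ(boot, cur, 0xff0fffff));
        return 0;
    }
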
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index d18a44940968..8ce9b0577442 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -61,7 +61,8 @@ ENTRY(efi_stub_entry)
*/
mov x20, x0 // DTB address
ldr x0, [sp, #16] // relocated _text address
- mov x21, x0
+ ldr x21, =stext_offset
+ add x21, x0, x21
/*
* Calculate size of the kernel Image (same for original and copy).
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 95c49ebc660d..6fac253bc783 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
@@ -112,8 +113,6 @@ static int __init uefi_init(void)
efi.systab->hdr.revision & 0xffff, vendor);
retval = efi_config_init(NULL);
- if (retval == 0)
- set_bit(EFI_CONFIG_TABLES, &efi.flags);
out:
early_memunmap(efi.systab, sizeof(efi_system_table_t));
@@ -125,17 +124,17 @@ out:
*/
static __init int is_reserve_region(efi_memory_desc_t *md)
{
- if (!is_normal_ram(md))
+ switch (md->type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
return 0;
-
- if (md->attribute & EFI_MEMORY_RUNTIME)
- return 1;
-
- if (md->type == EFI_ACPI_RECLAIM_MEMORY ||
- md->type == EFI_RESERVED_TYPE)
- return 1;
-
- return 0;
+ default:
+ break;
+ }
+ return is_normal_ram(md);
}
static __init void reserve_regions(void)
@@ -471,3 +470,17 @@ err_unmap:
return -1;
}
early_initcall(arm64_enter_virtual_mode);
+
+static int __init arm64_dmi_init(void)
+{
+ /*
+ * On arm64, DMI depends on UEFI, and dmi_scan_machine() needs to
+ * be called early because dmi_id_init(), which is an arch_initcall
+ * itself, depends on dmi_scan_machine() having been called already.
+ */
+ dmi_scan_machine();
+ if (dmi_available)
+ dmi_set_dump_stack_arch_desc();
+ return 0;
+}
+core_initcall(arm64_dmi_init);