Diffstat (limited to 'arch/arm/kernel')
 arch/arm/kernel/armksyms.c          |   3
 arch/arm/kernel/asm-offsets.c       |  23
 arch/arm/kernel/devtree.c           |   6
 arch/arm/kernel/dma.c               |  14
 arch/arm/kernel/entry-armv.S        |  14
 arch/arm/kernel/entry-common.S      |  38
 arch/arm/kernel/entry-ftrace.S      |  75
 arch/arm/kernel/ftrace.c            |  51
 arch/arm/kernel/head-nommu.S        | 301
 arch/arm/kernel/hw_breakpoint.c     |  78
 arch/arm/kernel/irq.c               |  10
 arch/arm/kernel/machine_kexec.c     |  36
 arch/arm/kernel/paravirt.c          |   4
 arch/arm/kernel/perf_event_v6.c     |  18
 arch/arm/kernel/perf_event_v7.c     |  18
 arch/arm/kernel/perf_event_xscale.c |  24
 arch/arm/kernel/process.c           |  12
 arch/arm/kernel/ptrace.c            |  10
 arch/arm/kernel/setup.c             |   9
 arch/arm/kernel/signal.c            |  14
 arch/arm/kernel/smp.c               |   3
 arch/arm/kernel/swp_emulate.c       |  30
 arch/arm/kernel/sys_arm.c           |   2
 arch/arm/kernel/sys_oabi-compat.c   |   4
 arch/arm/kernel/time.c              |  15
 arch/arm/kernel/topology.c          |   6
 arch/arm/kernel/traps.c             |  63
 arch/arm/kernel/vmlinux-xip.lds.S   | 171
 arch/arm/kernel/vmlinux.lds.S       | 179
 arch/arm/kernel/vmlinux.lds.h       | 137
 30 files changed, 619 insertions(+), 749 deletions(-)
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 783fbb4de5f9..8fa2dc21d332 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -167,9 +167,6 @@ EXPORT_SYMBOL(_find_next_bit_be);
#endif
#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_OLD_MCOUNT
-EXPORT_SYMBOL(mcount);
-#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index f369ece99958..3968d6c22455 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -38,30 +38,19 @@
#error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
#endif
/*
- * GCC 3.0, 3.1: general bad code generation.
- * GCC 3.2.0: incorrect function argument offset calculation.
- * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
- * (http://gcc.gnu.org/PR8896) and incorrect structure
- * initialisation in fs/jffs2/erase.c
* GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
* miscompiles find_get_entry(), and can result in EXT3 and EXT4
* filesystem corruption (possibly other FS too).
*/
-#ifdef __GNUC__
-#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-#error Your compiler is too buggy; it is known to miscompile kernels.
-#error Known good compilers: 3.3, 4.x
-#endif
-#if GCC_VERSION >= 40800 && GCC_VERSION < 40803
+#if defined(GCC_VERSION) && GCC_VERSION >= 40800 && GCC_VERSION < 40803
#error Your compiler is too buggy; it is known to miscompile kernels
#error and result in filesystem corruption and oopses.
#endif
-#endif
int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
#endif
BLANK();
@@ -194,9 +183,11 @@ int main(void)
DEFINE(MPU_RNG_INFO_USED, offsetof(struct mpu_rgn_info, used));
DEFINE(MPU_RNG_SIZE, sizeof(struct mpu_rgn));
- DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar));
- DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr));
- DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr));
+ DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar));
+ DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr));
+ DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr));
+ DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
+ DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
#endif
return 0;
}
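
As background for the hunk above: asm-offsets.c is never linked into the kernel. Kbuild compiles it to assembly and scrapes the DEFINE() markers out of the output to generate include/generated/asm-offsets.h, which the assembly sources (entry-armv.S, head-nommu.S, ...) then include. A minimal sketch of the mechanism, modelled on include/linux/kbuild.h:

	/* Each DEFINE() emits a "->NAME value" marker into the compiler's
	 * assembly output; scripts/Makefile.build turns those markers into
	 * #define lines in asm-offsets.h. */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

	#define BLANK() asm volatile("\n.ascii \"->\"" : : )
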
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index ecaa68dd1af5..e3057c1b55b9 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -12,7 +12,6 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@@ -87,14 +86,11 @@ void __init arm_dt_init_cpu_maps(void)
if (!cpus)
return;
- for_each_child_of_node(cpus, cpu) {
+ for_each_of_cpu_node(cpu) {
const __be32 *cell;
int prop_bytes;
u32 hwid;
- if (of_node_cmp(cpu->type, "cpu"))
- continue;
-
pr_debug(" * %pOF...\n", cpu);
/*
* A device tree containing CPU nodes with missing "reg"
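
The devtree.c change swaps an open-coded walk of /cpus for the dedicated iterator. A before/after sketch of the pattern, grounded in the hunk above (loop bodies elided; example_cpu_walk is a hypothetical wrapper to make the fragment self-contained):

	#include <linux/of.h>

	static void __init example_cpu_walk(void)
	{
		struct device_node *cpus = of_find_node_by_path("/cpus");
		struct device_node *cpu;

		/* Before: visit every child of /cpus, filtering by hand. */
		for_each_child_of_node(cpus, cpu) {
			if (of_node_cmp(cpu->type, "cpu"))
				continue;
			/* ... read the "reg" property ... */
		}

		/* After: the helper yields only CPU nodes, matching by node
		 * name as well as by device_type. */
		for_each_of_cpu_node(cpu) {
			/* ... read the "reg" property ... */
		}
	}
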
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index e651c4d0a0d9..6739d37c2bc5 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -276,21 +276,9 @@ static int proc_dma_show(struct seq_file *m, void *v)
return 0;
}
-static int proc_dma_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_dma_show, NULL);
-}
-
-static const struct file_operations proc_dma_operations = {
- .open = proc_dma_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static int __init proc_dma_init(void)
{
- proc_create("dma", 0, NULL, &proc_dma_operations);
+ proc_create_single("dma", 0, NULL, proc_dma_show);
return 0;
}
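
proc_create_single() folds the single_open() boilerplate into procfs itself: a read-only file only needs its show callback. A minimal self-contained sketch of the resulting pattern (the seq_puts() body is illustrative):

	#include <linux/init.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int proc_dma_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "No DMA\n");	/* illustrative body */
		return 0;
	}

	static int __init proc_dma_init(void)
	{
		/* One call replaces proc_dma_open(), the file_operations
		 * table and the single_release() plumbing deleted above. */
		proc_create_single("dma", 0, NULL, proc_dma_show);
		return 0;
	}
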
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 1752033b0070..e85a3af9ddeb 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -22,7 +22,7 @@
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
-#ifndef CONFIG_MULTI_IRQ_HANDLER
+#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
@@ -39,7 +39,7 @@
* Interrupt handling.
*/
.macro irq_handler
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
ldr r1, =handle_arch_irq
mov r0, sp
badr lr, 9997f
@@ -791,7 +791,7 @@ ENTRY(__switch_to)
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
switch_tls r1, r4, r5, r3, r7
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
ldr r7, [r2, #TI_TASK]
ldr r8, =__stack_chk_guard
.if (TSK_STACK_CANARY > IMM12_MASK)
@@ -807,7 +807,7 @@ ENTRY(__switch_to)
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
str r7, [r8]
#endif
THUMB( mov ip, r4 )
@@ -1226,9 +1226,3 @@ vector_addrexcptn:
.globl cr_alignment
cr_alignment:
.space 4
-
-#ifdef CONFIG_MULTI_IRQ_HANDLER
- .globl handle_arch_irq
-handle_arch_irq:
- .space 4
-#endif
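
With CONFIG_GENERIC_IRQ_MULTI_HANDLER the handle_arch_irq slot moves out of entry-armv.S into generic IRQ code, which is why the .space 4 definition above is deleted. Irqchip drivers still register their root handler through set_handle_irq(); a minimal sketch (the my_* names are hypothetical):

	#include <linux/irq.h>
	#include <asm/exception.h>

	/* Root handler: reached from the irq_handler macro via handle_arch_irq. */
	static void __exception_irq_entry my_handle_irq(struct pt_regs *regs)
	{
		/* ack the controller, then hand off to the IRQ domain, e.g.
		 * handle_domain_irq(my_domain, hwirq, regs); */
	}

	static int __init my_irqchip_init(void)
	{
		set_handle_irq(my_handle_irq);	/* stores into handle_arch_irq */
		return 0;
	}
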
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 20df608bf343..0465d65d23de 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -39,14 +39,16 @@ saved_pc .req lr
.section .entry.text,"ax",%progbits
.align 5
-#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING) || \
+ IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
* This is the fast syscall return path. We do as little as possible here,
* such as avoiding writing r0 to the stack. We only use this path if we
- * have tracing and context tracking disabled - the overheads from those
- * features make this path too inefficient.
+ * have tracing, context tracking and rseq debug disabled - the overheads
+ * from those features make this path too inefficient.
*/
ret_fast_syscall:
+__ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
@@ -71,14 +73,21 @@ fast_work_pending:
/* fall through to work_pending */
#else
/*
- * The "replacement" ret_fast_syscall for when tracing or context tracking
- * is enabled. As we will need to call out to some C functions, we save
- * r0 first to avoid needing to save registers around each C function call.
+ * The "replacement" ret_fast_syscall for when tracing, context tracking,
+ * or rseq debug is enabled. As we will need to call out to some C functions,
+ * we save r0 first to avoid needing to save registers around each C function
+ * call.
*/
ret_fast_syscall:
+__ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
+ /* do_rseq_syscall needs interrupts enabled. */
+ mov r0, sp @ 'regs'
+ bl do_rseq_syscall
+#endif
disable_irq_notrace @ disable interrupts
ldr r2, [tsk, #TI_ADDR_LIMIT]
cmp r2, #TASK_SIZE
@@ -113,6 +122,12 @@ ENDPROC(ret_fast_syscall)
*/
ENTRY(ret_to_user)
ret_slow_syscall:
+#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
+ /* do_rseq_syscall needs interrupts enabled. */
+ enable_irq_notrace @ enable interrupts
+ mov r0, sp @ 'regs'
+ bl do_rseq_syscall
+#endif
disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
ldr r2, [tsk, #TI_ADDR_LIMIT]
@@ -242,7 +257,7 @@ local_restart:
tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace
- invoke_syscall tbl, scno, r10, ret_fast_syscall
+ invoke_syscall tbl, scno, r10, __ret_fast_syscall
add r1, sp, #S_OFF
2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
@@ -281,16 +296,15 @@ __sys_trace:
cmp scno, #-1 @ skip the syscall?
bne 2b
add sp, sp, #S_OFF @ restore stack
- b ret_slow_syscall
-__sys_trace_return:
- str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+__sys_trace_return_nosave:
+ enable_irq_notrace
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
-__sys_trace_return_nosave:
- enable_irq_notrace
+__sys_trace_return:
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
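
The two new bl do_rseq_syscall calls land in a thin C wrapper that forwards to the generic rseq debug check; under CONFIG_DEBUG_RSEQ that check kills a task that issues a syscall from inside an rseq critical section. A sketch of the wrapper as added on arm (in arch/arm/kernel/signal.c) by the same series:

	#ifdef CONFIG_DEBUG_RSEQ
	asmlinkage void do_rseq_syscall(struct pt_regs *regs)
	{
		rseq_syscall(regs);
	}
	#endif
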
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index efcd9f25a14b..0be69e551a64 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -15,23 +15,8 @@
* start of every function. In mcount, apart from the function's address (in
* lr), we need to get hold of the function's caller's address.
*
- * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
- *
- * bl mcount
- *
- * These versions have the limitation that in order for the mcount routine to
- * be able to determine the function's caller's address, an APCS-style frame
- * pointer (which is set up with something like the code below) is required.
- *
- * mov ip, sp
- * push {fp, ip, lr, pc}
- * sub fp, ip, #4
- *
- * With EABI, these frame pointers are not available unless -mapcs-frame is
- * specified, and if building as Thumb-2, not even then.
- *
- * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
- * with call sites like:
+ * Newer GCCs (4.4+) solve this problem by using a version of mcount with call
+ * sites like:
*
* push {lr}
* bl __gnu_mcount_nc
@@ -46,17 +31,10 @@
* allows it to be clobbered in subroutines and doesn't use it to hold
* parameters.)
*
- * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
- * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
- * arch/arm/kernel/ftrace.c).
+ * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}"
+ * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
*/
-#ifndef CONFIG_OLD_MCOUNT
-#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
-#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
-#endif
-#endif
-
.macro mcount_adjust_addr rd, rn
bic \rd, \rn, #1 @ clear the Thumb bit if present
sub \rd, \rd, #MCOUNT_INSN_SIZE
@@ -209,51 +187,6 @@ ftrace_graph_call\suffix:
mcount_exit
.endm
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * mcount
- */
-
-.macro mcount_enter
- stmdb sp!, {r0-r3, lr}
-.endm
-
-.macro mcount_get_lr reg
- ldr \reg, [fp, #-4]
-.endm
-
-.macro mcount_exit
- ldr lr, [fp, #-4]
- ldmia sp!, {r0-r3, pc}
-.endm
-
-ENTRY(mcount)
-#ifdef CONFIG_DYNAMIC_FTRACE
- stmdb sp!, {lr}
- ldr lr, [fp, #-4]
- ldmia sp!, {pc}
-#else
- __mcount _old
-#endif
-ENDPROC(mcount)
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(ftrace_caller_old)
- __ftrace_caller _old
-ENDPROC(ftrace_caller_old)
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller_old)
- __ftrace_graph_caller
-ENDPROC(ftrace_graph_caller_old)
-#endif
-
-.purgem mcount_enter
-.purgem mcount_get_lr
-.purgem mcount_exit
-#endif
-
/*
* __gnu_mcount_nc
*/
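
Because a __gnu_mcount_nc call site pushes lr before the branch, the "no-op" that dynamic ftrace patches in must pop it again to rebalance the stack; a plain mov r0, r0 only worked for the old mcount ABI removed here. The matching C side (see the ftrace.c hunks below) therefore reduces to a single replacement opcode; a sketch matching arch/arm/kernel/ftrace.c:

	#include <linux/ftrace.h>

	#define	NOP		0xe8bd4000	/* pop {lr} */

	/* Only one call-site flavour remains, so no per-record choice
	 * between OLD_NOP and NOP is needed any more. */
	static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
	{
		return NOP;
	}
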
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 5617932a83df..0142fcfcc3d3 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -47,30 +47,6 @@ void arch_ftrace_update_code(int command)
stop_machine(__ftrace_modify_code, &command, NULL);
}
-#ifdef CONFIG_OLD_MCOUNT
-#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
-#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
-
-#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
-
-static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
-{
- return rec->arch.old_mcount ? OLD_NOP : NOP;
-}
-
-static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
-{
- if (!rec->arch.old_mcount)
- return addr;
-
- if (addr == MCOUNT_ADDR)
- addr = OLD_MCOUNT_ADDR;
- else if (addr == FTRACE_ADDR)
- addr = OLD_FTRACE_ADDR;
-
- return addr;
-}
-#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
return NOP;
@@ -80,7 +56,6 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
return addr;
}
-#endif
int ftrace_arch_code_modify_prepare(void)
{
@@ -150,15 +125,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
}
#endif
-#ifdef CONFIG_OLD_MCOUNT
- if (!ret) {
- pc = (unsigned long)&ftrace_call_old;
- new = ftrace_call_replace(pc, (unsigned long)func);
-
- ret = ftrace_modify_code(pc, 0, new, false);
- }
-#endif
-
return ret;
}
@@ -203,16 +169,6 @@ int ftrace_make_nop(struct module *mod,
new = ftrace_nop_replace(rec);
ret = ftrace_modify_code(ip, old, new, true);
-#ifdef CONFIG_OLD_MCOUNT
- if (ret == -EINVAL && addr == MCOUNT_ADDR) {
- rec->arch.old_mcount = true;
-
- old = ftrace_call_replace(ip, adjust_address(rec, addr));
- new = ftrace_nop_replace(rec);
- ret = ftrace_modify_code(ip, old, new, true);
- }
-#endif
-
return ret;
}
@@ -290,13 +246,6 @@ static int ftrace_modify_graph_caller(bool enable)
#endif
-#ifdef CONFIG_OLD_MCOUNT
- if (!ret)
- ret = __ftrace_modify_caller(&ftrace_graph_call_old,
- ftrace_graph_caller_old,
- enable);
-#endif
-
return ret;
}
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2e38f85b757a..ec29de250076 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -53,7 +53,11 @@ ENTRY(stext)
THUMB(1: )
#endif
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install
+#endif
+ @ ensure svc mode and all interrupts masked
+ safe_svcmode_maskall r9
@ and irqs disabled
#if defined(CONFIG_CPU_CP15)
mrc p15, 0, r9, c0, c0 @ get processor id
@@ -68,14 +72,6 @@ ENTRY(stext)
beq __error_p @ yes, error 'p'
#ifdef CONFIG_ARM_MPU
- /* Calculate the size of a region covering just the kernel */
- ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
- ldr r6, =(_end) @ Cover whole kernel
- sub r6, r6, r5 @ Minimum size of region to map
- clz r6, r6 @ Region size must be 2^N...
- rsb r6, r6, #31 @ ...so round up region size
- lsl r6, r6, #MPU_RSR_SZ @ Put size in right field
- orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit
bl __setup_mpu
#endif
@@ -83,8 +79,8 @@ ENTRY(stext)
ldr r12, [r10, #PROCINFO_INITFUNC]
add r12, r12, r10
ret r12
-1: bl __after_proc_init
- b __mmap_switched
+1: ldr lr, =__mmap_switched
+ b __after_proc_init
ENDPROC(stext)
#ifdef CONFIG_SMP
@@ -97,7 +93,11 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install_secondary
+#endif
+ safe_svcmode_maskall r9
+
#ifndef CONFIG_CPU_CP15
ldr r9, =CONFIG_PROCESSOR_ID
#else
@@ -110,8 +110,6 @@ ENTRY(secondary_startup)
ldr r7, __secondary_data
#ifdef CONFIG_ARM_MPU
- /* Use MPU region info supplied by __cpu_up */
- ldr r6, [r7] @ get secondary_data.mpu_rgn_info
bl __secondary_setup_mpu @ Initialize the MPU
#endif
@@ -133,12 +131,45 @@ __secondary_data:
/*
* Set the Control Register and Read the process ID.
*/
+ .text
__after_proc_init:
+#ifdef CONFIG_ARM_MPU
+M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB)
+M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB)
+M_CLASS(ldr r3, [r12, 0x50])
+AR_CLASS(mrc p15, 0, r3, c0, c1, 4) @ Read ID_MMFR0
+ and r3, r3, #(MMFR0_PMSA) @ PMSA field
+ teq r3, #(MMFR0_PMSAv7) @ PMSA v7
+ beq 1f
+ teq r3, #(MMFR0_PMSAv8) @ PMSA v8
+ /*
+ * Memory region attributes for PMSAv8:
+ *
+ * n = AttrIndx[2:0]
+ * n MAIR
+ * DEVICE_nGnRnE 000 00000000
+ * NORMAL 001 11111111
+ */
+ ldreq r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
+ PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
+AR_CLASS(mcreq p15, 0, r3, c10, c2, 0) @ MAIR 0
+M_CLASS(streq r3, [r12, #PMSAv8_MAIR0])
+ moveq r3, #0
+AR_CLASS(mcreq p15, 0, r3, c10, c2, 1) @ MAIR 1
+M_CLASS(streq r3, [r12, #PMSAv8_MAIR1])
+
+1:
+#endif
#ifdef CONFIG_CPU_CP15
/*
* CP15 system control register value returned in r0 from
* the CPU init function.
*/
+
+#ifdef CONFIG_ARM_MPU
+ biceq r0, r0, #CR_BR @ Disable the 'default mem-map'
+ orreq r0, r0, #CR_M @ Set SCTRL.M (MPU on)
+#endif
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
orr r0, r0, #CR_A
#else
@@ -154,7 +185,15 @@ __after_proc_init:
bic r0, r0, #CR_I
#endif
mcr p15, 0, r0, c1, c0, 0 @ write control reg
+ instr_sync
#elif defined (CONFIG_CPU_V7M)
+#ifdef CONFIG_ARM_MPU
+ ldreq r3, [r12, MPU_CTRL]
+ biceq r3, #MPU_CTRL_PRIVDEFENA
+ orreq r3, #MPU_CTRL_ENABLE
+ streq r3, [r12, MPU_CTRL]
+ isb
+#endif
/* For V7M systems we want to modify the CCR similarly to the SCTLR */
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #V7M_SCB_CCR_DC
@@ -165,9 +204,7 @@ __after_proc_init:
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #V7M_SCB_CCR_IC
#endif
- movw r3, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
- movt r3, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
- str r0, [r3]
+ str r0, [r12, V7M_SCB_CCR]
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
ret lr
ENDPROC(__after_proc_init)
@@ -184,7 +221,7 @@ ENDPROC(__after_proc_init)
.endm
/* Setup a single MPU region, either D or I side (D-side for unified) */
-.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE, unused
+.macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR
mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR
mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR
@@ -192,14 +229,14 @@ ENDPROC(__after_proc_init)
#else
.macro set_region_nr tmp, rgnr, base
mov \tmp, \rgnr
- str \tmp, [\base, #MPU_RNR]
+ str \tmp, [\base, #PMSAv7_RNR]
.endm
.macro setup_region bar, acr, sr, unused, base
lsl \acr, \acr, #16
orr \acr, \acr, \sr
- str \bar, [\base, #MPU_RBAR]
- str \acr, [\base, #MPU_RASR]
+ str \bar, [\base, #PMSAv7_RBAR]
+ str \acr, [\base, #PMSAv7_RASR]
.endm
#endif
@@ -210,8 +247,9 @@ ENDPROC(__after_proc_init)
* Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
* Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
*
- * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION
+ * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
*/
+ __HEAD
ENTRY(__setup_mpu)
@@ -223,7 +261,22 @@ AR_CLASS(mrc p15, 0, r0, c0, c1, 4) @ Read ID_MMFR0
M_CLASS(ldr r0, [r12, 0x50])
and r0, r0, #(MMFR0_PMSA) @ PMSA field
teq r0, #(MMFR0_PMSAv7) @ PMSA v7
- bxne lr
+ beq __setup_pmsa_v7
+ teq r0, #(MMFR0_PMSAv8) @ PMSA v8
+ beq __setup_pmsa_v8
+
+ ret lr
+ENDPROC(__setup_mpu)
+
+ENTRY(__setup_pmsa_v7)
+ /* Calculate the size of a region covering just the kernel */
+ ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
+ ldr r6, =(_end) @ Cover whole kernel
+ sub r6, r6, r5 @ Minimum size of region to map
+ clz r6, r6 @ Region size must be 2^N...
+ rsb r6, r6, #31 @ ...so round up region size
+ lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
+ orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
/* Determine whether the D/I-side memory map is unified. We set the
* flags here and continue to use them for the rest of this function */
@@ -234,77 +287,189 @@ M_CLASS(ldr r0, [r12, #MPU_TYPE])
tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified
/* Setup second region first to free up r6 */
- set_region_nr r0, #MPU_RAM_REGION, r12
+ set_region_nr r0, #PMSAv7_RAM_REGION, r12
isb
/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
- ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
+ ldr r5,=(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)
- setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled
+ setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled
beq 1f @ Memory-map not unified
- setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled
+ setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled
1: isb
/* First/background region */
- set_region_nr r0, #MPU_BG_REGION, r12
+ set_region_nr r0, #PMSAv7_BG_REGION, r12
isb
/* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
mov r0, #0 @ BG region starts at 0x0
- ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA)
- mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled
+ ldr r5,=(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
+ mov r6, #PMSAv7_RSR_ALL_MEM @ 4GB region, enabled
- setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ 0x0, BG region, enabled
+ setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ 0x0, BG region, enabled
beq 2f @ Memory-map not unified
- setup_region r0, r5, r6, MPU_INSTR_SIDE r12 @ 0x0, BG region, enabled
+ setup_region r0, r5, r6, PMSAv7_INSTR_SIDE r12 @ 0x0, BG region, enabled
2: isb
#ifdef CONFIG_XIP_KERNEL
- set_region_nr r0, #MPU_ROM_REGION, r12
+ set_region_nr r0, #PMSAv7_ROM_REGION, r12
isb
- ldr r5,=(MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL)
+ ldr r5,=(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)
ldr r0, =CONFIG_XIP_PHYS_ADDR @ ROM start
ldr r6, =(_exiprom) @ ROM end
sub r6, r6, r0 @ Minimum size of region to map
clz r6, r6 @ Region size must be 2^N...
rsb r6, r6, #31 @ ...so round up region size
- lsl r6, r6, #MPU_RSR_SZ @ Put size in right field
- orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit
+ lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
+ orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
- setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
+ setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
beq 3f @ Memory-map not unified
- setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
+ setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
3: isb
#endif
+ ret lr
+ENDPROC(__setup_pmsa_v7)
+
+ENTRY(__setup_pmsa_v8)
+ mov r0, #0
+AR_CLASS(mcr p15, 0, r0, c6, c2, 1) @ PRSEL
+M_CLASS(str r0, [r12, #PMSAv8_RNR])
+ isb
+
+#ifdef CONFIG_XIP_KERNEL
+ ldr r5, =CONFIG_XIP_PHYS_ADDR @ ROM start
+ ldr r6, =(_exiprom) @ ROM end
+ sub r6, r6, #1
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr p15, 0, r5, c6, c8, 0) @ PRBAR0
+AR_CLASS(mcr p15, 0, r6, c6, c8, 1) @ PRLAR0
+M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(0)])
+M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(0)])
+#endif
- /* Enable the MPU */
-AR_CLASS(mrc p15, 0, r0, c1, c0, 0) @ Read SCTLR
-AR_CLASS(bic r0, r0, #CR_BR) @ Disable the 'default mem-map'
-AR_CLASS(orr r0, r0, #CR_M) @ Set SCTRL.M (MPU on)
-AR_CLASS(mcr p15, 0, r0, c1, c0, 0) @ Enable MPU
+ ldr r5, =KERNEL_START
+ ldr r6, =KERNEL_END
+ sub r6, r6, #1
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
-M_CLASS(ldr r0, [r12, #MPU_CTRL])
-M_CLASS(bic r0, #MPU_CTRL_PRIVDEFENA)
-M_CLASS(orr r0, #MPU_CTRL_ENABLE)
-M_CLASS(str r0, [r12, #MPU_CTRL])
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr p15, 0, r5, c6, c8, 4) @ PRBAR1
+AR_CLASS(mcr p15, 0, r6, c6, c8, 5) @ PRLAR1
+M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(1)])
+M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(1)])
+
+ /* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
+#ifdef CONFIG_XIP_KERNEL
+ ldr r6, =KERNEL_START
+ ldr r5, =CONFIG_XIP_PHYS_ADDR
+ cmp r6, r5
+ movcs r6, r5
+#else
+ ldr r6, =KERNEL_START
+#endif
+ cmp r6, #0
+ beq 1f
+
+ mov r5, #0
+ sub r6, r6, #1
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr p15, 0, r5, c6, c9, 0) @ PRBAR2
+AR_CLASS(mcr p15, 0, r6, c6, c9, 1) @ PRLAR2
+M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(2)])
+M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(2)])
+
+1:
+ /* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
+#ifdef CONFIG_XIP_KERNEL
+ ldr r5, =KERNEL_END
+ ldr r6, =(_exiprom)
+ cmp r5, r6
+ movcc r5, r6
+#else
+ ldr r5, =KERNEL_END
+#endif
+ mov r6, #0xffffffff
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr p15, 0, r5, c6, c9, 4) @ PRBAR3
+AR_CLASS(mcr p15, 0, r6, c6, c9, 5) @ PRLAR3
+M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(3)])
+M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(3)])
+
+#ifdef CONFIG_XIP_KERNEL
+ /* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
+ ldr r5, =(_exiprom)
+ ldr r6, =KERNEL_END
+ cmp r5, r6
+ movcs r5, r6
+
+ ldr r6, =KERNEL_START
+ ldr r0, =CONFIG_XIP_PHYS_ADDR
+ cmp r6, r0
+ movcc r6, r0
+
+ sub r6, r6, #1
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+#ifdef CONFIG_CPU_V7M
+ /* There is no alias for n == 4 */
+ mov r0, #4
+ str r0, [r12, #PMSAv8_RNR] @ PRSEL
isb
+ str r5, [r12, #PMSAv8_RBAR_A(0)]
+ str r6, [r12, #PMSAv8_RLAR_A(0)]
+#else
+ mcr p15, 0, r5, c6, c10, 1 @ PRBAR4
+ mcr p15, 0, r6, c6, c10, 2 @ PRLAR4
+#endif
+#endif
ret lr
-ENDPROC(__setup_mpu)
+ENDPROC(__setup_pmsa_v8)
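
Each PMSAv8 region above is a PRBAR/PRLAR pair: the base register carries the shareability/AP/XN bits in its low bits, and the limit register carries an inclusive, PMSAv8_MINALIGN-aligned end address plus a MAIR attribute index and the enable bit. A hedged C sketch of the encoding the assembly computes (pmsav8_prbar/pmsav8_prlar are hypothetical helpers; the PMSAv8_* macros are the ones from asm/mpu.h used above):

	#include <asm/mpu.h>

	static inline u32 pmsav8_prbar(u32 start, u32 flags)
	{
		/* Base must be PMSAv8_MINALIGN aligned; the low bits encode
		 * XN, AP and shareability, e.g.
		 * PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED. */
		return (start & ~(PMSAv8_MINALIGN - 1)) | flags;
	}

	static inline u32 pmsav8_prlar(u32 end, u32 attridx)
	{
		/* The limit is inclusive, hence the "sub r6, r6, #1" before
		 * aligning in the assembly above. */
		return ((end - 1) & ~(PMSAv8_MINALIGN - 1)) |
		       PMSAv8_LAR_IDX(attridx) | PMSAv8_LAR_EN;
	}
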
#ifdef CONFIG_SMP
/*
* r6: pointer at mpu_rgn_info
*/
+ .text
ENTRY(__secondary_setup_mpu)
+ /* Use MPU region info supplied by __cpu_up */
+ ldr r6, [