-rw-r--r--  Documentation/devicetree/bindings/arm/l2cc.txt | 5
-rw-r--r--  Documentation/devicetree/booting-without-of.txt | 4
-rw-r--r--  arch/arm/Kconfig | 34
-rw-r--r--  arch/arm/Kconfig.debug | 1
-rw-r--r--  arch/arm/Makefile | 4
-rw-r--r--  arch/arm/boot/compressed/Makefile | 2
-rw-r--r--  arch/arm/boot/compressed/head.S | 4
-rw-r--r--  arch/arm/common/Makefile | 1
-rw-r--r--  arch/arm/common/mcpm_entry.c | 281
-rw-r--r--  arch/arm/common/mcpm_head.S | 2
-rw-r--r--  arch/arm/include/asm/assembler.h | 17
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 7
-rw-r--r--  arch/arm/include/asm/cmpxchg.h | 67
-rw-r--r--  arch/arm/include/asm/entry-macro-multi.S | 4
-rw-r--r--  arch/arm/include/asm/io.h | 52
-rw-r--r--  arch/arm/include/asm/irqflags.h | 11
-rw-r--r--  arch/arm/include/asm/mach/arch.h | 2
-rw-r--r--  arch/arm/include/asm/mcpm.h | 73
-rw-r--r--  arch/arm/include/asm/memory.h | 16
-rw-r--r--  arch/arm/include/asm/module.h | 12
-rw-r--r--  arch/arm/include/asm/perf_event.h | 7
-rw-r--r--  arch/arm/include/asm/pmu.h | 19
-rw-r--r--  arch/arm/include/asm/proc-fns.h | 7
-rw-r--r--  arch/arm/include/asm/smp.h | 3
-rw-r--r--  arch/arm/include/asm/system_info.h | 1
-rw-r--r--  arch/arm/include/asm/unified.h | 2
-rw-r--r--  arch/arm/kernel/Makefile | 5
-rw-r--r--  arch/arm/kernel/entry-armv.S | 12
-rw-r--r--  arch/arm/kernel/entry-common.S | 6
-rw-r--r--  arch/arm/kernel/entry-ftrace.S | 2
-rw-r--r--  arch/arm/kernel/entry-v7m.S | 13
-rw-r--r--  arch/arm/kernel/head-nommu.S | 27
-rw-r--r--  arch/arm/kernel/head.S | 52
-rw-r--r--  arch/arm/kernel/module-plts.c | 183
-rw-r--r--  arch/arm/kernel/module.c | 32
-rw-r--r--  arch/arm/kernel/module.lds | 4
-rw-r--r--  arch/arm/kernel/perf_event.c | 408
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c | 421
-rw-r--r--  arch/arm/kernel/perf_event_v6.c | 49
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 129
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c | 32
-rw-r--r--  arch/arm/kernel/setup.c | 30
-rw-r--r--  arch/arm/kernel/sleep.S | 4
-rw-r--r--  arch/arm/kernel/smp.c | 10
-rw-r--r--  arch/arm/kernel/tcm.c | 104
-rw-r--r--  arch/arm/kernel/traps.c | 8
-rw-r--r--  arch/arm/kvm/interrupts.S | 2
-rw-r--r--  arch/arm/lib/call_with_stack.S | 2
-rw-r--r--  arch/arm/mach-exynos/suspend.c | 8
-rw-r--r--  arch/arm/mach-hisi/platmcpm.c | 133
-rw-r--r--  arch/arm/mach-integrator/integrator_ap.c | 1
-rw-r--r--  arch/arm/mach-keystone/keystone.c | 41
-rw-r--r--  arch/arm/mach-keystone/platsmp.c | 13
-rw-r--r--  arch/arm/mach-nspire/nspire.c | 2
-rw-r--r--  arch/arm/mach-realview/core.c | 13
-rw-r--r--  arch/arm/mach-sa1100/Makefile | 2
-rw-r--r--  arch/arm/mach-sa1100/generic.c | 37
-rw-r--r--  arch/arm/mach-versatile/core.c | 12
-rw-r--r--  arch/arm/mm/Kconfig | 24
-rw-r--r--  arch/arm/mm/Makefile | 3
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 107
-rw-r--r--  arch/arm/mm/dma-mapping.c | 32
-rw-r--r--  arch/arm/mm/mmu.c | 153
-rw-r--r--  arch/arm/mm/nommu.c | 9
-rw-r--r--  arch/arm/mm/proc-v7-2level.S | 12
-rw-r--r--  arch/arm/mm/proc-v7-3level.S | 14
-rw-r--r--  arch/arm/mm/proc-v7.S | 182
-rw-r--r--  arch/arm/mm/proc-v7m.S | 2
-rw-r--r--  arch/arm/mm/pv-fixup-asm.S | 88
-rw-r--r--  arch/arm/vdso/Makefile | 18
-rw-r--r--  drivers/clocksource/Kconfig | 6
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/timer-integrator-ap.c | 3
-rw-r--r--  drivers/clocksource/timer-sp.h (renamed from arch/arm/include/asm/hardware/arm_timer.h) | 5
-rw-r--r--  drivers/clocksource/timer-sp804.c (renamed from arch/arm/common/timer-sp.c) | 12
-rw-r--r--  drivers/cpuidle/cpuidle-big_little.c | 8
-rw-r--r--  drivers/irqchip/Makefile | 1
-rw-r--r--  drivers/irqchip/irq-sa11x0.c (renamed from arch/arm/mach-sa1100/irq.c) | 85
-rw-r--r--  include/clocksource/timer-sp804.h (renamed from arch/arm/include/asm/hardware/timer-sp.h) | 5
-rw-r--r--  include/linux/irqchip/irq-sa11x0.h | 17
-rw-r--r--  include/linux/perf_event.h | 5
-rw-r--r--  include/soc/sa1100/pwer.h | 15
-rw-r--r--  kernel/events/core.c | 8
83 files changed, 1889 insertions, 1361 deletions
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
index 0dbabe9a6b0a..2251dccb141e 100644
--- a/Documentation/devicetree/bindings/arm/l2cc.txt
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -67,6 +67,11 @@ Optional properties:
disable if zero.
- arm,prefetch-offset : Override prefetch offset value. Valid values are
0-7, 15, 23, and 31.
+- prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
+ (forcibly enable), property absent (retain settings set by firmware)
+- prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
+ <1> (forcibly enable), property absent (retain settings set by
+ firmware)
Example:
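Note that the two new properties above are tri-state: <0> forces the prefetch engine off, <1> forces it on, and an absent property means the value programmed by the firmware is left untouched, so a driver must not treat "missing" as zero. As background, here is a minimal sketch of how such an optional override is typically parsed; this is illustrative only, and the helper name and register-bit parameter are assumptions, not the actual cache-l2x0.c code:

#include <linux/of.h>
#include <linux/types.h>

/*
 * Apply a tri-state devicetree override: <0> forcibly disable, <1> forcibly
 * enable, property absent = keep whatever the firmware programmed.
 */
static void l2c_apply_prefetch_override(const struct device_node *np,
					const char *prop, u32 *ctrl, u32 bit)
{
	u32 val;

	if (of_property_read_u32(np, prop, &val))
		return;			/* absent: retain firmware setting */

	if (val)
		*ctrl |= bit;		/* forcibly enable */
	else
		*ctrl &= ~bit;		/* forcibly disable */
}

A caller would invoke this once for "prefetch-data" and once for "prefetch-instr", passing the corresponding prefetch enable bit of the L2C-310 Prefetch Control Register, and only then write the register back.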
diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
index e49e423268c0..04d34f6a58f3 100644
--- a/Documentation/devicetree/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -856,6 +856,10 @@ address which can extend beyond that limit.
name may clash with standard defined ones, you prefix them with your
vendor name and a comma.
+ Additional properties for the root node:
+
+ - serial-number : a string representing the device's serial number
+
b) The /cpus node
This node is the parent of all individual CPU nodes. It doesn't
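The serial-number property added to the root node above is a plain string. For reference, reading such a property uses the standard OF string helper; the sketch below is a generic illustration of that API, not the code this series adds (per the diffstat, arch/arm/kernel/setup.c is touched, presumably to consume the value):

#include <linux/of.h>

/*
 * Return the board serial number from the devicetree root node, or NULL if
 * the property is absent. Assumes the unflattened devicetree (of_root) is
 * already available.
 */
static const char *board_serial_number(void)
{
	const char *serial = NULL;

	of_property_read_string(of_root, "serial-number", &serial);
	return serial;
}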
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cf292d3ec27f..a750c1425c3a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -33,8 +33,8 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
- select HAVE_ARCH_KGDB
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
+ select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_TRACEHOOK
select HAVE_BPF_JIT
@@ -45,7 +45,7 @@ config ARM
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS if MMU
- select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
+ select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
@@ -59,10 +59,10 @@ config ARM
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
select HAVE_KERNEL_XZ
- select HAVE_KPROBES if !XIP_KERNEL
+ select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_MEMBLOCK
- select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
+ select HAVE_MOD_ARCH_SPECIFIC
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_OPTPROBES if !THUMB2_KERNEL
select HAVE_PERF_EVENTS
@@ -173,7 +173,7 @@ config LOCKDEP_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
bool
- default y
+ default !CPU_V7M
config RWSEM_XCHGADD_ALGORITHM
bool
@@ -1010,11 +1010,6 @@ config PLAT_PXA
config PLAT_VERSATILE
bool
-config ARM_TIMER_SP804
- bool
- select CLKSRC_MMIO
- select CLKSRC_OF if OF
-
source "arch/arm/firmware/Kconfig"
source arch/arm/mm/Kconfig
@@ -1342,6 +1337,7 @@ config SMP
depends on GENERIC_CLOCKEVENTS
depends on HAVE_SMP
depends on MMU || ARM_MPU
+ select IRQ_WORK
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
@@ -1717,6 +1713,21 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
config ARCH_WANT_GENERAL_HUGETLB
def_bool y
+config ARM_MODULE_PLTS
+ bool "Use PLTs to allow module memory to spill over into vmalloc area"
+ depends on MODULES
+ help
+ Allocate PLTs when loading modules so that jumps and calls whose
+ targets are too far away for their relative offsets to be encoded
+ in the instructions themselves can be bounced via veneers in the
+ module's PLT. This allows modules to be allocated in the generic
+ vmalloc area after the dedicated module memory area has been
+ exhausted. The modules will use slightly more memory, but after
+ rounding up to page size, the actual memory footprint is usually
+ the same.
+
+ Say y if you are getting out of memory errors while loading modules
+
source "mm/Kconfig"
config FORCE_MAX_ZONEORDER
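As background for the ARM_MODULE_PLTS help text above: a 32-bit ARM relative branch (b/bl) encodes a signed 24-bit word offset, giving a reach of roughly +/-32 MiB, so a module placed in the generic vmalloc area can be out of range of the kernel image. The PLT option routes such branches through a veneer that loads the full 32-bit target into pc at module load time. A rough illustration of the idea follows; the entry layout and names are assumptions, not the actual arch/arm/kernel/module-plts.c implementation:

#include <linux/types.h>

/*
 * One PLT veneer: "ldr pc, [pc, #-4]" followed by a literal word holding the
 * absolute branch target. Since pc reads as ".+8" in ARM state, [pc, #-4]
 * addresses the word immediately after the instruction.
 */
struct plt_veneer {
	u32 ldr;	/* 0xe51ff004: ldr pc, [pc, #-4] */
	u32 lit;	/* absolute address of the real branch target */
};

static struct plt_veneer make_plt_veneer(u32 dest)
{
	return (struct plt_veneer){ .ldr = 0xe51ff004, .lit = dest };
}

The module relocation code then rewrites an out-of-range b/bl to branch to the veneer instead, which is also why the arch/arm/Makefile change in this series links modules against arch/arm/kernel/module.lds when the option is enabled.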
@@ -1987,6 +1998,7 @@ config XIP_PHYS_ADDR
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
depends on (!SMP || PM_SLEEP_SMP)
+ depends on !CPU_V7M
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index a6b5d0e35968..f1b157971366 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -5,6 +5,7 @@ source "lib/Kconfig.debug"
config ARM_PTDUMP
bool "Export kernel pagetable layout to userspace via debugfs"
depends on DEBUG_KERNEL
+ depends on MMU
select DEBUG_FS
---help---
Say Y here if you want to show the kernel pagetable layout in a
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 2a4fae7e9c44..07ab3d203916 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -19,6 +19,10 @@ LDFLAGS_vmlinux += --be8
LDFLAGS_MODULE += --be8
endif
+ifeq ($(CONFIG_ARM_MODULE_PLTS),y)
+LDFLAGS_MODULE += -T $(srctree)/arch/arm/kernel/module.lds
+endif
+
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 6e1fb2b2ecc7..7a13aebacf81 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -103,6 +103,8 @@ extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern piggy.lz4 \
lib1funcs.S ashldi3.S bswapsdi2.S $(libfdt) $(libfdt_hdrs) \
hyp-stub.S
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+
ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 2c45b5709fa4..06e983f59980 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -130,7 +130,7 @@ start:
.endr
ARM( mov r0, r0 )
ARM( b 1f )
- THUMB( adr r12, BSYM(1f) )
+ THUMB( badr r12, 1f )
THUMB( bx r12 )
.word _magic_sig @ Magic numbers to help the loader
@@ -447,7 +447,7 @@ dtb_check_done:
bl cache_clean_flush
- adr r0, BSYM(restart)
+ badr r0, restart
add r0, r0, r6
mov pc, r0
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 70b1eff477b3..6ee5959a813b 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
-obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
CFLAGS_REMOVE_mcpm_entry.o = -pg
AFLAGS_mcpm_head.o := -march=armv7-a
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 5f8a52ac7edf..a923524d1040 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -20,6 +20,126 @@
#include <asm/cputype.h>
#include <asm/suspend.h>
+/*
+ * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
+ * For a comprehensive description of the main algorithm used here, please
+ * see Documentation/arm/cluster-pm-race-avoidance.txt.
+ */
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ * This must be called at the point of committing to teardown of a CPU.
+ * The CPU cache (SCTRL.C bit) is expected to still be active.
+ */
+static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+ mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ * cluster can be torn down without disrupting this CPU.
+ * To avoid deadlocks, this must be called before a CPU is powered down.
+ * The CPU cache (SCTRL.C bit) is expected to be off.
+ * However L2 cache might or might not be active.
+ */
+static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+ dmb();
+ mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+ sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ * CLUSTER_UP: no destructive teardown was done and the cluster has been
+ * restored to the previous state (CPU cache still active); or
+ * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
+ * (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+ dmb();
+ mcpm_sync.clusters[cluster].cluster = state;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+ sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete. CPU cache expected to be active.
+ *
+ * Returns:
+ * false: the critical section was not entered because an inbound CPU was
+ * observed, or the cluster is already being set up;
+ * true: the critical section was entered: it is now safe to tear down the
+ * cluster.
+ */
+static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int i;
+ struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+ /* Warn inbound CPUs that the cluster is being torn down: */
+ c->cluster = CLUSTER_GOING_DOWN;
+ sync_cache_w(&c->cluster);
+
+ /* Back out if the inbound cluster is already in the critical region: */
+ sync_cache_r(&c->inbound);
+ if (c->inbound == INBOUND_COMING_UP)
+ goto abort;
+
+ /*
+ * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+ * teardown is complete on each CPU before tearing down the cluster.
+ *
+ * If any CPU has been woken up again from the DOWN state, then we
+ * shouldn't be taking the cluster down at all: abort in that case.
+ */
+ sync_cache_r(&c->cpus);
+ for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+ int cpustate;
+
+ if (i == cpu)
+ continue;
+
+ while (1) {
+ cpustate = c->cpus[i].cpu;
+ if (cpustate != CPU_GOING_DOWN)
+ break;
+
+ wfe();
+ sync_cache_r(&c->cpus[i].cpu);
+ }
+
+ switch (cpustate) {
+ case CPU_DOWN:
+ continue;
+
+ default:
+ goto abort;
+ }
+ }
+
+ return true;
+
+abort:
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+ return false;
+}
+
+static int __mcpm_cluster_state(unsigned int cluster)
+{
+ sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+ return mcpm_sync.clusters[cluster].cluster;
+}
+
extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
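The helpers added above implement the first-man/last-man race avoidance described in Documentation/arm/cluster-pm-race-avoidance.txt: the per-CPU and per-cluster state words are maintained with explicit cache maintenance (sync_cache_w/sync_cache_r) because some of the participating CPUs run with their caches disabled. As a condensed, illustrative outline of how an outbound CPU uses them (the real sequence, including the mcpm_lock spinlock, use counting and platform callbacks, is in mcpm_cpu_power_down() later in this file):

/* Illustrative outline only: locking, use counts and errors omitted. */
static void outbound_teardown_sketch(unsigned int cpu, unsigned int cluster)
{
	__mcpm_cpu_going_down(cpu, cluster);	/* commit to CPU teardown */

	if (__mcpm_outbound_enter_critical(cpu, cluster)) {
		/* Last man: all other CPUs are down, cluster teardown is safe. */
		/* ... disable cluster-level caches, exit coherency ... */
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		/* Not the last man, or an inbound CPU raced with us:
		 * only local CPU teardown is allowed. */
		/* ... disable this CPU's cache ... */
	}

	__mcpm_cpu_down(cpu, cluster);		/* CPU may now lose power */
	wfi();
}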
@@ -78,16 +198,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
bool cpu_is_down, cluster_is_down;
int ret = 0;
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
if (!platform_ops)
return -EUNATCH; /* try not to shadow power_up errors */
might_sleep();
- /* backward compatibility callback */
- if (platform_ops->power_up)
- return platform_ops->power_up(cpu, cluster);
-
- pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-
/*
* Since this is called with IRQs enabled, and no arch_spin_lock_irq
* variant exists, we need to disable IRQs manually here.
@@ -128,29 +243,17 @@ void mcpm_cpu_power_down(void)
bool cpu_going_down, last_man;
phys_reset_t phys_reset;
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
if (WARN_ON_ONCE(!platform_ops))
return;
BUG_ON(!irqs_disabled());
- /*
- * Do this before calling into the power_down method,
- * as it might not always be safe to do afterwards.
- */
setup_mm_for_reboot();
- /* backward compatibility callback */
- if (platform_ops->power_down) {
- platform_ops->power_down();
- goto not_dead;
- }
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-
__mcpm_cpu_going_down(cpu, cluster);
-
arch_spin_lock(&mcpm_lock);
BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
@@ -187,7 +290,6 @@ void mcpm_cpu_power_down(void)
if (cpu_going_down)
wfi();
-not_dead:
/*
* It is possible for a power_up request to happen concurrently
* with a power_down request for the same CPU. In this case the
@@ -219,22 +321,11 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
return ret;
}
-void mcpm_cpu_suspend(u64 expected_residency)
+void mcpm_cpu_suspend(void)
{
if (WARN_ON_ONCE(!platform_ops))
return;
- /* backward compatibility callback */
- if (platform_ops->suspend) {
- phys_reset_t phys_reset;
- BUG_ON(!irqs_disabled());
- setup_mm_for_reboot();
- platform_ops->suspend(expected_residency);
- phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
- phys_reset(virt_to_phys(mcpm_entry_point));
- BUG();
- }
-