author    | Linus Torvalds <torvalds@linux-foundation.org> | 2015-09-03 16:27:01 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-09-03 16:27:01 -0700
commit    | c706c7eb0d08098f0d768aeef945d7cf1f8858b4 (patch)
tree      | c629c9705514b70dd3e8f8dc67fa8d849871886c /arch/arm/kernel
parent    | 79b0691d0c1d3d4a976e3304a1512e6eca9671ec (diff)
parent    | 3ff32a0def6e0d2e21a6c5ad1b00726592774018 (diff)
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM development updates from Russell King:
"Included in this update:
- moving PSCI code from ARM64/ARM to drivers/
- removal of some architecture internals from global kernel view
- addition of software based "privileged no access" support using the
old domains register to turn off the ability for kernel
loads/stores to access userspace. Only the proper accessors will
be usable.
- addition of early fixmap support for earlycon (the early console)
- re-addition (and reimplementation) of OMAP special interconnect
barrier
- removal of finish_arch_switch()
- only expose cpuX/online in sysfs if hotpluggable
- a number of code cleanups"
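To make the "privileged no access" item above concrete: with software PAN the domain access control register (DACR) normally keeps the user domain closed, and only the uaccess helpers open it for the duration of an access. The shortlog entry "ARM: uaccess: provide uaccess_save_and_enable() and uaccess_restore()" adds the accessors, and the armksyms.c hunk below switches the exported copy routines to the arm_copy_{from,to}_user wrappers that use them. The following is a minimal C sketch of how such a pair of accessors fits together; it assumes the kernel's get_domain()/set_domain() DACR helpers and the domain_val()/domain_mask() macros from asm/domain.h (domain_mask() is introduced by this series), and copy_to_user_sketch() is an illustrative name, not the kernel's actual wrapper.

    /*
     * Illustrative sketch of software "privileged no access" via the ARM
     * domain register.  Assumes get_domain()/set_domain() read and write
     * the DACR; not a verbatim copy of asm/uaccess.h.
     */
    static inline unsigned int uaccess_save_and_enable(void)
    {
            unsigned int old_domain = get_domain();

            /* Temporarily allow (permission-checked) access to userspace */
            set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
                       domain_val(DOMAIN_USER, DOMAIN_CLIENT));

            return old_domain;
    }

    static inline void uaccess_restore(unsigned int flags)
    {
            /* Put the user domain back to "no access" */
            set_domain(flags);
    }

    /* Typical pairing around a low-level copy routine */
    static inline unsigned long
    copy_to_user_sketch(void __user *to, const void *from, unsigned long n)
    {
            unsigned int __ua_flags = uaccess_save_and_enable();
            unsigned long ret = arm_copy_to_user(to, from, n);

            uaccess_restore(__ua_flags);
            return ret;
    }

In the series itself these helpers compile away when the feature is not configured, which is why the exception-entry changes below can invoke the matching uaccess_save/uaccess_disable/uaccess_restore assembly macros unconditionally; the macro definitions live outside arch/arm/kernel and therefore do not appear in this diffstat-limited view.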
* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (41 commits)
ARM: software-based priviledged-no-access support
ARM: entry: provide uaccess assembly macro hooks
ARM: entry: get rid of multiple macro definitions
ARM: 8421/1: smp: Collapse arch_cpu_idle_dead() into cpu_die()
ARM: uaccess: provide uaccess_save_and_enable() and uaccess_restore()
ARM: mm: improve do_ldrd_abort macro
ARM: entry: ensure that IRQs are enabled when calling syscall_trace_exit()
ARM: entry: efficiency cleanups
ARM: entry: get rid of asm_trace_hardirqs_on_cond
ARM: uaccess: simplify user access assembly
ARM: domains: remove DOMAIN_TABLE
ARM: domains: keep vectors in separate domain
ARM: domains: get rid of manager mode for user domain
ARM: domains: move initial domain setting value to asm/domains.h
ARM: domains: provide domain_mask()
ARM: domains: switch to keeping domain value in register
ARM: 8419/1: dma-mapping: harmonize definition of DMA_ERROR_CODE
ARM: 8417/1: refactor bitops functions with BIT_MASK() and BIT_WORD()
ARM: 8416/1: Feroceon: use of_iomap() to map register base
ARM: 8415/1: early fixmap support for earlycon
...
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r-- | arch/arm/kernel/Makefile            |   5
-rw-r--r-- | arch/arm/kernel/armksyms.c          |   6
-rw-r--r-- | arch/arm/kernel/entry-armv.S        |  32
-rw-r--r-- | arch/arm/kernel/entry-common.S      |  63
-rw-r--r-- | arch/arm/kernel/entry-header.S      | 112
-rw-r--r-- | arch/arm/kernel/head.S              |   5
-rw-r--r-- | arch/arm/kernel/irq.c               |   1
-rw-r--r-- | arch/arm/kernel/perf_event.c        | 896
-rw-r--r-- | arch/arm/kernel/perf_event_v6.c     |   2
-rw-r--r-- | arch/arm/kernel/perf_event_v7.c     |   2
-rw-r--r-- | arch/arm/kernel/perf_event_xscale.c |   2
-rw-r--r-- | arch/arm/kernel/process.c           |  56
-rw-r--r-- | arch/arm/kernel/psci.c              | 299
-rw-r--r-- | arch/arm/kernel/psci_smp.c          |  31
-rw-r--r-- | arch/arm/kernel/setup.c             |   9
-rw-r--r-- | arch/arm/kernel/signal.c            |   6
-rw-r--r-- | arch/arm/kernel/smp.c               |  17
-rw-r--r-- | arch/arm/kernel/swp_emulate.c       |   3
-rw-r--r-- | arch/arm/kernel/traps.c             |   1
19 files changed, 222 insertions, 1326 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index e69f7a19735d..af9e59bf3831 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -71,8 +71,7 @@ obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o obj-$(CONFIG_IWMMXT) += iwmmxt.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o -obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o \ - perf_event_xscale.o perf_event_v6.o \ +obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \ perf_event_v7.o CFLAGS_pj4-cp0.o := -marm AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt @@ -89,7 +88,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o ifeq ($(CONFIG_ARM_PSCI),y) -obj-y += psci.o psci-call.o +obj-y += psci-call.o obj-$(CONFIG_SMP) += psci_smp.o endif diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 5e5a51a99e68..f89811fb9a55 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy); #ifdef CONFIG_MMU EXPORT_SYMBOL(copy_page); -EXPORT_SYMBOL(__copy_from_user); -EXPORT_SYMBOL(__copy_to_user); -EXPORT_SYMBOL(__clear_user); +EXPORT_SYMBOL(arm_copy_from_user); +EXPORT_SYMBOL(arm_copy_to_user); +EXPORT_SYMBOL(arm_clear_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index cb4fb1e69778..3e1c26eb32b4 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -149,10 +149,10 @@ ENDPROC(__und_invalid) #define SPFIX(code...) #endif - .macro svc_entry, stack_hole=0, trace=1 + .macro svc_entry, stack_hole=0, trace=1, uaccess=1 UNWIND(.fnstart ) UNWIND(.save {r0 - pc} ) - sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) + sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4) #ifdef CONFIG_THUMB2_KERNEL SPFIX( str r0, [sp] ) @ temporarily saved SPFIX( mov r0, sp ) @@ -167,7 +167,7 @@ ENDPROC(__und_invalid) ldmia r0, {r3 - r5} add r7, sp, #S_SP - 4 @ here for interlock avoidance mov r6, #-1 @ "" "" "" "" - add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) + add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4) SPFIX( addeq r2, r2, #4 ) str r3, [sp, #-4]! 
@ save the "real" r0 copied @ from the exception stack @@ -185,6 +185,11 @@ ENDPROC(__und_invalid) @ stmia r7, {r2 - r6} + uaccess_save r0 + .if \uaccess + uaccess_disable r0 + .endif + .if \trace #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off @@ -194,7 +199,7 @@ ENDPROC(__und_invalid) .align 5 __dabt_svc: - svc_entry + svc_entry uaccess=0 mov r2, sp dabt_helper THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR @@ -368,7 +373,7 @@ ENDPROC(__fiq_abt) #error "sizeof(struct pt_regs) must be a multiple of 8" #endif - .macro usr_entry, trace=1 + .macro usr_entry, trace=1, uaccess=1 UNWIND(.fnstart ) UNWIND(.cantunwind ) @ don't unwind the user space sub sp, sp, #S_FRAME_SIZE @@ -400,6 +405,10 @@ ENDPROC(__fiq_abt) ARM( stmdb r0, {sp, lr}^ ) THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) + .if \uaccess + uaccess_disable ip + .endif + @ Enable the alignment trap while in kernel mode ATRAP( teq r8, r7) ATRAP( mcrne p15, 0, r8, c1, c0, 0) @@ -435,7 +444,7 @@ ENDPROC(__fiq_abt) .align 5 __dabt_usr: - usr_entry + usr_entry uaccess=0 kuser_cmpxchg_check mov r2, sp dabt_helper @@ -458,7 +467,7 @@ ENDPROC(__irq_usr) .align 5 __und_usr: - usr_entry + usr_entry uaccess=0 mov r2, r4 mov r3, r5 @@ -484,6 +493,8 @@ __und_usr: 1: ldrt r0, [r4] ARM_BE8(rev r0, r0) @ little endian instruction + uaccess_disable ip + @ r0 = 32-bit ARM instruction which caused the exception @ r2 = PC value for the following instruction (:= regs->ARM_pc) @ r4 = PC value for the faulting instruction @@ -518,9 +529,10 @@ __und_usr_thumb: 2: ldrht r5, [r4] ARM_BE8(rev16 r5, r5) @ little endian instruction cmp r5, #0xe800 @ 32bit instruction if xx != 0 - blo __und_usr_fault_16 @ 16bit undefined instruction + blo __und_usr_fault_16_pan @ 16bit undefined instruction 3: ldrht r0, [r2] ARM_BE8(rev16 r0, r0) @ little endian instruction + uaccess_disable ip add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update orr r0, r0, r5, lsl #16 @@ -715,6 +727,8 @@ ENDPROC(no_fp) __und_usr_fault_32: mov r1, #4 b 1f +__und_usr_fault_16_pan: + uaccess_disable ip __und_usr_fault_16: mov r1, #2 1: mov r0, sp @@ -770,6 +784,8 @@ ENTRY(__switch_to) ldr r4, [r2, #TI_TP_VALUE] ldr r5, [r2, #TI_TP_VALUE + 4] #ifdef CONFIG_CPU_USE_DOMAINS + mrc p15, 0, r6, c3, c0, 0 @ Get domain register + str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register ldr r6, [r2, #TI_CPU_DOMAIN] #endif switch_tls r1, r4, r5, r3, r7 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index b48dd4f37f80..30a7228eaceb 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -24,35 +24,55 @@ .align 5 +#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING)) /* - * This is the fast syscall return path. We do as little as - * possible here, and this includes saving r0 back into the SVC - * stack. + * This is the fast syscall return path. We do as little as possible here, + * such as avoiding writing r0 to the stack. We only use this path if we + * have tracing and context tracking disabled - the overheads from those + * features make this path too inefficient. 
*/ ret_fast_syscall: UNWIND(.fnstart ) UNWIND(.cantunwind ) - disable_irq @ disable interrupts + disable_irq_notrace @ disable interrupts ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK - bne __sys_trace_return - tst r1, #_TIF_WORK_MASK + tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK bne fast_work_pending - asm_trace_hardirqs_on /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr - ct_user_enter restore_user_regs fast = 1, offset = S_OFF UNWIND(.fnend ) +ENDPROC(ret_fast_syscall) -/* - * Ok, we need to do extra processing, enter the slow path. - */ + /* Ok, we need to do extra processing, enter the slow path. */ fast_work_pending: str r0, [sp, #S_R0+S_OFF]! @ returned r0 -work_pending: + /* fall through to work_pending */ +#else +/* + * The "replacement" ret_fast_syscall for when tracing or context tracking + * is enabled. As we will need to call out to some C functions, we save + * r0 first to avoid needing to save registers around each C function call. + */ +ret_fast_syscall: + UNWIND(.fnstart ) + UNWIND(.cantunwind ) + str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 + disable_irq_notrace @ disable interrupts + ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing + tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK + beq no_work_pending + UNWIND(.fnend ) +ENDPROC(ret_fast_syscall) + + /* Slower path - fall through to work_pending */ +#endif + + tst r1, #_TIF_SYSCALL_WORK + bne __sys_trace_return_nosave +slow_work_pending: mov r0, sp @ 'regs' mov r2, why @ 'syscall' bl do_work_pending @@ -65,16 +85,19 @@ ENDPROC(ret_fast_syscall) /* * "slow" syscall return path. "why" tells us if this was a real syscall. + * IRQs may be enabled here, so always disable them. Note that we use the + * "notrace" version to avoid calling into the tracing code unnecessarily. + * do_work_pending() will update this state if necessary. 
*/ ENTRY(ret_to_user) ret_slow_syscall: - disable_irq @ disable interrupts + disable_irq_notrace @ disable interrupts ENTRY(ret_to_user_from_irq) ldr r1, [tsk, #TI_FLAGS] tst r1, #_TIF_WORK_MASK - bne work_pending + bne slow_work_pending no_work_pending: - asm_trace_hardirqs_on + asm_trace_hardirqs_on save = 0 /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr @@ -174,6 +197,8 @@ ENTRY(vector_swi) USER( ldr scno, [lr, #-4] ) @ get SWI instruction #endif + uaccess_disable tbl + adr tbl, sys_call_table @ load syscall table pointer #if defined(CONFIG_OABI_COMPAT) @@ -252,6 +277,12 @@ __sys_trace_return: bl syscall_trace_exit b ret_slow_syscall +__sys_trace_return_nosave: + enable_irq_notrace + mov r0, sp + bl syscall_trace_exit + b ret_slow_syscall + .align 5 #ifdef CONFIG_ALIGNMENT_TRAP .type __cr_alignment, #object diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 1a0045abead7..0d22ad206d52 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -196,7 +196,7 @@ msr cpsr_c, \rtemp @ switch back to the SVC mode .endm -#ifndef CONFIG_THUMB2_KERNEL + .macro svc_exit, rpsr, irq = 0 .if \irq != 0 @ IRQs already off @@ -215,6 +215,10 @@ blne trace_hardirqs_off #endif .endif + uaccess_restore + +#ifndef CONFIG_THUMB2_KERNEL + @ ARM mode SVC restore msr spsr_cxsf, \rpsr #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) @ We must avoid clrex due to Cortex-A15 erratum #830321 @@ -222,6 +226,20 @@ strex r1, r2, [r0] @ clear the exclusive monitor #endif ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr +#else + @ Thumb mode SVC restore + ldr lr, [sp, #S_SP] @ top of the stack + ldrd r0, r1, [sp, #S_LR] @ calling lr and pc + + @ We must avoid clrex due to Cortex-A15 erratum #830321 + strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor + + stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context + ldmia sp, {r0 - r12} + mov sp, lr + ldr lr, [sp], #4 + rfeia sp! +#endif .endm @ @@ -241,6 +259,9 @@ @ on the stack remains correct). @ .macro svc_exit_via_fiq + uaccess_restore +#ifndef CONFIG_THUMB2_KERNEL + @ ARM mode restore mov r0, sp ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will @ clobber state restored below) @@ -250,9 +271,27 @@ msr spsr_cxsf, r9 ldr r0, [r0, #S_R0] ldmia r8, {pc}^ +#else + @ Thumb mode restore + add r0, sp, #S_R2 + ldr lr, [sp, #S_LR] + ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will + @ clobber state restored below) + ldmia r0, {r2 - r12} + mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT + msr cpsr_c, r1 + sub r0, #S_R2 + add r8, r0, #S_PC + ldmia r0, {r0 - r1} + rfeia r8 +#endif .endm + .macro restore_user_regs, fast = 0, offset = 0 + uaccess_enable r1, isb=0 +#ifndef CONFIG_THUMB2_KERNEL + @ ARM mode restore mov r2, sp ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr ldr lr, [r2, #\offset + S_PC]! @ get pc @@ -270,72 +309,16 @@ @ after ldm {}^ add sp, sp, #\offset + S_FRAME_SIZE movs pc, lr @ return & move spsr_svc into cpsr - .endm - -#else /* CONFIG_THUMB2_KERNEL */ - .macro svc_exit, rpsr, irq = 0 - .if \irq != 0 - @ IRQs already off -#ifdef CONFIG_TRACE_IRQFLAGS - @ The parent context IRQs must have been enabled to get here in - @ the first place, so there's no point checking the PSR I bit. 
- bl trace_hardirqs_on -#endif - .else - @ IRQs off again before pulling preserved data off the stack - disable_irq_notrace -#ifdef CONFIG_TRACE_IRQFLAGS - tst \rpsr, #PSR_I_BIT - bleq trace_hardirqs_on - tst \rpsr, #PSR_I_BIT - blne trace_hardirqs_off -#endif - .endif - ldr lr, [sp, #S_SP] @ top of the stack - ldrd r0, r1, [sp, #S_LR] @ calling lr and pc - - @ We must avoid clrex due to Cortex-A15 erratum #830321 - strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor - - stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context - ldmia sp, {r0 - r12} - mov sp, lr - ldr lr, [sp], #4 - rfeia sp! - .endm - - @ - @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit - @ - @ For full details see non-Thumb implementation above. - @ - .macro svc_exit_via_fiq - add r0, sp, #S_R2 - ldr lr, [sp, #S_LR] - ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will - @ clobber state restored below) - ldmia r0, {r2 - r12} - mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT - msr cpsr_c, r1 - sub r0, #S_R2 - add r8, r0, #S_PC - ldmia r0, {r0 - r1} - rfeia r8 - .endm - -#ifdef CONFIG_CPU_V7M - /* - * Note we don't need to do clrex here as clearing the local monitor is - * part of each exception entry and exit sequence. - */ - .macro restore_user_regs, fast = 0, offset = 0 +#elif defined(CONFIG_CPU_V7M) + @ V7M restore. + @ Note that we don't need to do clrex here as clearing the local + @ monitor is part of the exception entry and exit sequence. .if \offset add sp, #\offset .endif v7m_exception_slow_exit ret_r0 = \fast - .endm -#else /* ifdef CONFIG_CPU_V7M */ - .macro restore_user_regs, fast = 0, offset = 0 +#else + @ Thumb mode restore mov r2, sp load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr @@ -353,9 +336,8 @@ .endif add sp, sp, #S_FRAME_SIZE - S_SP movs pc, lr @ return & move spsr_svc into cpsr - .endm -#endif /* ifdef CONFIG_CPU_V7M / else */ #endif /* !CONFIG_THUMB2_KERNEL */ + .endm /* * Context tracking subsystem. Used to instrument transitions diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 29e2991465cb..04286fd9e09c 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -464,10 +464,7 @@ __enable_mmu: #ifdef CONFIG_ARM_LPAE mcrr p15, 0, r4, r5, c2 @ load TTBR0 #else - mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_IO, DOMAIN_CLIENT)) + mov r5, #DACR_INIT mcr p15, 0, r5, c3, c0, 0 @ load domain access register mcr p15, 0, r4, c2, c0, 0 @ load page table pointer #endif diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index baf8edebe26f..5ff4826cb154 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -39,6 +39,7 @@ #include <linux/export.h> #include <asm/hardware/cache-l2x0.h> +#include <asm/outercache.h> #include <asm/exception.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c deleted file mode 100644 index 54272e0be713..000000000000 --- a/arch/arm/kernel/perf_event.c +++ /dev/null @@ -1,896 +0,0 @@ -#undef DEBUG - -/* - * ARM performance counter support. - * - * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles - * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com> - * - * This code is based on the sparc64 perf event code, which is in turn based - * on the x86 code. 
- */ -#define pr_fmt(fmt) "hw perfevents: " fmt - -#include <linux/bitmap.h> -#include <linux/cpumask.h> -#include <linux/export.h> -#include <linux/kernel.h> -#include <linux/of.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/irq.h> -#include <linux/irqdesc.h> - -#include <asm/cputype.h> -#include <asm/irq_regs.h> -#include <asm/pmu.h> - -static int -armpmu_map_cache_event(const unsigned (*cache_map) - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u64 config) -{ - unsigned int cache_type, cache_op, cache_result, ret; - - cache_type = (config >> 0) & 0xff; - if (cache_type >= PERF_COUNT_HW_CACHE_MAX) - return -EINVAL; - - cache_op = (config >> 8) & 0xff; - if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) - return -EINVAL; - - cache_result = (config >> 16) & 0xff; - if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) - return -EINVAL; - - ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; - - if (ret == CACHE_OP_UNSUPPORTED) - return -ENOENT; - - return ret; -} - -static int -armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) -{ - int mapping; - - if (config >= PERF_COUNT_HW_MAX) - return -EINVAL; - - mapping = (*event_map)[config]; - return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; -} - -static int -armpmu_map_raw_event(u32 raw_event_mask, u64 config) -{ - return (int)(config & raw_event_mask); -} - -int -armpmu_map_event(struct perf_event *event, - const unsigned (*event_map)[PERF_COUNT_HW_MAX], - const unsigned (*cache_map) - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u32 raw_event_mask) -{ - u64 config = event->attr.config; - int type = event->attr.type; - - if (type == event->pmu->type) - return armpmu_map_raw_event(raw_event_mask, config); - - switch (type) { - case PERF_TYPE_HARDWARE: - return armpmu_map_hw_event(event_map, config); - case PERF_TYPE_HW_CACHE: - return armpmu_map_cache_event(cache_map, config); - case PERF_TYPE_RAW: - return armpmu_map_raw_event(raw_event_mask, config); - } - - return -ENOENT; -} - -int armpmu_event_set_period(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - s64 left = local64_read(&hwc->period_left); - s64 period = hwc->sample_period; - int ret = 0; - - if (unlikely(left <= -period)) { - left = period; - local64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - - if (unlikely(left <= 0)) { - left += period; - local64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - - /* - * Limit the maximum period to prevent the counter value - * from overtaking the one we are about to program. In - * effect we are reducing max_period to account for - * interrupt latency (and we are being very conservative). 
- */ - if (left > (armpmu->max_period >> 1)) - left = armpmu->max_period >> 1; - - local64_set(&hwc->prev_count, (u64)-left); - - armpmu->write_counter(event, (u64)(-left) & 0xffffffff); - - perf_event_update_userpage(event); - - return ret; -} - -u64 armpmu_event_update(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - u64 delta, prev_raw_count, new_raw_count; - -again: - prev_raw_count = local64_read(&hwc->prev_count); - new_raw_count = armpmu->read_counter(event); - - if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, - new_raw_count) != prev_raw_count) - goto again; - - delta = (new_raw_count - prev_raw_count) & armpmu->max_period; - - local64_add(delta, &event->count); - local64_sub(delta, &hwc->period_left); - - return new_raw_count; -} - -static void -armpmu_read(struct perf_event *event) -{ - armpmu_event_update(event); -} - -static void -armpmu_stop(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - - /* - * ARM pmu always has to update the counter, so ignore - * PERF_EF_UPDATE, see comments in armpmu_start(). - */ - if (!(hwc->state & PERF_HES_STOPPED)) { - armpmu->disable(event); - armpmu_event_update(event); - hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; - } -} - -static void armpmu_start(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - - /* - * ARM pmu always has to reprogram the period, so ignore - * PERF_EF_RELOAD, see the comment below. - */ - if (flags & PERF_EF_RELOAD) - WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); - - hwc->state = 0; - /* - * Set the period again. Some counters can't be stopped, so when we - * were stopped we simply disabled the IRQ source and the counter - * may have been left counting. If we don't do this step then we may - * get an interrupt too soon or *way* too late if the overflow has - * happened since disabling. - */ - armpmu_event_set_period(event); - armpmu->enable(event); -} - -static void -armpmu_del(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; - - armpmu_stop(event, PERF_EF_UPDATE); - hw_events->events[idx] = NULL; - clear_bit(idx, hw_events->used_mask); - if (armpmu->clear_event_idx) - armpmu->clear_event_idx(hw_events, event); - - perf_event_update_userpage(event); -} - -static int -armpmu_add(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - struct hw_perf_event *hwc = &event->hw; - int idx; - int err = 0; - - /* An event following a process won't be stopped earlier */ - if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) - return -ENOENT; - - perf_pmu_disable(event->pmu); - - /* If we don't have a space for the counter then finish early. */ - idx = armpmu->get_event_idx(hw_events, event); - if (idx < 0) { - err = idx; - goto out; - } - - /* - * If there is an event in the counter we are going to use then make - * sure it is disabled. 
- */ - event->hw.idx = idx; - armpmu->disable(event); - hw_events->events[idx] = event; - - hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; - if (flags & PERF_EF_START) - armpmu_start(event, PERF_EF_RELOAD); - - /* Propagate our changes to the userspace mapping. */ - perf_event_update_userpage(event); - -out: - perf_pmu_enable(event->pmu); - return err; -} - -static int -validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, - struct perf_event *event) -{ - struct arm_pmu *armpmu; - - if (is_software_event(event)) - return 1; - - /* - * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The - * core perf code won't check that the pmu->ctx == leader->ctx - * until after pmu->event_init(event). - */ - if (event->pmu != pmu) - return 0; - - if (event->state < PERF_EVENT_STATE_OFF) - return 1; - - if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) - return 1; - - armpmu = to_arm_pmu(event->pmu); - return armpmu->get_event_idx(hw_events, event) >= 0; -} - -static int -validate_group(struct perf_event *event) -{ - struct perf_event *sibling, *leader = event->group_leader; - struct pmu_hw_events fake_pmu; - - /* - * Initialise the fake PMU. We only need to populate the - * used_mask for the purposes of validation. - */ - memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask)); - - if (!validate_event(event->pmu, &fake_pmu, leader)) - return -EINVAL; - - list_for_each_entry(sibling, &leader->sibling_list, group_entry) { - if (!validate_event(event->pmu, &fake_pmu, sibling)) - return -EINVAL; - } - - if (!validate_event(event->pmu, &fake_pmu, event)) - return -EINVAL; - - return 0; -} - -static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) -{ - struct arm_pmu *armpmu; - struct platform_device *plat_device; - struct arm_pmu_platdata *plat; - int ret; - u64 start_clock, finish_clock; - - /* - * we request the IRQ with a (possibly percpu) struct arm_pmu**, but - * the handlers expect a struct arm_pmu*. The percpu_irq framework will - * do any necessary shifting, we just need to perform the first - * dereference. 
- */ - armpmu = *(void **)dev; - plat_device = armpmu->plat_device; - plat = dev_get_platdata(&plat_device->dev); - - start_clock = sched_clock(); - if (plat && plat->handle_irq) - ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq); - else - ret = armpmu->handle_irq(irq, armpmu); - finish_clock = sched_clock(); - - perf_sample_event_took(finish_clock - start_clock); - return ret; -} - -static void -armpmu_release_hardware(struct arm_pmu *armpmu) -{ - armpmu->free_irq(armpmu); -} - -static int -armpmu_reserve_hardware(struct arm_pmu *armpmu) -{ - int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq); - if (err) { - armpmu_release_hardware(armpmu); - return err; - } - - return 0; -} - -static void -hw_perf_event_destroy(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - atomic_t *active_events = &armpmu->active_events; - struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; - - if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { - armpmu_release_hardware(armpmu); - mutex_unlock(pmu_reserve_mutex); - } -} - -static int -event_requires_mode_exclusion(struct perf_event_attr *attr) -{ - return attr->exclude_idle || attr->exclude_user || - attr->exclude_kernel || attr->exclude_hv; -} - -static int -__hw_perf_event_init(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - int mapping; - - mapping = armpmu->map_event(event); - - if (mapping < 0) { - pr_debug("event %x:%llx not supported\n", event->attr.type, - event->attr.config); - return mapping; - } - - /* - * We don't assign an index until we actually place the event onto - * hardware. Use -1 to signify that we haven't decided where to put it - * yet. For SMP systems, each core has it's own PMU so we can't do any - * clever allocation or constraints checking at this point. - */ - hwc->idx = -1; - hwc->config_base = 0; - hwc->config = 0; - hwc->event_base = 0; - - /* - * Check whether we need to exclude the counter from certain modes. - */ - if ((!armpmu->set_event_filter || - armpmu->set_event_filter(hwc, &event->attr)) && - event_requires_mode_exclusion(&event->attr)) { - pr_debug("ARM performance counters do not support " - "mode exclusion\n"); - return -EOPNOTSUPP; - } - - /* - * Store the event encoding into the config_base field. - */ - hwc->config_base |= (unsigned long)mapping; - - if (!is_sampling_event(event)) { - /* - * For non-sampling runs, limit the sample_period to half - * of the counter width. That way, the new counter value - * is far less likely to overtake the previous one unless - * you have some serious IRQ latency issues. - */ - hwc->sample_period = armpmu->max_period >> 1; - hwc->last_period = hwc->sample_period; - local64_set(&hwc->period_left, hwc->sample_period); - } - - if (event->group_leader != event) { - if (validate_group(event) != 0) - return - |