From 3d8afe3099ebc602848aa7f09235cce3a9a023ce Mon Sep 17 00:00:00 2001
From: Sudeep Holla
Date: Tue, 2 Sep 2014 11:35:24 +0100
Subject: arm64: use irq_set_affinity with force=false when migrating irqs

The arm64 interrupt migration code on cpu offline calls
irqchip.irq_set_affinity() with the argument force=true. Originally
this argument had no effect because it was not used by any interrupt
chip driver and there was no semantics defined.

This changed with commit 01f8fa4f01d8 ("genirq: Allow forcing cpu
affinity of interrupts") which made the force argument useful to route
interrupts to not yet online cpus without checking the target cpu
against the cpu online mask. The following commit ffde1de64012
("irqchip: gic: Support forced affinity setting") implemented this for
the GIC interrupt controller.

As a consequence the cpu offline irq migration fails if CPU0 is
offlined, because CPU0 is still set in the affinity mask and the
validation against the cpu online mask is skipped due to the force
argument being true. The following first_cpu(mask) selection always
selects CPU0 as the target.

Commit 601c942176d8 ("arm64: use cpu_online_mask when using forced
irq_set_affinity") intended to fix the above mentioned issue but
introduced another issue where affinity can be migrated to a wrong
CPU due to the unconditional copy of cpu_online_mask.

As done for arm, solve the issue by calling irq_set_affinity() with
force=false from the CPU offline irq migration code so the GIC driver
validates the affinity mask against the CPU online mask and therefore
removes CPU0 from the possible target candidates. Also revert the
changes done in commit 601c942176d8 as they are no longer needed.

Tested on Juno platform.

Fixes: 601c942176d8 ("arm64: use cpu_online_mask when using forced irq_set_affinity")
Signed-off-by: Sudeep Holla
Acked-by: Mark Rutland
Cc: Catalin Marinas
Cc: Will Deacon
Cc: # 3.10.x
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/irq.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 0f08dfd69ebc..dfa6e3e74fdd 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
 		return false;

-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		affinity = cpu_online_mask;
 		ret = true;
+	}

-	/*
-	 * when using forced irq_set_affinity we must ensure that the cpu
-	 * being offlined is not present in the affinity mask, it may be
-	 * selected as the target CPU otherwise
-	 */
-	affinity = cpu_online_mask;
 	c = irq_data_get_irq_chip(d);
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
 		cpumask_copy(d->affinity, affinity);

 	return ret;
--
cgit v1.2.3
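For context, the reason force=false restores the online-mask validation is
visible in the GIC driver's affinity handler. After commit ffde1de64012 its
target selection looks roughly like the abridged sketch below; the
GIC_DIST_TARGET register update, locking and return-value details are
elided, so treat this as illustrative rather than the exact driver code:

/*
 * Abridged sketch of gic_set_affinity() after commit ffde1de64012
 * ("irqchip: gic: Support forced affinity setting").
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;

	if (!force)
		/* Normal path: only an online CPU may be the target. */
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		/* Forced path: trust the caller's mask, online or not. */
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	/* ... route the interrupt to 'cpu' ... */

	return IRQ_SET_MASK_OK;
}

With force=true the first_cpu-style selection happily picks the CPU being
offlined if it is still set in the mask; with force=false that CPU drops out
of cpu_online_mask before the irq is migrated, which is the whole fix.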
From eb35bdd7bca29a13c8ecd44e6fd747a84ce675db Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 11 Sep 2014 14:38:16 +0100
Subject: arm64: flush TLS registers during exec

Nathan reports that we leak TLS information from the parent context
during an exec, as we don't clear the TLS registers when flushing the
thread state.

This patch updates the flushing code so that we:

  (1) Unconditionally zero the tpidr_el0 register (since this is fully
      context switched for native tasks and zeroed for compat tasks)

  (2) Zero the tp_value state in thread_info before clearing the
      tpidrro_el0 register for compat tasks (since this is only
      writable by the set_tls compat syscall and therefore not fully
      switched).

A missing compiler barrier is also added to the compat set_tls
syscall.

Cc:
Acked-by: Nathan Lynch
Reported-by: Nathan Lynch
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/process.c    | 18 ++++++++++++++++++
 arch/arm64/kernel/sys_compat.c |  6 ++++++
 2 files changed, 24 insertions(+)

(limited to 'arch')

diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 1309d64aa926..29d48690f2ac 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -230,9 +230,27 @@ void exit_thread(void)
 {
 }

+static void tls_thread_flush(void)
+{
+	asm ("msr tpidr_el0, xzr");
+
+	if (is_compat_task()) {
+		current->thread.tp_value = 0;
+
+		/*
+		 * We need to ensure ordering between the shadow state and the
+		 * hardware state, so that we don't corrupt the hardware state
+		 * with a stale shadow state during context switch.
+		 */
+		barrier();
+		asm ("msr tpidrro_el0, xzr");
+	}
+}
+
 void flush_thread(void)
 {
 	fpsimd_flush_thread();
+	tls_thread_flush();
 	flush_ptrace_hw_breakpoint(current);
 }

diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index de2b0226e06d..dc47e53e9e28 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)

 	case __ARM_NR_compat_set_tls:
 		current->thread.tp_value = regs->regs[0];
+
+		/*
+		 * Protect against register corruption from context switch.
+		 * See comment in tls_thread_flush.
+		 */
+		barrier();
 		asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
 		return 0;
--
cgit v1.2.3
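The barrier() matters because tpidrro_el0 is never saved on switch-out; the
context switch reconstructs it from the shadow tp_value on switch-in. A rough
sketch of the switch path of this era follows (abridged from
arch/arm64/kernel/process.c; exact details may differ from the real code):

/* Sketch of the era's tls_thread_switch(); illustrative only. */
static void tls_thread_switch(struct task_struct *next)
{
	unsigned long tpidr, tpidrro;

	/*
	 * Save the outgoing native TLS. A compat task's tp_value is
	 * only written via the set_tls syscall, so it is assumed to
	 * already be current here.
	 */
	if (!is_compat_task()) {
		asm("mrs %0, tpidr_el0" : "=r" (tpidr));
		current->thread.tp_value = tpidr;
	}

	/* Rebuild the hardware TLS registers from the shadow state. */
	if (is_compat_thread(task_thread_info(next))) {
		tpidr = 0;
		tpidrro = next->thread.tp_value;
	} else {
		tpidr = next->thread.tp_value;
		tpidrro = 0;
	}

	asm("msr tpidr_el0, %0\n"
	    "msr tpidrro_el0, %1"
	    : : "r" (tpidr), "r" (tpidrro));
}

If the compiler were free to move the tpidrro_el0 write in tls_thread_flush()
or the set_tls syscall ahead of the tp_value store, a preemption between the
two would run this path and load the stale tp_value straight back into
tpidrro_el0, which is exactly the corruption the comments guard against.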