From b3c395ef5556a6c60f4426cc060f5b7bdcf82d5b Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 11 May 2015 17:52:08 +0200 Subject: mm/uaccess, mm/fault: Clarify that uaccess may only sleep if pagefaults are enabled In general, non-atomic variants of user access functions must not sleep if pagefaults are disabled. Let's update all relevant comments in uaccess code. This also reflects the might_sleep() checks in might_fault(). Reviewed-and-tested-by: Thomas Gleixner Signed-off-by: David Hildenbrand Signed-off-by: Peter Zijlstra (Intel) Cc: David.Laight@ACULAB.COM Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: airlied@linux.ie Cc: akpm@linux-foundation.org Cc: benh@kernel.crashing.org Cc: bigeasy@linutronix.de Cc: borntraeger@de.ibm.com Cc: daniel.vetter@intel.com Cc: heiko.carstens@de.ibm.com Cc: herbert@gondor.apana.org.au Cc: hocko@suse.cz Cc: hughd@google.com Cc: mst@redhat.com Cc: paulus@samba.org Cc: ralf@linux-mips.org Cc: schwidefsky@de.ibm.com Cc: yang.shi@windriver.com Link: http://lkml.kernel.org/r/1431359540-32227-4-git-send-email-dahi@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uaccess.h | 15 ++++++++++----- arch/x86/include/asm/uaccess_32.h | 6 ++++-- arch/x86/lib/usercopy_32.c | 6 ++++-- 3 files changed, 18 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index ace9dec050b1..a8df874f3e88 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -74,7 +74,8 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un * @addr: User space pointer to start of block to check * @size: Size of block to check * - * Context: User context only. This function may sleep. + * Context: User context only. This function may sleep if pagefaults are + * enabled. * * Checks if a pointer to a block of memory in user space is valid. * @@ -145,7 +146,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) * @x: Variable to store result. * @ptr: Source address, in user space. * - * Context: User context only. This function may sleep. + * Context: User context only. This function may sleep if pagefaults are + * enabled. * * This macro copies a single simple variable from user space to kernel * space. It supports simple types like char and int, but not larger @@ -240,7 +242,8 @@ extern void __put_user_8(void); * @x: Value to copy to user space. * @ptr: Destination address, in user space. * - * Context: User context only. This function may sleep. + * Context: User context only. This function may sleep if pagefaults are + * enabled. * * This macro copies a single simple value from kernel space to user * space. It supports simple types like char and int, but not larger @@ -455,7 +458,8 @@ struct __large_struct { unsigned long buf[100]; }; * @x: Variable to store result. * @ptr: Source address, in user space. * - * Context: User context only. This function may sleep. + * Context: User context only. This function may sleep if pagefaults are + * enabled. * * This macro copies a single simple variable from user space to kernel * space. It supports simple types like char and int, but not larger @@ -479,7 +483,8 @@ struct __large_struct { unsigned long buf[100]; }; * @x: Value to copy to user space. * @ptr: Destination address, in user space. * - * Context: User context only. This function may sleep. + * Context: User context only. This function may sleep if pagefaults are + * enabled. 
* * This macro copies a single simple value from kernel space to user * space. It supports simple types like char and int, but not larger diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 3c03a5de64d3..7c8ad3451988 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -70,7 +70,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) * @from: Source address, in kernel space. * @n: Number of bytes to copy. * - * Context: User context only. This function may sleep. + * Context: User context only. This function may sleep if pagefaults are + * enabled. * * Copy data from kernel space to user space. Caller must check * the specified block with access_ok() before calling this function. @@ -117,7 +118,8 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) * @from: Source address, in user space. * @n: Number of bytes to copy. * - * Context: User context only. This function may sleep. + * Context: User context only. This function may sleep if pagefaults are + * enabled. * * Copy data from user space to kernel space. Caller must check * the specified block with access_ok() before calling this function. diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index e2f5e21c03b3..91d93b95bd86 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -647,7 +647,8 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); * @from: Source address, in kernel space. * @n: Number of bytes to copy. * - * Context: User context only. This function may sleep. + * Context: User context only. This function may sleep if pagefaults are + * enabled. * * Copy data from kernel space to user space. * @@ -668,7 +669,8 @@ EXPORT_SYMBOL(_copy_to_user); * @from: Source address, in user space. * @n: Number of bytes to copy. * - * Context: User context only. This function may sleep. + * Context: User context only. This function may sleep if pagefaults are + * enabled. * * Copy data from user space to kernel space. * -- cgit v1.2.3 From 2cb7c9cb426660b5ed58b643d9e7dd5d50ba901f Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 11 May 2015 17:52:09 +0200 Subject: sched/preempt, mm/kmap: Explicitly disable/enable preemption in kmap_atomic_* The existing code relies on pagefault_disable() implicitly disabling preemption, so that no schedule will happen between kmap_atomic() and kunmap_atomic(). Let's make this explicit, to prepare for pagefault_disable() not touching preemption anymore. 
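For illustration, the pairing this patch makes explicit looks roughly as follows. example_kmap_atomic()/example_kunmap_atomic() are hypothetical stand-ins, not code from this patch; the real kmap_atomic() additionally manages a fixmap slot for highmem pages:

	#include <linux/highmem.h>
	#include <linux/mm.h>
	#include <linux/preempt.h>
	#include <linux/uaccess.h>

	/* Hypothetical sketch of the explicit disable/enable pairing. */
	static void *example_kmap_atomic(struct page *page)
	{
		preempt_disable();	/* explicit, no longer implied by pagefault_disable() */
		pagefault_disable();
		if (!PageHighMem(page))
			return page_address(page);
		return NULL;		/* highmem fixmap mapping elided */
	}

	static void example_kunmap_atomic(void *kvaddr)
	{
		/* fixmap teardown elided */
		pagefault_enable();
		preempt_enable();	/* pairs with example_kmap_atomic() */
	}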
Reviewed-and-tested-by: Thomas Gleixner Signed-off-by: David Hildenbrand Signed-off-by: Peter Zijlstra (Intel) Cc: David.Laight@ACULAB.COM Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: airlied@linux.ie Cc: akpm@linux-foundation.org Cc: benh@kernel.crashing.org Cc: bigeasy@linutronix.de Cc: borntraeger@de.ibm.com Cc: daniel.vetter@intel.com Cc: heiko.carstens@de.ibm.com Cc: herbert@gondor.apana.org.au Cc: hocko@suse.cz Cc: hughd@google.com Cc: mst@redhat.com Cc: paulus@samba.org Cc: ralf@linux-mips.org Cc: schwidefsky@de.ibm.com Cc: yang.shi@windriver.com Link: http://lkml.kernel.org/r/1431359540-32227-5-git-send-email-dahi@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- arch/x86/mm/highmem_32.c | 3 ++- arch/x86/mm/iomap_32.c | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 4500142bc4aa..eecb207a2037 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -35,7 +35,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) unsigned long vaddr; int idx, type; - /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ + preempt_disable(); pagefault_disable(); if (!PageHighMem(page)) @@ -100,6 +100,7 @@ void __kunmap_atomic(void *kvaddr) #endif pagefault_enable(); + preempt_enable(); } EXPORT_SYMBOL(__kunmap_atomic); diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 9ca35fc60cfe..2b7ece0e103a 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -59,6 +59,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) unsigned long vaddr; int idx, type; + preempt_disable(); pagefault_disable(); type = kmap_atomic_idx_push(); @@ -117,5 +118,6 @@ iounmap_atomic(void __iomem *kvaddr) } pagefault_enable(); + preempt_enable(); } EXPORT_SYMBOL_GPL(iounmap_atomic); -- cgit v1.2.3 From 70ffdb9393a7264a069265edded729078dcf0425 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 11 May 2015 17:52:11 +0200 Subject: mm/fault, arch: Use pagefault_disable() to check for disabled pagefaults in the handler Introduce faulthandler_disabled() and use it to check for irq context and disabled pagefaults (via pagefault_disable()) in the pagefault handlers. Please note that we keep the in_atomic() checks in place - to detect whether in irq context (in which case preemption is always properly disabled). In contrast, preempt_disable() should never be used to disable pagefaults. With !CONFIG_PREEMPT_COUNT, preempt_disable() doesn't modify the preempt counter, and therefore the result of in_atomic() differs. We validate that condition by using might_fault() checks when calling might_sleep(). Therefore, add a comment to faulthandler_disabled(), describing why this is needed. faulthandler_disabled() and pagefault_disable() are defined in linux/uaccess.h, so let's properly add that include to all relevant files. This patch is based on a patch from Thomas Gleixner. 
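For reference, the check this patch introduces can be sketched as follows; this is an approximation of the new <linux/uaccess.h> helper, not a verbatim copy:

	/*
	 * True in irq/atomic context and inside pagefault_disable()d
	 * sections; fault handlers bail out early instead of sleeping.
	 */
	#define faulthandler_disabled() (pagefault_disabled() || in_atomic())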
Reviewed-and-tested-by: Thomas Gleixner Signed-off-by: David Hildenbrand Signed-off-by: Peter Zijlstra (Intel) Cc: David.Laight@ACULAB.COM Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: airlied@linux.ie Cc: akpm@linux-foundation.org Cc: benh@kernel.crashing.org Cc: bigeasy@linutronix.de Cc: borntraeger@de.ibm.com Cc: daniel.vetter@intel.com Cc: heiko.carstens@de.ibm.com Cc: herbert@gondor.apana.org.au Cc: hocko@suse.cz Cc: hughd@google.com Cc: mst@redhat.com Cc: paulus@samba.org Cc: ralf@linux-mips.org Cc: schwidefsky@de.ibm.com Cc: yang.shi@windriver.com Link: http://lkml.kernel.org/r/1431359540-32227-7-git-send-email-dahi@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 181c53bac3a7..9dc909841739 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -13,6 +13,7 @@ #include <linux/hugetlb.h> /* hstate_index_to_shift */ #include <linux/prefetch.h> /* prefetchw */ #include <linux/context_tracking.h> /* exception_enter(), ... */ +#include <linux/uaccess.h> /* faulthandler_disabled() */ #include <asm/traps.h> /* dotraplinkage, ... */ #include <asm/pgalloc.h> /* pgd_*(), ... */ @@ -1126,9 +1127,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, /* * If we're in an interrupt, have no user context or are running - * in an atomic region then we must not take the fault: + * in a region with pagefaults disabled then we must not take the fault */ - if (unlikely(in_atomic() || !mm)) { + if (unlikely(faulthandler_disabled() || !mm)) { bad_area_nosemaphore(regs, error_code, address); return; } -- cgit v1.2.3
From 0fb0328d3458ff2d6ffbb280b75053c99a8a4b1f Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 26 May 2015 10:28:09 +0200 Subject: sched/x86: Drop repeated word from mwait_idle() comment A single "default" is fine. Signed-off-by: Huang Rui [ Fix another typo and reflow comment. ] Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1432022472-2224-5-git-send-email-ray.huang@amd.com Link: http://lkml.kernel.org/r/1432628901-18044-7-git-send-email-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/process.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 6e338e3b1dc0..c648139d68d7 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -445,11 +445,10 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c) } /* - * MONITOR/MWAIT with no hints, used for default default C1 state. - * This invokes MWAIT with interrutps enabled and no flags, - * which is backwards compatible with the original MWAIT implementation. + * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT + * with interrupts enabled and no flags, which is backwards compatible with the + * original MWAIT implementation. */ - static void mwait_idle(void) { if (!current_set_polling_and_test()) { -- cgit v1.2.3
From 06931e62246844c73fba24d7aeb4a5dc897a2739 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Tue, 26 May 2015 15:11:28 +0200 Subject: sched/topology: Rename topology_thread_cpumask() to topology_sibling_cpumask() Rename topology_thread_cpumask() to topology_sibling_cpumask() for more consistency with scheduler code.
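For illustration, a hypothetical caller of the renamed accessor (the perf changes below follow the same pattern); example_smt_siblings() is not part of the patch:

	#include <linux/cpumask.h>
	#include <linux/topology.h>

	/* Hypothetical helper: hardware threads sharing a core with @cpu. */
	static unsigned int example_smt_siblings(int cpu)
	{
		/* formerly topology_thread_cpumask(cpu) */
		return cpumask_weight(topology_sibling_cpumask(cpu));
	}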
Signed-off-by: Bartosz Golaszewski Reviewed-by: Thomas Gleixner Acked-by: Russell King Acked-by: Catalin Marinas Cc: Benoit Cousson Cc: Fenghua Yu Cc: Guenter Roeck Cc: Jean Delvare Cc: Jonathan Corbet Cc: Linus Torvalds Cc: Oleg Drokin Cc: Peter Zijlstra Cc: Rafael J. Wysocki Cc: Russell King Cc: Viresh Kumar Link: http://lkml.kernel.org/r/1432645896-12588-2-git-send-email-bgolaszewski@baylibre.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/topology.h | 2 +- arch/x86/kernel/cpu/perf_event_intel.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 0e8f04f2c26f..5a77593fdace 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -124,7 +124,7 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu); #ifdef ENABLE_TOPO_DEFINES #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) -#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) +#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) #endif static inline void arch_fix_phys_package_id(int num, u32 slot) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 3998131d1a68..324817735771 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -2621,7 +2621,7 @@ static void intel_pmu_cpu_starting(int cpu) if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) { void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED]; - for_each_cpu(i, topology_thread_cpumask(cpu)) { + for_each_cpu(i, topology_sibling_cpumask(cpu)) { struct intel_shared_regs *pc; pc = per_cpu(cpu_hw_events, i).shared_regs; @@ -2641,7 +2641,7 @@ static void intel_pmu_cpu_starting(int cpu) if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { int h = x86_pmu.num_counters >> 1; - for_each_cpu(i, topology_thread_cpumask(cpu)) { + for_each_cpu(i, topology_sibling_cpumask(cpu)) { struct intel_excl_cntrs *c; c = per_cpu(cpu_hw_events, i).excl_cntrs; @@ -3403,7 +3403,7 @@ static __init int fixup_ht_bug(void) if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED)) return 0; - w = cpumask_weight(topology_thread_cpumask(cpu)); + w = cpumask_weight(topology_sibling_cpumask(cpu)); if (w > 1) { pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n"); return 0; -- cgit v1.2.3 From 7d79a7bd7554d420313451fb805ebc37a8da97fe Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Tue, 26 May 2015 15:11:35 +0200 Subject: x86: Replace cpu_**_mask() with topology_**_cpumask() The former duplicate the functionalities of the latter but are neither documented nor arch-independent. Signed-off-by: Bartosz Golaszewski Reviewed-by: Thomas Gleixner Cc: Benoit Cousson Cc: Catalin Marinas Cc: Fenghua Yu Cc: Guenter Roeck Cc: Jean Delvare Cc: Jonathan Corbet Cc: Linus Torvalds Cc: Oleg Drokin Cc: Peter Zijlstra Cc: Rafael J. 
Wysocki Cc: Russell King Cc: Viresh Kumar Link: http://lkml.kernel.org/r/1432645896-12588-9-git-send-email-bgolaszewski@baylibre.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/proc.c | 3 ++- arch/x86/kernel/smpboot.c | 42 ++++++++++++++++++++++-------------------- arch/x86/kernel/tsc_sync.c | 2 +- 3 files changed, 25 insertions(+), 22 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index e7d8c7608471..18ca99f2798b 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -12,7 +12,8 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, { #ifdef CONFIG_SMP seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); - seq_printf(m, "siblings\t: %d\n", cpumask_weight(cpu_core_mask(cpu))); + seq_printf(m, "siblings\t: %d\n", + cpumask_weight(topology_core_cpumask(cpu))); seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); seq_printf(m, "apicid\t\t: %d\n", c->apicid); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 50e547eac8cd..0e8209619455 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -314,10 +314,10 @@ topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2)); } -#define link_mask(_m, c1, c2) \ +#define link_mask(mfunc, c1, c2) \ do { \ - cpumask_set_cpu((c1), cpu_##_m##_mask(c2)); \ - cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \ + cpumask_set_cpu((c1), mfunc(c2)); \ + cpumask_set_cpu((c2), mfunc(c1)); \ } while (0) static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) @@ -398,9 +398,9 @@ void set_cpu_sibling_map(int cpu) cpumask_set_cpu(cpu, cpu_sibling_setup_mask); if (!has_mp) { - cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); + cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu)); cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); - cpumask_set_cpu(cpu, cpu_core_mask(cpu)); + cpumask_set_cpu(cpu, topology_core_cpumask(cpu)); c->booted_cores = 1; return; } @@ -409,32 +409,34 @@ void set_cpu_sibling_map(int cpu) o = &cpu_data(i); if ((i == cpu) || (has_smt && match_smt(c, o))) - link_mask(sibling, cpu, i); + link_mask(topology_sibling_cpumask, cpu, i); if ((i == cpu) || (has_mp && match_llc(c, o))) - link_mask(llc_shared, cpu, i); + link_mask(cpu_llc_shared_mask, cpu, i); } /* * This needs a separate iteration over the cpus because we rely on all - * cpu_sibling_mask links to be set-up. + * topology_sibling_cpumask links to be set-up. */ for_each_cpu(i, cpu_sibling_setup_mask) { o = &cpu_data(i); if ((i == cpu) || (has_mp && match_die(c, o))) { - link_mask(core, cpu, i); + link_mask(topology_core_cpumask, cpu, i); /* * Does this new cpu bringup a new core? 
*/ - if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) { + if (cpumask_weight( + topology_sibling_cpumask(cpu)) == 1) { /* * for each core in package, increment * the booted_cores for this new cpu */ - if (cpumask_first(cpu_sibling_mask(i)) == i) + if (cpumask_first( + topology_sibling_cpumask(i)) == i) c->booted_cores++; /* * increment the core count for all @@ -1009,8 +1011,8 @@ static __init void disable_smp(void) physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); else physid_set_mask_of_physid(0, &phys_cpu_present_map); - cpumask_set_cpu(0, cpu_sibling_mask(0)); - cpumask_set_cpu(0, cpu_core_mask(0)); + cpumask_set_cpu(0, topology_sibling_cpumask(0)); + cpumask_set_cpu(0, topology_core_cpumask(0)); } enum { @@ -1293,22 +1295,22 @@ static void remove_siblinginfo(int cpu) int sibling; struct cpuinfo_x86 *c = &cpu_data(cpu); - for_each_cpu(sibling, cpu_core_mask(cpu)) { - cpumask_clear_cpu(cpu, cpu_core_mask(sibling)); + for_each_cpu(sibling, topology_core_cpumask(cpu)) { + cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); /*/ * last thread sibling in this cpu core going down */ - if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) + if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1) cpu_data(sibling).booted_cores--; } - for_each_cpu(sibling, cpu_sibling_mask(cpu)) - cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling)); + for_each_cpu(sibling, topology_sibling_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling)); cpumask_clear(cpu_llc_shared_mask(cpu)); - cpumask_clear(cpu_sibling_mask(cpu)); - cpumask_clear(cpu_core_mask(cpu)); + cpumask_clear(topology_sibling_cpumask(cpu)); + cpumask_clear(topology_core_cpumask(cpu)); c->phys_proc_id = 0; c->cpu_core_id = 0; cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 26488487bc61..dd8d0791dfb5 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -113,7 +113,7 @@ static void check_tsc_warp(unsigned int timeout) */ static inline unsigned int loop_timeout(int cpu) { - return (cpumask_weight(cpu_core_mask(cpu)) > 1) ? 2 : 20; + return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20; } /* -- cgit v1.2.3 From 960d447b94b22ceba286917056871d1dac8da697 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Tue, 26 May 2015 15:11:36 +0200 Subject: x86: Remove cpu_sibling_mask() and cpu_core_mask() These functions are arch-specific and duplicate the functionality of macros defined in linux/include/topology.h. Remove them as all the callers in x86 have now switched to using the topology_**_cpumask() family. Signed-off-by: Bartosz Golaszewski Reviewed-by: Thomas Gleixner Cc: Benoit Cousson Cc: Catalin Marinas Cc: Fenghua Yu Cc: Guenter Roeck Cc: Jean Delvare Cc: Jonathan Corbet Cc: Linus Torvalds Cc: Oleg Drokin Cc: Peter Zijlstra Cc: Rafael J. 
Wysocki Cc: Russell King Cc: Viresh Kumar Link: http://lkml.kernel.org/r/1432645896-12588-10-git-send-email-bgolaszewski@baylibre.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/smp.h | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 17a8dced12da..222a6a3ca2b5 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -37,16 +37,6 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id); DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); -static inline struct cpumask *cpu_sibling_mask(int cpu) -{ - return per_cpu(cpu_sibling_map, cpu); -} - -static inline struct cpumask *cpu_core_mask(int cpu) -{ - return per_cpu(cpu_core_map, cpu); -} - static inline struct cpumask *cpu_llc_shared_mask(int cpu) { return per_cpu(cpu_llc_shared_map, cpu); -- cgit v1.2.3
From 4eaca0a887eaee04fc7a3866d0f5b51b34030dfa Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 4 Jun 2015 17:39:08 +0200 Subject: preempt: Use preempt_schedule_context() as the official tracing preemption point preempt_schedule_context() is a tracing safe preemption point but it's only used when CONFIG_CONTEXT_TRACKING=y. Other configs have tracing recursion issues since commit: b30f0e3ffedf ("sched/preempt: Optimize preemption operations on __schedule() callers") introduced function based preempt_count_*() ops. Let's make it available on all configs and give it a more appropriate name for its new position. Reported-by: Fengguang Wu Signed-off-by: Frederic Weisbecker Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1433432349-1021-3-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/preempt.h | 8 +++----- arch/x86/kernel/i386_ksyms_32.c | 4 +--- arch/x86/kernel/x8664_ksyms_64.c | 4 +--- arch/x86/lib/thunk_32.S | 4 +--- arch/x86/lib/thunk_64.S | 4 +--- 5 files changed, 7 insertions(+), 17 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 8f3271842533..dca71714f860 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -99,11 +99,9 @@ static __always_inline bool should_resched(void) extern asmlinkage void ___preempt_schedule(void); # define __preempt_schedule() asm ("call ___preempt_schedule") extern asmlinkage void preempt_schedule(void); -# ifdef CONFIG_CONTEXT_TRACKING - extern asmlinkage void ___preempt_schedule_context(void); -# define __preempt_schedule_context() asm ("call ___preempt_schedule_context") - extern asmlinkage void preempt_schedule_context(void); -# endif + extern asmlinkage void ___preempt_schedule_notrace(void); +# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace") + extern asmlinkage void preempt_schedule_notrace(void); #endif #endif /* __ASM_PREEMPT_H */ diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c index 05fd74f537d6..64341aa485ae 100644 --- a/arch/x86/kernel/i386_ksyms_32.c +++ b/arch/x86/kernel/i386_ksyms_32.c @@ -40,7 +40,5 @@ EXPORT_SYMBOL(empty_zero_page); #ifdef CONFIG_PREEMPT EXPORT_SYMBOL(___preempt_schedule); -#ifdef CONFIG_CONTEXT_TRACKING -EXPORT_SYMBOL(___preempt_schedule_context); -#endif +EXPORT_SYMBOL(___preempt_schedule_notrace); #endif diff --git a/arch/x86/kernel/x8664_ksyms_64.c
b/arch/x86/kernel/x8664_ksyms_64.c index 37d8fa4438f0..a0695be19864 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c @@ -75,7 +75,5 @@ EXPORT_SYMBOL(native_load_gs_index); #ifdef CONFIG_PREEMPT EXPORT_SYMBOL(___preempt_schedule); -#ifdef CONFIG_CONTEXT_TRACKING -EXPORT_SYMBOL(___preempt_schedule_context); -#endif +EXPORT_SYMBOL(___preempt_schedule_notrace); #endif diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S index 5eb715087b80..e407941d0488 100644 --- a/arch/x86/lib/thunk_32.S +++ b/arch/x86/lib/thunk_32.S @@ -38,8 +38,6 @@ #ifdef CONFIG_PREEMPT THUNK ___preempt_schedule, preempt_schedule -#ifdef CONFIG_CONTEXT_TRACKING - THUNK ___preempt_schedule_context, preempt_schedule_context -#endif + THUNK ___preempt_schedule_notrace, preempt_schedule_notrace #endif diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S index f89ba4e93025..2198902329b5 100644 --- a/arch/x86/lib/thunk_64.S +++ b/arch/x86/lib/thunk_64.S @@ -49,9 +49,7 @@ #ifdef CONFIG_PREEMPT THUNK ___preempt_schedule, preempt_schedule -#ifdef CONFIG_CONTEXT_TRACKING - THUNK ___preempt_schedule_context, preempt_schedule_context -#endif + THUNK ___preempt_schedule_notrace, preempt_schedule_notrace #endif #if defined(CONFIG_TRACE_IRQFLAGS) \ -- cgit v1.2.3
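Taken together with the rename above, tracing-sensitive code now has a safe preemption point on every config. A hypothetical use, assuming the long-standing preempt_disable_notrace()/preempt_enable_notrace() helpers from <linux/preempt.h>:

	#include <linux/preempt.h>

	/* Hypothetical sketch: a section that must not recurse into the tracer. */
	static void example_notrace_section(void)
	{
		preempt_disable_notrace();
		/* ... work called from a tracing callback ... */
		preempt_enable_notrace();	/* may end up in preempt_schedule_notrace() */
	}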