Diffstat (limited to 'lib')
-rw-r--r--  lib/dump_stack.c      | 47 +++++++++++++++++++++++++++++++++++++++++++++++++--
-rw-r--r--  lib/idr.c             |  7 ++-----
-rw-r--r--  lib/percpu_counter.c  |  2 +-
3 files changed, 48 insertions(+), 8 deletions(-)
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 53bad099ebd6..c03154173cc7 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -6,15 +6,58 @@
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/atomic.h>
+
+static void __dump_stack(void)
+{
+	dump_stack_print_info(KERN_DEFAULT);
+	show_stack(NULL, NULL);
+}
 
 /**
  * dump_stack - dump the current task information and its stack trace
  *
  * Architectures can override this implementation by implementing its own.
  */
+#ifdef CONFIG_SMP
+static atomic_t dump_lock = ATOMIC_INIT(-1);
+
 void dump_stack(void)
 {
-	dump_stack_print_info(KERN_DEFAULT);
-	show_stack(NULL, NULL);
+	int was_locked;
+	int old;
+	int cpu;
+
+	/*
+	 * Permit this cpu to perform nested stack dumps while serialising
+	 * against other CPUs
+	 */
+	preempt_disable();
+
+retry:
+	cpu = smp_processor_id();
+	old = atomic_cmpxchg(&dump_lock, -1, cpu);
+	if (old == -1) {
+		was_locked = 0;
+	} else if (old == cpu) {
+		was_locked = 1;
+	} else {
+		cpu_relax();
+		goto retry;
+	}
+
+	__dump_stack();
+
+	if (!was_locked)
+		atomic_set(&dump_lock, -1);
+
+	preempt_enable();
+}
+#else
+void dump_stack(void)
+{
+	__dump_stack();
 }
+#endif
 EXPORT_SYMBOL(dump_stack);
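
The SMP branch above builds a tiny owner-tracking spinlock out of atomic_cmpxchg() so that concurrent dump_stack() calls from different CPUs are serialised, while a CPU that re-enters dump_stack() (say, by faulting while dumping) falls through instead of deadlocking; preempt_disable() keeps the CPU id stable for the duration. Below is a rough user-space sketch of the same pattern, with C11 atomics and thread ids standing in for atomic_t and CPU ids; the names dump_lock, dump_one, dump_serialized and worker are illustrative, not kernel API.

#include <stdint.h>
#include <stdio.h>
#include <stdatomic.h>
#include <threads.h>

static atomic_int dump_lock = -1;	/* -1 == unlocked, otherwise owner id */

static void dump_one(int id)
{
	printf("stack dump from thread %d\n", id);	/* stands in for __dump_stack() */
}

static void dump_serialized(int id)
{
	int was_locked;
	int old;

	for (;;) {
		old = -1;
		/* try to take the lock; on failure 'old' holds the owner */
		if (atomic_compare_exchange_strong(&dump_lock, &old, id)) {
			was_locked = 0;		/* we just took it */
			break;
		}
		if (old == id) {
			was_locked = 1;		/* nested call; we already own it */
			break;
		}
		/* another thread is dumping: spin, like cpu_relax() */
	}

	dump_one(id);

	if (!was_locked)			/* only the outermost entry unlocks */
		atomic_store(&dump_lock, -1);
}

static int worker(void *arg)
{
	dump_serialized((int)(intptr_t)arg);
	return 0;
}

int main(void)
{
	thrd_t t[4];

	for (int i = 0; i < 4; i++)
		thrd_create(&t[i], worker, (void *)(intptr_t)i);
	for (int i = 0; i < 4; i++)
		thrd_join(t[i], NULL);
	return 0;
}

As in the kernel version, only the outermost entry (was_locked == 0) releases the lock, so a nested call never unlocks underneath its own caller.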
diff --git a/lib/idr.c b/lib/idr.c
index cca4b9302a71..bfe4db4e165f 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -524,9 +524,7 @@ EXPORT_SYMBOL(idr_alloc_cyclic);
 
 static void idr_remove_warning(int id)
 {
-	printk(KERN_WARNING
-	       "idr_remove called for id=%d which is not allocated.\n", id);
-	dump_stack();
+	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
 }
 
 static void sub_remove(struct idr *idp, int shift, int id)
@@ -1064,8 +1062,7 @@ void ida_remove(struct ida *ida, int id)
 	return;
 
  err:
-	printk(KERN_WARNING
-	       "ida_remove called for id=%d which is not allocated.\n", id);
+	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
 }
 EXPORT_SYMBOL(ida_remove);
 
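
Both idr.c hunks are the same conversion: an open-coded printk(KERN_WARNING ...) followed by dump_stack() collapses into a single WARN(1, ...), which prints the message together with a backtrace (and sets the TAINT_WARN flag). For illustration, here is a user-space approximation of that pairing built on glibc's backtrace(); WARN_TRACE is a made-up name for this sketch, not the kernel macro.

#include <execinfo.h>
#include <stdio.h>

/* WARN_TRACE stands in for the kernel's WARN(): one macro that prints a
 * formatted message and then a trace of the current call chain. */
#define WARN_TRACE(cond, fmt, ...)					\
	do {								\
		if (cond) {						\
			void *frames[32];				\
			int n = backtrace(frames, 32);			\
			fprintf(stderr, "WARNING: " fmt, ##__VA_ARGS__);\
			backtrace_symbols_fd(frames, n, 2);	/* 2 == stderr */ \
		}							\
	} while (0)

static void remove_warning(int id)
{
	/* one call replaces the old printk + dump_stack pair */
	WARN_TRACE(1, "remove called for id=%d which is not allocated.\n", id);
}

int main(void)
{
	remove_warning(42);
	return 0;
}

Compile with -rdynamic if you want symbol names rather than raw addresses in the trace.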
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index ba6085d9c741..1fc23a3277e1 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -80,8 +80,8 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	if (count >= batch || count <= -batch) {
 		raw_spin_lock(&fbc->lock);
 		fbc->count += count;
-		__this_cpu_write(*fbc->counters, 0);
 		raw_spin_unlock(&fbc->lock);
+		__this_cpu_write(*fbc->counters, 0);
 	} else {
 		__this_cpu_write(*fbc->counters, count);
 	}
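
The percpu_counter hunk shrinks the critical section: fbc->lock protects the shared fbc->count, while the per-cpu slot is only ever written by its owning CPU (the kernel function disables preemption around this sequence), so clearing the slot does not need to happen under the lock. Below is a thread-based sketch of the same batching scheme, using a _Thread_local slot in place of per-CPU data; struct batch_counter, counter_add and local_count are illustrative names, not the kernel interface.

#include <pthread.h>

struct batch_counter {
	long long count;		/* shared total, protected by lock */
	pthread_mutex_t lock;
};

static _Thread_local long local_count;	/* private to this thread */

static void counter_add(struct batch_counter *c, long amount, long batch)
{
	long count = local_count + amount;

	if (count >= batch || count <= -batch) {
		/* fold the batched delta into the shared total... */
		pthread_mutex_lock(&c->lock);
		c->count += count;
		pthread_mutex_unlock(&c->lock);
		/*
		 * ...then clear the private slot outside the lock: no other
		 * thread ever writes it, which is the same argument the
		 * hunk above makes for the per-cpu slot.
		 */
		local_count = 0;
	} else {
		local_count = count;
	}
}

int main(void)
{
	struct batch_counter c = { 0, PTHREAD_MUTEX_INITIALIZER };

	for (int i = 0; i < 100; i++)
		counter_add(&c, 1, 32);	/* takes the shared lock once per batch of 32 */
	return 0;
}

Each thread accumulates privately and only takes the shared lock once per batch, which is the point of the percpu_counter design.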