Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/irq_regs.h    8
-rw-r--r--  include/linux/elevator.h         12
-rw-r--r--  include/linux/highmem.h          13
-rw-r--r--  include/linux/kernel_stat.h       2
-rw-r--r--  include/linux/kprobes.h           4
-rw-r--r--  include/linux/percpu.h          205
6 files changed, 222 insertions(+), 22 deletions(-)
diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h
index 5ae1d07d4a12..6bf9355fa7eb 100644
--- a/include/asm-generic/irq_regs.h
+++ b/include/asm-generic/irq_regs.h
@@ -22,15 +22,15 @@ DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
static inline struct pt_regs *get_irq_regs(void)
{
- return __get_cpu_var(__irq_regs);
+ return __this_cpu_read(__irq_regs);
}
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
- struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
+ struct pt_regs *old_regs;
- old_regs = *pp_regs;
- *pp_regs = new_regs;
+ old_regs = __this_cpu_read(__irq_regs);
+ __this_cpu_write(__irq_regs, new_regs);
return old_regs;
}
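For orientation, a minimal sketch (hypothetical per-cpu pointer slot my_slot, kernel build context assumed) of how the removed __get_cpu_var() pointer juggling maps onto the new __this_cpu_read()/__this_cpu_write() pair:

#include <linux/percpu.h>

/* Hypothetical per-cpu pointer slot, analogous to __irq_regs. */
static DEFINE_PER_CPU(void *, my_slot);

static inline void *swap_my_slot(void *new)
{
	void *old;

	/*
	 * Old idiom:
	 *	void **pp = &__get_cpu_var(my_slot);
	 *	old = *pp;
	 *	*pp = new;
	 * New idiom: one relocatable per-cpu access per operation, which
	 * an arch such as x86 can emit as a single segment-prefixed move.
	 */
	old = __this_cpu_read(my_slot);
	__this_cpu_write(my_slot, new);
	return old;
}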
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4fd978e7eb83..4d857973d2c9 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -195,15 +195,9 @@ enum {
/*
* io context count accounting
*/
-#define elv_ioc_count_mod(name, __val) \
- do { \
- preempt_disable(); \
- __get_cpu_var(name) += (__val); \
- preempt_enable(); \
- } while (0)
-
-#define elv_ioc_count_inc(name) elv_ioc_count_mod(name, 1)
-#define elv_ioc_count_dec(name) elv_ioc_count_mod(name, -1)
+#define elv_ioc_count_mod(name, __val) this_cpu_add(name, __val)
+#define elv_ioc_count_inc(name) this_cpu_inc(name)
+#define elv_ioc_count_dec(name) this_cpu_dec(name)
#define elv_ioc_count_read(name) \
({ \
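A hedged sketch of why the preempt_disable()/__get_cpu_var()/preempt_enable() sequence can collapse into a single call (hypothetical counter, kernel build context assumed): the this_cpu_* operations are preemption safe by definition, so the caller no longer has to bracket the update itself.

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, ioc_count);	/* hypothetical */

static void ioc_count_mod(long delta)
{
	/*
	 * Replaces:
	 *	preempt_disable();
	 *	__get_cpu_var(ioc_count) += delta;
	 *	preempt_enable();
	 * this_cpu_add() handles preemption internally; on x86 it can be
	 * a single segment-prefixed add instruction.
	 */
	this_cpu_add(ioc_count, delta);
}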
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index b676c585574e..3a93f73a8acc 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -81,7 +81,8 @@ DECLARE_PER_CPU(int, __kmap_atomic_idx);
static inline int kmap_atomic_idx_push(void)
{
- int idx = __get_cpu_var(__kmap_atomic_idx)++;
+ int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+
#ifdef CONFIG_DEBUG_HIGHMEM
WARN_ON_ONCE(in_irq() && !irqs_disabled());
BUG_ON(idx > KM_TYPE_NR);
@@ -91,16 +92,18 @@ static inline int kmap_atomic_idx_push(void)
static inline int kmap_atomic_idx(void)
{
- return __get_cpu_var(__kmap_atomic_idx) - 1;
+ return __this_cpu_read(__kmap_atomic_idx) - 1;
}
-static inline int kmap_atomic_idx_pop(void)
+static inline void kmap_atomic_idx_pop(void)
{
- int idx = --__get_cpu_var(__kmap_atomic_idx);
#ifdef CONFIG_DEBUG_HIGHMEM
+ int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+
BUG_ON(idx < 0);
+#else
+ __this_cpu_dec(__kmap_atomic_idx);
#endif
- return idx;
}
#endif
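The same conversion pattern, sketched with a hypothetical nesting counter (kernel build context assumed): __this_cpu_inc_return() yields the value after the increment, so subtracting one reproduces what the old post-increment expression returned, and the pop side only needs the result when debug checks are compiled in.

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, nest_idx);		/* hypothetical stack depth */

static inline int nest_push(void)
{
	/* Old: idx = __get_cpu_var(nest_idx)++;  (value before increment) */
	return __this_cpu_inc_return(nest_idx) - 1;
}

static inline void nest_pop(void)
{
	/* No caller uses the popped index, so a plain decrement suffices. */
	__this_cpu_dec(nest_idx);
}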
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index ad54c846911b..44e83ba12b5b 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -47,7 +47,7 @@ extern unsigned long long nr_context_switches(void);
#ifndef CONFIG_GENERIC_HARDIRQS
#define kstat_irqs_this_cpu(irq) \
- (kstat_this_cpu.irqs[irq])
+	(this_cpu_read(kstat.irqs[irq]))
struct irq_desc;
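Beyond simple scalars, this_cpu_read() also accepts a member of a per-cpu aggregate, which is what the rewritten macro relies on; a hedged sketch with a hypothetical statistics struct (kernel build context assumed):

#include <linux/percpu.h>

struct my_stats {			/* hypothetical */
	unsigned int irqs[16];
};
static DEFINE_PER_CPU(struct my_stats, my_stats);

static unsigned int my_irqs_this_cpu(int irq)
{
	/* The accessed lvalue is an array element inside a per-cpu
	 * struct; its size (4 bytes) selects the _4 size variant. */
	return this_cpu_read(my_stats.irqs[irq]);
}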
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index b78edb58ee66..dd7c12e875bc 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -305,12 +305,12 @@ struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
/* kprobe_running() will just return the current_kprobe on this CPU */
static inline struct kprobe *kprobe_running(void)
{
- return (__get_cpu_var(current_kprobe));
+ return (__this_cpu_read(current_kprobe));
}
static inline void reset_current_kprobe(void)
{
- __get_cpu_var(current_kprobe) = NULL;
+ __this_cpu_write(current_kprobe, NULL);
}
static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5095b834a6fb..27c3c6fcfad3 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -240,6 +240,21 @@ extern void __bad_size_call_parameter(void);
pscr_ret__; \
})
+#define __pcpu_size_call_return2(stem, variable, ...) \
+({ \
+ typeof(variable) pscr2_ret__; \
+ __verify_pcpu_ptr(&(variable)); \
+ switch(sizeof(variable)) { \
+ case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
+ case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
+ case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
+ case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
+ default: \
+ __bad_size_call_parameter(); break; \
+ } \
+ pscr2_ret__; \
+})
+
#define __pcpu_size_call(stem, variable, ...) \
do { \
__verify_pcpu_ptr(&(variable)); \
@@ -402,6 +417,89 @@ do { \
# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif
+#define _this_cpu_generic_add_return(pcp, val) \
+({ \
+ typeof(pcp) ret__; \
+ preempt_disable(); \
+ __this_cpu_add(pcp, val); \
+ ret__ = __this_cpu_read(pcp); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+# define this_cpu_add_return_1(pcp, val) _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+# define this_cpu_add_return_2(pcp, val) _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+# define this_cpu_add_return_4(pcp, val) _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+# define this_cpu_add_return_8(pcp, val) _this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
+
+#define _this_cpu_generic_xchg(pcp, nval) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = __this_cpu_read(pcp); \
+ __this_cpu_write(pcp, nval); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_xchg
+# ifndef this_cpu_xchg_1
+# define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_2
+# define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_4
+# define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_8
+# define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
+# endif
+# define this_cpu_xchg(pcp, nval) \
+ __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define _this_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = __this_cpu_read(pcp); \
+ if (ret__ == (oval)) \
+ __this_cpu_write(pcp, nval); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_cmpxchg
+# ifndef this_cpu_cmpxchg_1
+# define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_2
+# define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_4
+# define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_8
+# define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define this_cpu_cmpxchg(pcp, oval, nval) \
+ __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
/*
* Generic percpu operations that do not require preemption handling.
* Either we do not care about races or the caller has the
@@ -529,11 +627,87 @@ do { \
# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif
+#define __this_cpu_generic_add_return(pcp, val) \
+({ \
+ __this_cpu_add(pcp, val); \
+ __this_cpu_read(pcp); \
+})
+
+#ifndef __this_cpu_add_return
+# ifndef __this_cpu_add_return_1
+# define __this_cpu_add_return_1(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_2
+# define __this_cpu_add_return_2(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_4
+# define __this_cpu_add_return_4(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_8
+# define __this_cpu_add_return_8(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# endif
+# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
+#endif
+
+#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
+
+#define __this_cpu_generic_xchg(pcp, nval) \
+({ typeof(pcp) ret__; \
+ ret__ = __this_cpu_read(pcp); \
+ __this_cpu_write(pcp, nval); \
+ ret__; \
+})
+
+#ifndef __this_cpu_xchg
+# ifndef __this_cpu_xchg_1
+# define __this_cpu_xchg_1(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_2
+# define __this_cpu_xchg_2(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_4
+# define __this_cpu_xchg_4(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_8
+# define __this_cpu_xchg_8(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# define __this_cpu_xchg(pcp, nval) \
+ __pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+ typeof(pcp) ret__; \
+ ret__ = __this_cpu_read(pcp); \
+ if (ret__ == (oval)) \
+ __this_cpu_write(pcp, nval); \
+ ret__; \
+})
+
+#ifndef __this_cpu_cmpxchg
+# ifndef __this_cpu_cmpxchg_1
+# define __this_cpu_cmpxchg_1(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_2
+# define __this_cpu_cmpxchg_2(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_4
+# define __this_cpu_cmpxchg_4(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_8
+# define __this_cpu_cmpxchg_8(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define __this_cpu_cmpxchg(pcp, oval, nval) \
+ __pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
/*
* IRQ safe versions of the per cpu RMW operations. Note that these operations
* are *not* safe against modification of the same variable from another
* processors (which one gets when using regular atomic operations)
- . They are guaranteed to be atomic vs. local interrupts and
+ * They are guaranteed to be atomic vs. local interrupts and
* preemption only.
*/
#define irqsafe_cpu_generic_to_op(pcp, val, op) \
@@ -620,4 +794,33 @@ do { \
# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif
+#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+ typeof(pcp) ret__; \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ ret__ = __this_cpu_read(pcp); \
+ if (ret__ == (oval)) \
+ __this_cpu_write(pcp, nval); \
+ local_irq_restore(flags); \
+ ret__; \
+})
+
+#ifndef irqsafe_cpu_cmpxchg
+# ifndef irqsafe_cpu_cmpxchg_1
+# define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_2
+# define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_4
+# define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_8
+# define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define irqsafe_cpu_cmpxchg(pcp, oval, nval) \
+ __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
+
#endif /* __LINUX_PERCPU_H */
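To close, a hedged usage sketch of the operations this patch introduces (hypothetical per-cpu counter and flag, kernel build context assumed). The generic fallbacks above bracket the read-modify-write with preempt_disable()/preempt_enable(), or local_irq_save()/local_irq_restore() for the irqsafe_ variant, while an architecture can override the per-size _1/_2/_4/_8 helpers with single instructions.

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, hits);	/* hypothetical */
static DEFINE_PER_CPU(int, claimed);		/* hypothetical */

static unsigned long count_hit(void)
{
	/* Add and read back in one preemption-safe operation. */
	return this_cpu_add_return(hits, 1);
}

static int try_claim(void)
{
	/*
	 * cmpxchg returns the previous value, so 0 means this caller set
	 * the flag.  Safe against preemption only; use
	 * irqsafe_cpu_cmpxchg() when an interrupt handler may race.
	 */
	return this_cpu_cmpxchg(claimed, 0, 1) == 0;
}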