Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c             |  4
-rw-r--r--  kernel/configs.c           |  2
-rw-r--r--  kernel/cpu.c               | 83
-rw-r--r--  kernel/fork.c              |  7
-rw-r--r--  kernel/futex.c             | 21
-rw-r--r--  kernel/irq/manage.c        | 15
-rw-r--r--  kernel/kprobes.c           | 36
-rw-r--r--  kernel/params.c            |  2
-rw-r--r--  kernel/posix-cpu-timers.c  |  2
-rw-r--r--  kernel/power/main.c        | 21
-rw-r--r--  kernel/printk.c            |  2
-rw-r--r--  kernel/ptrace.c            |  3
-rw-r--r--  kernel/rcupdate.c          | 59
-rw-r--r--  kernel/rcutorture.c        |  3
-rw-r--r--  kernel/sys.c               |  3
-rw-r--r--  kernel/sysctl.c            | 29
-rw-r--r--  kernel/time.c              | 22
-rw-r--r--  kernel/workqueue.c         | 12
18 files changed, 229 insertions(+), 97 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 0c56320d38dc..32fa03ad1984 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -291,8 +291,10 @@ int kauditd_thread(void *dummy)
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&kauditd_wait, &wait);
- if (!skb_queue_len(&audit_skb_queue))
+ if (!skb_queue_len(&audit_skb_queue)) {
+ try_to_freeze();
schedule();
+ }
__set_current_state(TASK_RUNNING);
remove_wait_queue(&kauditd_wait, &wait);
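The hunk above turns kauditd's wait loop into the standard freezer-aware form: a kernel thread must call try_to_freeze() before sleeping, or a system suspend stalls waiting for it to enter the refrigerator. A minimal sketch of the general pattern, with work_pending() and process_work() as hypothetical placeholders:

    static int my_kthread(void *unused)
    {
        while (!kthread_should_stop()) {
            set_current_state(TASK_INTERRUPTIBLE);
            if (!work_pending()) {          /* hypothetical predicate */
                try_to_freeze();            /* park here during suspend */
                schedule();
            }
            __set_current_state(TASK_RUNNING);
            process_work();                 /* hypothetical work body */
        }
        return 0;
    }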
diff --git a/kernel/configs.c b/kernel/configs.c
index 986f7af31e0a..009e1ebdcb88 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -3,7 +3,7 @@
* Echo the kernel .config file used to build the kernel
*
* Copyright (C) 2002 Khalid Aziz <khalid_aziz@hp.com>
- * Copyright (C) 2002 Randy Dunlap <rddunlap@osdl.org>
+ * Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net>
* Copyright (C) 2002 Al Stone <ahs3@fc.hp.com>
* Copyright (C) 2002 Hewlett-Packard Company
*
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d61ba88f34e5..e882c6babf41 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,47 +16,76 @@
#include <asm/semaphore.h>
/* This protects CPUs going up and down... */
-DECLARE_MUTEX(cpucontrol);
-EXPORT_SYMBOL_GPL(cpucontrol);
+static DECLARE_MUTEX(cpucontrol);
static struct notifier_block *cpu_chain;
-/*
- * Used to check by callers if they need to acquire the cpucontrol
- * or not to protect a cpu from being removed. Its sometimes required to
- * call these functions both for normal operations, and in response to
- * a cpu being added/removed. If the context of the call is in the same
- * thread context as a CPU hotplug thread, we dont need to take the lock
- * since its already protected
- * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
- */
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct *lock_cpu_hotplug_owner;
+static int lock_cpu_hotplug_depth;
-int current_in_cpu_hotplug(void)
+static int __lock_cpu_hotplug(int interruptible)
{
- return (current->flags & PF_HOTPLUG_CPU);
+ int ret = 0;
+
+ if (lock_cpu_hotplug_owner != current) {
+ if (interruptible)
+ ret = down_interruptible(&cpucontrol);
+ else
+ down(&cpucontrol);
+ }
+
+ /*
+ * Set only if we succeed in locking
+ */
+ if (!ret) {
+ lock_cpu_hotplug_depth++;
+ lock_cpu_hotplug_owner = current;
+ }
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
+void lock_cpu_hotplug(void)
+{
+ __lock_cpu_hotplug(0);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
+void unlock_cpu_hotplug(void)
+{
+ if (--lock_cpu_hotplug_depth == 0) {
+ lock_cpu_hotplug_owner = NULL;
+ up(&cpucontrol);
+ }
+}
+EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
+
+int lock_cpu_hotplug_interruptible(void)
+{
+ return __lock_cpu_hotplug(1);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
+#endif /* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
int ret;
- if ((ret = down_interruptible(&cpucontrol)) != 0)
+ if ((ret = lock_cpu_hotplug_interruptible()) != 0)
return ret;
ret = notifier_chain_register(&cpu_chain, nb);
- up(&cpucontrol);
+ unlock_cpu_hotplug();
return ret;
}
EXPORT_SYMBOL(register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
- down(&cpucontrol);
+ lock_cpu_hotplug();
notifier_chain_unregister(&cpu_chain, nb);
- up(&cpucontrol);
+ unlock_cpu_hotplug();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
@@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu)
goto out;
}
- /*
- * Leave a trace in current->flags indicating we are already in
- * process of performing CPU hotplug. Callers can check if cpucontrol
- * is already acquired by current thread, and if so not cause
- * a dead lock by not acquiring the lock
- */
- current->flags |= PF_HOTPLUG_CPU;
err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
(void *)(long)cpu);
if (err == NOTIFY_BAD) {
@@ -171,7 +193,6 @@ out_thread:
out_allowed:
set_cpus_allowed(current, old_allowed);
out:
- current->flags &= ~PF_HOTPLUG_CPU;
unlock_cpu_hotplug();
return err;
}
@@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
int ret;
void *hcpu = (void *)(long)cpu;
- if ((ret = down_interruptible(&cpucontrol)) != 0)
+ if ((ret = lock_cpu_hotplug_interruptible()) != 0)
return ret;
if (cpu_online(cpu) || !cpu_present(cpu)) {
@@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu)
goto out;
}
- /*
- * Leave a trace in current->flags indicating we are already in
- * process of performing CPU hotplug.
- */
- current->flags |= PF_HOTPLUG_CPU;
ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
if (ret == NOTIFY_BAD) {
printk("%s: attempt to bring up CPU %u failed\n",
@@ -217,7 +233,6 @@ out_notify:
if (ret != 0)
notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out:
- current->flags &= ~PF_HOTPLUG_CPU;
- up(&cpucontrol);
+ unlock_cpu_hotplug();
return ret;
}
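The owner/depth bookkeeping above makes the hotplug lock recursive for the thread that already holds it, replacing the old PF_HOTPLUG_CPU flag. A minimal sketch of why that matters, with my_cpu_callback as a hypothetical notifier:

    static int my_cpu_callback(struct notifier_block *nb,
                               unsigned long action, void *hcpu)
    {
        /* Invoked from cpu_up()/cpu_down(), which already hold the lock. */
        lock_cpu_hotplug();     /* owner == current: only depth++ */
        /* ... touch per-CPU state safely ... */
        unlock_cpu_hotplug();   /* depth still > 0: semaphore stays held */
        return NOTIFY_OK;
    }

Previously such a callback had to test current_in_cpu_hotplug() and skip the lock; now the nested acquisition degenerates to a depth increment.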
diff --git a/kernel/fork.c b/kernel/fork.c
index e0d0b77343f8..fb8572a42297 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -263,7 +263,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
rb_parent = &tmp->vm_rb;
mm->map_count++;
- retval = copy_page_range(mm, oldmm, tmp);
+ retval = copy_page_range(mm, oldmm, mpnt);
if (tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
@@ -1124,8 +1124,6 @@ static task_t *copy_process(unsigned long clone_flags,
if (unlikely(p->ptrace & PT_PTRACED))
__ptrace_link(p, current->parent);
- cpuset_fork(p);
-
attach_pid(p, PIDTYPE_PID, p->pid);
attach_pid(p, PIDTYPE_TGID, p->tgid);
if (thread_group_leader(p)) {
@@ -1135,13 +1133,14 @@ static task_t *copy_process(unsigned long clone_flags,
__get_cpu_var(process_counts)++;
}
- proc_fork_connector(p);
if (!current->signal->tty && p->signal->tty)
p->signal->tty = NULL;
nr_threads++;
total_forks++;
write_unlock_irq(&tasklist_lock);
+ proc_fork_connector(p);
+ cpuset_fork(p);
retval = 0;
fork_out:
diff --git a/kernel/futex.c b/kernel/futex.c
index aca8d10704f6..5e71a6bf6f6b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -201,21 +201,6 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
* from swap. But that's a lot of code to duplicate here
* for a rare case, so we simply fetch the page.
*/
-
- /*
- * Do a quick atomic lookup first - this is the fastpath.
- */
- page = follow_page(mm, uaddr, FOLL_TOUCH|FOLL_GET);
- if (likely(page != NULL)) {
- key->shared.pgoff =
- page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- put_page(page);
- return 0;
- }
-
- /*
- * Do it the general way.
- */
err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
if (err >= 0) {
key->shared.pgoff =
@@ -285,7 +270,13 @@ static void wake_futex(struct futex_q *q)
/*
* The waiting task can free the futex_q as soon as this is written,
* without taking any locks. This must come last.
+ *
+ * A memory barrier is required here to prevent the following store
+ * to lock_ptr from getting ahead of the wakeup. Clearing the lock
+ * at the end of wake_up_all() does not prevent this store from
+ * moving.
*/
+ wmb();
q->lock_ptr = NULL;
}
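A userspace analogue of the reordering the wmb() forbids, as a sketch (C11 release/acquire atomics stand in for wmb() and the waiter's read; this is not the futex code itself):

    #include <stdatomic.h>

    struct obj {
        int data;               /* plays the wait queue inside futex_q */
        atomic_int published;   /* plays q->lock_ptr */
    };

    static void waker(struct obj *o)
    {
        o->data = 42;           /* the wakeup's writes into *o */
        /* Release ordering keeps the data store visible before the flag,
         * as wmb() keeps the wakeup ahead of q->lock_ptr = NULL. */
        atomic_store_explicit(&o->published, 1, memory_order_release);
    }

    static int waiter(struct obj *o)
    {
        if (atomic_load_explicit(&o->published, memory_order_acquire))
            return o->data;     /* o may be freed after this point */
        return -1;
    }

If the flag store were allowed to move ahead of the data writes, the waiter could observe the flag, free the object, and race the waker's remaining stores.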
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3bd7226d15fa..81c49a4d679e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -36,6 +36,9 @@ void synchronize_irq(unsigned int irq)
{
struct irq_desc *desc = irq_desc + irq;
+ if (irq >= NR_IRQS)
+ return;
+
while (desc->status & IRQ_INPROGRESS)
cpu_relax();
}
@@ -60,6 +63,9 @@ void disable_irq_nosync(unsigned int irq)
irq_desc_t *desc = irq_desc + irq;
unsigned long flags;
+ if (irq >= NR_IRQS)
+ return;
+
spin_lock_irqsave(&desc->lock, flags);
if (!desc->depth++) {
desc->status |= IRQ_DISABLED;
@@ -86,6 +92,9 @@ void disable_irq(unsigned int irq)
{
irq_desc_t *desc = irq_desc + irq;
+ if (irq >= NR_IRQS)
+ return;
+
disable_irq_nosync(irq);
if (desc->action)
synchronize_irq(irq);
@@ -108,6 +117,9 @@ void enable_irq(unsigned int irq)
irq_desc_t *desc = irq_desc + irq;
unsigned long flags;
+ if (irq >= NR_IRQS)
+ return;
+
spin_lock_irqsave(&desc->lock, flags);
switch (desc->depth) {
case 0:
@@ -163,6 +175,9 @@ int setup_irq(unsigned int irq, struct irqaction * new)
unsigned long flags;
int shared = 0;
+ if (irq >= NR_IRQS)
+ return -EINVAL;
+
if (desc->handler == &no_irq_type)
return -ENOSYS;
/*
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5beda378cc75..3bb71e63a37e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -246,6 +246,19 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
return ret;
}
+/* Walks the list and increments nmissed count for multiprobe case */
+void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
+{
+ struct kprobe *kp;
+ if (p->pre_handler != aggr_pre_handler) {
+ p->nmissed++;
+ } else {
+ list_for_each_entry_rcu(kp, &p->list, list)
+ kp->nmissed++;
+ }
+ return;
+}
+
/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
@@ -399,10 +412,7 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
INIT_LIST_HEAD(&ap->list);
list_add_rcu(&p->list, &ap->list);
- INIT_HLIST_NODE(&ap->hlist);
- hlist_del_rcu(&p->hlist);
- hlist_add_head_rcu(&ap->hlist,
- &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
+ hlist_replace_rcu(&p->hlist, &ap->hlist);
}
/*
@@ -462,9 +472,16 @@ int __kprobes register_kprobe(struct kprobe *p)
int ret = 0;
unsigned long flags = 0;
struct kprobe *old_p;
+ struct module *mod;
+
+ if ((!kernel_text_address((unsigned long) p->addr)) ||
+ in_kprobes_functions((unsigned long) p->addr))
+ return -EINVAL;
+
+ if ((mod = module_text_address((unsigned long) p->addr)) &&
+ (unlikely(!try_module_get(mod))))
+ return -EINVAL;
- if ((ret = in_kprobes_functions((unsigned long) p->addr)) != 0)
- return ret;
if ((ret = arch_prepare_kprobe(p)) != 0)
goto rm_kprobe;
@@ -488,6 +505,8 @@ out:
rm_kprobe:
if (ret == -EEXIST)
arch_remove_kprobe(p);
+ if (ret && mod)
+ module_put(mod);
return ret;
}
@@ -495,6 +514,7 @@ void __kprobes unregister_kprobe(struct kprobe *p)
{
unsigned long flags;
struct kprobe *old_p;
+ struct module *mod;
spin_lock_irqsave(&kprobe_lock, flags);
old_p = get_kprobe(p->addr);
@@ -506,6 +526,10 @@ void __kprobes unregister_kprobe(struct kprobe *p)
cleanup_kprobe(p, flags);
synchronize_sched();
+
+ if ((mod = module_text_address((unsigned long)p->addr)))
+ module_put(mod);
+
if (old_p->pre_handler == aggr_pre_handler &&
list_empty(&old_p->list))
kfree(old_p);
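Together, these hunks pin the module that owns a probed address: try_module_get() at registration prevents the probed text from being unloaded, and module_put() happens only after synchronize_sched(), when no CPU can still be executing the probe. A hypothetical usage sketch (my_pre_handler and some_module_func are placeholders):

    static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
        /* runs just before the probed instruction */
        return 0;
    }

    static struct kprobe kp = {
        .addr        = (kprobe_opcode_t *)some_module_func,
        .pre_handler = my_pre_handler,
    };

    static int __init probe_init(void)
    {
        /* now rejects non-text addresses; pins the owning module */
        return register_kprobe(&kp);
    }

    static void __exit probe_exit(void)
    {
        unregister_kprobe(&kp);     /* drops the module reference */
    }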
diff --git a/kernel/params.c b/kernel/params.c
index 47ba69547945..c76ad25e6a21 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -619,7 +619,7 @@ static void __init param_sysfs_builtin(void)
/* module-related sysfs stuff */
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_SYSFS
#define to_module_attr(n) container_of(n, struct module_attribute, attr);
#define to_module_kobject(n) container_of(n, struct module_kobject, kobj);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 84af54c39e1b..cae4f5728997 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -36,7 +36,7 @@ timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
union cpu_time_count ret;
ret.sched = 0; /* high half always zero when .cpu used */
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
- ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
+ ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
} else {
ret.cpu = timespec_to_cputime(tp);
}
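The cast matters because the multiplication was previously performed in the width of tv_sec: with a 32-bit long, any tv_sec above two seconds wraps before the result is widened. A standalone demonstration (uint32_t stands in for the 32-bit case so the wrap is reproducible on any host):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000

    int main(void)
    {
        uint32_t tv_sec = 5, tv_nsec = 0;

        /* multiply in 32 bits: 5 * 1e9 wraps to 705032704 */
        uint64_t wrapped = tv_sec * NSEC_PER_SEC + tv_nsec;
        /* widen first, as the patch does: multiply in 64 bits */
        uint64_t widened = (uint64_t)tv_sec * NSEC_PER_SEC + tv_nsec;

        printf("wrapped=%llu widened=%llu\n",
               (unsigned long long)wrapped,
               (unsigned long long)widened);
        return 0;
    }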
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 6ee2cad530e8..d253f3ae2fa5 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -24,7 +24,7 @@
DECLARE_MUTEX(pm_sem);
-struct pm_ops * pm_ops = NULL;
+struct pm_ops *pm_ops;
suspend_disk_method_t pm_disk_mode = PM_DISK_SHUTDOWN;
/**
@@ -151,6 +151,18 @@ static char *pm_states[PM_SUSPEND_MAX] = {
#endif
};
+static inline int valid_state(suspend_state_t state)
+{
+ /* Suspend-to-disk does not really need low-level support.
+ * It can work with reboot if needed. */
+ if (state == PM_SUSPEND_DISK)
+ return 1;
+
+ if (pm_ops && pm_ops->valid && !pm_ops->valid(state))
+ return 0;
+ return 1;
+}
+
/**
* enter_state - Do common work of entering low-power state.
@@ -167,7 +179,7 @@ static int enter_state(suspend_state_t state)
{
int error;
- if (pm_ops && pm_ops->valid && !pm_ops->valid(state))
+ if (!valid_state(state))
return -ENODEV;
if (down_trylock(&pm_sem))
return -EBUSY;
@@ -238,9 +250,8 @@ static ssize_t state_show(struct subsystem * subsys, char * buf)
char * s = buf;
for (i = 0; i < PM_SUSPEND_MAX; i++) {
- if (pm_states[i] && pm_ops && (!pm_ops->valid
- ||(pm_ops->valid && pm_ops->valid(i))))
- s += sprintf(s,"%s ",pm_states[i]);
+ if (pm_states[i] && valid_state(i))
+ s += sprintf(s,"%s ", pm_states[i]);
}
s += sprintf(s,"\n");
return (s - buf);
diff --git a/kernel/printk.c b/kernel/printk.c
index ac8a08f36207..5287be83e3e7 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -956,7 +956,7 @@ int unregister_console(struct console *console)
if (console_drivers == console) {
console_drivers=console->next;
res = 0;
- } else {
+ } else if (console_drivers) {
for (a=console_drivers->next, b=console_drivers ;
a; b=a, a=b->next) {
if (a == console) {
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 17ee7e5a3451..656476eedb1b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -241,7 +241,8 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
if (write) {
copy_to_user_page(vma, page, addr,
maddr + offset, buf, bytes);
- set_page_dirty_lock(page);
+ if (!PageCompound(page))
+ set_page_dirty_lock(page);
} else {
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index c4d159a21e04..48d3bce465b8 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -116,6 +116,10 @@ void fastcall call_rcu(struct rcu_head *head,
local_irq_restore(flags);
}
+static atomic_t rcu_barrier_cpu_count;
+static struct semaphore rcu_barrier_sema;
+static struct completion rcu_barrier_completion;
+
/**
* call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
@@ -162,6 +166,42 @@ long rcu_batches_completed(void)
return rcu_ctrlblk.completed;
}
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+ if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+ complete(&rcu_barrier_completion);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_func(void *notused)
+{
+ int cpu = smp_processor_id();
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ struct rcu_head *head;
+
+ head = &rdp->barrier;
+ atomic_inc(&rcu_barrier_cpu_count);
+ call_rcu(head, rcu_barrier_callback);
+}
+
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ */
+void rcu_barrier(void)
+{
+ BUG_ON(in_interrupt());
+ /* Take cpucontrol semaphore to protect against CPU hotplug */
+ down(&rcu_barrier_sema);
+ init_completion(&rcu_barrier_completion);
+ atomic_set(&rcu_barrier_cpu_count, 0);
+ on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+ wait_for_completion(&rcu_barrier_completion);
+ up(&rcu_barrier_sema);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
/*
* Invoke the completed RCU callbacks. They are expected to be in
* a per-cpu list.
@@ -217,15 +257,23 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp,
if (rcp->next_pending &&
rcp->completed == rcp->cur) {
- /* Can't change, since spin lock held. */
- cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
-
rcp->next_pending = 0;
- /* next_pending == 0 must be visible in __rcu_process_callbacks()
- * before it can see new value of cur.
+ /*
+ * next_pending == 0 must be visible in
+ * __rcu_process_callbacks() before it can see new value of cur.
*/
smp_wmb();
rcp->cur++;
+
+ /*
+ * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
+ * barrier. Otherwise it can cause tickless idle CPUs to be
+ * included in rsp->cpumask, which will extend grace periods
+ * unnecessarily.
+ */
+ smp_mb();
+ cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
+
}
}
@@ -457,6 +505,7 @@ static struct notifier_block __devinitdata rcu_nb = {
*/
void __init rcu_init(void)
{
+ sema_init(&rcu_barrier_sema, 1);
rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
/* Register notifier for non-boot CPUs */
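rcu_barrier() works by posting one sentinel callback per CPU and waiting for the last to fire; since call_rcu() callbacks on a given CPU run in queueing order, every callback queued before the barrier has completed by the time it returns. Its canonical use is ahead of freeing or unloading whatever the callbacks touch; a hedged module-exit sketch (names are placeholders):

    static void __exit my_module_exit(void)
    {
        /* 1. Stop queueing new call_rcu() callbacks. */
        unregister_everything();

        /* 2. Wait for every already-queued callback to run, so none
         *    can execute this module's code after unload. */
        rcu_barrier();
    }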
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 88c28d476550..49fbbeff201c 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -409,9 +409,8 @@ rcu_torture_cleanup(void)
stats_task = NULL;
/* Wait for all RCU callbacks to fire. */
+ rcu_barrier();
- for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++)
- synchronize_rcu();
rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
printk(KERN_ALERT TORTURE_FLAG
"--- End of test: %s\n",
diff --git a/kernel/sys.c b/kernel/sys.c
index bce933ebb29f..eecf84526afe 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -32,6 +32,7 @@
#include <linux/compat.h>
#include <linux/syscalls.h>
+#include <linux/kprobes.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -168,7 +169,7 @@ EXPORT_SYMBOL(notifier_chain_unregister);
* of the last notifier function called.
*/
-int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
{
int ret=NOTIFY_DONE;
struct notifier_block *nb = *n;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9990e10192e8..b53115b882e1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2192,29 +2192,32 @@ int sysctl_string(ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen, void **context)
{
- size_t l, len;
-
if (!table->data || !table->maxlen)
return -ENOTDIR;
if (oldval && oldlenp) {
- if (get_user(len, oldlenp))
+ size_t bufsize;
+ if (get_user(bufsize, oldlenp))
return -EFAULT;
- if (len) {
- l = strlen(table->data);
- if (len > l) len = l;
- if (len >= table->maxlen)
+ if (bufsize) {
+ size_t len = strlen(table->data), copied;
+
+ /* This shouldn't trigger for a well-formed sysctl */
+ if (len > table->maxlen)
len = table->maxlen;
- if(copy_to_user(oldval, table->data, len))
- return -EFAULT;
- if(put_user(0, ((char __user *) oldval) + len))
+
+ /* Copy up to a max of bufsize-1 bytes of the string */
+ copied = (len >= bufsize) ? bufsize - 1 : len;
+
+ if (copy_to_user(oldval, table->data, copied) ||
+ put_user(0, (char __user *)(oldval + copied)))
return -EFAULT;
- if(put_user(len, oldlenp))
+ if (put_user(len, oldlenp))
return -EFAULT;
}
}
if (newval && newlen) {
- len = newlen;
+ size_t len = newlen;
if (len > table->maxlen)
len = table->maxlen;
if(copy_from_user(table->data, newval, len))
@@ -2223,7 +2226,7 @@ int sysctl_string(ctl_table *table, int __user *name, int nlen,
len--;
((char *) table->data)[len] = 0;
}
- return 0;
+ return 1;
}
/*
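To make the bounds arithmetic concrete, a worked trace with hypothetical values:

    /*
     *   table->data = "hello"      ->  len = 5
     *   user passes bufsize = 4
     *   copied = bufsize - 1 = 3   ->  user buffer gets "hel\0" (4 bytes)
     *   put_user(len, oldlenp)     ->  caller learns the true length, 5
     */

The old code copied up to len bytes and then wrote the terminating NUL at oldval + len, which can land one byte past the end when the string fills the user-declared length exactly.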
diff --git a/kernel/time.c b/kernel/time.c
index 245d595a13cb..b94bfa8c03e0 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -561,6 +561,28 @@ void getnstimeofday(struct timespec *tv)
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif
+void getnstimestamp(struct timespec *ts)
+{
+ unsigned int seq;
+ struct timespec wall2mono;
+
+ /* synchronize with settimeofday() changes */
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ getnstimeofday(ts);
+ wall2mono = wall_to_monotonic;
+ } while(unlikely(read_seqretry(&xtime_lock, seq)));
+
+ /* adjust to monotonically-increasing values */
+ ts->tv_sec += wall2mono.tv_sec;
+ ts->tv_nsec += wall2mono.tv_nsec;
+ while (unlikely(ts->tv_nsec >= NSEC_PER_SEC)) {
+ ts->tv_nsec -= NSEC_PER_SEC;
+ ts->tv_sec++;
+ }
+}
+EXPORT_SYMBOL_GPL(getnstimestamp);
+
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
{
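getnstimestamp() folds wall_to_monotonic into a wall-clock reading taken under the xtime seqlock, so callers get a nanosecond timestamp that keeps advancing across settimeofday() changes to the wall clock. Usage sketch:

    struct timespec ts;

    getnstimestamp(&ts);    /* monotonic across wall-clock steps */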
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 42df83d7fad2..2bd5aee1c736 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -102,7 +102,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
if (!test_and_set_bit(0, &work->pending)) {
if (unlikely(is_single_threaded(wq)))
- cpu = 0;
+ cpu = any_online_cpu(cpu_online_map);
BUG_ON(!list_empty(&work->entry));
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
ret = 1;
@@ -118,7 +118,7 @@ static void delayed_work_timer_fn(unsigned long __data)
int cpu = smp_processor_id();
if (unlikely(is_single_threaded(wq)))
- cpu = 0;
+ cpu = any_online_cpu(cpu_online_map);
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
@@ -266,8 +266,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
might_sleep();
if (is_single_threaded(wq)) {
- /* Always use cpu 0's area. */
- flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, 0));
+ /* Always use first cpu's area. */
+ flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
} else {
int cpu;
@@ -320,7 +320,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
lock_cpu_hotplug();
if (singlethread) {
INIT_LIST_HEAD(&wq->list);
- p = create_workqueue_thread(wq, 0);
+ p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
if (!p)
destroy = 1;
else
@@ -374,7 +374,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
/* We don't need the distraction of CPUs appearing and vanishing. */
lock_cpu_hotplug();
if (is_single_threaded(wq))
- cleanup_workqueue_thread(wq, 0);
+ cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
else {
for_each_online_cpu(cpu)
cleanup_workqueue_thread(wq, cpu);
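All four workqueue hunks retire the same hard-coded assumption: with CPU hotplug, CPU 0 need not be online, so the single worker of a single-threaded workqueue is located with any_online_cpu() instead. A sketch of the helper's behavior, hedged on the 2.6.14-era cpumask API:

    /* Returns the first CPU that is both set in the mask and online,
     * e.g. 1 when CPUs {1,2,3} are online but CPU 0 is not. */
    int cpu = any_online_cpu(cpu_online_map);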