author     Linus Torvalds <torvalds@linux-foundation.org>  2009-01-03 11:56:24 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-01-03 11:56:24 -0800
commit     61420f59a589c0668f70cbe725785837c78ece90 (patch)
tree       79ae77d731cd2425677b9527d50079d8cf34c3b2 /arch
parent     d97106ab53f812910a62d18afb9dbe882819c1ba (diff)
parent     c742b31c03f37c5c499178f09f57381aa6c70131 (diff)
Merge branch 'cputime' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'cputime' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [PATCH] fast vdso implementation for CLOCK_THREAD_CPUTIME_ID
  [PATCH] improve idle cputime accounting
  [PATCH] improve precision of idle time detection.
  [PATCH] improve precision of process accounting.
  [PATCH] idle cputime accounting
  [PATCH] fix scaled & unscaled cputime accounting
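The first patch in the series adds a vdso fast path on s390x for per-thread CPU time queries. A minimal userspace sketch (not part of this commit) of the call it accelerates — clock_gettime() with CLOCK_THREAD_CPUTIME_ID, which can then be answered in the vdso instead of via a system call:

/*
 * Illustration only: query the calling thread's CPU time.
 * On kernels with the new s390x vdso code this is served without
 * entering the kernel when the ECTG facility is available.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) != 0) {
		perror("clock_gettime");
		return 1;
	}
	printf("thread cputime: %ld.%09ld s\n",
	       (long) ts.tv_sec, (long) ts.tv_nsec);
	return 0;
}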
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/kernel/time.c                  |  18
-rw-r--r--  arch/powerpc/kernel/process.c            |   1
-rw-r--r--  arch/powerpc/kernel/time.c               |  18
-rw-r--r--  arch/s390/include/asm/cpu.h              |   7
-rw-r--r--  arch/s390/include/asm/cputime.h          |  42
-rw-r--r--  arch/s390/include/asm/lowcore.h          |  49
-rw-r--r--  arch/s390/include/asm/system.h           |   4
-rw-r--r--  arch/s390/include/asm/thread_info.h      |   2
-rw-r--r--  arch/s390/include/asm/timer.h            |  16
-rw-r--r--  arch/s390/include/asm/vdso.h             |  15
-rw-r--r--  arch/s390/kernel/asm-offsets.c           |   5
-rw-r--r--  arch/s390/kernel/entry.S                 |   5
-rw-r--r--  arch/s390/kernel/entry64.S               |  50
-rw-r--r--  arch/s390/kernel/head64.S                |   2
-rw-r--r--  arch/s390/kernel/process.c               |  43
-rw-r--r--  arch/s390/kernel/s390_ext.c              |   2
-rw-r--r--  arch/s390/kernel/setup.c                 |   2
-rw-r--r--  arch/s390/kernel/smp.c                   |  34
-rw-r--r--  arch/s390/kernel/vdso.c                  | 123
-rw-r--r--  arch/s390/kernel/vdso64/clock_getres.S   |   5
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S  |  39
-rw-r--r--  arch/s390/kernel/vtime.c                 | 486
-rw-r--r--  arch/x86/xen/time.c                      |  10
23 files changed, 589 insertions(+), 389 deletions(-)
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 65c10a42c88f..f0ebb342409d 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -93,13 +93,14 @@ void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
now = ia64_get_itc();
delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
- account_system_time(prev, 0, delta_stime);
- account_system_time_scaled(prev, delta_stime);
+ if (idle_task(smp_processor_id()) != prev)
+ account_system_time(prev, 0, delta_stime, delta_stime);
+ else
+ account_idle_time(delta_stime);
if (pi->ac_utime) {
delta_utime = cycle_to_cputime(pi->ac_utime);
- account_user_time(prev, delta_utime);
- account_user_time_scaled(prev, delta_utime);
+ account_user_time(prev, delta_utime, delta_utime);
}
pi->ac_stamp = ni->ac_stamp = now;
@@ -122,8 +123,10 @@ void account_system_vtime(struct task_struct *tsk)
now = ia64_get_itc();
delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
- account_system_time(tsk, 0, delta_stime);
- account_system_time_scaled(tsk, delta_stime);
+ if (irq_count() || idle_task(smp_processor_id()) != tsk)
+ account_system_time(tsk, 0, delta_stime, delta_stime);
+ else
+ account_idle_time(delta_stime);
ti->ac_stime = 0;
ti->ac_stamp = now;
@@ -143,8 +146,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
if (ti->ac_utime) {
delta_utime = cycle_to_cputime(ti->ac_utime);
- account_user_time(p, delta_utime);
- account_user_time_scaled(p, delta_utime);
+ account_user_time(p, delta_utime, delta_utime);
ti->ac_utime = 0;
}
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 51b201ddf9a1..fb7049c054c0 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -33,6 +33,7 @@
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
+#include <linux/kernel_stat.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 99f1ddd68582..c9564031a2a9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -256,8 +256,10 @@ void account_system_vtime(struct task_struct *tsk)
delta += sys_time;
get_paca()->system_time = 0;
}
- account_system_time(tsk, 0, delta);
- account_system_time_scaled(tsk, deltascaled);
+ if (in_irq() || idle_task(smp_processor_id()) != tsk)
+ account_system_time(tsk, 0, delta, deltascaled);
+ else
+ account_idle_time(delta);
per_cpu(cputime_last_delta, smp_processor_id()) = delta;
per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
local_irq_restore(flags);
@@ -275,10 +277,8 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
utime = get_paca()->user_time;
get_paca()->user_time = 0;
- account_user_time(tsk, utime);
-
utimescaled = cputime_to_scaled(utime);
- account_user_time_scaled(tsk, utimescaled);
+ account_user_time(tsk, utime, utimescaled);
}
/*
@@ -338,8 +338,12 @@ void calculate_steal_time(void)
tb = mftb();
purr = mfspr(SPRN_PURR);
stolen = (tb - pme->tb) - (purr - pme->purr);
- if (stolen > 0)
- account_steal_time(current, stolen);
+ if (stolen > 0) {
+ if (idle_task(smp_processor_id()) != current)
+ account_steal_time(stolen);
+ else
+ account_idle_time(stolen);
+ }
pme->tb = tb;
pme->purr = purr;
}
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
index e5a6a9ba3adf..d60a2eefb17b 100644
--- a/arch/s390/include/asm/cpu.h
+++ b/arch/s390/include/asm/cpu.h
@@ -14,7 +14,6 @@
struct s390_idle_data {
spinlock_t lock;
- unsigned int in_idle;
unsigned long long idle_count;
unsigned long long idle_enter;
unsigned long long idle_time;
@@ -22,12 +21,12 @@ struct s390_idle_data {
DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
-void s390_idle_leave(void);
+void vtime_start_cpu(void);
static inline void s390_idle_check(void)
{
- if ((&__get_cpu_var(s390_idle))->in_idle)
- s390_idle_leave();
+ if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
+ vtime_start_cpu();
}
#endif /* _ASM_S390_CPU_H_ */
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 133ce054fc89..521726430afa 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -11,7 +11,7 @@
#include <asm/div64.h>
-/* We want to use micro-second resolution. */
+/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
typedef unsigned long long cputime_t;
typedef unsigned long long cputime64_t;
@@ -53,9 +53,9 @@ __div(unsigned long long n, unsigned int base)
#define cputime_ge(__a, __b) ((__a) >= (__b))
#define cputime_lt(__a, __b) ((__a) < (__b))
#define cputime_le(__a, __b) ((__a) <= (__b))
-#define cputime_to_jiffies(__ct) (__div((__ct), 1000000 / HZ))
+#define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ))
#define cputime_to_scaled(__ct) (__ct)
-#define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (1000000 / HZ))
+#define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ))
#define cputime64_zero (0ULL)
#define cputime64_add(__a, __b) ((__a) + (__b))
@@ -64,7 +64,7 @@ __div(unsigned long long n, unsigned int base)
static inline u64
cputime64_to_jiffies64(cputime64_t cputime)
{
- do_div(cputime, 1000000 / HZ);
+ do_div(cputime, 4096000000ULL / HZ);
return cputime;
}
@@ -74,13 +74,13 @@ cputime64_to_jiffies64(cputime64_t cputime)
static inline unsigned int
cputime_to_msecs(const cputime_t cputime)
{
- return __div(cputime, 1000);
+ return __div(cputime, 4096000);
}
static inline cputime_t
msecs_to_cputime(const unsigned int m)
{
- return (cputime_t) m * 1000;
+ return (cputime_t) m * 4096000;
}
/*
@@ -89,13 +89,13 @@ msecs_to_cputime(const unsigned int m)
static inline unsigned int
cputime_to_secs(const cputime_t cputime)
{
- return __div(cputime, 1000000);
+ return __div(cputime, 2048000000) >> 1;
}
static inline cputime_t
secs_to_cputime(const unsigned int s)
{
- return (cputime_t) s * 1000000;
+ return (cputime_t) s * 4096000000ULL;
}
/*
@@ -104,7 +104,7 @@ secs_to_cputime(const unsigned int s)
static inline cputime_t
timespec_to_cputime(const struct timespec *value)
{
- return value->tv_nsec / 1000 + (u64) value->tv_sec * 1000000;
+ return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL;
}
static inline void
@@ -114,12 +114,12 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
register_pair rp;
rp.pair = cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (1000000 >> 1));
- value->tv_nsec = rp.subreg.even * 1000;
+ asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
+ value->tv_nsec = rp.subreg.even * 1000 / 4096;
value->tv_sec = rp.subreg.odd;
#else
- value->tv_nsec = (cputime % 1000000) * 1000;
- value->tv_sec = cputime / 1000000;
+ value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096;
+ value->tv_sec = cputime / 4096000000ULL;
#endif
}
@@ -131,7 +131,7 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
static inline cputime_t
timeval_to_cputime(const struct timeval *value)
{
- return value->tv_usec + (u64) value->tv_sec * 1000000;
+ return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL;
}
static inline void
@@ -141,12 +141,12 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
register_pair rp;
rp.pair = cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (1000000 >> 1));
- value->tv_usec = rp.subreg.even;
+ asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
+ value->tv_usec = rp.subreg.even / 4096;
value->tv_sec = rp.subreg.odd;
#else
- value->tv_usec = cputime % 1000000;
- value->tv_sec = cputime / 1000000;
+ value->tv_usec = cputime % 4096000000ULL;
+ value->tv_sec = cputime / 4096000000ULL;
#endif
}
@@ -156,13 +156,13 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
static inline clock_t
cputime_to_clock_t(cputime_t cputime)
{
- return __div(cputime, 1000000 / USER_HZ);
+ return __div(cputime, 4096000000ULL / USER_HZ);
}
static inline cputime_t
clock_t_to_cputime(unsigned long x)
{
- return (cputime_t) x * (1000000 / USER_HZ);
+ return (cputime_t) x * (4096000000ULL / USER_HZ);
}
/*
@@ -171,7 +171,7 @@ clock_t_to_cputime(unsigned long x)
static inline clock_t
cputime64_to_clock_t(cputime64_t cputime)
{
- return __div(cputime, 1000000 / USER_HZ);
+ return __div(cputime, 4096000000ULL / USER_HZ);
}
#endif /* _S390_CPUTIME_H */
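The cputime.h hunk above switches cputime_t from microsecond resolution to raw CPU-timer units of 2**-12 microseconds, which is why every 1000000 constant becomes 4096000000ULL. Illustration only (not from the patch): a small sketch of the resulting unit relationships, using hypothetical CPUTIME_PER_* names:

/*
 * With cputime_t in units of 2**-12 microseconds there are 4096 units
 * per microsecond, 4096000 per millisecond and 4096000000 per second --
 * the divisors/multipliers used by the new conversion helpers above.
 */
#include <stdio.h>

typedef unsigned long long cputime_t;

#define CPUTIME_PER_USEC 4096ULL
#define CPUTIME_PER_MSEC (4096ULL * 1000)
#define CPUTIME_PER_SEC  (4096ULL * 1000 * 1000)

int main(void)
{
	cputime_t one_second = CPUTIME_PER_SEC;

	printf("1 s  = %llu cputime units\n", one_second);
	printf("back = %llu ms\n", one_second / CPUTIME_PER_MSEC);  /* 1000 */
	printf("back = %llu us\n", one_second / CPUTIME_PER_USEC);  /* 1000000 */
	return 0;
}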
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 0bc51d52a899..ffdef5fe8587 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -67,11 +67,11 @@
#define __LC_SYNC_ENTER_TIMER 0x248
#define __LC_ASYNC_ENTER_TIMER 0x250
#define __LC_EXIT_TIMER 0x258
-#define __LC_LAST_UPDATE_TIMER 0x260
-#define __LC_USER_TIMER 0x268
-#define __LC_SYSTEM_TIMER 0x270
-#define __LC_LAST_UPDATE_CLOCK 0x278
-#define __LC_STEAL_CLOCK 0x280
+#define __LC_USER_TIMER 0x260
+#define __LC_SYSTEM_TIMER 0x268
+#define __LC_STEAL_TIMER 0x270
+#define __LC_LAST_UPDATE_TIMER 0x278
+#define __LC_LAST_UPDATE_CLOCK 0x280
#define __LC_RETURN_MCCK_PSW 0x288
#define __LC_KERNEL_STACK 0xC40
#define __LC_THREAD_INFO 0xC44
@@ -89,11 +89,11 @@
#define __LC_SYNC_ENTER_TIMER 0x250
#define __LC_ASYNC_ENTER_TIMER 0x258
#define __LC_EXIT_TIMER 0x260
-#define __LC_LAST_UPDATE_TIMER 0x268
-#define __LC_USER_TIMER 0x270
-#define __LC_SYSTEM_TIMER 0x278
-#define __LC_LAST_UPDATE_CLOCK 0x280
-#define __LC_STEAL_CLOCK 0x288
+#define __LC_USER_TIMER 0x268
+#define __LC_SYSTEM_TIMER 0x270
+#define __LC_STEAL_TIMER 0x278
+#define __LC_LAST_UPDATE_TIMER 0x280
+#define __LC_LAST_UPDATE_CLOCK 0x288
#define __LC_RETURN_MCCK_PSW 0x290
#define __LC_KERNEL_STACK 0xD40
#define __LC_THREAD_INFO 0xD48
@@ -106,8 +106,10 @@
#define __LC_IPLDEV 0xDB8
#define __LC_CURRENT 0xDD8
#define __LC_INT_CLOCK 0xDE8
+#define __LC_VDSO_PER_CPU 0xE38
#endif /* __s390x__ */
+#define __LC_PASTE 0xE40
#define __LC_PANIC_MAGIC 0xE00
#ifndef __s390x__
@@ -252,11 +254,11 @@ struct _lowcore
__u64 sync_enter_timer; /* 0x248 */
__u64 async_enter_timer; /* 0x250 */
__u64 exit_timer; /* 0x258 */
- __u64 last_update_timer; /* 0x260 */
- __u64 user_timer; /* 0x268 */
- __u64 system_timer; /* 0x270 */
- __u64 last_update_clock; /* 0x278 */
- __u64 steal_clock; /* 0x280 */
+ __u64 user_timer; /* 0x260 */
+ __u64 system_timer; /* 0x268 */
+ __u64 steal_timer; /* 0x270 */
+ __u64 last_update_timer; /* 0x278 */
+ __u64 last_update_clock; /* 0x280 */
psw_t return_mcck_psw; /* 0x288 */
__u8 pad8[0xc00-0x290]; /* 0x290 */
@@ -343,11 +345,11 @@ struct _lowcore
__u64 sync_enter_timer; /* 0x250 */
__u64 async_enter_timer; /* 0x258 */
__u64 exit_timer; /* 0x260 */
- __u64 last_update_timer; /* 0x268 */
- __u64 user_timer; /* 0x270 */
- __u64 system_timer; /* 0x278 */
- __u64 last_update_clock; /* 0x280 */
- __u64 steal_clock; /* 0x288 */
+ __u64 user_timer; /* 0x268 */
+ __u64 system_timer; /* 0x270 */
+ __u64 steal_timer; /* 0x278 */
+ __u64 last_update_timer; /* 0x280 */
+ __u64 last_update_clock; /* 0x288 */
psw_t return_mcck_psw; /* 0x290 */
__u8 pad8[0xc00-0x2a0]; /* 0x2a0 */
/* System info area */
@@ -381,7 +383,12 @@ struct _lowcore
/* whether the kernel died with panic() or not */
__u32 panic_magic; /* 0xe00 */
- __u8 pad13[0x11b8-0xe04]; /* 0xe04 */
+ /* Per cpu primary space access list */
+ __u8 pad_0xe04[0xe3c-0xe04]; /* 0xe04 */
+ __u32 vdso_per_cpu_data; /* 0xe3c */
+ __u32 paste[16]; /* 0xe40 */
+
+ __u8 pad13[0x11b8-0xe80]; /* 0xe80 */
/* 64 bit extparam used for pfault, diag 250 etc */
__u64 ext_params2; /* 0x11B8 */
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 024ef42ed6d7..3a8b26eb1f2e 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -99,7 +99,7 @@ static inline void restore_access_regs(unsigned int *acrs)
prev = __switch_to(prev,next); \
} while (0)
-extern void account_vtime(struct task_struct *);
+extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
@@ -121,7 +121,7 @@ static inline void cmma_init(void) { }
#define finish_arch_switch(prev) do { \
set_fs(current->thread.mm_segment); \
- account_vtime(prev); \
+ account_vtime(prev, current); \
} while (0)
#define nop() asm volatile("nop")
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index c1eaf9604da7..c544aa524535 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -47,6 +47,8 @@ struct thread_info {
unsigned int cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct restart_block restart_block;
+ __u64 user_timer;
+ __u64 system_timer;
};
/*
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
index 61705d60f995..e4bcab739c19 100644
--- a/arch/s390/include/asm/timer.h
+++ b/arch/s390/include/asm/timer.h
@@ -23,20 +23,18 @@ struct vtimer_list {
__u64 expires;
__u64 interval;
- spinlock_t lock;
- unsigned long magic;
-
void (*function)(unsigned long);
unsigned long data;
};
-/* the offset value will wrap after ca. 71 years */
+/* the vtimer value will wrap after ca. 71 years */
struct vtimer_queue {
struct list_head list;
spinlock_t lock;
- __u64 to_expire; /* current event expire time */
- __u64 offset; /* list offset to zero */
- __u64 idle; /* temp var for idle */
+ __u64 timer; /* last programmed timer */
+ __u64 elapsed; /* elapsed time of timer expire values */
+ __u64 idle; /* temp var for idle */
+ int do_spt; /* =1: reprogram cpu timer in idle */
};
extern void init_virt_timer(struct vtimer_list *timer);
@@ -48,8 +46,8 @@ extern int del_virt_timer(struct vtimer_list *timer);
extern void init_cpu_vtimer(void);
extern void vtime_init(void);
-extern void vtime_start_cpu_timer(void);
-extern void vtime_stop_cpu_timer(void);
+extern void vtime_stop_cpu(void);
+extern void vtime_start_leave(void);
#endif /* __KERNEL__ */
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index a44f4fe16a35..7bdd7c8ebc91 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -12,9 +12,9 @@
#ifndef __ASSEMBLY__
/*
- * Note about this structure:
+ * Note about the vdso_data and vdso_per_cpu_data structures:
*
- * NEVER USE THIS IN USERSPACE CODE DIRECTLY. The layout of this
+ * NEVER USE THEM IN USERSPACE CODE DIRECTLY. The layout of the
* structure is supposed to be known only to the function in the vdso
* itself and may change without notice.
*/
@@ -28,10 +28,21 @@ struct vdso_data {
__u64 wtom_clock_nsec; /* 0x28 */
__u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
__u32 tz_dsttime; /* Type of dst correction 0x34 */
+ __u32 ectg_available;
+};
+
+struct vdso_per_cpu_data {
+ __u64 ectg_timer_base;
+ __u64 ectg_user_time;
};
extern struct vdso_data *vdso_data;
+#ifdef CONFIG_64BIT
+int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore);
+void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore);
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e641f60bac99..67a60016babb 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -48,6 +48,11 @@ int main(void)
DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
+ DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
+ DEFINE(__VDSO_ECTG_BASE,
+ offsetof(struct vdso_per_cpu_data, ectg_timer_base));
+ DEFINE(__VDSO_ECTG_USER,
+ offsetof(struct vdso_per_cpu_data, ectg_user_time));
/* constants used by the vdso */
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 55de521aef77..1268aa2991bf 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -583,8 +583,8 @@ kernel_per:
.globl io_int_handler
io_int_handler:
- stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
+ stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
@@ -723,8 +723,8 @@ io_notify_resume:
.globl ext_int_handler
ext_int_handler:
- stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
+ stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
@@ -750,6 +750,7 @@ __critical_end:
.globl mcck_int_handler
mcck_int_handler:
+ stck __LC_INT_CLOCK
spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
SAVE_ALL_BASE __LC_SAVE_AREA+32
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 16bb4fd1a403..c6fbde13971a 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -177,8 +177,11 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
.if !\sync
ni \psworg+1,0xfd # clear wait state bit
.endif
- lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r13,SP_R0(%r15) # load gprs 0-13 of user
stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r14,%r15,SP_R14(%r15) # load grps 14-15 of user
lpswe \psworg # back to caller
.endm
@@ -559,8 +562,8 @@ kernel_per:
*/
.globl io_int_handler
io_int_handler:
- stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
+ stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
@@ -721,8 +724,8 @@ io_notify_resume:
*/
.globl ext_int_handler
ext_int_handler:
- stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
+ stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
@@ -746,6 +749,7 @@ __critical_end:
*/
.globl mcck_int_handler
mcck_int_handler:
+ stck __LC_INT_CLOCK
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
@@ -979,23 +983,23 @@ cleanup_sysc_return:
cleanup_sysc_leave:
clc 8(8,%r12),BASED(cleanup_sysc_leave_insn)
- je 2f
- mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ je 3f
clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
- je 2f
- mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
+ jhe 0f
+ mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
cghi %r12,__LC_MCK_OLD_PSW
- jne 0f
+ jne 1f
mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
- j 1f
-0: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
-1: lmg %r0,%r11,SP_R0(%r15)
+ j 2f
+1: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
+2: lmg %r0,%r11,SP_R0(%r15)
lg %r15,SP_R15(%r15)
-2: la %r12,__LC_RETURN_PSW
+3: la %r12,__LC_RETURN_PSW
br %r14
cleanup_sysc_leave_insn:
.quad sysc_done - 4
- .quad sysc_done - 8
+ .quad sysc_done - 16
cleanup_io_return:
mvc __LC_RETURN_PSW(8),0(%r12)
@@ -1005,23 +1009,23 @@ cleanup_io_return:
cleanup_io_leave:
clc 8(8,%r12),BASED(cleanup_io_leave_insn)
- je 2f
- mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ je 3f
clc 8(8,%r12),BASED(cleanup_io_leave_insn+8)
- je 2f
- mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
+ jhe 0f
+ mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
cghi %r12,__LC_MCK_OLD_PSW
- jne 0f
+ jne 1f
mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
- j 1f
-0: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
-1: lmg %r0,%r11,SP_R0(%r15)
+ j 2f
+1: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
+2: lmg %r0,%r11,SP_R0(%r15)
lg %r15,SP_R15(%r15)
-2: la %r12,__LC_RETURN_PSW
+3: la %r12,__LC_RETURN_PSW
br %r14
cleanup_io_leave_insn:
.quad io_done - 4
- .quad io_done - 8
+ .quad io_done - 16
/*
* Integer constants
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 3ccd36b24b8f..f9f70aa15244 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -87,6 +87,8 @@ startup_continue:
lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
# move IPL device to lowcore
mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
+ lghi %r0,__LC_PASTE
+ stg %r0,__LC_VDSO_PER_CPU
#
# Setup stack
#
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 04f8c67a6101..b6110bdf8dc2 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -38,6 +38,7 @@
#include <linux/utsname.h>
#include <linux/tick.h>
#include <linux/elfcore.h>
+#include <linux/kernel_stat.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -45,7 +46,6 @@
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/timer.h>
-#include <asm/cpu.h>
#include "entry.h"
asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -75,36 +75,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sf->gprs[8];
}
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
- .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
-};
-
-static int s390_idle_enter(void)
-{
- struct s390_idle_data *idle;
-
- idle = &__get_cpu_var(s390_idle);
- spin_lock(&idle->lock);
- idle->idle_count++;
- idle->in_idle = 1;
- idle->idle_enter = get_clock();
- spin_unlock(&idle->lock);
- vtime_stop_cpu_timer();
- return NOTIFY_OK;
-}
-
-void s390_idle_leave(void)
-{
- struct s390_idle_data *idle;
-
- vtime_start_cpu_timer();
- idle = &__get_cpu_var(s390_idle);
- spin_lock(&idle->lock);
- idle->idle_time += get_clock() - idle->idle_enter;
- idle->in_idle = 0;
- spin_unlock(&idle->lock);
-}
-
extern void s390_handle_mcck(void);
/*
* The idle loop on a S390...
@@ -117,10 +87,6 @@ static void default_idle(void)
local_irq_enable();
return;
}
- if (s390_idle_enter() == NOTIFY_BAD) {
- local_irq_enable();
- return;
- }
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id())) {
preempt_enable_no_resched();
@@ -130,7 +96,6 @@ static void default_idle(void)
local_mcck_disable();
if (test_thread_flag(TIF_MCCK_PENDING)) {
local_mcck_enable();
- s390_idle_leave();
local_irq_enable();
s390_handle_mcck();
return;
@@ -138,9 +103,9 @@ static void default_idle(void)
trace_hardirqs_on();
/* Don't trace preempt off for idle. */
stop_critical_timings();
- /* Wait for external, I/O or machine check interrupt. */
- __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
- PSW_MASK_IO | PSW_MASK_EXT);
+ /* Stop virtual timer and halt the cpu. */
+ vtime_stop_cpu();
+ /* Reenable preemption tracer. */
start_critical_timings();
}
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index e019b419efc6..a0d2d55d7fb3 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -119,8 +119,8 @@ void do_extint(struct pt_regs *regs, unsigned short code)
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
- irq_enter();
s390_idle_check();
+ irq_enter();
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/* Serve timer interrupts first. */
clock_comparator_work();
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b7a1efd5522c..d825f4950e4e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -427,6 +427,8 @@ setup_lowcore(void)
/* enable extended save area */
__ctl_set_bit(14, 29);
}
+#else
+ lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
set_prefix((u32)(unsigned long) lc);
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 3ed5c7a83c6c..9c0ccb532a45 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -47,6 +47,7 @@
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cpu.h>
+#include <asm/vdso.h>
#include "entry.h"
/*
@@ -500,6 +501,9 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
goto out;
lowcore->extended_save_area_addr = (u32) save_area;
}
+#else
+ if (vdso_alloc_per_cpu(cpu, lowcore))
+ goto out;
#endif
lowcore_ptr[cpu] = lowcore;
return 0;
@@ -522,6 +526,8 @@ static void smp_free_lowcore(int cpu)
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
free_page((unsigned long) lowcore->extended_save_area_addr);
+#else
+ vdso_free_per_cpu(cpu, lowcore);
#endif
free_page(lowcore->panic_stack - PAGE_SIZE);
free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
@@ -664,6 +670,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
panic_stack = __get_free_page(GFP_KERNEL);
async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+ BUG_ON(!lowcore || !panic_stack || !async_stack);
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
save_area = get_zeroed_page(GFP_KERNEL);
@@ -677,6 +684,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
lowcore->extended_save_area_addr = (u32) save_area;
+#else
+ BUG_ON(vdso_alloc_per_cpu(smp_processor_id(), lowcore));
#endif
set_prefix((u32)(unsigned long) lowcore);
local_mcck_enable();
@@ -845,9 +854,11 @@ static ssize_t show_idle_count(struct sys_device *dev,
unsigned long long idle_count;
idle = &per_cpu(s390_idle, dev->id);
- spin_lock_irq(&idle->lock);
+ spin_lock(&idle->lock);
idle_count = idle->idle_count;
- spin_unlock_irq(&idle->lock);
+ if (idle->idle_enter)
+ idle_count++;
+ spin_unlock(&idle->lock);
return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -856,18 +867,17 @@ static ssize_t show_idle_time(struct sys_device *dev,
struct sysdev_attribute *attr, char *buf)
{
struct s390_idle_data *idle;
- unsigned long long new_time;
+ unsigned long long now, idle_time, idle_enter;
idle = &per_cpu(s390_idle, dev->id);
- spin_lock_irq(&idle->lock);
- if (idle->in_idle) {
- new_time = get_clock();
- idle->idle_time += new_time - idle->idle_enter;
- idle->idle_enter = new_time;
- }
- new_time = idle->idle_time;
- spin_unlock_irq(&idle->lock);
- return sprintf(buf, "%llu\n", new_time >> 12);
+ spin_lock(&idle->lock);
+ now = get_clock();
+ idle_time = idle->idle_time;
+ idle_enter = idle->idle_enter;
+ if (idle_enter != 0ULL && i