author		Thomas Gleixner <tglx@linutronix.de>	2007-02-16 01:28:04 -0800
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-16 08:13:59 -0800
commit		e9e2cdb412412326c4827fc78ba27f410d837e6e (patch)
tree		cd4ca03e6bdc3691619024492fb9414427b2f813 /arch/i386/kernel
parent		79bf2bb335b85db25d27421c798595a2fa2a0e82 (diff)
[PATCH] clockevents: i386 drivers
Add clockevent drivers for i386: lapic (local) and PIT/HPET (global).

Update the timer IRQ to call into the PIT/HPET driver's event handler
and the lapic-timer IRQ to call into the lapic clockevent driver.  The
assignment of timer functionality is delegated to the core framework
code and replaces the compile-time and runtime evaluation in
do_timer_interrupt_hook().

Use the clockevents broadcast support and implement the lapic_broadcast
function for ACPI.

No changes to existing functionality.

[ kdump fix from Vivek Goyal <vgoyal@in.ibm.com> ]
[ fixes based on review feedback from Arjan van de Ven <arjan@infradead.org> ]

Cleanups-from: Adrian Bunk <bunk@stusta.de>
Build-fixes-from: Andrew Morton <akpm@osdl.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--   arch/i386/kernel/Makefile        1
-rw-r--r--   arch/i386/kernel/apic.c        291
-rw-r--r--   arch/i386/kernel/hpet.c        496
-rw-r--r--   arch/i386/kernel/i8253.c        96
-rw-r--r--   arch/i386/kernel/i8259.c         6
-rw-r--r--   arch/i386/kernel/smpboot.c       5
-rw-r--r--   arch/i386/kernel/time.c         70
-rw-r--r--   arch/i386/kernel/time_hpet.c   497
8 files changed, 717 insertions, 745 deletions
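
For orientation before the per-file hunks: the pattern this patch applies to all three timers (lapic, PIT, HPET) is to describe the hardware as a struct clock_event_device, fill in the set_mode / set_next_event callbacks, and hand the device to clockevents_register_device(). A minimal sketch of that pattern follows; the "mytimer" names and MYTIMER_FREQ are hypothetical placeholders, not part of this patch, and it assumes the 2007-era clockevents API used throughout the diff.

#include <linux/clockchips.h>

/* Stand-in for whatever frequency the hardware counter runs at. */
#define MYTIMER_FREQ	1000000UL	/* 1 MHz, assumed for illustration */

static void mytimer_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *evt)
{
	/* Program a periodic reload, switch to one-shot, or stop the
	 * counter, depending on 'mode' -- compare init_pit_timer() below. */
}

static int mytimer_next_event(unsigned long delta,
			      struct clock_event_device *evt)
{
	/* Arm a one-shot expiry 'delta' hardware ticks from now. */
	return 0;
}

static struct clock_event_device mytimer_clockevent = {
	.name		= "mytimer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= mytimer_set_mode,
	.set_next_event	= mytimer_next_event,
	.shift		= 32,
};

static void __init mytimer_register(void)
{
	/* mult/shift convert nanoseconds to hardware ticks:
	 * ticks = (ns * mult) >> shift */
	mytimer_clockevent.mult = div_sc(MYTIMER_FREQ, NSEC_PER_SEC, 32);
	mytimer_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7FFFFFFF, &mytimer_clockevent);
	mytimer_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &mytimer_clockevent);
	mytimer_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());

	clockevents_register_device(&mytimer_clockevent);
}

The framework then decides whether such a device serves as the global tick, a per-CPU tick or a broadcast target, which is what lets this patch delete the using_apic_timer / timer_bcast_ipi special cases from the timer interrupt path.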
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index c2b3b79dc436..4ae3dcf1d2f0 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -32,7 +32,6 @@ obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_MODULES) += module.o
obj-y += sysenter.o vsyscall.o
obj-$(CONFIG_ACPI_SRAT) += srat.o
-obj-$(CONFIG_HPET_TIMER) += time_hpet.o
obj-$(CONFIG_EFI) += efi.o efi_stub.o
obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
obj-$(CONFIG_VM86) += vm86.o
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index b56448f214a7..e98b5c750bdf 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -25,6 +25,7 @@
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
+#include <linux/clockchips.h>
#include <linux/module.h>
#include <asm/atomic.h>
@@ -52,28 +53,44 @@
#endif
/*
- * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
- * IPIs in place of local APIC timers
- */
-static cpumask_t timer_bcast_ipi;
-
-/*
* Knob to control our willingness to enable the local APIC.
*
* -1=force-disable, +1=force-enable
*/
static int enable_local_apic __initdata = 0;
+/* Enable local APIC timer for highres/dyntick on UP */
+static int enable_local_apic_timer __initdata = 0;
+
/*
* Debug level, exported for io_apic.c
*/
int apic_verbosity;
-static void apic_pm_activate(void);
+static unsigned int calibration_result;
+static int lapic_next_event(unsigned long delta,
+ struct clock_event_device *evt);
+static void lapic_timer_setup(enum clock_event_mode mode,
+ struct clock_event_device *evt);
+static void lapic_timer_broadcast(cpumask_t mask);
+static void apic_pm_activate(void);
-/* Using APIC to generate smp_local_timer_interrupt? */
-int using_apic_timer __read_mostly = 0;
+/*
+ * The local apic timer can be used for any function which is CPU local.
+ */
+static struct clock_event_device lapic_clockevent = {
+ .name = "lapic",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
+ | CLOCK_EVT_FEAT_C3STOP,
+ .shift = 32,
+ .set_mode = lapic_timer_setup,
+ .set_next_event = lapic_next_event,
+ .broadcast = lapic_timer_broadcast,
+ .rating = 100,
+ .irq = -1,
+};
+static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
/* Local APIC was disabled by the BIOS and enabled by the kernel */
static int enabled_via_apicbase;
@@ -152,6 +169,11 @@ int lapic_get_maxlvt(void)
*/
/*
+ * FIXME: Move this to i8253.h. There is no need to keep the access to
+ * the PIT scattered all around the place -tglx
+ */
+
+/*
* The timer chip is already set up at HZ interrupts per second here,
* but we do not accept timer interrupts yet. We only allow the BP
* to calibrate.
@@ -209,16 +231,17 @@ void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound;
#define APIC_DIVISOR 16
-static void __setup_APIC_LVTT(unsigned int clocks)
+static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
unsigned int lvtt_value, tmp_value;
- int cpu = smp_processor_id();
- lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
+ lvtt_value = LOCAL_TIMER_VECTOR;
+ if (!oneshot)
+ lvtt_value |= APIC_LVT_TIMER_PERIODIC;
if (!lapic_is_integrated())
lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
- if (cpu_isset(cpu, timer_bcast_ipi))
+ if (!irqen)
lvtt_value |= APIC_LVT_MASKED;
apic_write_around(APIC_LVTT, lvtt_value);
@@ -231,31 +254,80 @@ static void __setup_APIC_LVTT(unsigned int clocks)
& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
| APIC_TDR_DIV_16);
- apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
+ if (!oneshot)
+ apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
+}
+
+/*
+ * Program the next event, relative to now
+ */
+static int lapic_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ apic_write_around(APIC_TMICT, delta);
+ return 0;
}
-static void __devinit setup_APIC_timer(unsigned int clocks)
+/*
+ * Setup the lapic timer in periodic or oneshot mode
+ */
+static void lapic_timer_setup(enum clock_event_mode mode,
+ struct clock_event_device *evt)
{
unsigned long flags;
+ unsigned int v;
local_irq_save(flags);
- /*
- * Wait for IRQ0's slice:
- */
- wait_timer_tick();
-
- __setup_APIC_LVTT(clocks);
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ case CLOCK_EVT_MODE_ONESHOT:
+ __setup_APIC_LVTT(calibration_result,
+ mode != CLOCK_EVT_MODE_PERIODIC, 1);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ v = apic_read(APIC_LVTT);
+ v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+ apic_write_around(APIC_LVTT, v);
+ break;
+ }
local_irq_restore(flags);
}
/*
+ * Local APIC timer broadcast function
+ */
+static void lapic_timer_broadcast(cpumask_t mask)
+{
+#ifdef CONFIG_SMP
+ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+#endif
+}
+
+/*
+ * Setup the local APIC timer for this CPU. Copy the initialized values
+ * of the boot CPU and register the clock event in the framework.
+ */
+static void __devinit setup_APIC_timer(void)
+{
+ struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+
+ memcpy(levt, &lapic_clockevent, sizeof(*levt));
+ levt->cpumask = cpumask_of_cpu(smp_processor_id());
+
+ clockevents_register_device(levt);
+}
+
+/*
* In this function we calibrate APIC bus clocks to the external
* timer. Unfortunately we cannot use jiffies and the timer irq
* to calibrate, since some later bootup code depends on getting
* the first irq? Ugh.
*
+ * TODO: Fix this rather than saying "Ugh" -tglx
+ *
* We want to do the calibration only once since we
* want to have local timer irqs syncron. CPUs connected
* by the same APIC bus have the very same bus frequency.
@@ -278,7 +350,7 @@ static int __init calibrate_APIC_clock(void)
* value into the APIC clock, we just want to get the
* counter running for calibration.
*/
- __setup_APIC_LVTT(1000000000);
+ __setup_APIC_LVTT(1000000000, 0, 0);
/*
* The timer chip counts down to zero. Let's wait
@@ -315,6 +387,17 @@ static int __init calibrate_APIC_clock(void)
result = (tt1-tt2)*APIC_DIVISOR/LOOPS;
+ /* Calculate the scaled math multiplication factor */
+ lapic_clockevent.mult = div_sc(tt1-tt2, TICK_NSEC * LOOPS, 32);
+ lapic_clockevent.max_delta_ns =
+ clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+ lapic_clockevent.min_delta_ns =
+ clockevent_delta2ns(0xF, &lapic_clockevent);
+
+ apic_printk(APIC_VERBOSE, "..... tt1-tt2 %ld\n", tt1 - tt2);
+ apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult);
+ apic_printk(APIC_VERBOSE, "..... calibration result: %ld\n", result);
+
if (cpu_has_tsc)
apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
"%ld.%04ld MHz.\n",
@@ -329,13 +412,10 @@ static int __init calibrate_APIC_clock(void)
return result;
}
-static unsigned int calibration_result;
-
void __init setup_boot_APIC_clock(void)
{
unsigned long flags;
apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n");
- using_apic_timer = 1;
local_irq_save(flags);
@@ -343,97 +423,47 @@ void __init setup_boot_APIC_clock(void)
/*
* Now set up the timer for real.
*/
- setup_APIC_timer(calibration_result);
+ setup_APIC_timer();
local_irq_restore(flags);
}
void __devinit setup_secondary_APIC_clock(void)
{
- setup_APIC_timer(calibration_result);
-}
-
-void disable_APIC_timer(void)
-{
- if (using_apic_timer) {
- unsigned long v;
-
- v = apic_read(APIC_LVTT);
- /*
- * When an illegal vector value (0-15) is written to an LVT
- * entry and delivery mode is Fixed, the APIC may signal an
- * illegal vector error, with out regard to whether the mask
- * bit is set or whether an interrupt is actually seen on
- * input.
- *
- * Boot sequence might call this function when the LVTT has
- * '0' vector value. So make sure vector field is set to
- * valid value.
- */
- v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
- apic_write_around(APIC_LVTT, v);
- }
-}
-
-void enable_APIC_timer(void)
-{
- int cpu = smp_processor_id();
-
- if (using_apic_timer && !cpu_isset(cpu, timer_bcast_ipi)) {
- unsigned long v;
-
- v = apic_read(APIC_LVTT);
- apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED);
- }
-}
-
-void switch_APIC_timer_to_ipi(void *cpumask)
-{
- cpumask_t mask = *(cpumask_t *)cpumask;
- int cpu = smp_processor_id();
-
- if (cpu_isset(cpu, mask) &&
- !cpu_isset(cpu, timer_bcast_ipi)) {
- disable_APIC_timer();
- cpu_set(cpu, timer_bcast_ipi);
- }
-}
-EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
-
-void switch_ipi_to_APIC_timer(void *cpumask)
-{
- cpumask_t mask = *(cpumask_t *)cpumask;
- int cpu = smp_processor_id();
-
- if (cpu_isset(cpu, mask) &&
- cpu_isset(cpu, timer_bcast_ipi)) {
- cpu_clear(cpu, timer_bcast_ipi);
- enable_APIC_timer();
- }
+ setup_APIC_timer();
}
-EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
/*
- * Local timer interrupt handler. It does both profiling and
- * process statistics/rescheduling.
+ * The guts of the apic timer interrupt
*/
-inline void smp_local_timer_interrupt(void)
+static void local_apic_timer_interrupt(void)
{
- profile_tick(CPU_PROFILING);
-#ifdef CONFIG_SMP
- update_process_times(user_mode_vm(get_irq_regs()));
-#endif
+ int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
/*
- * We take the 'long' return path, and there every subsystem
- * grabs the apropriate locks (kernel lock/ irq lock).
+ * Normally we should not be here till the LAPIC has been
+ * initialized, but in some cases like kdump it's possible that
+ * there is a pending LAPIC timer interrupt from the previous
+ * kernel's context which is delivered in the new kernel the moment
+ * interrupts are enabled.
*
- * we might want to decouple profiling from the 'long path',
- * and do the profiling totally in assembly.
- *
- * Currently this isn't too much of an issue (performance wise),
- * we can take more than 100K local irqs per second on a 100 MHz P5.
+ * Interrupts are enabled early and the LAPIC is set up much later,
+ * hence it's possible that when we get here evt->event_handler
+ * is NULL. Check for event_handler being NULL and discard
+ * the interrupt as spurious.
*/
+ if (!evt->event_handler) {
+ printk(KERN_WARNING
+ "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
+ /* Switch it off */
+ lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
+ return;
+ }
+
+ per_cpu(irq_stat, cpu).apic_timer_irqs++;
+
+ evt->event_handler(evt);
}
/*
@@ -445,15 +475,9 @@ inline void smp_local_timer_interrupt(void)
* interrupt as well. Thus we cannot inline the local irq ... ]
*/
-fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
+void fastcall smp_apic_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- int cpu = smp_processor_id();
-
- /*
- * the NMI deadlock-detector uses this.
- */
- per_cpu(irq_stat, cpu).apic_timer_irqs++;
/*
* NOTE! We'd better ACK the irq immediately,
@@ -467,41 +491,10 @@ fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
*/
exit_idle();
irq_enter();
- smp_local_timer_interrupt();
+ local_apic_timer_interrupt();
irq_exit();
- set_irq_regs(old_regs);
-}
-#ifndef CONFIG_SMP
-static void up_apic_timer_interrupt_call(void)
-{
- int cpu = smp_processor_id();
-
- /*
- * the NMI deadlock-detector uses this.
- */
- per_cpu(irq_stat, cpu).apic_timer_irqs++;
-
- smp_local_timer_interrupt();
-}
-#endif
-
-void smp_send_timer_broadcast_ipi(void)
-{
- cpumask_t mask;
-
- cpus_and(mask, cpu_online_map, timer_bcast_ipi);
- if (!cpus_empty(mask)) {
-#ifdef CONFIG_SMP
- send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
-#else
- /*
- * We can directly call the apic timer interrupt handler
- * in UP case. Minus all irq related functions
- */
- up_apic_timer_interrupt_call();
-#endif
- }
+ set_irq_regs(old_regs);
}
int setup_profiling_timer(unsigned int multiplier)
@@ -914,6 +907,11 @@ void __devinit setup_local_APIC(void)
printk(KERN_INFO "No ESR for 82489DX.\n");
}
+ /* Disable the local apic timer */
+ value = apic_read(APIC_LVTT);
+ value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+ apic_write_around(APIC_LVTT, value);
+
setup_apic_nmi_watchdog(NULL);
apic_pm_activate();
}
@@ -1128,6 +1126,13 @@ static int __init parse_nolapic(char *arg)
}
early_param("nolapic", parse_nolapic);
+static int __init apic_enable_lapic_timer(char *str)
+{
+ enable_local_apic_timer = 1;
+ return 0;
+}
+early_param("lapictimer", apic_enable_lapic_timer);
+
static int __init apic_set_verbosity(char *str)
{
if (strcmp("debug", str) == 0)
@@ -1147,7 +1152,7 @@ __setup("apic=", apic_set_verbosity);
/*
* This interrupt should _never_ happen with our APIC/SMP architecture
*/
-fastcall void smp_spurious_interrupt(struct pt_regs *regs)
+void smp_spurious_interrupt(struct pt_regs *regs)
{
unsigned long v;
@@ -1171,7 +1176,7 @@ fastcall void smp_spurious_interrupt(struct pt_regs *regs)
/*
* This interrupt should never happen with our APIC/SMP architecture
*/
-fastcall void smp_error_interrupt(struct pt_regs *regs)
+void smp_error_interrupt(struct pt_regs *regs)
{
unsigned long v, v1;
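
The calibration hunk above turns the number of lapic ticks counted across LOOPS jiffies into the fixed-point mult/shift factor the framework uses: the core converts a nanosecond delta into lapic ticks as ticks = (ns * mult) >> shift before calling lapic_next_event(). A standalone sketch of that arithmetic, with assumed numbers (a 1 GHz bus clock, not measured values from any real machine):

#include <stdint.h>
#include <stdio.h>

/* Same scaled-math factor as the kernel's div_sc():
 * ticks = (ns * mult) >> shift, given 'ticks' counted over 'ns' ns. */
static uint64_t div_sc_sketch(uint64_t ticks, uint64_t ns, unsigned int shift)
{
	return (ticks << shift) / ns;
}

int main(void)
{
	/* Assume calibration counted 10,000,000 lapic ticks over 10 ms
	 * (i.e. a 1 GHz APIC bus clock) -- illustrative numbers only. */
	uint64_t ticks = 10000000, ns = 10000000;
	uint64_t mult = div_sc_sketch(ticks, ns, 32);

	/* What the core would program for a 1 ms one-shot event: */
	uint64_t delta_ticks = (1000000ULL * mult) >> 32;

	printf("mult = %llu, 1ms -> %llu lapic ticks\n",
	       (unsigned long long)mult, (unsigned long long)delta_ticks);
	return 0;
}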
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index 7d2739fff3a3..e1006b7acc9e 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -1,4 +1,5 @@
#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include <linux/errno.h>
#include <linux/hpet.h>
#include <linux/init.h>
@@ -6,17 +7,278 @@
#include <asm/hpet.h>
#include <asm/io.h>
+extern struct clock_event_device *global_clock_event;
+
#define HPET_MASK CLOCKSOURCE_MASK(32)
#define HPET_SHIFT 22
/* FSEC = 10^-15 NSEC = 10^-9 */
#define FSEC_PER_NSEC 1000000
-static void __iomem *hpet_ptr;
+/*
+ * HPET address is set in acpi/boot.c, when an ACPI entry exists
+ */
+unsigned long hpet_address;
+static void __iomem * hpet_virt_address;
+
+static inline unsigned long hpet_readl(unsigned long a)
+{
+ return readl(hpet_virt_address + a);
+}
+
+static inline void hpet_writel(unsigned long d, unsigned long a)
+{
+ writel(d, hpet_virt_address + a);
+}
+
+/*
+ * HPET command line enable / disable
+ */
+static int boot_hpet_disable;
+
+static int __init hpet_setup(char* str)
+{
+ if (str) {
+ if (!strncmp("disable", str, 7))
+ boot_hpet_disable = 1;
+ }
+ return 1;
+}
+__setup("hpet=", hpet_setup);
+
+static inline int is_hpet_capable(void)
+{
+ return (!boot_hpet_disable && hpet_address);
+}
+
+/*
+ * HPET timer interrupt enable / disable
+ */
+static int hpet_legacy_int_enabled;
+
+/**
+ * is_hpet_enabled - check whether the hpet timer interrupt is enabled
+ */
+int is_hpet_enabled(void)
+{
+ return is_hpet_capable() && hpet_legacy_int_enabled;
+}
+
+/*
+ * When the hpet driver (/dev/hpet) is enabled, we need to reserve
+ * timer 0 and timer 1 in case of RTC emulation.
+ */
+#ifdef CONFIG_HPET
+static void hpet_reserve_platform_timers(unsigned long id)
+{
+ struct hpet __iomem *hpet = hpet_virt_address;
+ struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
+ unsigned int nrtimers, i;
+ struct hpet_data hd;
+
+ nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
+
+ memset(&hd, 0, sizeof (hd));
+ hd.hd_phys_address = hpet_address;
+ hd.hd_address = hpet_virt_address;
+ hd.hd_nirqs = nrtimers;
+ hd.hd_flags = HPET_DATA_PLATFORM;
+ hpet_reserve_timer(&hd, 0);
+
+#ifdef CONFIG_HPET_EMULATE_RTC
+ hpet_reserve_timer(&hd, 1);
+#endif
+
+ hd.hd_irq[0] = HPET_LEGACY_8254;
+ hd.hd_irq[1] = HPET_LEGACY_RTC;
+
+ for (i = 2; i < nrtimers; timer++, i++)
+ hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >>
+ Tn_INT_ROUTE_CNF_SHIFT;
+
+ hpet_alloc(&hd);
+
+}
+#else
+static void hpet_reserve_platform_timers(unsigned long id) { }
+#endif
+
+/*
+ * Common hpet info
+ */
+static unsigned long hpet_period;
+
+static void hpet_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt);
+static int hpet_next_event(unsigned long delta,
+ struct clock_event_device *evt);
+
+/*
+ * The hpet clock event device
+ */
+static struct clock_event_device hpet_clockevent = {
+ .name = "hpet",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = hpet_set_mode,
+ .set_next_event = hpet_next_event,
+ .shift = 32,
+ .irq = 0,
+};
+
+static void hpet_start_counter(void)
+{
+ unsigned long cfg = hpet_readl(HPET_CFG);
+
+ cfg &= ~HPET_CFG_ENABLE;
+ hpet_writel(cfg, HPET_CFG);
+ hpet_writel(0, HPET_COUNTER);
+ hpet_writel(0, HPET_COUNTER + 4);
+ cfg |= HPET_CFG_ENABLE;
+ hpet_writel(cfg, HPET_CFG);
+}
+
+static void hpet_enable_int(void)
+{
+ unsigned long cfg = hpet_readl(HPET_CFG);
+
+ cfg |= HPET_CFG_LEGACY;
+ hpet_writel(cfg, HPET_CFG);
+ hpet_legacy_int_enabled = 1;
+}
+
+static void hpet_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long cfg, cmp, now;
+ uint64_t delta;
+
+ switch(mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * hpet_clockevent.mult;
+ delta >>= hpet_clockevent.shift;
+ now = hpet_readl(HPET_COUNTER);
+ cmp = now + (unsigned long) delta;
+ cfg = hpet_readl(HPET_T0_CFG);
+ cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
+ HPET_TN_SETVAL | HPET_TN_32BIT;
+ hpet_writel(cfg, HPET_T0_CFG);
+ /*
+ * The first write after writing TN_SETVAL to the
+ * config register sets the counter value, the second
+ * write sets the period.
+ */
+ hpet_writel(cmp, HPET_T0_CMP);
+ udelay(1);
+ hpet_writel((unsigned long) delta, HPET_T0_CMP);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ cfg = hpet_readl(HPET_T0_CFG);
+ cfg &= ~HPET_TN_PERIODIC;
+ cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
+ hpet_writel(cfg, HPET_T0_CFG);
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ cfg = hpet_readl(HPET_T0_CFG);
+ cfg &= ~HPET_TN_ENABLE;
+ hpet_writel(cfg, HPET_T0_CFG);
+ break;
+ }
+}
+
+static int hpet_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned long cnt;
+
+ cnt = hpet_readl(HPET_COUNTER);
+ cnt += delta;
+ hpet_writel(cnt, HPET_T0_CMP);
+
+ return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0);
+}
+
+/*
+ * Try to setup the HPET timer
+ */
+int __init hpet_enable(void)
+{
+ unsigned long id;
+ uint64_t hpet_freq;
+
+ if (!is_hpet_capable())
+ return 0;
+
+ hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
+
+ /*
+ * Read the period and check for a sane value:
+ */
+ hpet_period = hpet_readl(HPET_PERIOD);
+ if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
+ goto out_nohpet;
+
+ /*
+ * The period is a femto seconds value. We need to calculate the
+ * scaled math multiplication factor for nanosecond to hpet tick
+ * conversion.
+ */
+ hpet_freq = 1000000000000000ULL;
+ do_div(hpet_freq, hpet_period);
+ hpet_clockevent.mult = div_sc((unsigned long) hpet_freq,
+ NSEC_PER_SEC, 32);
+ /* Calculate the min / max delta */
+ hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
+ &hpet_clockevent);
+ hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30,
+ &hpet_clockevent);
+
+ /*
+ * Read the HPET ID register to retrieve the IRQ routing
+ * information and the number of channels
+ */
+ id = hpet_readl(HPET_ID);
+
+#ifdef CONFIG_HPET_EMULATE_RTC
+ /*
+ * The legacy routing mode needs at least two channels, tick timer
+ * and the rtc emulation channel.
+ */
+ if (!(id & HPET_ID_NUMBER))
+ goto out_nohpet;
+#endif
+
+ /* Start the counter */
+ hpet_start_counter();
+
+ if (id & HPET_ID_LEGSUP) {
+ hpet_enable_int();
+ hpet_reserve_platform_timers(id);
+ /*
+ * Start hpet with the boot cpu mask and make it
+ * global after the IO_APIC has been initialized.
+ */
+ hpet_clockevent.cpumask = cpumask_of_cpu(0);
+ clockevents_register_device(&hpet_clockevent);
+ global_clock_event = &hpet_clockevent;
+ return 1;
+ }
+ return 0;
+out_nohpet:
+ iounmap(hpet_virt_address);
+ hpet_virt_address = NULL;
+ return 0;
+}
+
+/*
+ * Clock source related code
+ */
static cycle_t read_hpet(void)
{
- return (cycle_t)readl(hpet_ptr);
+ return (cycle_t)hpet_readl(HPET_COUNTER);
}
static struct clocksource clocksource_hpet = {
@@ -24,28 +286,17 @@ static struct clocksource clocksource_hpet = {
.rating = 250,
.read = read_hpet,
.mask = HPET_MASK,
- .mult = 0, /* set below */
.shift = HPET_SHIFT,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int __init init_hpet_clocksource(void)
{
- unsigned long hpet_period;
- void __iomem* hpet_base;
u64 tmp;
- int err;
- if (!is_hpet_enabled())
+ if (!hpet_virt_address)
return -ENODEV;
- /* calculate the hpet address: */
- hpet_base = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
- hpet_ptr = hpet_base + HPET_COUNTER;
-
- /* calculate the frequency: */
- hpet_period = readl(hpet_base + HPET_PERIOD);
-
/*
* hpet period is in femto seconds per cycle
* so we need to convert this to ns/cyc units
@@ -61,11 +312,218 @@ static int __init init_hpet_clocksource(void)
do_div(tmp, FSEC_PER_NSEC);
clocksource_hpet.mult = (u32)tmp;
- err = clocksource_register(&clocksource_hpet);
- if (err)
- iounmap(hpet_base);
-
- return err;
+ return clocksource_register(&clocksource_hpet);
}
module_init(init_hpet_clocksource);
+
+#ifdef CONFIG_HPET_EMULATE_RTC
+
+/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When HPET
+ * is enabled, we support RTC interrupt functionality in software.
+ * RTC has 3 kinds of interrupts:
+ * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
+ * is updated
+ * 2) Alarm Interrupt - generate an interrupt at a specific time of day
+ * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
+ * 2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
+ * (1) and (2) above are implemented using polling at a frequency of
+ * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
+ * overhead. (DEFAULT_RTC_INT_FREQ)
+ * For (3), we use interrupts at 64Hz or user specified periodic
+ * frequency, whichever is higher.
+ */
+#include <linux/mc146818rtc.h>
+#include <linux/rtc.h>
+
+#define DEFAULT_RTC_INT_FREQ 64
+#define DEFAULT_RTC_SHIFT 6
+#define RTC_NUM_INTS 1
+
+static unsigned long hpet_rtc_flags;
+static unsigned long hpet_prev_update_sec;
+static struct rtc_time hpet_alarm_time;
+static unsigned long hpet_pie_count;
+static unsigned long hpet_t1_cmp;
+static unsigned long hpet_default_delta;
+static unsigned long hpet_pie_delta;
+static unsigned long hpet_pie_limit;
+
+/*
+ * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
+ * is not supported by all HPET implementations for timer 1.
+ *
+ * hpet_rtc_timer_init() is called when the rtc is initialized.
+ */
+int hpet_rtc_timer_init(void)
+{
+ unsigned long cfg, cnt, delta, flags;
+
+ if (!is_hpet_enabled())
+ return 0;
+
+ if (!hpet_default_delta) {
+ uint64_t clc;
+
+ clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
+ clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
+ hpet_default_delta = (unsigned long) clc;
+ }
+
+ if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
+ delta = hpet_default_delta;
+ else
+ delta = hpet_pie_delta;
+
+ local_irq_save(flags);
+
+ cnt = delta + hpet_readl(HPET_COUNTER);
+ hpet_writel(cnt, HPET_T1_CMP);
+ hpet_t1_cmp = cnt;
+
+ cfg = hpet_readl(HPET_T1_CFG);
+ cfg &= ~HPET_TN_PERIODIC;
+ cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
+ hpet_writel(cfg, HPET_T1_CFG);
+
+ local_irq_restore(flags);
+
+ return 1;
+}
+
+/*
+ * The functions below are called from rtc driver.
+ * Return 0 if HPET is not being used.
+ * Otherwise do the necessary changes and return 1.
+ */
+int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
+{
+ if (!is_hpet_enabled())
+ return 0;
+
+ hpet_rtc_flags &= ~bit_mask;
+ return 1;
+}
+
+int hpet_set_rtc_irq_bit(unsigned long bit_mask)
+{
+ unsigned long oldbits = hpet_rtc_flags;
+
+ if (!is_hpet_enabled())
+ return 0;
+
+ hpet_rtc_flags |= bit_mask;
+
+ if (!oldbits)
+ hpet_rtc_timer_init();
+
+ return 1;
+}
+
+int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
+ unsigned char sec)
+{
+ if (!is_hpet_enabled())
+ return 0;
+
+ hpet_alarm_time.tm_hour = hrs;
+ hpet_alarm_time.tm_min = min;
+ hpet_alarm_time.tm_sec = sec;
+
+ return 1;
+}
+
+int hpet_set_periodic_freq(unsigned long freq)
+{
+ uint64_t clc;
+
+ if (!is_hpet_enabled())
+ return 0;
+
+ if (freq <= DEFAULT_RTC_INT_FREQ)
+ hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
+ else {
+ clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
+ do_div(clc, freq);
+ clc >>= hpet_clockevent.shift;
+ hpet_pie_delta = (unsigned long) clc;
+ }
+ return 1;
+}
+
+int hpet_rtc_dropped_irq(void)
+{
+ return is_hpet_enabled();
+}
+
+static void hpet_rtc_timer_reinit(void)
+{
+ unsigned long cfg, delta;
+ int lost_ints = -1;
+
+ if (unlikely(!hpet_rtc_flags)) {
+ cfg = hpet_readl(HPET_T1_CFG);
+ cfg &= ~HPET_TN_ENABLE;
+ hpet_writel(cfg, HPET_T1_CFG);
+ return;
+ }
+
+ if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
+ delta = hpet_default_delta;
+ else
+ delta = hpet_pie_delta;
+
+ /*
+ * Increment the comparator value until we are ahead of the
+ * current count.
+ */
+ do {
+ hpet_t1_cmp += delta;
+ hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
+ lost_ints++;
+ } while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);
+
+ if (lost_ints) {
+ if (hpet_rtc_flags & RTC_PIE)
+ hpet_pie_count += lost_ints;
+ if (printk_ratelimit())
+ printk(KERN_WARNING "rtc: lost %d interrupts\n",
+ lost_ints);
+ }
+}
+
+irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
+{
+ struct rtc_time curr_time;
+ unsigned long rtc_int_flag = 0;
+
+ hpet_rtc_timer_reinit();
+
+ if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
+ rtc_get_rtc_time(&curr_time);
+
+ if (hpet_rtc_flags & RTC_UIE &&
+ curr_time.tm_sec != hpet_prev_update_sec) {
+ rtc_int_flag = RTC_UF;
+ hpet_prev_update_sec = curr_time.tm_sec;
+ }
+
+ if (hpet_rtc_flags & RTC_PIE &&
+ ++hpet_pie_count >= hpet_pie_limit) {
+ rtc_int_flag |= RTC_PF;
+ hpet_pie_count = 0;
+ }
+
+ if (hpet_rtc_flags & RTC_AIE &&
+ (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
+ (curr_time.tm_min == hpet_alarm_time.tm_min) &&
+ (curr_time.tm_hour == hpet_alarm_time.tm_hour))
+ rtc_int_flag |= RTC_AF;
+
+ if (rtc_int_flag) {
+ rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
+ rtc_interrupt(rtc_int_flag, dev_id);
+ }
+ return IRQ_HANDLED;
+}
+#endif
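
hpet_enable() above derives the event-device mult from the HPET period register, which reports femtoseconds per counter tick, while init_hpet_clocksource() derives the opposite-direction factor (cycles to nanoseconds) from the same register. A standalone sketch of both conversions, using 69841279 fs (about 14.318 MHz, a common HPET period) purely as an example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* HPET_PERIOD is femtoseconds per counter tick; 10^15 fs = 1 s. */
	uint64_t period_fs = 69841279;		/* example: ~14.318 MHz */
	uint64_t freq_hz   = 1000000000000000ULL / period_fs;

	/* Clock event direction: ticks = (ns * mult) >> 32 */
	uint64_t evt_mult = (freq_hz << 32) / 1000000000ULL;

	/* Clocksource direction: ns = (cycles * mult) >> HPET_SHIFT (22),
	 * i.e. the period in fs scaled down to ns. */
	uint64_t cs_mult = (period_fs << 22) / 1000000ULL;

	printf("freq = %llu Hz, event mult = %llu, clocksource mult = %llu\n",
	       (unsigned long long)freq_hz,
	       (unsigned long long)evt_mult,
	       (unsigned long long)cs_mult);
	return 0;
}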
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index 9a0060b92e32..a6bc7bb38834 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -2,7 +2,7 @@
* i8253.c 8253/PIT functions
*
*/
-#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/sysdev.h>
@@ -19,17 +19,97 @@
DEFINE_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
-void setup_pit_timer(void)
+/*
+ * HPET replaces the PIT, when enabled. So we need to know which of
+ * the two timers is used.
+ */
+struct clock_event_device *global_clock_event;
+
+/*
+ * Initialize the PIT timer.
+ *
+ * This is also called after resume to bring the PIT into operation again.
+ */
+static void init_pit_timer(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i8253_lock, flags);
+
+ switch(mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* binary, mode 2, LSB/MSB, ch 0 */
+ outb_p(0x34, PIT_MODE);
+ udelay(10);
+ outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
+ udelay(10);
+ outb(LATCH >> 8 , PIT_CH0); /* MSB */
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ /* One shot setup */
+ outb_p(0x38, PIT_MODE);
+ udelay(10);
+ break;
+ }
+ spin_unlock_irqrestore(&i8253_lock, flags);
+}
+
+/*
+ * Program the next event in oneshot mode
+ *
+ * Delta is given in PIT ticks
+ */
+static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
unsigned long flags;
spin_lock_irqsave(&i8253_lock, flags);
- outb_p(0x34,PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
- udelay(10);
- outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
- udelay(10);
- outb(LATCH >> 8 , PIT_CH0); /* MSB */
+ outb_p(delta & 0xff , PIT_CH0); /* LSB */
+ outb(delta >> 8 , PIT_CH0); /* MSB */
spin_unlock_irqrestore(&i8253_lock, flags);
+
+ return 0;
+}
+
+/*
+ * On UP the PIT can serve all of the possible timer functions. On SMP systems
+ * it can be solely used for the global tick.
+ *
+ * The profiling and update capabilities are switched off once the local apic is
+ * registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
+ * !using_apic_timer decisions in do_timer_interrupt_hook()
+ */
+struct clock_event_device pit_clockevent = {
+ .name = "pit",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_O