author    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-05 17:46:42 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-05 17:46:42 -0700
commit    e7fda6c4c3c1a7d6996dd75fd84670fa0b5d448f (patch)
tree      daa51c16462c318b890acf7f01fba5827275dd74 /drivers/clocksource
parent    08d69a25714429850cf9ef71f22d8cdc9189d93f (diff)
parent    953dec21aed4038464fec02f96a2f1b8701a5bce (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer and time updates from Thomas Gleixner:
 "A rather large update of timers, timekeeping & co

  - Core timekeeping code is year-2038 safe now for 32bit machines.
    Now we just need to fix all in kernel users and the gazillion of
    user space interfaces which rely on timespec/timeval :)

  - Better cache layout for the timekeeping internal data structures.

  - Proper nanosecond based interfaces for in kernel users.

  - Tree wide cleanup of code which wants nanoseconds but does hoops
    and loops to convert back and forth from timespecs.  Some of it
    definitely belongs into the ugly code museum.

  - Consolidation of the timekeeping interface zoo.

  - A fast NMI safe accessor to clock monotonic for tracing.  This is
    a long standing request to support correlated user/kernel space
    traces.  With proper NTP frequency correction it's also suitable
    for correlation of traces across separate machines.

  - Checkpoint/restart support for timerfd.

  - A few NOHZ[_FULL] improvements in the [hr]timer code.

  - Code move from kernel to kernel/time of all time* related code.

  - New clocksource/event drivers from the ARM universe.  I'm really
    impressed that despite an architected timer in the newer chips SoC
    manufacturers insist on inventing new and differently broken SoC
    specific timers.

    [ Ed. "Impressed"?  I don't think that word means what you think
      it means ]

  - Another round of code move from arch to drivers.  Looks like most
    of the legacy mess in ARM regarding timers is sorted out except
    for a few obnoxious strongholds.

  - The usual updates and fixlets all over the place"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (114 commits)
  timekeeping: Fixup typo in update_vsyscall_old definition
  clocksource: document some basic timekeeping concepts
  timekeeping: Use cached ntp_tick_length when accumulating error
  timekeeping: Rework frequency adjustments to work better w/ nohz
  timekeeping: Minor fixup for timespec64->timespec assignment
  ftrace: Provide trace clocks monotonic
  timekeeping: Provide fast and NMI safe access to CLOCK_MONOTONIC
  seqcount: Add raw_write_seqcount_latch()
  seqcount: Provide raw_read_seqcount()
  timekeeping: Use tk_read_base as argument for timekeeping_get_ns()
  timekeeping: Create struct tk_read_base and use it in struct timekeeper
  timekeeping: Restructure the timekeeper some more
  clocksource: Get rid of cycle_last
  clocksource: Move cycle_last validation to core code
  clocksource: Make delta calculation a function
  wireless: ath9k: Get rid of timespec conversions
  drm: vmwgfx: Use nsec based interfaces
  drm: i915: Use nsec based interfaces
  timekeeping: Provide ktime_get_raw()
  hangcheck-timer: Use ktime_get_ns()
  ...
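Of the items above, the fast NMI-safe CLOCK_MONOTONIC accessor rests on the new seqcount latch helpers that also appear in the shortlog (raw_write_seqcount_latch() / raw_read_seqcount()): the timekeeper keeps two copies of its read base and flips a sequence counter around each update, so a lock-free reader always computes its result from a copy that is not being modified.  What follows is a minimal userspace sketch of that idea only; the struct and function names are illustrative and the memory ordering is simplified, this is not the kernel's actual implementation.

/*
 * Sketch of the seqcount-latch pattern behind the NMI-safe accessor.
 * Illustrative names; not kernel code (see kernel/time/timekeeping.c
 * and include/linux/seqlock.h for the real thing).
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct fast_tk_base {
	uint64_t base_ns;     /* monotonic ns at the last update       */
	uint64_t cycle_last;  /* clocksource cycles at the last update */
	uint32_t mult, shift; /* cycles -> ns conversion factors       */
};

static _Atomic unsigned int seq;      /* bumped twice per update           */
static struct fast_tk_base copies[2]; /* readers use copies[seq & 1]; the
                                       * writer modifies the other copy    */

static void fast_tk_update(const struct fast_tk_base *b)
{
	/* Odd seq steers concurrent readers to copies[1] (old but consistent). */
	atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);
	copies[0] = *b;
	/* Even seq steers readers back to the freshly written copies[0]. */
	atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);
	copies[1] = *b;
}

static uint64_t fast_tk_read_ns(uint64_t cycles_now)
{
	struct fast_tk_base snap;
	unsigned int s;

	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
		snap = copies[s & 1];
		atomic_thread_fence(memory_order_acquire); /* like smp_rmb() */
	} while (s != atomic_load_explicit(&seq, memory_order_relaxed));

	return snap.base_ns +
	       (((cycles_now - snap.cycle_last) * snap.mult) >> snap.shift);
}

int main(void)
{
	struct fast_tk_base b = { .base_ns = 1000, .cycle_last = 0,
				  .mult = 4, .shift = 2 };  /* 1 cycle == 1 ns */

	fast_tk_update(&b);
	printf("%llu ns\n", (unsigned long long)fast_tk_read_ns(500));
	return 0;
}

The point of the two copies is that the reader never blocks and never sees a torn update, which is what makes the accessor usable from NMI context where taking the timekeeper lock is not an option.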
Diffstat (limited to 'drivers/clocksource')
-rw-r--r--  drivers/clocksource/Kconfig          |  14
-rw-r--r--  drivers/clocksource/Makefile         |   3
-rw-r--r--  drivers/clocksource/clps711x-timer.c | 131
-rw-r--r--  drivers/clocksource/exynos_mct.c     |  63
-rw-r--r--  drivers/clocksource/mtk_timer.c      | 261
-rw-r--r--  drivers/clocksource/pxa_timer.c      | 227
-rw-r--r--  drivers/clocksource/sh_cmt.c         | 233
-rw-r--r--  drivers/clocksource/sh_mtu2.c        | 146
-rw-r--r--  drivers/clocksource/sh_tmu.c         | 127
-rw-r--r--  drivers/clocksource/timer-marco.c    |   3
-rw-r--r--  drivers/clocksource/timer-prima2.c   |   3
11 files changed, 868 insertions, 343 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 065131cbfcc0..cfd6519df661 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,3 +1,5 @@
+menu "Clock Source drivers"
+
config CLKSRC_OF
bool
@@ -125,6 +127,7 @@ config CLKSRC_METAG_GENERIC
config CLKSRC_EXYNOS_MCT
def_bool y if ARCH_EXYNOS
+ depends on !ARM64
help
Support for Multi Core Timer controller on Exynos SoCs.
@@ -149,6 +152,11 @@ config VF_PIT_TIMER
config SYS_SUPPORTS_SH_CMT
bool
+config MTK_TIMER
+ select CLKSRC_OF
+ select CLKSRC_MMIO
+ bool
+
config SYS_SUPPORTS_SH_MTU2
bool
@@ -173,7 +181,7 @@ config SH_TIMER_MTU2
default SYS_SUPPORTS_SH_MTU2
help
This enables build of a clockevent driver for the Multi-Function
- Timer Pulse Unit 2 (TMU2) hardware available on SoCs from Renesas.
+ Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
This hardware comes with 16 bit-timer registers.
config SH_TIMER_TMU
@@ -187,7 +195,7 @@ config SH_TIMER_TMU
config EM_TIMER_STI
bool "Renesas STI timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
+ depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
default SYS_SUPPORTS_EM_STI
help
This enables build of a clocksource and clockevent driver for
@@ -207,3 +215,5 @@ config CLKSRC_VERSATILE
counter available in the "System Registers" block of
ARM Versatile, RealView and Versatile Express reference
platforms.
+
+endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 800b1303c236..7fd9fd1dff42 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -16,9 +16,11 @@ obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ORION_TIMER) += time-orion.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
+obj-$(CONFIG_ARCH_CLPS711X) += clps711x-timer.o
obj-$(CONFIG_ARCH_MARCO) += timer-marco.o
obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
+obj-$(CONFIG_ARCH_PXA) += pxa_timer.o
obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
obj-$(CONFIG_ARCH_U300) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
@@ -34,6 +36,7 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o
obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
+obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
new file mode 100644
index 000000000000..d83ec1f2fddc
--- /dev/null
+++ b/drivers/clocksource/clps711x-timer.c
@@ -0,0 +1,131 @@
+/*
+ * Cirrus Logic CLPS711X clocksource driver
+ *
+ * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+enum {
+ CLPS711X_CLKSRC_CLOCKSOURCE,
+ CLPS711X_CLKSRC_CLOCKEVENT,
+};
+
+static void __iomem *tcd;
+
+static u64 notrace clps711x_sched_clock_read(void)
+{
+ return ~readw(tcd);
+}
+
+static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
+{
+ unsigned long rate;
+
+ if (!base)
+ return -ENOMEM;
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ rate = clk_get_rate(clock);
+
+ tcd = base;
+
+ clocksource_mmio_init(tcd, "clps711x-clocksource", rate, 300, 16,
+ clocksource_mmio_readw_down);
+
+ sched_clock_register(clps711x_sched_clock_read, 16, rate);
+
+ return 0;
+}
+
+static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static void clps711x_clockevent_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+}
+
+static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
+ unsigned int irq)
+{
+ struct clock_event_device *clkevt;
+ unsigned long rate;
+
+ if (!irq)
+ return -EINVAL;
+ if (!base)
+ return -ENOMEM;
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
+ if (!clkevt)
+ return -ENOMEM;
+
+ rate = clk_get_rate(clock);
+
+ /* Set Timer prescaler */
+ writew(DIV_ROUND_CLOSEST(rate, HZ), base);
+
+ clkevt->name = "clps711x-clockevent";
+ clkevt->rating = 300;
+ clkevt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_C3STOP;
+ clkevt->set_mode = clps711x_clockevent_set_mode;
+ clkevt->cpumask = cpumask_of(0);
+ clockevents_config_and_register(clkevt, HZ, 0, 0);
+
+ return request_irq(irq, clps711x_timer_interrupt, IRQF_TIMER,
+ "clps711x-timer", clkevt);
+}
+
+void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
+ unsigned int irq)
+{
+ struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL);
+ struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL);
+
+ BUG_ON(_clps711x_clksrc_init(tc1, tc1_base));
+ BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq));
+}
+
+#ifdef CONFIG_CLKSRC_OF
+static void __init clps711x_timer_init(struct device_node *np)
+{
+ unsigned int irq = irq_of_parse_and_map(np, 0);
+ struct clk *clock = of_clk_get(np, 0);
+ void __iomem *base = of_iomap(np, 0);
+
+ switch (of_alias_get_id(np, "timer")) {
+ case CLPS711X_CLKSRC_CLOCKSOURCE:
+ BUG_ON(_clps711x_clksrc_init(clock, base));
+ break;
+ case CLPS711X_CLKSRC_CLOCKEVENT:
+ BUG_ON(_clps711x_clkevt_init(clock, base, irq));
+ break;
+ default:
+ break;
+ }
+}
+CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init);
+#endif
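A side note on the clps711x driver above: the hardware counter counts down (hence clocksource_mmio_readw_down), while sched_clock_register() expects a value that increases, so clps711x_sched_clock_read() returns the bitwise complement of the 16-bit counter.  The following tiny standalone program (a hypothetical demo, not driver code) shows why the complement of an N-bit down counter behaves like an N-bit up counter with the same period:

/* Demo: the complement of a 16-bit down counter increases monotonically
 * modulo 2^16, which is exactly what the sched_clock framework needs
 * when it is told the counter is 16 bits wide. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t counter = 3;	/* pretend 16-bit hardware down counter */
	int i;

	for (i = 0; i < 6; i++, counter--)
		printf("raw=%5u  ~raw=%5u\n",
		       (unsigned)counter, (unsigned)(uint16_t)~counter);
	return 0;
}

As the raw value steps 3, 2, 1, 0 and wraps to 65535, the complement steps 65532 ... 65535 and wraps to 0, i.e. it keeps counting up across the wrap.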
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index ab51bf20a3ed..9403061a2acc 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -94,7 +94,7 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
u32 mask;
u32 i;
- __raw_writel(value, reg_base + offset);
+ writel_relaxed(value, reg_base + offset);
if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
@@ -144,8 +144,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
/* Wait maximum 1 ms until written values are applied */
for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
- if (__raw_readl(reg_base + stat_addr) & mask) {
- __raw_writel(mask, reg_base + stat_addr);
+ if (readl_relaxed(reg_base + stat_addr) & mask) {
+ writel_relaxed(mask, reg_base + stat_addr);
return;
}
@@ -157,28 +157,51 @@ static void exynos4_mct_frc_start(void)
{
u32 reg;
- reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+ reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
reg |= MCT_G_TCON_START;
exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}
-static cycle_t notrace _exynos4_frc_read(void)
+/**
+ * exynos4_read_count_64 - Read all 64-bits of the global counter
+ *
+ * This will read all 64-bits of the global counter taking care to make sure
+ * that the upper and lower half match. Note that reading the MCT can be quite
+ * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half
+ * only) version when possible.
+ *
+ * Returns the number of cycles in the global counter.
+ */
+static u64 exynos4_read_count_64(void)
{
unsigned int lo, hi;
- u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
+ u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
do {
hi = hi2;
- lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L);
- hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
+ lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
+ hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
} while (hi != hi2);
return ((cycle_t)hi << 32) | lo;
}
+/**
+ * exynos4_read_count_32 - Read the lower 32-bits of the global counter
+ *
+ * This will read just the lower 32-bits of the global counter. This is marked
+ * as notrace so it can be used by the scheduler clock.
+ *
+ * Returns the number of cycles in the global counter (lower 32 bits).
+ */
+static u32 notrace exynos4_read_count_32(void)
+{
+ return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
+}
+
static cycle_t exynos4_frc_read(struct clocksource *cs)
{
- return _exynos4_frc_read();
+ return exynos4_read_count_32();
}
static void exynos4_frc_resume(struct clocksource *cs)
@@ -190,21 +213,23 @@ struct clocksource mct_frc = {
.name = "mct-frc",
.rating = 400,
.read = exynos4_frc_read,
- .mask = CLOCKSOURCE_MASK(64),
+ .mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.resume = exynos4_frc_resume,
};
static u64 notrace exynos4_read_sched_clock(void)
{
- return _exynos4_frc_read();
+ return exynos4_read_count_32();
}
static struct delay_timer exynos4_delay_timer;
static cycles_t exynos4_read_current_timer(void)
{
- return _exynos4_frc_read();
+ BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
+ "cycles_t needs to move to 32-bit for ARM64 usage");
+ return exynos4_read_count_32();
}
static void __init exynos4_clocksource_init(void)
@@ -218,14 +243,14 @@ static void __init exynos4_clocksource_init(void)
if (clocksource_register_hz(&mct_frc, clk_rate))
panic("%s: can't register clocksource\n", mct_frc.name);
- sched_clock_register(exynos4_read_sched_clock, 64, clk_rate);
+ sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
}
static void exynos4_mct_comp0_stop(void)
{
unsigned int tcon;
- tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+ tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);
exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
@@ -238,14 +263,14 @@ static void exynos4_mct_comp0_start(enum clock_event_mode mode,
unsigned int tcon;
cycle_t comp_cycle;
- tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+ tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
if (mode == CLOCK_EVT_MODE_PERIODIC) {
tcon |= MCT_G_TCON_COMP0_AUTO_INC;
exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
}
- comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
+ comp_cycle = exynos4_read_count_64() + cycles;
exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);
@@ -327,7 +352,7 @@ static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;
- tmp = __raw_readl(reg_base + offset);
+ tmp = readl_relaxed(reg_base + offset);
if (tmp & mask) {
tmp &= ~mask;
exynos4_mct_write(tmp, offset);
@@ -349,7 +374,7 @@ static void exynos4_mct_tick_start(unsigned long cycles,
/* enable MCT tick interrupt */
exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);
- tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET);
+ tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
MCT_L_TCON_INTERVAL_MODE;
exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
@@ -401,7 +426,7 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
exynos4_mct_tick_stop(mevt);
/* Clear the MCT tick interrupt */
- if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
+ if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
return 1;
} else {
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
new file mode 100644
index 000000000000..32a3d25795d3
--- /dev/null
+++ b/drivers/clocksource/mtk_timer.c
@@ -0,0 +1,261 @@
+/*
+ * Mediatek SoCs General-Purpose Timer handling.
+ *
+ * Copyright (C) 2014 Matthias Brugger
+ *
+ * Matthias Brugger <matthias.bgg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#define GPT_IRQ_EN_REG 0x00
+#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
+#define GPT_IRQ_ACK_REG 0x08
+#define GPT_IRQ_ACK(val) BIT((val) - 1)
+
+#define TIMER_CTRL_REG(val) (0x10 * (val))
+#define TIMER_CTRL_OP(val) (((val) & 0x3) << 4)
+#define TIMER_CTRL_OP_ONESHOT (0)
+#define TIMER_CTRL_OP_REPEAT (1)
+#define TIMER_CTRL_OP_FREERUN (3)
+#define TIMER_CTRL_CLEAR (2)
+#define TIMER_CTRL_ENABLE (1)
+#define TIMER_CTRL_DISABLE (0)
+
+#define TIMER_CLK_REG(val) (0x04 + (0x10 * (val)))
+#define TIMER_CLK_SRC(val) (((val) & 0x1) << 4)
+#define TIMER_CLK_SRC_SYS13M (0)
+#define TIMER_CLK_SRC_RTC32K (1)
+#define TIMER_CLK_DIV1 (0x0)
+#define TIMER_CLK_DIV2 (0x1)
+
+#define TIMER_CNT_REG(val) (0x08 + (0x10 * (val)))
+#define TIMER_CMP_REG(val) (0x0C + (0x10 * (val)))
+
+#define GPT_CLK_EVT 1
+#define GPT_CLK_SRC 2
+
+struct mtk_clock_event_device {
+ void __iomem *gpt_base;
+ u32 ticks_per_jiffy;
+ struct clock_event_device dev;
+};
+
+static inline struct mtk_clock_event_device *to_mtk_clk(
+ struct clock_event_device *c)
+{
+ return container_of(c, struct mtk_clock_event_device, dev);
+}
+
+static void mtk_clkevt_time_stop(struct mtk_clock_event_device *evt, u8 timer)
+{
+ u32 val;
+
+ val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
+ writel(val & ~TIMER_CTRL_ENABLE, evt->gpt_base +
+ TIMER_CTRL_REG(timer));
+}
+
+static void mtk_clkevt_time_setup(struct mtk_clock_event_device *evt,
+ unsigned long delay, u8 timer)
+{
+ writel(delay, evt->gpt_base + TIMER_CMP_REG(timer));
+}
+
+static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt,
+ bool periodic, u8 timer)
+{
+ u32 val;
+
+ /* Acknowledge interrupt */
+ writel(GPT_IRQ_ACK(timer), evt->gpt_base + GPT_IRQ_ACK_REG);
+
+ val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
+
+ /* Clear 2 bit timer operation mode field */
+ val &= ~TIMER_CTRL_OP(0x3);
+
+ if (periodic)
+ val |= TIMER_CTRL_OP(TIMER_CTRL_OP_REPEAT);
+ else
+ val |= TIMER_CTRL_OP(TIMER_CTRL_OP_ONESHOT);
+
+ writel(val | TIMER_CTRL_ENABLE | TIMER_CTRL_CLEAR,
+ evt->gpt_base + TIMER_CTRL_REG(timer));
+}
+
+static void mtk_clkevt_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ struct mtk_clock_event_device *evt = to_mtk_clk(clk);
+
+ mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
+ mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* Timer is enabled in set_next_event */
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ /* No more interrupts will occur as source is disabled */
+ break;
+ }
+}
+
+static int mtk_clkevt_next_event(unsigned long event,
+ struct clock_event_device *clk)
+{
+ struct mtk_clock_event_device *evt = to_mtk_clk(clk);
+
+ mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
+ mtk_clkevt_time_setup(evt, event, GPT_CLK_EVT);
+ mtk_clkevt_time_start(evt, false, GPT_CLK_EVT);
+
+ return 0;
+}
+
+static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
+{
+ struct mtk_clock_event_device *evt = dev_id;
+
+ /* Acknowledge timer0 irq */
+ writel(GPT_IRQ_ACK(GPT_CLK_EVT), evt->gpt_base + GPT_IRQ_ACK_REG);
+ evt->dev.event_handler(&evt->dev);
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_timer_global_reset(struct mtk_clock_event_device *evt)
+{
+ /* Disable all interrupts */
+ writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
+ /* Acknowledge all interrupts */
+ writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
+}
+
+static void
+mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
+{
+ writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
+ evt->gpt_base + TIMER_CTRL_REG(timer));
+
+ writel(TIMER_CLK_SRC(TIMER_CLK_SRC_SYS13M) | TIMER_CLK_DIV1,
+ evt->gpt_base + TIMER_CLK_REG(timer));
+
+ writel(0x0, evt->gpt_base + TIMER_CMP_REG(timer));
+
+ writel(TIMER_CTRL_OP(option) | TIMER_CTRL_ENABLE,
+ evt->gpt_base + TIMER_CTRL_REG(timer));
+}
+
+static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
+{
+ u32 val;
+
+ val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
+ writel(val | GPT_IRQ_ENABLE(timer),
+ evt->gpt_base + GPT_IRQ_EN_REG);
+}
+
+static void __init mtk_timer_init(struct device_node *node)
+{
+ struct mtk_clock_event_device *evt;
+ struct resource res;
+ unsigned long rate = 0;
+ struct clk *clk;
+
+ evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+ if (!evt) {
+ pr_warn("Can't allocate mtk clock event driver struct");
+ return;
+ }
+
+ evt->dev.name = "mtk_tick";
+ evt->dev.rating = 300;
+ evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ evt->dev.set_mode = mtk_clkevt_mode;
+ evt->dev.set_next_event = mtk_clkevt_next_event;
+ evt->dev.cpumask = cpu_possible_mask;
+
+ evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer");
+ if (IS_ERR(evt->gpt_base)) {
+ pr_warn("Can't get resource\n");
+ return;
+ }
+
+ evt->dev.irq = irq_of_parse_and_map(node, 0);
+ if (evt->dev.irq <= 0) {
+ pr_warn("Can't parse IRQ");
+ goto err_mem;
+ }
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_warn("Can't get timer clock");
+ goto err_irq;
+ }
+
+ if (clk_prepare_enable(clk)) {
+ pr_warn("Can't prepare clock");
+ goto err_clk_put;
+ }
+ rate = clk_get_rate(clk);
+
+ if (request_irq(evt->dev.irq, mtk_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
+ pr_warn("failed to setup irq %d\n", evt->dev.irq);
+ goto err_clk_disable;
+ }
+
+ evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
+
+ mtk_timer_global_reset(evt);
+
+ /* Configure clock source */
+ mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
+ clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
+ node->name, rate, 300, 32, clocksource_mmio_readl_up);
+
+ /* Configure clock event */
+ mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
+ mtk_timer_enable_irq(evt, GPT_CLK_EVT);
+
+ clockevents_config_and_register(&evt->dev, rate, 0x3,
+ 0xffffffff);
+ return;
+
+err_clk_disable:
+ clk_disable_unprepare(clk);
+err_clk_put:
+ clk_put(clk);
+err_irq:
+ irq_dispose_mapping(evt->dev.irq);
+err_mem:
+ iounmap(evt->gpt_base);
+ of_address_to_resource(node, 0, &res);
+ release_mem_region(res.start, resource_size(&res));
+}
+CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
new file mode 100644
index 000000000000..941f3f344e08
--- /dev/null
+++ b/drivers/clocksource/pxa_timer.c
@@ -0,0 +1,227 @@
+/*
+ * arch/arm/mach-pxa/time.c
+ *
+ * PXA clocksource, clockevents, and OST interrupt handlers.
+ * Copyright (c) 2007 by Bill Gatliff <bgat@billgatliff.com>.
+ *
+ * Derived from Nicolas Pitre's PXA timer handler Copyright (c) 2001
+ * by MontaVista Software, Inc. (Nico, your code rocks!)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+#include <asm/div64.h>
+
+#define OSMR0 0x00 /* OS Timer 0 Match Register */
+#define OSMR1 0x04 /* OS Timer 1 Match Register */
+#define OSMR2 0x08 /* OS Timer 2 Match Register */
+#define OSMR3 0x0C /* OS Timer 3 Match Register */
+
+#define OSCR 0x10 /* OS Timer Counter Register */
+#define OSSR 0x14 /* OS Timer Status Register */
+#define OWER 0x18 /* OS Timer Watchdog Enable Register */
+#define OIER 0x1C /* OS Timer Interrupt Enable Register */
+
+#define OSSR_M3 (1 << 3) /* Match status channel 3 */
+#define OSSR_M2 (1 << 2) /* Match status channel 2 */
+#define OSSR_M1 (1 << 1) /* Match status channel 1 */
+#define OSSR_M0 (1 << 0) /* Match status channel 0 */
+
+#define OIER_E0 (1 << 0) /* Interrupt enable channel 0 */
+
+/*
+ * This is PXA's sched_clock implementation. This has a resolution
+ * of at least 308 ns and a maximum value of 208 days.
+ *
+ * The return value is guaranteed to be monotonic in that range as
+ * long as there is always less than 582 seconds between successive
+ * calls to sched_clock() which should always be the case in practice.
+ */
+
+#define timer_readl(reg) readl_relaxed(timer_base + (reg))
+#define timer_writel(val, reg) writel_relaxed((val), timer_base + (reg))
+
+static void __iomem *timer_base;
+
+static u64 notrace pxa_read_sched_clock(void)
+{
+ return timer_readl(OSCR);
+}
+
+
+#define MIN_OSCR_DELTA 16
+
+static irqreturn_t
+pxa_ost0_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *c = dev_id;
+
+ /* Disarm the compare/match, signal the event. */
+ timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
+ timer_writel(OSSR_M0, OSSR);
+ c->event_handler(c);
+
+ return IRQ_HANDLED;
+}
+
+static int
+pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev)
+{
+ unsigned long next, oscr;
+
+ timer_writel(timer_readl(OIER) | OIER_E0, OIER);
+ next = timer_readl(OSCR) + delta;
+ timer_writel(next, OSMR0);
+ oscr = timer_readl(OSCR);
+
+ return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
+}
+
+static void
+pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
+ timer_writel(OSSR_M0, OSSR);
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ /* initializing, released, or preparing for suspend */
+ timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
+ timer_writel(OSSR_M0, OSSR);
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ case CLOCK_EVT_MODE_PERIODIC:
+ break;
+ }
+}
+
+#ifdef CONFIG_PM
+static unsigned long osmr[4], oier, oscr;
+
+static void pxa_timer_suspend(struct clock_event_device *cedev)
+{
+ osmr[0] = timer_readl(OSMR0);
+ osmr[1] = timer_readl(OSMR1);
+ osmr[2] = timer_readl(OSMR2);
+ osmr[3] = timer_readl(OSMR3);
+ oier = timer_readl(OIER);
+ oscr = timer_readl(OSCR);
+}
+
+static void pxa_timer_resume(struct clock_event_device *cedev)
+{
+ /*
+ * Ensure that we have at least MIN_OSCR_DELTA between match
+ * register 0 and the OSCR, to guarantee that we will receive
+ * the one-shot timer interrupt. We adjust OSMR0 in preference
+ * to OSCR to guarantee that OSCR is monotonically incrementing.
+ */
+ if (osmr[0] - oscr < MIN_OSCR_DELTA)
+ osmr[0] += MIN_OSCR_DELTA;
+
+ timer_writel(osmr[0], OSMR0);
+ timer_writel(osmr[1], OSMR1);
+ timer_writel(osmr[2], OSMR2);
+ timer_writel(osmr[3], OSMR3);
+ timer_writel(oier, OIER);
+ timer_writel(oscr, OSCR);
+}
+#else
+#define pxa_timer_suspend NULL
+#define pxa_timer_resume NULL
+#endif
+
+static struct clock_event_device ckevt_pxa_osmr0 = {
+ .name = "osmr0",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_next_event = pxa_osmr0_set_next_event,
+ .set_mode = pxa_osmr0_set_mode,
+ .suspend = pxa_timer_suspend,
+ .resume = pxa_timer_resume,
+};
+
+static struct irqaction pxa_ost0_irq = {
+ .name = "ost0",
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = pxa_ost0_interrupt,
+ .dev_id = &ckevt_pxa_osmr0,
+};
+
+static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
+{
+ timer_writel(0, OIER);
+ timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
+
+ sched_clock_register(pxa_read_sched_clock, 32, clock_tick_rate);
+
+ ckevt_pxa_osmr0.cpumask = cpumask_of(0);
+
+ setup_irq(irq, &pxa_ost0_irq);
+
+ clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
+ 32, clocksource_mmio_readl_up);
+ clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
+ MIN_OSCR_DELTA * 2, 0x7fffffff);
+}
+
+static void __init pxa_timer_dt_init(struct device_node *np)
+{
+ struct clk *clk;
+ int irq;
+
+ /* timer registers are shared with watchdog timer */
+ timer_base = of_iomap(np, 0);
+ if (!timer_base)
+ panic("%s: unable to map resource\n", np->name);
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_crit("%s: unable to get clk\n", np->name);
+ return;
+ }
+ clk_prepare_enable(clk);
+
+ /* we are only interested in OS-timer0 irq */
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
+ return;
+ }
+
+ pxa_timer_common_init(irq, clk_get_rate(clk));
+}
+CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
+
+/*
+ * Legacy timer init for non device-tree boards.
+ */
+void __init pxa_timer_nodt_init(int irq, void __iomem *base,
+ unsigned long clock_tick_rate)
+{
+ struct clk *clk;
+
+ timer_base = base;
+ clk = clk_get(NULL, "OSTIMER0");
+ if (clk && !IS_ERR(clk))
+ clk_prepare_enable(clk);
+ else
+ pr_crit("%s: unable to get clk\n", __func__);
+
+ pxa_timer_common_init(irq, clock_tick_rate);
+}
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index dfa780396b91..2bd13b53b727 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -24,6 +24,7 @@
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -114,14 +115,15 @@ struct sh_cmt_device {
struct platform_device *pdev;
const struct sh_cmt_info *info;
- bool legacy;
- void __iomem *mapbase_ch;
void __iomem *mapbase;
struct clk *clk;
+ raw_spinlock_t lock; /* Protect the shared start/stop register */
+
struct sh_cmt_channel *channels;
unsigned int num_channels;
+ unsigned int hw_channels;
bool has_clockevent;
bool has_clocksource;
@@ -301,14 +303,12 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
return v2;
}
-static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
-
static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
- raw_spin_lock_irqsave(&sh_cmt_lock, flags);
+ raw_spin_lock_irqsave(&ch->cmt->lock, flags);
value = sh_cmt_read_cmstr(ch);
if (start)
@@ -317,7 +317,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
value &= ~(1 << ch->timer_bit);
sh_cmt_write_cmstr(ch, value);
- raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
+ raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
}
static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate)
@@ -792,7 +792,7 @@ static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
int irq;
int ret;
- irq = platform_get_irq(ch->cmt->pdev, ch->cmt->legacy ? 0 : ch->index);
+ irq = platform_get_irq(ch->cmt->pdev, ch->index);
if (irq < 0) {
dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
ch->index);
@@ -863,33 +863,26 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
* Compute the address of the channel control register block. For the
* timers with a per-channel start/stop register, compute its address
* as well.
- *
- * For legacy configuration the address has been mapped explicitly.
*/
- if (cmt->legacy) {
- ch->ioctrl = cmt->mapbase_ch;
- } else {
- switch (cmt->info->model) {
- case SH_CMT_16BIT:
- ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
- break;
- case SH_CMT_32BIT:
- case SH_CMT_48BIT:
- ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
- break;
- case SH_CMT_32BIT_FAST:
- /*
- * The 32-bit "fast" timer has a single channel at hwidx
- * 5 but is located at offset 0x40 instead of 0x60 for
- * some reason.
- */