path: root/arch/sh64/kernel
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sh64/kernel
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/sh64/kernel')
-rw-r--r--  arch/sh64/kernel/Makefile          36
-rw-r--r--  arch/sh64/kernel/alphanum.c        45
-rw-r--r--  arch/sh64/kernel/asm-offsets.c     33
-rw-r--r--  arch/sh64/kernel/dma.c            297
-rw-r--r--  arch/sh64/kernel/early_printk.c   105
-rw-r--r--  arch/sh64/kernel/entry.S         2103
-rw-r--r--  arch/sh64/kernel/fpu.c            170
-rw-r--r--  arch/sh64/kernel/head.S           373
-rw-r--r--  arch/sh64/kernel/init_task.c       46
-rw-r--r--  arch/sh64/kernel/irq.c            116
-rw-r--r--  arch/sh64/kernel/irq_intc.c       272
-rw-r--r--  arch/sh64/kernel/led.c             41
-rw-r--r--  arch/sh64/kernel/module.c         161
-rw-r--r--  arch/sh64/kernel/pci-dma.c         50
-rw-r--r--  arch/sh64/kernel/pci_sh5.c        541
-rw-r--r--  arch/sh64/kernel/pci_sh5.h        107
-rw-r--r--  arch/sh64/kernel/pcibios.c        168
-rw-r--r--  arch/sh64/kernel/process.c        962
-rw-r--r--  arch/sh64/kernel/ptrace.c         376
-rw-r--r--  arch/sh64/kernel/semaphore.c      140
-rw-r--r--  arch/sh64/kernel/setup.c          385
-rw-r--r--  arch/sh64/kernel/sh_ksyms.c        89
-rw-r--r--  arch/sh64/kernel/signal.c         727
-rw-r--r--  arch/sh64/kernel/switchto.S       198
-rw-r--r--  arch/sh64/kernel/sys_sh64.c       300
-rw-r--r--  arch/sh64/kernel/syscalls.S       345
-rw-r--r--  arch/sh64/kernel/time.c           610
-rw-r--r--  arch/sh64/kernel/traps.c          961
-rw-r--r--  arch/sh64/kernel/unwind.c         326
-rw-r--r--  arch/sh64/kernel/vmlinux.lds.S    181
30 files changed, 10264 insertions, 0 deletions
diff --git a/arch/sh64/kernel/Makefile b/arch/sh64/kernel/Makefile
new file mode 100644
index 000000000000..5816657c079c
--- /dev/null
+++ b/arch/sh64/kernel/Makefile
@@ -0,0 +1,36 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2000, 2001 Paolo Alberelli
+# Copyright (C) 2003 Paul Mundt
+#
+# Makefile for the Linux sh64 kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+
+extra-y := head.o init_task.o vmlinux.lds
+
+obj-y := process.o signal.o entry.o traps.o irq.o irq_intc.o \
+ ptrace.o setup.o time.o sys_sh64.o semaphore.o sh_ksyms.o \
+ switchto.o syscalls.o
+
+obj-$(CONFIG_HEARTBEAT) += led.o
+obj-$(CONFIG_SH_ALPHANUMERIC) += alphanum.o
+obj-$(CONFIG_SH_DMA) += dma.o
+obj-$(CONFIG_SH_FPU) += fpu.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_KALLSYMS) += unwind.o
+obj-$(CONFIG_PCI) += pci-dma.o pcibios.o
+obj-$(CONFIG_MODULES) += module.o
+
+ifeq ($(CONFIG_PCI),y)
+obj-$(CONFIG_CPU_SH5) += pci_sh5.o
+endif
+
+USE_STANDARD_AS_RULE := true
+
diff --git a/arch/sh64/kernel/alphanum.c b/arch/sh64/kernel/alphanum.c
new file mode 100644
index 000000000000..56d6f9f71524
--- /dev/null
+++ b/arch/sh64/kernel/alphanum.c
@@ -0,0 +1,45 @@
+/*
+ * arch/sh64/kernel/alphanum.c
+ *
+ * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License. See linux/COPYING for more information.
+ *
+ * Machine-independent functions for handling 8-digit alphanumeric display
+ * (e.g. Agilent HDSP-253x)
+ */
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/sched.h>
+
+void mach_alphanum(int pos, unsigned char val);
+void mach_led(int pos, int val);
+
+void print_seg(char *file, int line)
+{
+ int i;
+ unsigned int nibble;
+
+ for (i = 0; i < 5; i++) {
+ mach_alphanum(i, file[i]);
+ }
+
+ for (i = 0; i < 3; i++) {
+ nibble = ((line >> (i * 4)) & 0xf);
+ mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48));
+ }
+}
+
+void print_seg_num(unsigned num)
+{
+ int i;
+ unsigned int nibble;
+
+ for (i = 0; i < 8; i++) {
+ nibble = ((num >> (i * 4)) & 0xf);
+
+ mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48));
+ }
+}
+
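The two helpers above drive the display through the board-specific mach_alphanum() hook: print_seg() puts the first five characters of a file name on digits 0-4 and the low twelve bits of a line number, in hex, on digits 5-7 (nibble + 48 yields '0'-'9', nibble + 55 yields 'A'-'F'), while print_seg_num() shows a full 32-bit value in hex. A minimal, purely hypothetical debugging call site might look like this (the string and value are illustrative only):

	/* Show "traps" plus the current line number in hex on the display. */
	print_seg("traps", __LINE__);

	/* Show an arbitrary 32-bit value, e.g. a faulting address, in hex. */
	print_seg_num(0x80001234);

Note that print_seg() reads file[0]..file[4] unconditionally, so the string passed in must be at least five characters long.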
diff --git a/arch/sh64/kernel/asm-offsets.c b/arch/sh64/kernel/asm-offsets.c
new file mode 100644
index 000000000000..ca76537c16c0
--- /dev/null
+++ b/arch/sh64/kernel/asm-offsets.c
@@ -0,0 +1,33 @@
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ */
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <asm/thread_info.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+ /* offsets into the thread_info struct */
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
+
+ return 0;
+}
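The DEFINE() macro above does not generate working code; it plants marker lines (beginning with "->") in the compiler's assembly output, and the build machinery then turns those markers into a C header that assembly sources such as entry.S pull in via <asm/asm-offsets.h>. As a rough sketch of the result (the exact extraction script lives in the top-level kernel Makefiles, not in this patch, and the numeric offsets below are illustrative, not the real ones):

	/* Hypothetical generated asm-offsets.h fragment: */
	#define TI_TASK        0   /* offsetof(struct thread_info, task) */
	#define TI_FLAGS       16  /* offsetof(struct thread_info, flags) */
	#define TI_PRE_COUNT   20  /* offsetof(struct thread_info, preempt_count) */

This keeps the assembly code in sync with struct thread_info without hand-maintaining offsets.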
diff --git a/arch/sh64/kernel/dma.c b/arch/sh64/kernel/dma.c
new file mode 100644
index 000000000000..09cd9f4670b5
--- /dev/null
+++ b/arch/sh64/kernel/dma.c
@@ -0,0 +1,297 @@
+/*
+ * arch/sh64/kernel/dma.c
+ *
+ * DMA routines for the SH-5 DMAC.
+ *
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <asm/hardware.h>
+#include <asm/dma.h>
+#include <asm/signal.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+
+typedef struct {
+ unsigned long dev_addr;
+ unsigned long mem_addr;
+
+ unsigned int mode;
+ unsigned int count;
+} dma_info_t;
+
+static dma_info_t dma_info[MAX_DMA_CHANNELS];
+static DEFINE_SPINLOCK(dma_spin_lock);
+
+/* arch/sh64/kernel/irq_intc.c */
+extern void make_intc_irq(unsigned int irq);
+
+/* DMAC Interrupts */
+#define DMA_IRQ_DMTE0 18
+#define DMA_IRQ_DERR 22
+
+#define DMAC_COMMON_BASE (dmac_base + 0x08)
+#define DMAC_SAR_BASE (dmac_base + 0x10)
+#define DMAC_DAR_BASE (dmac_base + 0x18)
+#define DMAC_COUNT_BASE (dmac_base + 0x20)
+#define DMAC_CTRL_BASE (dmac_base + 0x28)
+#define DMAC_STATUS_BASE (dmac_base + 0x30)
+
+#define DMAC_SAR(n) (DMAC_SAR_BASE + ((n) * 0x28))
+#define DMAC_DAR(n) (DMAC_DAR_BASE + ((n) * 0x28))
+#define DMAC_COUNT(n) (DMAC_COUNT_BASE + ((n) * 0x28))
+#define DMAC_CTRL(n) (DMAC_CTRL_BASE + ((n) * 0x28))
+#define DMAC_STATUS(n) (DMAC_STATUS_BASE + ((n) * 0x28))
+
+/* DMAC.COMMON Bit Definitions */
+#define DMAC_COMMON_PR 0x00000001 /* Priority */
+ /* Bits 1-2 Reserved */
+#define DMAC_COMMON_ME 0x00000008 /* Master Enable */
+#define DMAC_COMMON_NMI 0x00000010 /* NMI Flag */
+ /* Bits 5-6 Reserved */
+#define DMAC_COMMON_ER 0x00000780 /* Error Response */
+#define DMAC_COMMON_AAE 0x00007800 /* Address Alignment Error */
+ /* Bits 15-63 Reserved */
+
+/* DMAC.SAR Bit Definitions */
+#define DMAC_SAR_ADDR 0xffffffff /* Source Address */
+
+/* DMAC.DAR Bit Definitions */
+#define DMAC_DAR_ADDR 0xffffffff /* Destination Address */
+
+/* DMAC.COUNT Bit Definitions */
+#define DMAC_COUNT_CNT 0xffffffff /* Transfer Count */
+
+/* DMAC.CTRL Bit Definitions */
+#define DMAC_CTRL_TS 0x00000007 /* Transfer Size */
+#define DMAC_CTRL_SI 0x00000018 /* Source Increment */
+#define DMAC_CTRL_DI 0x00000060 /* Destination Increment */
+#define DMAC_CTRL_RS 0x00000780 /* Resource Select */
+#define DMAC_CTRL_IE 0x00000800 /* Interrupt Enable */
+#define DMAC_CTRL_TE 0x00001000 /* Transfer Enable */
+ /* Bits 15-63 Reserved */
+
+/* DMAC.STATUS Bit Definitions */
+#define DMAC_STATUS_TE 0x00000001 /* Transfer End */
+#define DMAC_STATUS_AAE 0x00000002 /* Address Alignment Error */
+ /* Bits 2-63 Reserved */
+
+static unsigned long dmac_base;
+
+void set_dma_count(unsigned int chan, unsigned int count);
+void set_dma_addr(unsigned int chan, unsigned int addr);
+
+static irqreturn_t dma_mte(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned int chan = irq - DMA_IRQ_DMTE0;
+ dma_info_t *info = dma_info + chan;
+ u64 status;
+
+ if (info->mode & DMA_MODE_WRITE) {
+ sh64_out64(info->mem_addr & DMAC_SAR_ADDR, DMAC_SAR(chan));
+ } else {
+ sh64_out64(info->mem_addr & DMAC_DAR_ADDR, DMAC_DAR(chan));
+ }
+
+ set_dma_count(chan, info->count);
+
+ /* Clear the TE bit */
+ status = sh64_in64(DMAC_STATUS(chan));
+ status &= ~DMAC_STATUS_TE;
+ sh64_out64(status, DMAC_STATUS(chan));
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq_dmte = {
+ .handler = dma_mte,
+ .flags = SA_INTERRUPT,
+ .name = "DMA MTE",
+};
+
+static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
+{
+ u64 tmp;
+ u8 chan;
+
+ printk(KERN_NOTICE "DMAC: Got a DMA Error!\n");
+
+ tmp = sh64_in64(DMAC_COMMON_BASE);
+
+ /* Check for the type of error */
+ if ((chan = tmp & DMAC_COMMON_AAE)) {
+ /* It's an address alignment error.. */
+ printk(KERN_NOTICE "DMAC: Alignment error on channel %d, ", chan);
+
+ printk(KERN_NOTICE "SAR: 0x%08llx, DAR: 0x%08llx, COUNT: %lld\n",
+ (sh64_in64(DMAC_SAR(chan)) & DMAC_SAR_ADDR),
+ (sh64_in64(DMAC_DAR(chan)) & DMAC_DAR_ADDR),
+ (sh64_in64(DMAC_COUNT(chan)) & DMAC_COUNT_CNT));
+
+ } else if ((chan = tmp & DMAC_COMMON_ER)) {
+ /* Something else went wrong.. */
+ printk(KERN_NOTICE "DMAC: Error on channel %d\n", chan);
+ }
+
+ /* Reset the ME bit to clear the interrupt */
+ tmp |= DMAC_COMMON_ME;
+ sh64_out64(tmp, DMAC_COMMON_BASE);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq_derr = {
+ .handler = dma_err,
+ .flags = SA_INTERRUPT,
+ .name = "DMA Error",
+};
+
+static inline unsigned long calc_xmit_shift(unsigned int chan)
+{
+ return sh64_in64(DMAC_CTRL(chan)) & 0x03;
+}
+
+void setup_dma(unsigned int chan, dma_info_t *info)
+{
+ unsigned int irq = DMA_IRQ_DMTE0 + chan;
+ dma_info_t *dma = dma_info + chan;
+
+ make_intc_irq(irq);
+ setup_irq(irq, &irq_dmte);
+ *dma = *info; /* record the channel parameters in dma_info[chan]; assigning the pointer itself would have no effect */
+}
+
+void enable_dma(unsigned int chan)
+{
+ u64 ctrl;
+
+ ctrl = sh64_in64(DMAC_CTRL(chan));
+ ctrl |= DMAC_CTRL_TE;
+ sh64_out64(ctrl, DMAC_CTRL(chan));
+}
+
+void disable_dma(unsigned int chan)
+{
+ u64 ctrl;
+
+ ctrl = sh64_in64(DMAC_CTRL(chan));
+ ctrl &= ~DMAC_CTRL_TE;
+ sh64_out64(ctrl, DMAC_CTRL(chan));
+}
+
+void set_dma_mode(unsigned int chan, char mode)
+{
+ dma_info_t *info = dma_info + chan;
+
+ info->mode = mode;
+
+ set_dma_addr(chan, info->mem_addr);
+ set_dma_count(chan, info->count);
+}
+
+void set_dma_addr(unsigned int chan, unsigned int addr)
+{
+ dma_info_t *info = dma_info + chan;
+ unsigned long sar, dar;
+
+ info->mem_addr = addr;
+ sar = (info->mode & DMA_MODE_WRITE) ? info->mem_addr : info->dev_addr;
+ dar = (info->mode & DMA_MODE_WRITE) ? info->dev_addr : info->mem_addr;
+
+ sh64_out64(sar & DMAC_SAR_ADDR, DMAC_SAR(chan));
+ sh64_out64(dar & DMAC_SAR_ADDR, DMAC_DAR(chan));
+}
+
+void set_dma_count(unsigned int chan, unsigned int count)
+{
+ dma_info_t *info = dma_info + chan;
+ u64 tmp;
+
+ info->count = count;
+
+ tmp = (info->count >> calc_xmit_shift(chan)) & DMAC_COUNT_CNT;
+
+ sh64_out64(tmp, DMAC_COUNT(chan));
+}
+
+unsigned long claim_dma_lock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dma_spin_lock, flags);
+
+ return flags;
+}
+
+void release_dma_lock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+int get_dma_residue(unsigned int chan)
+{
+ return sh64_in64(DMAC_COUNT(chan)) << calc_xmit_shift(chan); /* scale the remaining count back up by the transfer size */
+}
+
+int __init init_dma(void)
+{
+ struct vcr_info vcr;
+ u64 tmp;
+
+ /* Remap the DMAC */
+ dmac_base = onchip_remap(PHYS_DMAC_BLOCK, 1024, "DMAC");
+ if (!dmac_base) {
+ printk(KERN_ERR "Unable to remap DMAC\n");
+ return -ENOMEM;
+ }
+
+ /* Report DMAC.VCR Info */
+ vcr = sh64_get_vcr_info(dmac_base);
+ printk("DMAC: Module ID: 0x%04x, Module version: 0x%04x\n",
+ vcr.mod_id, vcr.mod_vers);
+
+ /* Set the ME bit */
+ tmp = sh64_in64(DMAC_COMMON_BASE);
+ tmp |= DMAC_COMMON_ME;
+ sh64_out64(tmp, DMAC_COMMON_BASE);
+
+ /* Enable the DMAC Error Interrupt */
+ make_intc_irq(DMA_IRQ_DERR);
+ setup_irq(DMA_IRQ_DERR, &irq_derr);
+
+ return 0;
+}
+
+static void __exit exit_dma(void)
+{
+ onchip_unmap(dmac_base);
+ free_irq(DMA_IRQ_DERR, 0);
+}
+
+module_init(init_dma);
+module_exit(exit_dma);
+
+MODULE_AUTHOR("Paul Mundt");
+MODULE_DESCRIPTION("DMA API for SH-5 DMAC");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(setup_dma);
+EXPORT_SYMBOL(claim_dma_lock);
+EXPORT_SYMBOL(release_dma_lock);
+EXPORT_SYMBOL(enable_dma);
+EXPORT_SYMBOL(disable_dma);
+EXPORT_SYMBOL(set_dma_mode);
+EXPORT_SYMBOL(set_dma_addr);
+EXPORT_SYMBOL(set_dma_count);
+EXPORT_SYMBOL(get_dma_residue);
+
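Taken together, the exported functions above form a small channel-based DMA API in the style of the classic ISA DMA interface: a driver registers its channel once, then claims the global lock, programs mode/address/count and enables the channel for each transfer. A hypothetical driver fragment (channel number and device address are made up, and dma_info_t here is the structure defined in this file) could look like:

	/* Sketch only, under the assumptions stated above. */
	static dma_info_t my_dma = {
		.dev_addr = 0x08000000,		/* hypothetical device FIFO address */
		.mode     = DMA_MODE_WRITE,	/* memory -> device */
	};

	static int __init my_driver_init(void)
	{
		setup_dma(0, &my_dma);		/* claim channel 0, hook up the DMTE irq */
		return 0;
	}

	static void start_transfer(unsigned long buf_phys, unsigned int len)
	{
		unsigned long flags = claim_dma_lock();

		set_dma_mode(0, DMA_MODE_WRITE);
		set_dma_addr(0, buf_phys);	/* memory side of the transfer */
		set_dma_count(0, len);
		enable_dma(0);

		release_dma_lock(flags);
	}

The DMTE interrupt handler (dma_mte() above) then reloads the address and count and clears the TE status bit when the transfer completes.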
diff --git a/arch/sh64/kernel/early_printk.c b/arch/sh64/kernel/early_printk.c
new file mode 100644
index 000000000000..8c8a76e180aa
--- /dev/null
+++ b/arch/sh64/kernel/early_printk.c
@@ -0,0 +1,105 @@
+/*
+ * arch/sh64/kernel/early_printk.c
+ *
+ * SH-5 Early SCIF console (cloned and hacked from sh implementation)
+ *
+ * Copyright (C) 2003, 2004 Paul Mundt <lethal@linux-sh.org>
+ * Copyright (C) 2002 M. R. Brown <mrbrown@0xd6.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+
+#define SCIF_BASE_ADDR 0x01030000
+#define SCIF_ADDR_SH5 (PHYS_PERIPHERAL_BLOCK + SCIF_BASE_ADDR)
+
+/*
+ * Fixed virtual address where SCIF is mapped (should already be done
+ * in arch/sh64/kernel/head.S!).
+ */
+#define SCIF_REG 0xfa030000
+
+enum {
+ SCIF_SCSMR2 = SCIF_REG + 0x00,
+ SCIF_SCBRR2 = SCIF_REG + 0x04,
+ SCIF_SCSCR2 = SCIF_REG + 0x08,
+ SCIF_SCFTDR2 = SCIF_REG + 0x0c,
+ SCIF_SCFSR2 = SCIF_REG + 0x10,
+ SCIF_SCFRDR2 = SCIF_REG + 0x14,
+ SCIF_SCFCR2 = SCIF_REG + 0x18,
+ SCIF_SCFDR2 = SCIF_REG + 0x1c,
+ SCIF_SCSPTR2 = SCIF_REG + 0x20,
+ SCIF_SCLSR2 = SCIF_REG + 0x24,
+};
+
+static void sh_console_putc(int c)
+{
+ while (!(ctrl_inw(SCIF_SCFSR2) & 0x20))
+ cpu_relax();
+
+ ctrl_outb(c, SCIF_SCFTDR2);
+ ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0x9f), SCIF_SCFSR2);
+
+ if (c == '\n')
+ sh_console_putc('\r');
+}
+
+static void sh_console_flush(void)
+{
+ ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
+
+ while (!(ctrl_inw(SCIF_SCFSR2) & 0x40))
+ cpu_relax();
+
+ ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
+}
+
+static void sh_console_write(struct console *con, const char *s, unsigned count)
+{
+ while (count-- > 0)
+ sh_console_putc(*s++);
+
+ sh_console_flush();
+}
+
+static int __init sh_console_setup(struct console *con, char *options)
+{
+ con->cflag = CREAD | HUPCL | CLOCAL | B19200 | CS8;
+
+ return 0;
+}
+
+static struct console sh_console = {
+ .name = "scifcon",
+ .write = sh_console_write,
+ .setup = sh_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+void __init enable_early_printk(void)
+{
+ ctrl_outb(0x2a, SCIF_SCBRR2); /* 19200bps */
+
+ ctrl_outw(0x04, SCIF_SCFCR2); /* Reset TFRST */
+ ctrl_outw(0x10, SCIF_SCFCR2); /* TTRG0=1 */
+
+ ctrl_outw(0, SCIF_SCSPTR2);
+ ctrl_outw(0x60, SCIF_SCFSR2);
+ ctrl_outw(0, SCIF_SCLSR2);
+ ctrl_outw(0x30, SCIF_SCSCR2);
+
+ register_console(&sh_console);
+}
+
+void disable_early_printk(void)
+{
+ unregister_console(&sh_console);
+}
+
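Once enable_early_printk() has programmed the SCIF for 19200 bps polled operation and registered sh_console, ordinary printk() output is drained through sh_console_write() long before the real serial driver comes up. A hypothetical early-boot usage (the actual call sites live in the sh64 setup code, not in this file) would be:

	/* Sketch only: bring up the polled SCIF console, use it, tear it down. */
	enable_early_printk();
	printk(KERN_INFO "early console up on SCIF at 0x%08x\n", SCIF_REG);
	/* ... later, once the real console driver has registered ... */
	disable_early_printk();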
diff --git a/arch/sh64/kernel/entry.S b/arch/sh64/kernel/entry.S
new file mode 100644
index 000000000000..2e2cfe20b426
--- /dev/null
+++ b/arch/sh64/kernel/entry.S
@@ -0,0 +1,2103 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * arch/sh64/kernel/entry.S
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2004, 2005 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sys.h>
+
+#include <asm/processor.h>
+#include <asm/registers.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * SR fields.
+ */
+#define SR_ASID_MASK 0x00ff0000
+#define SR_FD_MASK 0x00008000
+#define SR_SS 0x08000000
+#define SR_BL 0x10000000
+#define SR_MD 0x40000000
+
+/*
+ * Event code.
+ */
+#define EVENT_INTERRUPT 0
+#define EVENT_FAULT_TLB 1
+#define EVENT_FAULT_NOT_TLB 2
+#define EVENT_DEBUG 3
+
+/* EXPEVT values */
+#define RESET_CAUSE 0x20
+#define DEBUGSS_CAUSE 0x980
+
+/*
+ * Frame layout. Quad index.
+ */
+#define FRAME_T(x) FRAME_TBASE+(x*8)
+#define FRAME_R(x) FRAME_RBASE+(x*8)
+#define FRAME_S(x) FRAME_SBASE+(x*8)
+#define FSPC 0
+#define FSSR 1
+#define FSYSCALL_ID 2
+
+/* Arrange the save frame to be a multiple of 32 bytes long */
+#define FRAME_SBASE 0
+#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
+#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
+#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
+#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
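+/*
+ * Worked check of the layout above (all slots are 8-byte quadwords):
+ *   FRAME_SBASE = 0
+ *   FRAME_RBASE =   0 +  3*8 =  24
+ *   FRAME_TBASE =  24 + 63*8 = 528
+ *   FRAME_PBASE = 528 +  8*8 = 592
+ *   FRAME_SIZE  = 592 +  2*8 = 608 = 19*32
+ * so the save frame is indeed a multiple of 32 bytes, as required.
+ */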
+
+#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
+#define FP_FRAME_BASE 0
+
+#define SAVED_R2 0*8
+#define SAVED_R3 1*8
+#define SAVED_R4 2*8
+#define SAVED_R5 3*8
+#define SAVED_R18 4*8
+#define SAVED_R6 5*8
+#define SAVED_TR0 6*8
+
+/* These are the registers saved in the TLB path that aren't saved in the first
+ level of the normal one. */
+#define TLB_SAVED_R25 7*8
+#define TLB_SAVED_TR1 8*8
+#define TLB_SAVED_TR2 9*8
+#define TLB_SAVED_TR3 10*8
+#define TLB_SAVED_TR4 11*8
+/* Save R0/R1 : the PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1,
+   causing breakage otherwise. */
+#define TLB_SAVED_R0 12*8
+#define TLB_SAVED_R1 13*8
+
+#define CLI() \
+ getcon SR, r6; \
+ ori r6, 0xf0, r6; \
+ putcon r6, SR;
+
+#define STI() \
+ getcon SR, r6; \
+ andi r6, ~0xf0, r6; \
+ putcon r6, SR;
+
+#ifdef CONFIG_PREEMPT
+# define preempt_stop() CLI()
+#else
+# define preempt_stop()
+# define resume_kernel restore_all
+#endif
+
+ .section .data, "aw"
+
+#define FAST_TLBMISS_STACK_CACHELINES 4
+#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
+
+/* Register back-up area for all exceptions */
+ .balign 32
+ /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
+ * register saves etc. */
+ .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
+/* This is 32 byte aligned by construction */
+/* Register back-up area for all exceptions */
+reg_save_area:
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .quad 0
+ .quad 0
+
+/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
+ * reentrancy. Note this area may be accessed via physical address.
+ * Align so this fits a whole single cache line, for ease of purging.
+ */
+ .balign 32,0,32
+resvec_save_area:
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .balign 32,0,32
+
+/* Jump table of 3rd level handlers */
+trap_jtable:
+ .long do_exception_error /* 0x000 */
+ .long do_exception_error /* 0x020 */
+ .long tlb_miss_load /* 0x040 */
+ .long tlb_miss_store /* 0x060 */
+ ! ARTIFICIAL pseudo-EXPEVT setting
+ .long do_debug_interrupt /* 0x080 */
+ .long tlb_miss_load /* 0x0A0 */
+ .long tlb_miss_store /* 0x0C0 */
+ .long do_address_error_load /* 0x0E0 */
+ .long do_address_error_store /* 0x100 */
+#ifdef CONFIG_SH_FPU
+ .long do_fpu_error /* 0x120 */
+#else
+ .long do_exception_error /* 0x120 */
+#endif
+ .long do_exception_error /* 0x140 */
+ .long system_call /* 0x160 */
+ .long do_reserved_inst /* 0x180 */
+ .long do_illegal_slot_inst /* 0x1A0 */
+ .long do_NMI /* 0x1C0 */
+ .long do_exception_error /* 0x1E0 */
+ .rept 15
+ .long do_IRQ /* 0x200 - 0x3C0 */
+ .endr
+ .long do_exception_error /* 0x3E0 */
+ .rept 32
+ .long do_IRQ /* 0x400 - 0x7E0 */
+ .endr
+ .long fpu_error_or_IRQA /* 0x800 */
+ .long fpu_error_or_IRQB /* 0x820 */
+ .long do_IRQ /* 0x840 */
+ .long do_IRQ /* 0x860 */
+ .rept 6
+ .long do_exception_error /* 0x880 - 0x920 */
+ .endr
+ .long do_software_break_point /* 0x940 */
+ .long do_exception_error /* 0x960 */
+ .long do_single_step /* 0x980 */
+
+ .rept 3
+ .long do_exception_error /* 0x9A0 - 0x9E0 */
+ .endr
+ .long do_IRQ /* 0xA00 */
+ .long do_IRQ /* 0xA20 */
+ .long itlb_miss_or_IRQ /* 0xA40 */
+ .long do_IRQ /* 0xA60 */
+ .long do_IRQ /* 0xA80 */
+ .long itlb_miss_or_IRQ /* 0xAA0 */
+ .long do_exception_error /* 0xAC0 */
+ .long do_address_error_exec /* 0xAE0 */
+ .rept 8
+ .long do_exception_error /* 0xB00 - 0xBE0 */
+ .endr
+ .rept 18
+ .long do_IRQ /* 0xC00 - 0xE20 */
+ .endr
+
+ .section .text64, "ax"
+
+/*
+ * --- Exception/Interrupt/Event Handling Section
+ */
+
+/*
+ * VBR and RESVEC blocks.
+ *
+ * First level handler for VBR-based exceptions.
+ *
+ * To avoid waste of space, align to the maximum text block size.
+ * This is assumed to be at most 128 bytes or 32 instructions.
+ * DO NOT EXCEED 32 instructions on the first level handlers !
+ *
+ * Also note that RESVEC is contained within the VBR block
+ * where the room left (1KB - TEXT_SIZE) allows placing
+ * the RESVEC block (at most 512B + TEXT_SIZE).
+ *
+ * So this is also the first (and only) level handler for RESVEC-based exceptions.
+ *
+ * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
+ * and interrupt) we are very tight on register space until everything
+ * is saved onto the stack frame, which is done in handle_exception().
+ *
+ */
+
+#define TEXT_SIZE 128
+#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
+
+ .balign TEXT_SIZE
+LVBR_block:
+ .space 256, 0 /* Power-on class handler, */
+ /* not required here */
+not_a_tlb_miss:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /* Save original stack pointer into KCR1 */
+ putcon SP, KCR1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for Non-debug, Not a TLB miss class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_NOT_TLB, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+ .balign 256
+ ! VBR+0x200
+ nop
+ .balign 256
+ ! VBR+0x300
+ nop
+ .balign 256
+ /*
+ * Instead of the natural .balign 1024 place RESVEC here
+ * respecting the final 1KB alignment.
+ */
+ .balign TEXT_SIZE
+ /*
+ * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
+ * block making sure the final alignment is correct.
+ */
+tlb_miss:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ putcon SP, KCR1
+ movi reg_save_area, SP
+ /* SP is guaranteed 32-byte aligned. */
+ st.q SP, TLB_SAVED_R0 , r0
+ st.q SP, TLB_SAVED_R1 , r1
+ st.q SP, SAVED_R2 , r2
+ st.q SP, SAVED_R3 , r3
+ st.q SP, SAVED_R4 , r4
+ st.q SP, SAVED_R5 , r5
+ st.q SP, SAVED_R6 , r6
+ st.q SP, SAVED_R18, r18
+
+ /* Save R25 for safety; as/ld may want to use it to achieve the call to
+ * the code in mm/tlbmiss.c */
+ st.q SP, TLB_SAVED_R25, r25
+ gettr tr0, r2
+ gettr tr1, r3
+ gettr tr2, r4
+ gettr tr3, r5
+ gettr tr4, r18
+ st.q SP, SAVED_TR0 , r2
+ st.q SP, TLB_SAVED_TR1 , r3
+ st.q SP, TLB_SAVED_TR2 , r4
+ st.q SP, TLB_SAVED_TR3 , r5
+ st.q SP, TLB_SAVED_TR4 , r18
+
+ pt do_fast_page_fault, tr0
+ getcon SSR, r2
+ getcon EXPEVT, r3
+ getcon TEA, r4
+ shlri r2, 30, r2
+ andi r2, 1, r2 /* r2 = SSR.MD */
+ blink tr0, LINK
+
+ pt fixup_to_invoke_general_handler, tr1
+
+ /* If the fast path handler fixed the fault, just drop through quickly
+ to the restore code right away to return to the excepting context.
+ */
+ beqi/u r2, 0, tr1
+
+fast_tlb_miss_restore:
+ ld.q SP, SAVED_TR0, r2
+ ld.q SP, TLB_SAVED_TR1, r3
+ ld.q SP, TLB_SAVED_TR2, r4
+
+ ld.q SP, TLB_SAVED_TR3, r5
+ ld.q SP, TLB_SAVED_TR4, r18
+
+ ptabs r2, tr0
+ ptabs r3, tr1
+ ptabs r4, tr2
+ ptabs r5, tr3
+ ptabs r18, tr4
+
+ ld.q SP, TLB_SAVED_R0, r0
+ ld.q SP, TLB_SAVED_R1, r1
+ ld.q SP, SAVED_R2, r2
+ ld.q SP, SAVED_R3, r3
+ ld.q SP, SAVED_R4, r4
+ ld.q SP, SAVED_R5, r5
+ ld.q SP, SAVED_R6, r6
+ ld.q SP, SAVED_R18, r18
+ ld.q SP, TLB_SAVED_R25, r25
+
+ getcon KCR1, SP
+ rte
+ nop /* for safety, in case the code is run on sh5-101 cut1.x */
+
+fixup_to_invoke_general_handler:
+
+ /* OK, new method. Restore stuff that's not expected to get saved into
+ the 'first-level' reg save area, then just fall through to setting
+ up the registers and calling the second-level handler. */
+
+ /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
+ r25,tr1-4 and save r6 to get into the right state. */
+
+ ld.q SP, TLB_SAVED_TR1, r3
+ ld.q SP, TLB_SAVED_TR2, r4
+ ld.q SP, TLB_SAVED_TR3, r5
+ ld.q SP, TLB_SAVED_TR4, r18
+ ld.q SP, TLB_SAVED_R25, r25
+
+ ld.q SP, TLB_SAVED_R0, r0
+ ld.q SP, TLB_SAVED_R1, r1
+
+ ptabs/u r3, tr1
+ ptabs/u r4, tr2
+ ptabs/u r5, tr3
+ ptabs/u r18, tr4
+
+ /* Set args for Non-debug, TLB miss class handler */
+ getcon EXPEVT, r2
+ movi ret_from_exception, r3
+ ori r3, 1, r3
+ movi EVENT_FAULT_TLB, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+
+/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
+ DOES END UP AT VBR+0x600 */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ .balign 256
+ /* VBR + 0x600 */
+
+interrupt:
+ synco /* TAKum03020 (but probably a good idea anyway.) */
+ /* Save original stack pointer into KCR1 */
+ putcon SP, KCR1
+
+ /* Save other original registers into reg_save_area */
+ movi reg_save_area, SP
+ st.q SP, SAVED_R2, r2
+ st.q SP, SAVED_R3, r3
+ st.q SP, SAVED_R4, r4
+ st.q SP, SAVED_R5, r5
+ st.q SP, SAVED_R6, r6
+ st.q SP, SAVED_R18, r18
+ gettr tr0, r3
+ st.q SP, SAVED_TR0, r3
+
+ /* Set args for interrupt class handler */
+ getcon INTEVT, r2
+ movi ret_from_irq, r3
+ ori r3, 1, r3
+ movi EVENT_INTERRUPT, r4
+ or SP, ZERO, r5
+ getcon KCR1, SP
+ pta handle_exception, tr0
+ blink tr0, ZERO
+ .balign TEXT_SIZE /* let's waste the bare