From 4baa9922430662431231ac637adedddbb0cfb2d7 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 2 Aug 2008 10:55:55 +0100 Subject: [ARM] move include/asm-arm to arch/arm/include/asm Move platform independent header files to arch/arm/include/asm, leaving those in asm/arch* and asm/plat* alone. Signed-off-by: Russell King --- arch/arm/include/asm/Kbuild | 3 + arch/arm/include/asm/a.out-core.h | 49 ++ arch/arm/include/asm/a.out.h | 34 + arch/arm/include/asm/assembler.h | 116 +++ arch/arm/include/asm/atomic.h | 212 ++++++ arch/arm/include/asm/auxvec.h | 4 + arch/arm/include/asm/bitops.h | 340 +++++++++ arch/arm/include/asm/bug.h | 24 + arch/arm/include/asm/bugs.h | 21 + arch/arm/include/asm/byteorder.h | 58 ++ arch/arm/include/asm/cache.h | 10 + arch/arm/include/asm/cacheflush.h | 537 ++++++++++++++ arch/arm/include/asm/checksum.h | 139 ++++ arch/arm/include/asm/cnt32_to_63.h | 78 ++ arch/arm/include/asm/cpu-multi32.h | 69 ++ arch/arm/include/asm/cpu-single.h | 44 ++ arch/arm/include/asm/cpu.h | 25 + arch/arm/include/asm/cputime.h | 6 + arch/arm/include/asm/current.h | 15 + arch/arm/include/asm/delay.h | 44 ++ arch/arm/include/asm/device.h | 15 + arch/arm/include/asm/div64.h | 227 ++++++ arch/arm/include/asm/dma-mapping.h | 456 ++++++++++++ arch/arm/include/asm/dma.h | 143 ++++ arch/arm/include/asm/domain.h | 78 ++ arch/arm/include/asm/ecard.h | 219 ++++++ arch/arm/include/asm/elf.h | 116 +++ arch/arm/include/asm/emergency-restart.h | 6 + arch/arm/include/asm/errno.h | 6 + arch/arm/include/asm/fb.h | 19 + arch/arm/include/asm/fcntl.h | 11 + arch/arm/include/asm/fiq.h | 37 + arch/arm/include/asm/flat.h | 19 + arch/arm/include/asm/floppy.h | 148 ++++ arch/arm/include/asm/fpstate.h | 93 +++ arch/arm/include/asm/ftrace.h | 14 + arch/arm/include/asm/futex.h | 6 + arch/arm/include/asm/glue.h | 149 ++++ arch/arm/include/asm/gpio.h | 7 + arch/arm/include/asm/hardirq.h | 32 + arch/arm/include/asm/hardware.h | 18 + arch/arm/include/asm/hardware/arm_timer.h | 21 + arch/arm/include/asm/hardware/arm_twd.h | 21 + arch/arm/include/asm/hardware/cache-l2x0.h | 56 ++ arch/arm/include/asm/hardware/clps7111.h | 184 +++++ arch/arm/include/asm/hardware/cs89712.h | 49 ++ arch/arm/include/asm/hardware/debug-8250.S | 29 + arch/arm/include/asm/hardware/debug-pl01x.S | 29 + arch/arm/include/asm/hardware/dec21285.h | 147 ++++ arch/arm/include/asm/hardware/entry-macro-iomd.S | 139 ++++ arch/arm/include/asm/hardware/ep7211.h | 40 + arch/arm/include/asm/hardware/ep7212.h | 83 +++ arch/arm/include/asm/hardware/gic.h | 42 ++ arch/arm/include/asm/hardware/icst307.h | 38 + arch/arm/include/asm/hardware/icst525.h | 36 + arch/arm/include/asm/hardware/ioc.h | 72 ++ arch/arm/include/asm/hardware/iomd.h | 226 ++++++ arch/arm/include/asm/hardware/iop3xx-adma.h | 888 +++++++++++++++++++++++ arch/arm/include/asm/hardware/iop3xx-gpio.h | 73 ++ arch/arm/include/asm/hardware/iop3xx.h | 312 ++++++++ arch/arm/include/asm/hardware/iop_adma.h | 116 +++ arch/arm/include/asm/hardware/it8152.h | 99 +++ arch/arm/include/asm/hardware/linkup-l1110.h | 48 ++ arch/arm/include/asm/hardware/locomo.h | 217 ++++++ arch/arm/include/asm/hardware/memc.h | 26 + arch/arm/include/asm/hardware/pci_v3.h | 186 +++++ arch/arm/include/asm/hardware/sa1111.h | 581 +++++++++++++++ arch/arm/include/asm/hardware/scoop.h | 69 ++ arch/arm/include/asm/hardware/sharpsl_pm.h | 106 +++ arch/arm/include/asm/hardware/ssp.h | 28 + arch/arm/include/asm/hardware/uengine.h | 62 ++ arch/arm/include/asm/hardware/vic.h | 45 ++ arch/arm/include/asm/hw_irq.h | 9 + 
arch/arm/include/asm/hwcap.h | 29 + arch/arm/include/asm/ide.h | 23 + arch/arm/include/asm/io.h | 287 ++++++++ arch/arm/include/asm/ioctl.h | 1 + arch/arm/include/asm/ioctls.h | 84 +++ arch/arm/include/asm/ipcbuf.h | 29 + arch/arm/include/asm/irq.h | 28 + arch/arm/include/asm/irq_regs.h | 1 + arch/arm/include/asm/irqflags.h | 132 ++++ arch/arm/include/asm/kdebug.h | 1 + arch/arm/include/asm/kexec.h | 31 + arch/arm/include/asm/kgdb.h | 104 +++ arch/arm/include/asm/kmap_types.h | 24 + arch/arm/include/asm/kprobes.h | 79 ++ arch/arm/include/asm/leds.h | 50 ++ arch/arm/include/asm/limits.h | 11 + arch/arm/include/asm/linkage.h | 11 + arch/arm/include/asm/local.h | 1 + arch/arm/include/asm/locks.h | 274 +++++++ arch/arm/include/asm/mach/arch.h | 60 ++ arch/arm/include/asm/mach/dma.h | 57 ++ arch/arm/include/asm/mach/flash.h | 39 + arch/arm/include/asm/mach/irda.h | 20 + arch/arm/include/asm/mach/irq.h | 54 ++ arch/arm/include/asm/mach/map.h | 36 + arch/arm/include/asm/mach/mmc.h | 15 + arch/arm/include/asm/mach/pci.h | 72 ++ arch/arm/include/asm/mach/serial_at91.h | 33 + arch/arm/include/asm/mach/serial_sa1100.h | 31 + arch/arm/include/asm/mach/sharpsl_param.h | 37 + arch/arm/include/asm/mach/time.h | 57 ++ arch/arm/include/asm/mach/udc_pxa2xx.h | 29 + arch/arm/include/asm/mc146818rtc.h | 28 + arch/arm/include/asm/memory.h | 334 +++++++++ arch/arm/include/asm/mman.h | 17 + arch/arm/include/asm/mmu.h | 33 + arch/arm/include/asm/mmu_context.h | 117 +++ arch/arm/include/asm/mmzone.h | 30 + arch/arm/include/asm/module.h | 18 + arch/arm/include/asm/msgbuf.h | 31 + arch/arm/include/asm/mtd-xip.h | 26 + arch/arm/include/asm/mutex.h | 127 ++++ arch/arm/include/asm/nwflash.h | 9 + arch/arm/include/asm/page-nommu.h | 49 ++ arch/arm/include/asm/page.h | 199 +++++ arch/arm/include/asm/param.h | 31 + arch/arm/include/asm/parport.h | 18 + arch/arm/include/asm/pci.h | 91 +++ arch/arm/include/asm/percpu.h | 6 + arch/arm/include/asm/pgalloc.h | 136 ++++ arch/arm/include/asm/pgtable-hwdef.h | 90 +++ arch/arm/include/asm/pgtable-nommu.h | 118 +++ arch/arm/include/asm/pgtable.h | 401 ++++++++++ arch/arm/include/asm/poll.h | 1 + arch/arm/include/asm/posix_types.h | 77 ++ arch/arm/include/asm/proc-fns.h | 241 ++++++ arch/arm/include/asm/processor.h | 131 ++++ arch/arm/include/asm/procinfo.h | 49 ++ arch/arm/include/asm/ptrace.h | 162 +++++ arch/arm/include/asm/resource.h | 6 + arch/arm/include/asm/scatterlist.h | 27 + arch/arm/include/asm/sections.h | 1 + arch/arm/include/asm/segment.h | 11 + arch/arm/include/asm/sembuf.h | 25 + arch/arm/include/asm/serial.h | 19 + arch/arm/include/asm/setup.h | 226 ++++++ arch/arm/include/asm/shmbuf.h | 42 ++ arch/arm/include/asm/shmparam.h | 16 + arch/arm/include/asm/sigcontext.h | 34 + arch/arm/include/asm/siginfo.h | 6 + arch/arm/include/asm/signal.h | 164 +++++ arch/arm/include/asm/sizes.h | 56 ++ arch/arm/include/asm/smp.h | 147 ++++ arch/arm/include/asm/socket.h | 57 ++ arch/arm/include/asm/sockios.h | 13 + arch/arm/include/asm/sparsemem.h | 10 + arch/arm/include/asm/spinlock.h | 224 ++++++ arch/arm/include/asm/spinlock_types.h | 20 + arch/arm/include/asm/stat.h | 87 +++ arch/arm/include/asm/statfs.h | 42 ++ arch/arm/include/asm/string.h | 50 ++ arch/arm/include/asm/suspend.h | 4 + arch/arm/include/asm/system.h | 388 ++++++++++ arch/arm/include/asm/termbits.h | 197 +++++ arch/arm/include/asm/termios.h | 92 +++ arch/arm/include/asm/therm.h | 28 + arch/arm/include/asm/thread_info.h | 153 ++++ arch/arm/include/asm/thread_notify.h | 48 ++ arch/arm/include/asm/timex.h | 24 + 
arch/arm/include/asm/tlb.h | 94 +++ arch/arm/include/asm/tlbflush.h | 500 +++++++++++++ arch/arm/include/asm/topology.h | 6 + arch/arm/include/asm/traps.h | 29 + arch/arm/include/asm/types.h | 31 + arch/arm/include/asm/uaccess.h | 444 ++++++++++++ arch/arm/include/asm/ucontext.h | 103 +++ arch/arm/include/asm/unaligned.h | 19 + arch/arm/include/asm/unistd.h | 450 ++++++++++++ arch/arm/include/asm/user.h | 84 +++ arch/arm/include/asm/vfp.h | 84 +++ arch/arm/include/asm/vfpmacros.h | 47 ++ arch/arm/include/asm/vga.h | 12 + arch/arm/include/asm/xor.h | 141 ++++ arch/arm/kernel/head-common.S | 2 +- arch/arm/lib/getuser.S | 2 +- arch/arm/lib/putuser.S | 2 +- arch/arm/mm/ioremap.c | 2 +- arch/arm/mm/proc-arm720.S | 2 +- arch/arm/nwfpe/fpa11.h | 2 +- 182 files changed, 16301 insertions(+), 6 deletions(-) create mode 100644 arch/arm/include/asm/Kbuild create mode 100644 arch/arm/include/asm/a.out-core.h create mode 100644 arch/arm/include/asm/a.out.h create mode 100644 arch/arm/include/asm/assembler.h create mode 100644 arch/arm/include/asm/atomic.h create mode 100644 arch/arm/include/asm/auxvec.h create mode 100644 arch/arm/include/asm/bitops.h create mode 100644 arch/arm/include/asm/bug.h create mode 100644 arch/arm/include/asm/bugs.h create mode 100644 arch/arm/include/asm/byteorder.h create mode 100644 arch/arm/include/asm/cache.h create mode 100644 arch/arm/include/asm/cacheflush.h create mode 100644 arch/arm/include/asm/checksum.h create mode 100644 arch/arm/include/asm/cnt32_to_63.h create mode 100644 arch/arm/include/asm/cpu-multi32.h create mode 100644 arch/arm/include/asm/cpu-single.h create mode 100644 arch/arm/include/asm/cpu.h create mode 100644 arch/arm/include/asm/cputime.h create mode 100644 arch/arm/include/asm/current.h create mode 100644 arch/arm/include/asm/delay.h create mode 100644 arch/arm/include/asm/device.h create mode 100644 arch/arm/include/asm/div64.h create mode 100644 arch/arm/include/asm/dma-mapping.h create mode 100644 arch/arm/include/asm/dma.h create mode 100644 arch/arm/include/asm/domain.h create mode 100644 arch/arm/include/asm/ecard.h create mode 100644 arch/arm/include/asm/elf.h create mode 100644 arch/arm/include/asm/emergency-restart.h create mode 100644 arch/arm/include/asm/errno.h create mode 100644 arch/arm/include/asm/fb.h create mode 100644 arch/arm/include/asm/fcntl.h create mode 100644 arch/arm/include/asm/fiq.h create mode 100644 arch/arm/include/asm/flat.h create mode 100644 arch/arm/include/asm/floppy.h create mode 100644 arch/arm/include/asm/fpstate.h create mode 100644 arch/arm/include/asm/ftrace.h create mode 100644 arch/arm/include/asm/futex.h create mode 100644 arch/arm/include/asm/glue.h create mode 100644 arch/arm/include/asm/gpio.h create mode 100644 arch/arm/include/asm/hardirq.h create mode 100644 arch/arm/include/asm/hardware.h create mode 100644 arch/arm/include/asm/hardware/arm_timer.h create mode 100644 arch/arm/include/asm/hardware/arm_twd.h create mode 100644 arch/arm/include/asm/hardware/cache-l2x0.h create mode 100644 arch/arm/include/asm/hardware/clps7111.h create mode 100644 arch/arm/include/asm/hardware/cs89712.h create mode 100644 arch/arm/include/asm/hardware/debug-8250.S create mode 100644 arch/arm/include/asm/hardware/debug-pl01x.S create mode 100644 arch/arm/include/asm/hardware/dec21285.h create mode 100644 arch/arm/include/asm/hardware/entry-macro-iomd.S create mode 100644 arch/arm/include/asm/hardware/ep7211.h create mode 100644 arch/arm/include/asm/hardware/ep7212.h create mode 100644 
arch/arm/include/asm/hardware/gic.h create mode 100644 arch/arm/include/asm/hardware/icst307.h create mode 100644 arch/arm/include/asm/hardware/icst525.h create mode 100644 arch/arm/include/asm/hardware/ioc.h create mode 100644 arch/arm/include/asm/hardware/iomd.h create mode 100644 arch/arm/include/asm/hardware/iop3xx-adma.h create mode 100644 arch/arm/include/asm/hardware/iop3xx-gpio.h create mode 100644 arch/arm/include/asm/hardware/iop3xx.h create mode 100644 arch/arm/include/asm/hardware/iop_adma.h create mode 100644 arch/arm/include/asm/hardware/it8152.h create mode 100644 arch/arm/include/asm/hardware/linkup-l1110.h create mode 100644 arch/arm/include/asm/hardware/locomo.h create mode 100644 arch/arm/include/asm/hardware/memc.h create mode 100644 arch/arm/include/asm/hardware/pci_v3.h create mode 100644 arch/arm/include/asm/hardware/sa1111.h create mode 100644 arch/arm/include/asm/hardware/scoop.h create mode 100644 arch/arm/include/asm/hardware/sharpsl_pm.h create mode 100644 arch/arm/include/asm/hardware/ssp.h create mode 100644 arch/arm/include/asm/hardware/uengine.h create mode 100644 arch/arm/include/asm/hardware/vic.h create mode 100644 arch/arm/include/asm/hw_irq.h create mode 100644 arch/arm/include/asm/hwcap.h create mode 100644 arch/arm/include/asm/ide.h create mode 100644 arch/arm/include/asm/io.h create mode 100644 arch/arm/include/asm/ioctl.h create mode 100644 arch/arm/include/asm/ioctls.h create mode 100644 arch/arm/include/asm/ipcbuf.h create mode 100644 arch/arm/include/asm/irq.h create mode 100644 arch/arm/include/asm/irq_regs.h create mode 100644 arch/arm/include/asm/irqflags.h create mode 100644 arch/arm/include/asm/kdebug.h create mode 100644 arch/arm/include/asm/kexec.h create mode 100644 arch/arm/include/asm/kgdb.h create mode 100644 arch/arm/include/asm/kmap_types.h create mode 100644 arch/arm/include/asm/kprobes.h create mode 100644 arch/arm/include/asm/leds.h create mode 100644 arch/arm/include/asm/limits.h create mode 100644 arch/arm/include/asm/linkage.h create mode 100644 arch/arm/include/asm/local.h create mode 100644 arch/arm/include/asm/locks.h create mode 100644 arch/arm/include/asm/mach/arch.h create mode 100644 arch/arm/include/asm/mach/dma.h create mode 100644 arch/arm/include/asm/mach/flash.h create mode 100644 arch/arm/include/asm/mach/irda.h create mode 100644 arch/arm/include/asm/mach/irq.h create mode 100644 arch/arm/include/asm/mach/map.h create mode 100644 arch/arm/include/asm/mach/mmc.h create mode 100644 arch/arm/include/asm/mach/pci.h create mode 100644 arch/arm/include/asm/mach/serial_at91.h create mode 100644 arch/arm/include/asm/mach/serial_sa1100.h create mode 100644 arch/arm/include/asm/mach/sharpsl_param.h create mode 100644 arch/arm/include/asm/mach/time.h create mode 100644 arch/arm/include/asm/mach/udc_pxa2xx.h create mode 100644 arch/arm/include/asm/mc146818rtc.h create mode 100644 arch/arm/include/asm/memory.h create mode 100644 arch/arm/include/asm/mman.h create mode 100644 arch/arm/include/asm/mmu.h create mode 100644 arch/arm/include/asm/mmu_context.h create mode 100644 arch/arm/include/asm/mmzone.h create mode 100644 arch/arm/include/asm/module.h create mode 100644 arch/arm/include/asm/msgbuf.h create mode 100644 arch/arm/include/asm/mtd-xip.h create mode 100644 arch/arm/include/asm/mutex.h create mode 100644 arch/arm/include/asm/nwflash.h create mode 100644 arch/arm/include/asm/page-nommu.h create mode 100644 arch/arm/include/asm/page.h create mode 100644 arch/arm/include/asm/param.h create mode 100644 
arch/arm/include/asm/parport.h create mode 100644 arch/arm/include/asm/pci.h create mode 100644 arch/arm/include/asm/percpu.h create mode 100644 arch/arm/include/asm/pgalloc.h create mode 100644 arch/arm/include/asm/pgtable-hwdef.h create mode 100644 arch/arm/include/asm/pgtable-nommu.h create mode 100644 arch/arm/include/asm/pgtable.h create mode 100644 arch/arm/include/asm/poll.h create mode 100644 arch/arm/include/asm/posix_types.h create mode 100644 arch/arm/include/asm/proc-fns.h create mode 100644 arch/arm/include/asm/processor.h create mode 100644 arch/arm/include/asm/procinfo.h create mode 100644 arch/arm/include/asm/ptrace.h create mode 100644 arch/arm/include/asm/resource.h create mode 100644 arch/arm/include/asm/scatterlist.h create mode 100644 arch/arm/include/asm/sections.h create mode 100644 arch/arm/include/asm/segment.h create mode 100644 arch/arm/include/asm/sembuf.h create mode 100644 arch/arm/include/asm/serial.h create mode 100644 arch/arm/include/asm/setup.h create mode 100644 arch/arm/include/asm/shmbuf.h create mode 100644 arch/arm/include/asm/shmparam.h create mode 100644 arch/arm/include/asm/sigcontext.h create mode 100644 arch/arm/include/asm/siginfo.h create mode 100644 arch/arm/include/asm/signal.h create mode 100644 arch/arm/include/asm/sizes.h create mode 100644 arch/arm/include/asm/smp.h create mode 100644 arch/arm/include/asm/socket.h create mode 100644 arch/arm/include/asm/sockios.h create mode 100644 arch/arm/include/asm/sparsemem.h create mode 100644 arch/arm/include/asm/spinlock.h create mode 100644 arch/arm/include/asm/spinlock_types.h create mode 100644 arch/arm/include/asm/stat.h create mode 100644 arch/arm/include/asm/statfs.h create mode 100644 arch/arm/include/asm/string.h create mode 100644 arch/arm/include/asm/suspend.h create mode 100644 arch/arm/include/asm/system.h create mode 100644 arch/arm/include/asm/termbits.h create mode 100644 arch/arm/include/asm/termios.h create mode 100644 arch/arm/include/asm/therm.h create mode 100644 arch/arm/include/asm/thread_info.h create mode 100644 arch/arm/include/asm/thread_notify.h create mode 100644 arch/arm/include/asm/timex.h create mode 100644 arch/arm/include/asm/tlb.h create mode 100644 arch/arm/include/asm/tlbflush.h create mode 100644 arch/arm/include/asm/topology.h create mode 100644 arch/arm/include/asm/traps.h create mode 100644 arch/arm/include/asm/types.h create mode 100644 arch/arm/include/asm/uaccess.h create mode 100644 arch/arm/include/asm/ucontext.h create mode 100644 arch/arm/include/asm/unaligned.h create mode 100644 arch/arm/include/asm/unistd.h create mode 100644 arch/arm/include/asm/user.h create mode 100644 arch/arm/include/asm/vfp.h create mode 100644 arch/arm/include/asm/vfpmacros.h create mode 100644 arch/arm/include/asm/vga.h create mode 100644 arch/arm/include/asm/xor.h (limited to 'arch') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild new file mode 100644 index 000000000000..73237bd130a2 --- /dev/null +++ b/arch/arm/include/asm/Kbuild @@ -0,0 +1,3 @@ +include include/asm-generic/Kbuild.asm + +unifdef-y += hwcap.h diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h new file mode 100644 index 000000000000..93d04acaa31f --- /dev/null +++ b/arch/arm/include/asm/a.out-core.h @@ -0,0 +1,49 @@ +/* a.out coredump register dumper + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _ASM_A_OUT_CORE_H +#define _ASM_A_OUT_CORE_H + +#ifdef __KERNEL__ + +#include +#include + +/* + * fill in the user structure for an a.out core dump + */ +static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) +{ + struct task_struct *tsk = current; + + dump->magic = CMAGIC; + dump->start_code = tsk->mm->start_code; + dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1); + + dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT; + dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT; + dump->u_ssize = 0; + + dump->u_debugreg[0] = tsk->thread.debug.bp[0].address; + dump->u_debugreg[1] = tsk->thread.debug.bp[1].address; + dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm; + dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm; + dump->u_debugreg[4] = tsk->thread.debug.nsaved; + + if (dump->start_stack < 0x04000000) + dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT; + + dump->regs = *regs; + dump->u_fpvalid = dump_fpu (regs, &dump->u_fp); +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_A_OUT_CORE_H */ diff --git a/arch/arm/include/asm/a.out.h b/arch/arm/include/asm/a.out.h new file mode 100644 index 000000000000..79489fdcc8b8 --- /dev/null +++ b/arch/arm/include/asm/a.out.h @@ -0,0 +1,34 @@ +#ifndef __ARM_A_OUT_H__ +#define __ARM_A_OUT_H__ + +#include +#include + +struct exec +{ + __u32 a_info; /* Use macros N_MAGIC, etc for access */ + __u32 a_text; /* length of text, in bytes */ + __u32 a_data; /* length of data, in bytes */ + __u32 a_bss; /* length of uninitialized data area for file, in bytes */ + __u32 a_syms; /* length of symbol table data in file, in bytes */ + __u32 a_entry; /* start address */ + __u32 a_trsize; /* length of relocation info for text, in bytes */ + __u32 a_drsize; /* length of relocation info for data, in bytes */ +}; + +/* + * This is always the same + */ +#define N_TXTADDR(a) (0x00008000) + +#define N_TRSIZE(a) ((a).a_trsize) +#define N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#define M_ARM 103 + +#ifndef LIBRARY_START_TEXT +#define LIBRARY_START_TEXT (0x00c00000) +#endif + +#endif /* __A_OUT_GNU_H__ */ diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h new file mode 100644 index 000000000000..6116e4893c0a --- /dev/null +++ b/arch/arm/include/asm/assembler.h @@ -0,0 +1,116 @@ +/* + * arch/arm/include/asm/assembler.h + * + * Copyright (C) 1996-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file contains arm architecture specific defines + * for the different processors. + * + * Do not include any C declarations in this file - it is included by + * assembler source. + */ +#ifndef __ASSEMBLY__ +#error "Only include this from assembly code" +#endif + +#include + +/* + * Endian independent macros for shifting bytes within registers. 
+ */ +#ifndef __ARMEB__ +#define pull lsr +#define push lsl +#define get_byte_0 lsl #0 +#define get_byte_1 lsr #8 +#define get_byte_2 lsr #16 +#define get_byte_3 lsr #24 +#define put_byte_0 lsl #0 +#define put_byte_1 lsl #8 +#define put_byte_2 lsl #16 +#define put_byte_3 lsl #24 +#else +#define pull lsl +#define push lsr +#define get_byte_0 lsr #24 +#define get_byte_1 lsr #16 +#define get_byte_2 lsr #8 +#define get_byte_3 lsl #0 +#define put_byte_0 lsl #24 +#define put_byte_1 lsl #16 +#define put_byte_2 lsl #8 +#define put_byte_3 lsl #0 +#endif + +/* + * Data preload for architectures that support it + */ +#if __LINUX_ARM_ARCH__ >= 5 +#define PLD(code...) code +#else +#define PLD(code...) +#endif + +/* + * This can be used to enable code to cacheline align the destination + * pointer when bulk writing to memory. Experiments on StrongARM and + * XScale didn't show this a worthwhile thing to do when the cache is not + * set to write-allocate (this would need further testing on XScale when WA + * is used). + * + * On Feroceon there is much to gain however, regardless of cache mode. + */ +#ifdef CONFIG_CPU_FEROCEON +#define CALGN(code...) code +#else +#define CALGN(code...) +#endif + +/* + * Enable and disable interrupts + */ +#if __LINUX_ARM_ARCH__ >= 6 + .macro disable_irq + cpsid i + .endm + + .macro enable_irq + cpsie i + .endm +#else + .macro disable_irq + msr cpsr_c, #PSR_I_BIT | SVC_MODE + .endm + + .macro enable_irq + msr cpsr_c, #SVC_MODE + .endm +#endif + +/* + * Save the current IRQ state and disable IRQs. Note that this macro + * assumes FIQs are enabled, and that the processor is in SVC mode. + */ + .macro save_and_disable_irqs, oldcpsr + mrs \oldcpsr, cpsr + disable_irq + .endm + +/* + * Restore interrupt state previously stored in a register. We don't + * guarantee that this will preserve the flags. + */ + .macro restore_irqs, oldcpsr + msr cpsr_c, \oldcpsr + .endm + +#define USER(x...) \ +9999: x; \ + .section __ex_table,"a"; \ + .align 3; \ + .long 9999b,9001f; \ + .previous diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h new file mode 100644 index 000000000000..325f881ccb50 --- /dev/null +++ b/arch/arm/include/asm/atomic.h @@ -0,0 +1,212 @@ +/* + * arch/arm/include/asm/atomic.h + * + * Copyright (C) 1996 Russell King. + * Copyright (C) 2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_ARM_ATOMIC_H +#define __ASM_ARM_ATOMIC_H + +#include +#include + +typedef struct { volatile int counter; } atomic_t; + +#define ATOMIC_INIT(i) { (i) } + +#ifdef __KERNEL__ + +#define atomic_read(v) ((v)->counter) + +#if __LINUX_ARM_ARCH__ >= 6 + +/* + * ARMv6 UP and SMP safe atomic ops. We use load exclusive and + * store exclusive to ensure that these are atomic. We may loop + * to ensure that the update happens. Writing to 'v->counter' + * without using the following operations WILL break the atomic + * nature of these ops. 
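As a rough orientation for readers who have not met the load/store-exclusive pattern before, the ldrex/strex retry loops that follow behave like this portable C11 sketch (an editor-added analogue using <stdatomic.h>; my_atomic_add_return is a hypothetical name and is not part of this patch):

#include <stdatomic.h>

/* Illustrative analogue of the ARMv6 atomic_add_return() below: read the
 * counter, compute the new value, and retry until the store succeeds
 * without interference - the role played by ldrex/strex and the "bne 1b"
 * loop in the real inline assembly. */
static inline int my_atomic_add_return(int i, _Atomic int *counter)
{
	int oldval = atomic_load_explicit(counter, memory_order_relaxed);
	int newval;

	do {
		newval = oldval + i;	/* "add %0, %0, %3" */
	} while (!atomic_compare_exchange_weak_explicit(counter, &oldval, newval,
							memory_order_relaxed,
							memory_order_relaxed));
	return newval;
}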
+ */ +static inline void atomic_set(atomic_t *v, int i) +{ + unsigned long tmp; + + __asm__ __volatile__("@ atomic_set\n" +"1: ldrex %0, [%1]\n" +" strex %0, %2, [%1]\n" +" teq %0, #0\n" +" bne 1b" + : "=&r" (tmp) + : "r" (&v->counter), "r" (i) + : "cc"); +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + __asm__ __volatile__("@ atomic_add_return\n" +"1: ldrex %0, [%2]\n" +" add %0, %0, %3\n" +" strex %1, %0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter), "Ir" (i) + : "cc"); + + return result; +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + __asm__ __volatile__("@ atomic_sub_return\n" +"1: ldrex %0, [%2]\n" +" sub %0, %0, %3\n" +" strex %1, %0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp) + : "r" (&v->counter), "Ir" (i) + : "cc"); + + return result; +} + +static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) +{ + unsigned long oldval, res; + + do { + __asm__ __volatile__("@ atomic_cmpxchg\n" + "ldrex %1, [%2]\n" + "mov %0, #0\n" + "teq %1, %3\n" + "strexeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (&ptr->counter), "Ir" (old), "r" (new) + : "cc"); + } while (res); + + return oldval; +} + +static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) +{ + unsigned long tmp, tmp2; + + __asm__ __volatile__("@ atomic_clear_mask\n" +"1: ldrex %0, [%2]\n" +" bic %0, %0, %3\n" +" strex %1, %0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (tmp), "=&r" (tmp2) + : "r" (addr), "Ir" (mask) + : "cc"); +} + +#else /* ARM_ARCH_6 */ + +#include + +#ifdef CONFIG_SMP +#error SMP not supported on pre-ARMv6 CPUs +#endif + +#define atomic_set(v,i) (((v)->counter) = (i)) + +static inline int atomic_add_return(int i, atomic_t *v) +{ + unsigned long flags; + int val; + + raw_local_irq_save(flags); + val = v->counter; + v->counter = val += i; + raw_local_irq_restore(flags); + + return val; +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + unsigned long flags; + int val; + + raw_local_irq_save(flags); + val = v->counter; + v->counter = val -= i; + raw_local_irq_restore(flags); + + return val; +} + +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + unsigned long flags; + + raw_local_irq_save(flags); + ret = v->counter; + if (likely(ret == old)) + v->counter = new; + raw_local_irq_restore(flags); + + return ret; +} + +static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) +{ + unsigned long flags; + + raw_local_irq_save(flags); + *addr &= ~mask; + raw_local_irq_restore(flags); +} + +#endif /* __LINUX_ARM_ARCH__ */ + +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) + c = old; + return c != u; +} +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + +#define atomic_add(i, v) (void) atomic_add_return(i, v) +#define atomic_inc(v) (void) atomic_add_return(1, v) +#define atomic_sub(i, v) (void) atomic_sub_return(i, v) +#define atomic_dec(v) (void) atomic_sub_return(1, v) + +#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) +#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) +#define atomic_inc_return(v) (atomic_add_return(1, v)) +#define atomic_dec_return(v) (atomic_sub_return(1, v)) +#define atomic_sub_and_test(i, v) 
(atomic_sub_return(i, v) == 0) + +#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) + +/* Atomic operations are already serializing on ARM */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + +#include +#endif +#endif diff --git a/arch/arm/include/asm/auxvec.h b/arch/arm/include/asm/auxvec.h new file mode 100644 index 000000000000..c0536f6b29a7 --- /dev/null +++ b/arch/arm/include/asm/auxvec.h @@ -0,0 +1,4 @@ +#ifndef __ASMARM_AUXVEC_H +#define __ASMARM_AUXVEC_H + +#endif diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h new file mode 100644 index 000000000000..9a1db20e032a --- /dev/null +++ b/arch/arm/include/asm/bitops.h @@ -0,0 +1,340 @@ +/* + * Copyright 1995, Russell King. + * Various bits and pieces copyrights include: + * Linus Torvalds (test_bit). + * Big endian support: Copyright 2001, Nicolas Pitre + * reworked by rmk. + * + * bit 0 is the LSB of an "unsigned long" quantity. + * + * Please note that the code in this file should never be included + * from user space. Many of these are not implemented in assembler + * since they would be too costly. Also, they require privileged + * instructions (which are not available from user mode) to ensure + * that they are atomic. + */ + +#ifndef __ASM_ARM_BITOPS_H +#define __ASM_ARM_BITOPS_H + +#ifdef __KERNEL__ + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +#include +#include + +#define smp_mb__before_clear_bit() mb() +#define smp_mb__after_clear_bit() mb() + +/* + * These functions are the basis of our bit ops. + * + * First, the atomic bitops. These use native endian. + */ +static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long flags; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + raw_local_irq_save(flags); + *p |= mask; + raw_local_irq_restore(flags); +} + +static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long flags; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + raw_local_irq_save(flags); + *p &= ~mask; + raw_local_irq_restore(flags); +} + +static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long flags; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + raw_local_irq_save(flags); + *p ^= mask; + raw_local_irq_restore(flags); +} + +static inline int +____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long flags; + unsigned int res; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + raw_local_irq_save(flags); + res = *p; + *p = res | mask; + raw_local_irq_restore(flags); + + return res & mask; +} + +static inline int +____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long flags; + unsigned int res; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + raw_local_irq_save(flags); + res = *p; + *p = res & ~mask; + raw_local_irq_restore(flags); + + return res & mask; +} + +static inline int +____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long flags; + unsigned int res; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + raw_local_irq_save(flags); + res = *p; + *p = res ^ mask; + raw_local_irq_restore(flags); + + return res & mask; +} + +#include + +/* + * A note about Endian-ness. 
+ * ------------------------- + * + * When the ARM is put into big endian mode via CR15, the processor + * merely swaps the order of bytes within words, thus: + * + * ------------ physical data bus bits ----------- + * D31 ... D24 D23 ... D16 D15 ... D8 D7 ... D0 + * little byte 3 byte 2 byte 1 byte 0 + * big byte 0 byte 1 byte 2 byte 3 + * + * This means that reading a 32-bit word at address 0 returns the same + * value irrespective of the endian mode bit. + * + * Peripheral devices should be connected with the data bus reversed in + * "Big Endian" mode. ARM Application Note 61 is applicable, and is + * available from http://www.arm.com/. + * + * The following assumes that the data bus connectivity for big endian + * mode has been followed. + * + * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0. + */ + +/* + * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. + */ +extern void _set_bit_le(int nr, volatile unsigned long * p); +extern void _clear_bit_le(int nr, volatile unsigned long * p); +extern void _change_bit_le(int nr, volatile unsigned long * p); +extern int _test_and_set_bit_le(int nr, volatile unsigned long * p); +extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p); +extern int _test_and_change_bit_le(int nr, volatile unsigned long * p); +extern int _find_first_zero_bit_le(const void * p, unsigned size); +extern int _find_next_zero_bit_le(const void * p, int size, int offset); +extern int _find_first_bit_le(const unsigned long *p, unsigned size); +extern int _find_next_bit_le(const unsigned long *p, int size, int offset); + +/* + * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. + */ +extern void _set_bit_be(int nr, volatile unsigned long * p); +extern void _clear_bit_be(int nr, volatile unsigned long * p); +extern void _change_bit_be(int nr, volatile unsigned long * p); +extern int _test_and_set_bit_be(int nr, volatile unsigned long * p); +extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p); +extern int _test_and_change_bit_be(int nr, volatile unsigned long * p); +extern int _find_first_zero_bit_be(const void * p, unsigned size); +extern int _find_next_zero_bit_be(const void * p, int size, int offset); +extern int _find_first_bit_be(const unsigned long *p, unsigned size); +extern int _find_next_bit_be(const unsigned long *p, int size, int offset); + +#ifndef CONFIG_SMP +/* + * The __* form of bitops are non-atomic and may be reordered. + */ +#define ATOMIC_BITOP_LE(name,nr,p) \ + (__builtin_constant_p(nr) ? \ + ____atomic_##name(nr, p) : \ + _##name##_le(nr,p)) + +#define ATOMIC_BITOP_BE(name,nr,p) \ + (__builtin_constant_p(nr) ? \ + ____atomic_##name(nr, p) : \ + _##name##_be(nr,p)) +#else +#define ATOMIC_BITOP_LE(name,nr,p) _##name##_le(nr,p) +#define ATOMIC_BITOP_BE(name,nr,p) _##name##_be(nr,p) +#endif + +#define NONATOMIC_BITOP(name,nr,p) \ + (____nonatomic_##name(nr, p)) + +#ifndef __ARMEB__ +/* + * These are the little endian, atomic definitions. 
+ */ +#define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p) +#define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p) +#define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p) +#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) +#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) +#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) +#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) +#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) +#define find_first_bit(p,sz) _find_first_bit_le(p,sz) +#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) + +#define WORD_BITOFF_TO_LE(x) ((x)) + +#else + +/* + * These are the big endian, atomic definitions. + */ +#define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p) +#define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p) +#define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p) +#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) +#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) +#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) +#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) +#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) +#define find_first_bit(p,sz) _find_first_bit_be(p,sz) +#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) + +#define WORD_BITOFF_TO_LE(x) ((x) ^ 0x18) + +#endif + +#if __LINUX_ARM_ARCH__ < 5 + +#include +#include +#include +#include + +#else + +static inline int constant_fls(int x) +{ + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; +} + +/* + * On ARMv5 and above those functions can be implemented around + * the clz instruction for much better code efficiency. + */ + +#define __fls(x) \ + ( __builtin_constant_p(x) ? constant_fls(x) : \ + ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) ) + +/* Implement fls() in C so that 64-bit args are suitably truncated */ +static inline int fls(int x) +{ + return __fls(x); +} + +#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) +#define __ffs(x) (ffs(x) - 1) +#define ffz(x) __ffs( ~(x) ) + +#endif + +#include + +#include +#include +#include + +/* + * Ext2 is defined to use little-endian byte ordering. + * These do not need to be atomic. + */ +#define ext2_set_bit(nr,p) \ + __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) +#define ext2_set_bit_atomic(lock,nr,p) \ + test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) +#define ext2_clear_bit(nr,p) \ + __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) +#define ext2_clear_bit_atomic(lock,nr,p) \ + test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) +#define ext2_test_bit(nr,p) \ + test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) +#define ext2_find_first_zero_bit(p,sz) \ + _find_first_zero_bit_le(p,sz) +#define ext2_find_next_zero_bit(p,sz,off) \ + _find_next_zero_bit_le(p,sz,off) +#define ext2_find_next_bit(p, sz, off) \ + _find_next_bit_le(p, sz, off) + +/* + * Minix is defined to use little-endian byte ordering. + * These do not need to be atomic. 
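A small stand-alone check of the bit-numbering conventions behind the fls()/ffs()/ffz() definitions above may help: fls() and ffs() are 1-based and return 0 for a zero argument, while __ffs() and ffz() are 0-based. This is an editor-added sketch restated with GCC builtins, not the kernel macros themselves:

#include <assert.h>

static int demo_fls(unsigned int x)	/* mirrors the clz-based fls() above */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	assert(demo_fls(0x8000) == 16);			/* fls(0x8000) == 16 */
	assert(__builtin_ffs(0x8000) == 16);		/* ffs(0x8000) == 16 */
	assert(__builtin_ffs(0x8000) - 1 == 15);	/* __ffs() is 0-based */
	assert(__builtin_ffs((int)~0xffu) - 1 == 8);	/* ffz(0xff)   ==  8 */
	return 0;
}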
+ */ +#define minix_set_bit(nr,p) \ + __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) +#define minix_test_bit(nr,p) \ + test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) +#define minix_test_and_set_bit(nr,p) \ + __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) +#define minix_test_and_clear_bit(nr,p) \ + __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) +#define minix_find_first_zero_bit(p,sz) \ + _find_first_zero_bit_le(p,sz) + +#endif /* __KERNEL__ */ + +#endif /* _ARM_BITOPS_H */ diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h new file mode 100644 index 000000000000..7b62351f097d --- /dev/null +++ b/arch/arm/include/asm/bug.h @@ -0,0 +1,24 @@ +#ifndef _ASMARM_BUG_H +#define _ASMARM_BUG_H + + +#ifdef CONFIG_BUG +#ifdef CONFIG_DEBUG_BUGVERBOSE +extern void __bug(const char *file, int line) __attribute__((noreturn)); + +/* give file/line information */ +#define BUG() __bug(__FILE__, __LINE__) + +#else + +/* this just causes an oops */ +#define BUG() (*(int *)0 = 0) + +#endif + +#define HAVE_ARCH_BUG +#endif + +#include + +#endif diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h new file mode 100644 index 000000000000..a97f1ea708d1 --- /dev/null +++ b/arch/arm/include/asm/bugs.h @@ -0,0 +1,21 @@ +/* + * arch/arm/include/asm/bugs.h + * + * Copyright (C) 1995-2003 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_BUGS_H +#define __ASM_BUGS_H + +#ifdef CONFIG_MMU +extern void check_writebuffer_bugs(void); + +#define check_bugs() check_writebuffer_bugs() +#else +#define check_bugs() do { } while (0) +#endif + +#endif diff --git a/arch/arm/include/asm/byteorder.h b/arch/arm/include/asm/byteorder.h new file mode 100644 index 000000000000..4fbfb22f65a0 --- /dev/null +++ b/arch/arm/include/asm/byteorder.h @@ -0,0 +1,58 @@ +/* + * arch/arm/include/asm/byteorder.h + * + * ARM Endian-ness. In little endian mode, the data bus is connected such + * that byte accesses appear as: + * 0 = d0...d7, 1 = d8...d15, 2 = d16...d23, 3 = d24...d31 + * and word accesses (data or instruction) appear as: + * d0...d31 + * + * When in big endian mode, byte accesses appear as: + * 0 = d24...d31, 1 = d16...d23, 2 = d8...d15, 3 = d0...d7 + * and word accesses (data or instruction) appear as: + * d0...d31 + */ +#ifndef __ASM_ARM_BYTEORDER_H +#define __ASM_ARM_BYTEORDER_H + +#include +#include + +static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) +{ + __u32 t; + +#ifndef __thumb__ + if (!__builtin_constant_p(x)) { + /* + * The compiler needs a bit of a hint here to always do the + * right thing and not screw it up to different degrees + * depending on the gcc version. 
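To make the branch-free swap implemented just below in ___arch__swab32() easier to follow, here is the same three-step sequence traced on one value (an editor-added check, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t x = 0x12345678, t;

	t  = x ^ ((x << 16) | (x >> 16));	/* eor r1,r0,r0,ror #16  -> t = 0x444C444C */
	x  = (x << 24) | (x >> 8);		/* mov r0,r0,ror #8      -> x = 0x78123456 */
	t &= ~0x00FF0000;			/* bic r1,r1,#0x00FF0000 -> t = 0x4400444C */
	x ^= t >> 8;				/* eor r0,r0,r1,lsr #8   -> x = 0x78563412 */

	assert(x == 0x78563412);		/* bytes of 0x12345678, reversed */
	return 0;
}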
+ */ + asm ("eor\t%0, %1, %1, ror #16" : "=r" (t) : "r" (x)); + } else +#endif + t = x ^ ((x << 16) | (x >> 16)); /* eor r1,r0,r0,ror #16 */ + + x = (x << 24) | (x >> 8); /* mov r0,r0,ror #8 */ + t &= ~0x00FF0000; /* bic r1,r1,#0x00FF0000 */ + x ^= (t >> 8); /* eor r0,r0,r1,lsr #8 */ + + return x; +} + +#define __arch__swab32(x) ___arch__swab32(x) + +#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) +# define __BYTEORDER_HAS_U64__ +# define __SWAB_64_THRU_32__ +#endif + +#ifdef __ARMEB__ +#include +#else +#include +#endif + +#endif + diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h new file mode 100644 index 000000000000..cb7a9e97fd7e --- /dev/null +++ b/arch/arm/include/asm/cache.h @@ -0,0 +1,10 @@ +/* + * arch/arm/include/asm/cache.h + */ +#ifndef __ASMARM_CACHE_H +#define __ASMARM_CACHE_H + +#define L1_CACHE_SHIFT 5 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#endif diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h new file mode 100644 index 000000000000..9073d9c6567e --- /dev/null +++ b/arch/arm/include/asm/cacheflush.h @@ -0,0 +1,537 @@ +/* + * arch/arm/include/asm/cacheflush.h + * + * Copyright (C) 1999-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASMARM_CACHEFLUSH_H +#define _ASMARM_CACHEFLUSH_H + +#include +#include + +#include +#include + +#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) + +/* + * Cache Model + * =========== + */ +#undef _CACHE +#undef MULTI_CACHE + +#if defined(CONFIG_CPU_CACHE_V3) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v3 +# endif +#endif + +#if defined(CONFIG_CPU_CACHE_V4) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v4 +# endif +#endif + +#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ + defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) +# define MULTI_CACHE 1 +#endif + +#if defined(CONFIG_CPU_ARM926T) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE arm926 +# endif +#endif + +#if defined(CONFIG_CPU_ARM940T) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE arm940 +# endif +#endif + +#if defined(CONFIG_CPU_ARM946E) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE arm946 +# endif +#endif + +#if defined(CONFIG_CPU_CACHE_V4WB) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v4wb +# endif +#endif + +#if defined(CONFIG_CPU_XSCALE) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE xscale +# endif +#endif + +#if defined(CONFIG_CPU_XSC3) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE xsc3 +# endif +#endif + +#if defined(CONFIG_CPU_FEROCEON) +# define MULTI_CACHE 1 +#endif + +#if defined(CONFIG_CPU_V6) +//# ifdef _CACHE +# define MULTI_CACHE 1 +//# else +//# define _CACHE v6 +//# endif +#endif + +#if defined(CONFIG_CPU_V7) +//# ifdef _CACHE +# define MULTI_CACHE 1 +//# else +//# define _CACHE v7 +//# endif +#endif + +#if !defined(_CACHE) && !defined(MULTI_CACHE) +#error Unknown cache maintainence model +#endif + +/* + * This flag is used to indicate that the page pointed to by a pte + * is dirty and requires cleaning before returning it to the user. + */ +#define PG_dcache_dirty PG_arch_1 + +/* + * MM Cache Management + * =================== + * + * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files + * implement these methods. 
+ * + * Start addresses are inclusive and end addresses are exclusive; + * start addresses should be rounded down, end addresses up. + * + * See Documentation/cachetlb.txt for more information. + * Please note that the implementation of these, and the required + * effects are cache-type (VIVT/VIPT/PIPT) specific. + * + * flush_cache_kern_all() + * + * Unconditionally clean and invalidate the entire cache. + * + * flush_cache_user_mm(mm) + * + * Clean and invalidate all user space cache entries + * before a change of page tables. + * + * flush_cache_user_range(start, end, flags) + * + * Clean and invalidate a range of cache entries in the + * specified address space before a change of page tables. + * - start - user start address (inclusive, page aligned) + * - end - user end address (exclusive, page aligned) + * - flags - vma->vm_flags field + * + * coherent_kern_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start, end. If you have non-snooping + * Harvard caches, you need to implement this function. + * - start - virtual start address + * - end - virtual end address + * + * DMA Cache Coherency + * =================== + * + * dma_inv_range(start, end) + * + * Invalidate (discard) the specified virtual address range. + * May not write back any entries. If 'start' or 'end' + * are not cache line aligned, those lines must be written + * back. + * - start - virtual start address + * - end - virtual end address + * + * dma_clean_range(start, end) + * + * Clean (write back) the specified virtual address range. + * - start - virtual start address + * - end - virtual end address + * + * dma_flush_range(start, end) + * + * Clean and invalidate the specified virtual address range. + * - start - virtual start address + * - end - virtual end address + */ + +struct cpu_cache_fns { + void (*flush_kern_all)(void); + void (*flush_user_all)(void); + void (*flush_user_range)(unsigned long, unsigned long, unsigned int); + + void (*coherent_kern_range)(unsigned long, unsigned long); + void (*coherent_user_range)(unsigned long, unsigned long); + void (*flush_kern_dcache_page)(void *); + + void (*dma_inv_range)(const void *, const void *); + void (*dma_clean_range)(const void *, const void *); + void (*dma_flush_range)(const void *, const void *); +}; + +struct outer_cache_fns { + void (*inv_range)(unsigned long, unsigned long); + void (*clean_range)(unsigned long, unsigned long); + void (*flush_range)(unsigned long, unsigned long); +}; + +/* + * Select the calling method + */ +#ifdef MULTI_CACHE + +extern struct cpu_cache_fns cpu_cache; + +#define __cpuc_flush_kern_all cpu_cache.flush_kern_all +#define __cpuc_flush_user_all cpu_cache.flush_user_all +#define __cpuc_flush_user_range cpu_cache.flush_user_range +#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range +#define __cpuc_coherent_user_range cpu_cache.coherent_user_range +#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page + +/* + * These are private to the dma-mapping API. Do not use directly. + * Their sole purpose is to ensure that data held in the cache + * is visible to DMA, or data written by DMA to system memory is + * visible to the CPU. 
+ */ +#define dmac_inv_range cpu_cache.dma_inv_range +#define dmac_clean_range cpu_cache.dma_clean_range +#define dmac_flush_range cpu_cache.dma_flush_range + +#else + +#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) +#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) +#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) +#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) +#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) +#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page) + +extern void __cpuc_flush_kern_all(void); +extern void __cpuc_flush_user_all(void); +extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int); +extern void __cpuc_coherent_kern_range(unsigned long, unsigned long); +extern void __cpuc_coherent_user_range(unsigned long, unsigned long); +extern void __cpuc_flush_dcache_page(void *); + +/* + * These are private to the dma-mapping API. Do not use directly. + * Their sole purpose is to ensure that data held in the cache + * is visible to DMA, or data written by DMA to system memory is + * visible to the CPU. + */ +#define dmac_inv_range __glue(_CACHE,_dma_inv_range) +#define dmac_clean_range __glue(_CACHE,_dma_clean_range) +#define dmac_flush_range __glue(_CACHE,_dma_flush_range) + +extern void dmac_inv_range(const void *, const void *); +extern void dmac_clean_range(const void *, const void *); +extern void dmac_flush_range(const void *, const void *); + +#endif + +#ifdef CONFIG_OUTER_CACHE + +extern struct outer_cache_fns outer_cache; + +static inline void outer_inv_range(unsigned long start, unsigned long end) +{ + if (outer_cache.inv_range) + outer_cache.inv_range(start, end); +} +static inline void outer_clean_range(unsigned long start, unsigned long end) +{ + if (outer_cache.clean_range) + outer_cache.clean_range(start, end); +} +static inline void outer_flush_range(unsigned long start, unsigned long end) +{ + if (outer_cache.flush_range) + outer_cache.flush_range(start, end); +} + +#else + +static inline void outer_inv_range(unsigned long start, unsigned long end) +{ } +static inline void outer_clean_range(unsigned long start, unsigned long end) +{ } +static inline void outer_flush_range(unsigned long start, unsigned long end) +{ } + +#endif + +/* + * flush_cache_vmap() is used when creating mappings (eg, via vmap, + * vmalloc, ioremap etc) in kernel space for pages. Since the + * direct-mappings of these pages may contain cached data, we need + * to do a full cache flush to ensure that writebacks don't corrupt + * data placed into these pages via the new mappings. + */ +#define flush_cache_vmap(start, end) flush_cache_all() +#define flush_cache_vunmap(start, end) flush_cache_all() + +/* + * Copy user data from/to a page which is mapped into a different + * processes address space. Really, we want to allow our "user + * space" model to handle this. + */ +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + do { \ + memcpy(dst, src, len); \ + flush_ptrace_access(vma, page, vaddr, dst, len, 1);\ + } while (0) + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + do { \ + memcpy(dst, src, len); \ + } while (0) + +/* + * Convert calls to our calling convention. 
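As a concrete illustration of how the __cpuc_* names used just below are resolved: on a kernel built for a single cache type, the __glue() token-pasting macro from asm/glue.h turns the generic name into the CPU-specific function at compile time, while a MULTI_CACHE kernel routes every call through the cpu_cache function table filled in when the CPU type is probed at boot. The following stand-alone demo of the token pasting is an editor-added sketch with illustrative names (my_flush_kern_all is hypothetical):

#include <stdio.h>

/* Same two-level pasting trick as asm/glue.h, with made-up names */
#define ____glue(name, fn)	name##fn
#define __glue(name, fn)	____glue(name, fn)
#define _CACHE			xscale

static void xscale_flush_kern_cache_all(void)
{
	puts("xscale cache flush");
}

/* expands to xscale_flush_kern_cache_all */
#define my_flush_kern_all	__glue(_CACHE, _flush_kern_cache_all)

int main(void)
{
	my_flush_kern_all();
	return 0;
}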
+ */ +#define flush_cache_all() __cpuc_flush_kern_all() +#ifndef CONFIG_CPU_CACHE_VIPT +static inline void flush_cache_mm(struct mm_struct *mm) +{ + if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) + __cpuc_flush_user_all(); +} + +static inline void +flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) +{ + if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) + __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), + vma->vm_flags); +} + +static inline void +flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) +{ + if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { + unsigned long addr = user_addr & PAGE_MASK; + __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); + } +} + +static inline void +flush_ptrace_access(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *kaddr, + unsigned long len, int write) +{ + if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { + unsigned long addr = (unsigned long)kaddr; + __cpuc_coherent_kern_range(addr, addr + len); + } +} +#else +extern void flush_cache_mm(struct mm_struct *mm); +extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn); +extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *kaddr, + unsigned long len, int write); +#endif + +#define flush_cache_dup_mm(mm) flush_cache_mm(mm) + +/* + * flush_cache_user_range is used when we want to ensure that the + * Harvard caches are synchronised for the user space address range. + * This is used for the ARM private sys_cacheflush system call. + */ +#define flush_cache_user_range(vma,start,end) \ + __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end)) + +/* + * Perform necessary cache operations to ensure that data previously + * stored within this range of addresses can be executed by the CPU. + */ +#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e) + +/* + * Perform necessary cache operations to ensure that the TLB will + * see data written in the specified area. + */ +#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size) + +/* + * flush_dcache_page is used when the kernel has written to the page + * cache page at virtual address page->virtual. + * + * If this page isn't mapped (ie, page_mapping == NULL), or it might + * have userspace mappings, then we _must_ always clean + invalidate + * the dcache entries associated with the kernel mapping. + * + * Otherwise we can defer the operation, and clean the cache when we are + * about to change to user space. This is the same method as used on SPARC64. + * See update_mmu_cache for the user space part. 
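For context on the sys_cacheflush path mentioned above flush_cache_user_range(): a user-space JIT or code patcher must request exactly this I/D-cache synchronisation after writing instructions. A minimal sketch, assuming GCC on ARM Linux where __builtin___clear_cache() ends up in that system call (install_code is a hypothetical helper, not part of this patch):

#include <string.h>

/* Copy freshly generated machine code into an executable buffer and ask
 * the kernel to make the I-cache coherent with what was just written. */
static void install_code(void *exec_buf, const void *code, size_t len)
{
	memcpy(exec_buf, code, len);
	__builtin___clear_cache((char *)exec_buf, (char *)exec_buf + len);
}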
+ */ +extern void flush_dcache_page(struct page *); + +extern void __flush_dcache_page(struct address_space *mapping, struct page *page); + +static inline void __flush_icache_all(void) +{ + asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n" + : + : "r" (0)); +} + +#define ARCH_HAS_FLUSH_ANON_PAGE +static inline void flush_anon_page(struct vm_area_struct *vma, + struct page *page, unsigned long vmaddr) +{ + extern void __flush_anon_page(struct vm_area_struct *vma, + struct page *, unsigned long); + if (PageAnon(page)) + __flush_anon_page(vma, page, vmaddr); +} + +#define flush_dcache_mmap_lock(mapping) \ + spin_lock_irq(&(mapping)->tree_lock) +#define flush_dcache_mmap_unlock(mapping) \ + spin_unlock_irq(&(mapping)->tree_lock) + +#define flush_icache_user_range(vma,page,addr,len) \ + flush_dcache_page(page) + +/* + * We don't appear to need to do anything here. In fact, if we did, we'd + * duplicate cache flushing elsewhere performed by flush_dcache_page(). + */ +#define flush_icache_page(vma,page) do { } while (0) + +static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt, + unsigned offset, size_t size) +{ + const void *start = (void __force *)virt + offset; + dmac_inv_range(start, start + size); +} + +#define __cacheid_present(val) (val != read_cpuid(CPUID_ID)) +#define __cacheid_type_v7(val) ((val & (7 << 29)) == (4 << 29)) + +#define __cacheid_vivt_prev7(val) ((val & (15 << 25)) != (14 << 25)) +#define __cacheid_vipt_prev7(val) ((val & (15 << 25)) == (14 << 25)) +#define __cacheid_vipt_nonaliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25)) +#define __cacheid_vipt_aliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23)) + +#define __cacheid_vivt(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val)) +#define __cacheid_vipt(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val)) +#define __cacheid_vipt_nonaliasing(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val)) +#define __cacheid_vipt_aliasing(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val)) +#define __cacheid_vivt_asid_tagged_instr(val) (__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0) + +#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT) +/* + * VIVT caches only + */ +#define cache_is_vivt() 1 +#define cache_is_vipt() 0 +#define cache_is_vipt_nonaliasing() 0 +#define cache_is_vipt_aliasing() 0 +#define icache_is_vivt_asid_tagged() 0 + +#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT) +/* + * VIPT caches only + */ +#define cache_is_vivt() 0 +#define cache_is_vipt() 1 +#define cache_is_vipt_nonaliasing() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + __cacheid_vipt_nonaliasing(__val); \ + }) + +#define cache_is_vipt_aliasing() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + __cacheid_vipt_aliasing(__val); \ + }) + +#define icache_is_vivt_asid_tagged() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + __cacheid_vivt_asid_tagged_instr(__val); \ + }) + +#else +/* + * VIVT or VIPT caches. Note that this is unreliable since ARM926 + * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test. + * There's no way to tell from the CacheType register what type (!) + * the cache is. 
+ */ +#define cache_is_vivt() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + (!__cacheid_present(__val)) || __cacheid_vivt(__val); \ + }) + +#define cache_is_vipt() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + __cacheid_present(__val) && __cacheid_vipt(__val); \ + }) + +#define cache_is_vipt_nonaliasing() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + __cacheid_present(__val) && \ + __cacheid_vipt_nonaliasing(__val); \ + }) + +#define cache_is_vipt_aliasing() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + __cacheid_present(__val) && \ + __cacheid_vipt_aliasing(__val); \ + }) + +#define icache_is_vivt_asid_tagged() \ + ({ \ + unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ + __cacheid_present(__val) && \ + __cacheid_vivt_asid_tagged_instr(__val); \ + }) + +#endif + +#endif diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h new file mode 100644 index 000000000000..6dcc16430868 --- /dev/null +++ b/arch/arm/include/asm/checksum.h @@ -0,0 +1,139 @@ +/* + * arch/arm/include/asm/checksum.h + * + * IP checksum routines + * + * Copyright (C) Original authors of ../asm-i386/checksum.h + * Copyright (C) 1996-1999 Russell King + */ +#ifndef __ASM_ARM_CHECKSUM_H +#define __ASM_ARM_CHECKSUM_H + +#include + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum csum_partial(const void *buff, int len, __wsum sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums, and handles user-space pointer exceptions correctly, when needed. + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ + +__wsum +csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); + +__wsum +csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr); + +/* + * Fold a partial checksum without adding pseudo headers + */ +static inline __sum16 csum_fold(__wsum sum) +{ + __asm__( + "add %0, %1, %1, ror #16 @ csum_fold" + : "=r" (sum) + : "r" (sum) + : "cc"); + return (__force __sum16)(~(__force u32)sum >> 16); +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. 
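For reference, the assembly version below computes the standard 16-bit one's-complement Internet checksum over ihl 32-bit words. An editor-added portable sketch of the same calculation (ref_ip_fast_csum is a hypothetical name; the endianness of the returned value is glossed over):

#include <stdint.h>

static inline uint16_t ref_ip_fast_csum(const void *iph, unsigned int ihl)
{
	const uint16_t *p = iph;
	uint32_t sum = 0;
	unsigned int i;

	/* ihl counts 32-bit words, so add up 2 * ihl 16-bit chunks */
	for (i = 0; i < ihl * 2; i++)
		sum += p[i];

	/* fold the carries back into the low 16 bits (one's-complement add) */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;		/* final one's complement */
}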
+ */ +static inline __sum16 +ip_fast_csum(const void *iph, unsigned int ihl) +{ + unsigned int tmp1; + __wsum sum; + + __asm__ __volatile__( + "ldr %0, [%1], #4 @ ip_fast_csum \n\ + ldr %3, [%1], #4 \n\ + sub %2, %2, #5 \n\ + adds %0, %0, %3 \n\ + ldr %3, [%1], #4 \n\ + adcs %0, %0, %3 \n\ + ldr %3, [%1], #4 \n\ +1: adcs %0, %0, %3 \n\ + ldr %3, [%1], #4 \n\ + tst %2, #15 @ do this carefully \n\ + subne %2, %2, #1 @ without destroying \n\ + bne 1b @ the carry flag \n\ + adcs %0, %0, %3 \n\ + adc %0, %0, #0" + : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1) + : "1" (iph), "2" (ihl) + : "cc", "memory"); + return csum_fold(sum); +} + +static inline __wsum +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, + unsigned short proto, __wsum sum) +{ + __asm__( + "adds %0, %1, %2 @ csum_tcpudp_nofold \n\ + adcs %0, %0, %3 \n" +#ifdef __ARMEB__ + "adcs %0, %0, %4 \n" +#else + "adcs %0, %0, %4, lsl #8 \n" +#endif + "adcs %0, %0, %5 \n\ + adc %0, %0, #0" + : "=&r"(sum) + : "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto)