Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile                 4
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c            2
-rw-r--r--  arch/powerpc/kernel/cputable.c             118
-rw-r--r--  arch/powerpc/kernel/crash_dump.c             4
-rw-r--r--  arch/powerpc/kernel/dbell.c                 78
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S        50
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S         1
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c        364
-rw-r--r--  arch/powerpc/kernel/idle_book3e.S           86
-rw-r--r--  arch/powerpc/kernel/irq.c                   16
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c         22
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c      78
-rw-r--r--  arch/powerpc/kernel/paca.c                  10
-rw-r--r--  arch/powerpc/kernel/process.c               36
-rw-r--r--  arch/powerpc/kernel/prom_init.c             44
-rw-r--r--  arch/powerpc/kernel/ptrace.c                64
-rw-r--r--  arch/powerpc/kernel/rtas.c                 105
-rw-r--r--  arch/powerpc/kernel/setup-common.c          13
-rw-r--r--  arch/powerpc/kernel/setup_64.c              19
-rw-r--r--  arch/powerpc/kernel/signal.c                 3
-rw-r--r--  arch/powerpc/kernel/smp.c                   10
-rw-r--r--  arch/powerpc/kernel/time.c                 197
-rw-r--r--  arch/powerpc/kernel/traps.c                 29
-rw-r--r--  arch/powerpc/kernel/vdso32/gettimeofday.S  184
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S   88
25 files changed, 1126 insertions, 499 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 58d0572de6f9..77d831a1cc32 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -34,9 +34,10 @@ obj-y += vdso32/
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o nvram_64.o firmware.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
-obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o
+obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
@@ -67,6 +68,7 @@ obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o
obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_44x) += cpu_setup_44x.o
obj-$(CONFIG_FSL_BOOKE) += cpu_setup_fsl_booke.o dbell.o
+obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o
extra-y := head_$(CONFIG_WORD_SIZE).o
extra-$(CONFIG_PPC_BOOK3E_32) := head_new_booke.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 496cc5b3984f..1c0607ddccc0 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -194,7 +194,6 @@ int main(void)
DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
- DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
@@ -342,6 +341,7 @@ int main(void)
DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime));
+ DEFINE(STAMP_SEC_FRAC, offsetof(struct vdso_data, stamp_sec_fraction));
DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));
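[Note: the DEFINE() entries above generate assembler-visible constants; a minimal sketch of how the new STAMP_SEC_FRAC offset reaches the vdso assembly. The macro matches include/linux/kbuild.h; the lwz line is illustrative.]

    /* Sketch of the asm-offsets mechanism: the asm marker emitted by
     * DEFINE() is scraped from the compiler output and rewritten as
     * "#define STAMP_SEC_FRAC <offset>" in asm-offsets.h.
     */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    /* The vdso assembly can then address the new field directly, e.g.
     *     lwz r5, STAMP_SEC_FRAC(r3)    (r3 = vdso_data pointer)
     */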
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 87aa0f3c6047..65e2b4e10f97 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1364,10 +1364,10 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_4xx,
.platform = "ppc405",
},
- { /* 405EX */
- .pvr_mask = 0xffff0004,
- .pvr_value = 0x12910004,
- .cpu_name = "405EX",
+ { /* 405EX Rev. A/B with Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910007,
+ .cpu_name = "405EX Rev. A/B",
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
@@ -1377,10 +1377,114 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_4xx,
.platform = "ppc405",
},
- { /* 405EXr */
- .pvr_mask = 0xffff0004,
+ { /* 405EX Rev. C without Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x1291000d,
+ .cpu_name = "405EX Rev. C",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 |
+ PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EX Rev. C with Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x1291000f,
+ .cpu_name = "405EX Rev. C",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 |
+ PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EX Rev. D without Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910003,
+ .cpu_name = "405EX Rev. D",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 |
+ PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EX Rev. D with Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910005,
+ .cpu_name = "405EX Rev. D",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 |
+ PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EXr Rev. A/B without Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910001,
+ .cpu_name = "405EXr Rev. A/B",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 |
+ PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EXr Rev. C without Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910009,
+ .cpu_name = "405EXr Rev. C",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 |
+ PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EXr Rev. C with Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x1291000b,
+ .cpu_name = "405EXr Rev. C",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 |
+ PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EXr Rev. D without Security */
+ .pvr_mask = 0xffff000f,
.pvr_value = 0x12910000,
- .cpu_name = "405EXr",
+ .cpu_name = "405EXr Rev. D",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 |
+ PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EXr Rev. D with Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910002,
+ .cpu_name = "405EXr Rev. D",
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
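[Note: the entries above tighten the PVR mask from 0xffff0004 to 0xffff000f so the low nibble can distinguish revision and security variants of the 405EX/405EXr. For context, a minimal sketch of how cpu_specs[] is matched at boot, mirroring the identify_cpu() scan in this file; the helper name is illustrative.]

    /* Sketch: scan cpu_specs[] for the first entry whose masked PVR
     * matches, as identify_cpu() does in cputable.c.
     */
    static struct cpu_spec *match_cpu_spec(unsigned int pvr)
    {
            struct cpu_spec *s;

            for (s = cpu_specs; s < cpu_specs + ARRAY_SIZE(cpu_specs); s++)
                    if ((pvr & s->pvr_mask) == s->pvr_value)
                            return s;
            return NULL;    /* unknown CPU */
    }

    /* e.g. PVR 0x1291000d: 0x1291000d & 0xffff000f == 0x1291000d,
     * so it selects the "405EX Rev. C" (no security) entry above.
     */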
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 40f524643ba6..8e05c16344e4 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -128,9 +128,9 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!csize)
return 0;
- csize = min(csize, PAGE_SIZE);
+ csize = min_t(size_t, csize, PAGE_SIZE);
- if (pfn < max_pfn) {
+ if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
vaddr = __va(pfn << PAGE_SHIFT);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
} else {
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 1493734cd871..3307a52d797f 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -13,32 +13,88 @@
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/threads.h>
+#include <linux/percpu.h>
#include <asm/dbell.h>
+#include <asm/irq_regs.h>
#ifdef CONFIG_SMP
-unsigned long dbell_smp_message[NR_CPUS];
+struct doorbell_cpu_info {
+ unsigned long messages; /* current messages bits */
+ unsigned int tag; /* tag value */
+};
-void smp_dbell_message_pass(int target, int msg)
+static DEFINE_PER_CPU(struct doorbell_cpu_info, doorbell_cpu_info);
+
+void doorbell_setup_this_cpu(void)
+{
+ struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
+
+ info->messages = 0;
+ info->tag = mfspr(SPRN_PIR) & 0x3fff;
+}
+
+void doorbell_message_pass(int target, int msg)
{
+ struct doorbell_cpu_info *info;
int i;
- if(target < NR_CPUS) {
- set_bit(msg, &dbell_smp_message[target]);
- ppc_msgsnd(PPC_DBELL, 0, target);
+ if (target < NR_CPUS) {
+ info = &per_cpu(doorbell_cpu_info, target);
+ set_bit(msg, &info->messages);
+ ppc_msgsnd(PPC_DBELL, 0, info->tag);
}
- else if(target == MSG_ALL_BUT_SELF) {
+ else if (target == MSG_ALL_BUT_SELF) {
for_each_online_cpu(i) {
if (i == smp_processor_id())
continue;
- set_bit(msg, &dbell_smp_message[i]);
- ppc_msgsnd(PPC_DBELL, 0, i);
+ info = &per_cpu(doorbell_cpu_info, i);
+ set_bit(msg, &info->messages);
+ ppc_msgsnd(PPC_DBELL, 0, info->tag);
}
}
else { /* target == MSG_ALL */
- for_each_online_cpu(i)
- set_bit(msg, &dbell_smp_message[i]);
+ for_each_online_cpu(i) {
+ info = &per_cpu(doorbell_cpu_info, i);
+ set_bit(msg, &info->messages);
+ }
ppc_msgsnd(PPC_DBELL, PPC_DBELL_MSG_BRDCAST, 0);
}
}
-#endif
+
+void doorbell_exception(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
+ int msg;
+
+ /* Warning: regs can be NULL when called from irq enable */
+
+ if (!info->messages || (num_online_cpus() < 2))
+ goto out;
+
+ for (msg = 0; msg < 4; msg++)
+ if (test_and_clear_bit(msg, &info->messages))
+ smp_message_recv(msg);
+
+out:
+ set_irq_regs(old_regs);
+}
+
+void doorbell_check_self(void)
+{
+ struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
+
+ if (!info->messages)
+ return;
+
+ ppc_msgsnd(PPC_DBELL, 0, info->tag);
+}
+
+#else /* CONFIG_SMP */
+void doorbell_exception(struct pt_regs *regs)
+{
+ printk(KERN_WARNING "Received doorbell on non-smp system\n");
+}
+#endif /* CONFIG_SMP */
+
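[Note: the rework above keys msgsnd on the receiving CPU's PIR tag rather than its logical number, recording that tag per CPU at bringup. A hedged sketch of how the SMP layer would drive this API; PPC_MSG_RESCHEDULE is from asm/smp.h, the function names here are illustrative.]

    /* Each CPU must record its PIR-derived doorbell tag during bringup */
    static void __cpuinit example_setup_cpu(int cpu)
    {
            doorbell_setup_this_cpu();
    }

    /* A cross-CPU kick: sets the message bit in the target's per-cpu
     * state, then sends a doorbell addressed by the recorded tag. The
     * target's doorbell_exception() drains the bits via smp_message_recv().
     */
    static void example_kick(int cpu)
    {
            doorbell_message_pass(cpu, PPC_MSG_RESCHEDULE);
    }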
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 24dcc0ecf246..5c43063d2506 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -191,6 +191,12 @@ exc_##n##_bad_stack: \
sth r1,PACA_TRAP_SAVE(r13); /* store trap */ \
b bad_stack_book3e; /* bad stack error */
+/* WARNING: If you change the layout of this stub, make sure you check
+ * the debug exception handler which handles single stepping
+ * into exceptions from userspace, and the MM code in
+ * arch/powerpc/mm/tlb_nohash.c which patches the branch here
+ * and would need to be updated if that branch is moved
+ */
#define EXCEPTION_STUB(loc, label) \
. = interrupt_base_book3e + loc; \
nop; /* To make debug interrupts happy */ \
@@ -204,11 +210,30 @@ exc_##n##_bad_stack: \
lis r,TSR_FIS@h; \
mtspr SPRN_TSR,r
+/* Used by asynchronous interrupts that may happen in the idle loop.
+ *
+ * This checks if the thread was in the idle loop, and if so, returns
+ * to the caller rather than to the saved PC. This avoids a race if an
+ * interrupt happens just before the wait instruction.
+ */
+#define CHECK_NAPPING() \
+ clrrdi r11,r1,THREAD_SHIFT; \
+ ld r10,TI_LOCAL_FLAGS(r11); \
+ andi. r9,r10,_TLF_NAPPING; \
+ beq+ 1f; \
+ ld r8,_LINK(r1); \
+ rlwinm r7,r10,0,~_TLF_NAPPING; \
+ std r8,_NIP(r1); \
+ std r7,TI_LOCAL_FLAGS(r11); \
+1:
+
+
#define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \
START_EXCEPTION(label); \
NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \
EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE_ALL) \
ack(r8); \
+ CHECK_NAPPING(); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
b .ret_from_except_lite;
@@ -246,11 +271,9 @@ interrupt_base_book3e: /* fake trap */
EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */
EXCEPTION_STUB(0x1c0, data_tlb_miss)
EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
+ EXCEPTION_STUB(0x280, doorbell)
+ EXCEPTION_STUB(0x2a0, doorbell_crit)
-#if 0
- EXCEPTION_STUB(0x280, processor_doorbell)
- EXCEPTION_STUB(0x220, processor_doorbell_crit)
-#endif
.globl interrupt_end_book3e
interrupt_end_book3e:
@@ -259,6 +282,7 @@ interrupt_end_book3e:
CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE)
// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE_ALL)
// bl special_reg_save_crit
+// CHECK_NAPPING();
// addi r3,r1,STACK_FRAME_OVERHEAD
// bl .critical_exception
// b ret_from_crit_except
@@ -270,6 +294,7 @@ interrupt_end_book3e:
// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE_ALL)
// bl special_reg_save_mc
// addi r3,r1,STACK_FRAME_OVERHEAD
+// CHECK_NAPPING();
// bl .machine_check_exception
// b ret_from_mc_except
b .
@@ -340,6 +365,7 @@ interrupt_end_book3e:
CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE)
// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE_ALL)
// bl special_reg_save_crit
+// CHECK_NAPPING();
// addi r3,r1,STACK_FRAME_OVERHEAD
// bl .unknown_exception
// b ret_from_crit_except
@@ -428,6 +454,20 @@ interrupt_end_book3e:
kernel_dbg_exc:
b . /* NYI */
+/* Doorbell interrupt */
+ MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE)
+
+/* Doorbell critical Interrupt */
+ START_EXCEPTION(doorbell_crit);
+ CRIT_EXCEPTION_PROLOG(0x2080, PROLOG_ADDITION_NONE)
+// EXCEPTION_COMMON(0x2080, PACA_EXCRIT, INTS_DISABLE_ALL)
+// bl special_reg_save_crit
+// CHECK_NAPPING();
+// addi r3,r1,STACK_FRAME_OVERHEAD
+// bl .doorbell_critical_exception
+// b ret_from_crit_except
+ b .
+
/*
* An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -563,6 +603,8 @@ BAD_STACK_TRAMPOLINE(0xd00)
BAD_STACK_TRAMPOLINE(0xe00)
BAD_STACK_TRAMPOLINE(0xf00)
BAD_STACK_TRAMPOLINE(0xf20)
+BAD_STACK_TRAMPOLINE(0x2070)
+BAD_STACK_TRAMPOLINE(0x2080)
.globl bad_stack_book3e
bad_stack_book3e:
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 3e423fbad6bc..f53029a01554 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -828,6 +828,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
+ bl .save_nvgprs
ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..5ecd0401cdb1
--- /dev/null
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -0,0 +1,364 @@
+/*
+ * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
+ * using the CPU's debug registers. Derived from
+ * "arch/x86/kernel/hw_breakpoint.c"
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2010 IBM Corporation
+ * Author: K.Prasad <prasad@linux.vnet.ibm.com>
+ *
+ */
+
+#include <linux/hw_breakpoint.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/percpu.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/hw_breakpoint.h>
+#include <asm/processor.h>
+#include <asm/sstep.h>
+#include <asm/uaccess.h>
+
+/*
+ * Stores the breakpoints currently in use on each breakpoint address
+ * register for every cpu
+ */
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
+
+/*
+ * Returns total number of data or instruction breakpoints available.
+ */
+int hw_breakpoint_slots(int type)
+{
+ if (type == TYPE_DATA)
+ return HBP_NUM;
+ return 0; /* no instruction breakpoints available */
+}
+
+/*
+ * Install a perf counter breakpoint.
+ *
+ * We seek a free debug address register and use it for this
+ * breakpoint.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+
+ *slot = bp;
+
+ /*
+ * Do not install DABR values if the instruction must be single-stepped.
+ * If so, DABR will be populated in single_step_dabr_instruction().
+ */
+ if (current->thread.last_hit_ubp != bp)
+ set_dabr(info->address | info->type | DABR_TRANSLATION);
+
+ return 0;
+}
+
+/*
+ * Uninstall the breakpoint contained in the given counter.
+ *
+ * First we search the debug address register it uses and then we disable
+ * it.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+
+ if (*slot != bp) {
+ WARN_ONCE(1, "Can't find the breakpoint");
+ return;
+ }
+
+ *slot = NULL;
+ set_dabr(0);
+}
+
+/*
+ * Perform cleanup of arch-specific counters during unregistration
+ * of the perf-event
+ */
+void arch_unregister_hw_breakpoint(struct perf_event *bp)
+{
+ /*
+ * If the breakpoint is unregistered between a hw_breakpoint_handler()
+ * and the single_step_dabr_instruction(), then cleanup the breakpoint
+ * restoration variables to prevent dangling pointers.
+ */
+ if (bp->ctx->task)
+ bp->ctx->task->thread.last_hit_ubp = NULL;
+}
+
+/*
+ * Check for virtual address in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+ return is_kernel_addr(info->address);
+}
+
+int arch_bp_generic_fields(int type, int *gen_bp_type)
+{
+ switch (type) {
+ case DABR_DATA_READ:
+ *gen_bp_type = HW_BREAKPOINT_R;
+ break;
+ case DABR_DATA_WRITE:
+ *gen_bp_type = HW_BREAKPOINT_W;
+ break;
+ case (DABR_DATA_WRITE | DABR_DATA_READ):
+ *gen_bp_type = (HW_BREAKPOINT_W | HW_BREAKPOINT_R);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp)
+{
+ int ret = -EINVAL;
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+ if (!bp)
+ return ret;
+
+ switch (bp->attr.bp_type) {
+ case HW_BREAKPOINT_R:
+ info->type = DABR_DATA_READ;
+ break;
+ case HW_BREAKPOINT_W:
+ info->type = DABR_DATA_WRITE;
+ break;
+ case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
+ info->type = (DABR_DATA_READ | DABR_DATA_WRITE);
+ break;
+ default:
+ return ret;
+ }
+
+ info->address = bp->attr.bp_addr;
+ info->len = bp->attr.bp_len;
+
+ /*
+ * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
+ * and breakpoint addresses are aligned to nearest double-word
+ * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the
+ * 'symbolsize' should satisfy the check below.
+ */
+ if (info->len >
+ (HW_BREAKPOINT_LEN - (info->address & HW_BREAKPOINT_ALIGN)))
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Restores the breakpoint on the debug registers.
+ * Invoke this function if it is known that the execution context is
+ * about to change to cause loss of MSR_SE settings.
+ */
+void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
+{
+ struct arch_hw_breakpoint *info;
+
+ if (likely(!tsk->thread.last_hit_ubp))
+ return;
+
+ info = counter_arch_bp(tsk->thread.last_hit_ubp);
+ regs->msr &= ~MSR_SE;
+ set_dabr(info->address | info->type | DABR_TRANSLATION);
+ tsk->thread.last_hit_ubp = NULL;
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_handler(struct die_args *args)
+{
+ int rc = NOTIFY_STOP;
+ struct perf_event *bp;
+ struct pt_regs *regs = args->regs;
+ int stepped = 1;
+ struct arch_hw_breakpoint *info;
+ unsigned int instr;
+ unsigned long dar = regs->dar;
+
+ /* Disable breakpoints during exception handling */
+ set_dabr(0);
+
+ /*
+ * The counter may be concurrently released but that can only
+ * occur from a call_rcu() path. We can then safely fetch
+ * the breakpoint, use its callback, touch its counter
+ * while we are in an rcu_read_lock() path.
+ */
+ rcu_read_lock();
+
+ bp = __get_cpu_var(bp_per_reg);
+ if (!bp)
+ goto out;
+ info = counter_arch_bp(bp);
+
+ /*
+ * Return early after invoking user-callback function without restoring
+ * DABR if the breakpoint is from ptrace which always operates in
+ * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
+ * generated in do_dabr().
+ */
+ if (bp->overflow_handler == ptrace_triggered) {
+ perf_bp_event(bp, regs);
+ rc = NOTIFY_DONE;
+ goto out;
+ }
+
+ /*
+ * Verify if dar lies within the address range occupied by the symbol
+ * being watched to filter extraneous exceptions. If it doesn't,
+ * we still need to single-step the instruction, but we don't
+ * generate an event.
+ */
+ info->extraneous_interrupt = !((bp->attr.bp_addr <= dar) &&
+ (dar - bp->attr.bp_addr < bp->attr.bp_len));
+
+ /* Do not emulate user-space instructions, instead single-step them */
+ if (user_mode(regs)) {
+ bp->ctx->task->thread.last_hit_ubp = bp;
+ regs->msr |= MSR_SE;
+ goto out;
+ }
+
+ stepped = 0;
+ instr = 0;
+ if (!__get_user_inatomic(instr, (unsigned int *) regs->nip))
+ stepped = emulate_step(regs, instr);
+
+ /*
+ * emulate_step() could not execute it. We've failed to reliably
+ * handle the hw-breakpoint. Disable it and emit a warning
+ * message to let the user know about it.
+ */
+ if (!stepped) {
+ WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
+ "0x%lx will be disabled.", info->address);
+ perf_event_disable(bp);
+ goto out;
+ }
+ /*
+ * As a policy, the callback is invoked in a 'trigger-after-execute'
+ * fashion
+ */
+ if (!info->extraneous_interrupt)
+ perf_bp_event(bp, regs);
+
+ set_dabr(info->address | info->type | DABR_TRANSLATION);
+out:
+ rcu_read_unlock();
+ return rc;
+}
+
+/*
+ * Handle single-step exceptions following a DABR hit.
+ */
+int __kprobes single_step_dabr_instruction(struct die_args *args)
+{
+ struct pt_regs *regs = args->regs;
+ struct perf_event *bp = NULL;
+ struct arch_hw_breakpoint *bp_info;
+
+ bp = current->thread.last_hit_ubp;
+ /*
+ * Check if we are single-stepping as a result of a
+ * previous HW Breakpoint exception
+ */
+ if (!bp)
+ return NOTIFY_DONE;
+
+ bp_info = counter_arch_bp(bp);
+
+ /*
+ * We shall invoke the user-defined callback function in the single-
+ * stepping handler to conform to 'trigger-after-execute' semantics
+ */
+ if (!bp_info->extraneous_interrupt)
+ perf_bp_event(bp, regs);
+
+ set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION);
+ current->thread.last_hit_ubp = NULL;
+
+ /*
+ * If the process was being single-stepped by ptrace, let the
+ * other single-step actions occur (e.g. generate SIGTRAP).
+ */
+ if (test_thread_flag(TIF_SINGLESTEP))
+ return NOTIFY_DONE;
+
+ return NOTIFY_STOP;
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_exceptions_notify(
+ struct notifier_block *unused, unsigned long val, void *data)
+{
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_DABR_MATCH:
+ ret = hw_breakpoint_handler(data);
+ break;
+ case DIE_SSTEP:
+ ret = single_step_dabr_instruction(data);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Release the user breakpoints used by ptrace
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+ struct thread_struct *t = &tsk->thread;
+
+ unregister_hw_breakpoint(t->ptrace_bps[0]);
+ t->ptrace_bps[0] = NULL;
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+ /* TODO */
+}
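[Note: consumers reach this backend through the generic perf hw-breakpoint API rather than these arch hooks. A minimal kernel-side sketch, modelled on samples/hw_breakpoint/data_breakpoint.c of the same era; module boilerplate and cleanup trimmed, and watching 'jiffies' is just an example.]

    #include <linux/kallsyms.h>
    #include <linux/perf_event.h>
    #include <linux/hw_breakpoint.h>

    static struct perf_event * __percpu *wp;

    static void wp_handler(struct perf_event *bp, int nmi,
                           struct perf_sample_data *data, struct pt_regs *regs)
    {
            pr_info("write to watched address 0x%llx\n", bp->attr.bp_addr);
    }

    static int __init wp_init(void)
    {
            struct perf_event_attr attr;

            hw_breakpoint_init(&attr);
            attr.bp_addr = kallsyms_lookup_name("jiffies");
            attr.bp_len = HW_BREAKPOINT_LEN_8;   /* DABR is double-word based */
            attr.bp_type = HW_BREAKPOINT_W;

            wp = register_wide_hw_breakpoint(&attr, wp_handler);
            if (IS_ERR((void __force *)wp))
                    return PTR_ERR((void __force *)wp);
            return 0;
    }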
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S
new file mode 100644
index 000000000000..16c002d6bdf1
--- /dev/null
+++ b/arch/powerpc/kernel/idle_book3e.S
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2010 IBM Corp, Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ *
+ * Generic idle routine for Book3E processors
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/threads.h>
+#include <asm/reg.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/ppc-opcode.h>
+#include <asm/processor.h>
+#include <asm/thread_info.h>
+
+/* 64-bit version only for now */
+#ifdef CONFIG_PPC64
+
+_GLOBAL(book3e_idle)
+ /* Save LR for later */
+ mflr r0
+ std r0,16(r1)
+
+ /* Hard disable interrupts */
+ wrteei 0
+
+ /* Now check if an interrupt came in while we were soft disabled
+ * since we may otherwise lose it (doorbells etc...). We know
+ * that since PACAHARDIRQEN will have been cleared in that case.
+ */
+ lbz r3,PACAHARDIRQEN(r13)
+ cmpwi cr0,r3,0
+ beqlr
+
+ /* Now we are going to mark ourselves as soft and hard enabled in
+ * order to be able to take interrupts while asleep. We inform lockdep
+ * of that. We don't actually turn interrupts on just yet, though.
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ stdu r1,-128(r1)
+ bl .trace_hardirqs_on
+#endif
+ li r0,1
+ stb r0,PACASOFTIRQEN(r13)
+ stb r0,PACAHARDIRQEN(r13)
+
+ /* Interrupts will make us return to LR, so get something we want
+ * in there
+ */
+ bl 1f
+
+ /* Hard disable interrupts again */
+ wrteei 0
+
+ /* Mark them off again in the PACA as well */
+ li r0,0
+ stb r0,PACASOFTIRQEN(r13)
+ stb r0,PACAHARDIRQEN(r13)
+
+ /* Tell lockdep about it */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl .trace_hardirqs_off
+ addi r1,r1,128
+#endif
+ ld r0,16(r1)
+ mtlr r0
+ blr
+
+1: /* Let's set the _TLF_NAPPING flag so interrupts make us return
+ * to the right spot
+ */
+ clrrdi r11,r1,THREAD_SHIFT
+ ld r10,TI_LOCAL_FLAGS(r11)
+ ori r10,r10,_TLF_NAPPING
+ std r10,TI_LOCAL_FLAGS(r11)
+
+ /* We can now re-enable hard interrupts and go to sleep */
+ wrteei 1
+1: PPC_WAIT(0)
+ b 1b
+
+#endif /* CONFIG_PPC64 */
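[Note: the _TLF_NAPPING flag set at 1: above pairs with the CHECK_NAPPING macro in exceptions-64e.S: an interrupt landing anywhere around the wait instruction rewrites the saved NIP to LR, exiting the idle loop cleanly. In rough C terms the interrupt-side check looks like the sketch below; flag value and types are simplified stand-ins for asm/thread_info.h and asm/ptrace.h.]

    #define _TLF_NAPPING    1UL     /* assumed: (1 << TLF_NAPPING) */

    struct thread_info_sketch { unsigned long local_flags; };
    struct pt_regs_sketch    { unsigned long nip, link; };

    /* What CHECK_NAPPING does on entry to a maskable interrupt: if the
     * interrupted thread had flagged itself as napping, clear the flag
     * and redirect the return address to LR, i.e. just after the
     * "bl 1f" in book3e_idle, instead of back into the wait loop.
     */
    static void check_napping(struct thread_info_sketch *ti,
                              struct pt_regs_sketch *regs)
    {
            if (ti->local_flags & _TLF_NAPPING) {
                    ti->local_flags &= ~_TLF_NAPPING;
                    regs->nip = regs->link;
            }
    }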
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 77be3d058a65..8f96d3198905 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -64,6 +64,8 @@
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
+#include <asm/dbell.h>
+
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
@@ -153,14 +155,28 @@ notrace void raw_local_irq_restore(unsigned long en)
if (get_hard_enabled())
return;
+#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
+