From 3ccc00a7e04ff7718c9aebb4b0c982571c798759 Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Mon, 20 Feb 2012 02:15:03 +0000 Subject: fadump: Register for firmware assisted dump. On 2012-02-20 11:02:51 Mon, Paul Mackerras wrote: > On Thu, Feb 16, 2012 at 04:44:30PM +0530, Mahesh J Salgaonkar wrote: > > If I have read the code correctly, we are going to get this printk on > non-pSeries machines or on older pSeries machines, even if the user > has not put the fadump=on option on the kernel command line. The > printk will be annoying since there is no actual error condition. It > seems to me that the condition for the printk should include > fw_dump.fadump_enabled. In other words you should probably add > > if (!fw_dump.fadump_enabled) > return 0; > > at the beginning of the function. Hi Paul, Thanks for pointing it out. Please find the updated patch below. The existing patches above this (4/10 through 10/10) cleanly applies on this update. Thanks, -Mahesh. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hash_utils_64.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 2d282186cb45..b534bbac3f82 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -55,6 +55,7 @@ #include #include #include +#include #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) @@ -625,6 +626,16 @@ static void __init htab_initialize(void) /* Using a hypervisor which owns the htab */ htab_address = NULL; _SDR1 = 0; +#ifdef CONFIG_FA_DUMP + /* + * If firmware assisted dump is active firmware preserves + * the contents of htab along with entire partition memory. + * Clear the htab if firmware assisted dump is active so + * that we dont end up using old mappings. + */ + if (is_fadump_active() && ppc_md.hpte_clear_all) + ppc_md.hpte_clear_all(); +#endif } else { /* Find storage for the HPT. Must be contiguous in * the absolute address space. On cell we want it to be -- cgit v1.2.3 From de801de139ba2a2f2c74393ea00a321477ecc0dc Mon Sep 17 00:00:00 2001 From: Jimi Xenidis Date: Tue, 28 Feb 2012 13:27:07 +0000 Subject: powerpc/icswx: Fix race condition with IPI setting ACOP There is a race where a thread causes a coprocessor type to be valid in its own ACOP _and_ in the current context, but it does not propagate to the ACOP register of other threads in time for them to use it. The original code tries to solve this by sending an IPI to all threads on the system, which is heavy handed, but unfortunately still provides a window where the icswx is issued by other threads and the ACOP is not up to date. This patch detects that the ACOP DSI fault was a "false positive" and syncs the ACOP and causes the icswx to be replayed. 
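Condensed, the fix is a lazy-sync check at the top of the fault handler. A rough sketch of the control flow (the names follow the icswx.c hunks below; this is only an illustration of the ordering, not a drop-in replacement):

	static int acop_fault_sketch(struct mm_struct *mm, unsigned int ct)
	{
		/*
		 * The coprocessor type is already enabled in the mm-wide
		 * ACOP mask, but this thread's ACOP SPR has not been
		 * updated yet: treat the DSI as a false positive, resync
		 * the register and let the faulting icswx be replayed.
		 */
		if (acop_copro_type_bit(ct) & mm->context.acop) {
			sync_cop(mm);
			return 0;
		}

		/* Otherwise try the (currently unimplemented) alternate policy. */
		if (!acop_use_cop(ct))
			return 0;

		/* The CT really is unknown to the system. */
		return -1;
	}
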
Signed-off-by: Jimi Xenidis Cc: Anton Blanchard Cc: Benjamin Herrenschmidt Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/icswx.c | 23 +++++++++++++++++++++-- arch/powerpc/mm/icswx.h | 6 ++++++ 2 files changed, 27 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c index 5d9a59eaad93..8cdbd8634a58 100644 --- a/arch/powerpc/mm/icswx.c +++ b/arch/powerpc/mm/icswx.c @@ -163,7 +163,7 @@ EXPORT_SYMBOL_GPL(drop_cop); static int acop_use_cop(int ct) { - /* todo */ + /* There is no alternate policy, yet */ return -1; } @@ -227,11 +227,30 @@ int acop_handle_fault(struct pt_regs *regs, unsigned long address, ct = (ccw >> 16) & 0x3f; } + /* + * We could be here because another thread has enabled acop + * but the ACOP register has yet to be updated. + * + * This should have been taken care of by the IPI to sync all + * the threads (see smp_call_function(sync_cop, mm, 1)), but + * that could take forever if there are a significant amount + * of threads. + * + * Given the number of threads on some of these systems, + * perhaps this is the best way to sync ACOP rather than whack + * every thread with an IPI. + */ + if ((acop_copro_type_bit(ct) & current->active_mm->context.acop) != 0) { + sync_cop(current->active_mm); + return 0; + } + + /* check for alternate policy */ if (!acop_use_cop(ct)) return 0; /* at this point the CT is unknown to the system */ - pr_warn("%s[%d]: Coprocessor %d is unavailable", + pr_warn("%s[%d]: Coprocessor %d is unavailable\n", current->comm, current->pid, ct); /* get inst if we don't already have it */ diff --git a/arch/powerpc/mm/icswx.h b/arch/powerpc/mm/icswx.h index 42176bd0884c..6dedc08e62c8 100644 --- a/arch/powerpc/mm/icswx.h +++ b/arch/powerpc/mm/icswx.h @@ -59,4 +59,10 @@ extern void free_cop_pid(int free_pid); extern int acop_handle_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code); + +static inline u64 acop_copro_type_bit(unsigned int type) +{ + return 1ULL << (63 - type); +} + #endif /* !_ARCH_POWERPC_MM_ICSWX_H_ */ -- cgit v1.2.3 From a2234b4baefe2a2742b5fa8839be1ab1aca39057 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 28 Feb 2012 08:49:34 +0000 Subject: powerpc: Use vsprintf extention %pf with builtin_return_address Emit the function name not the address when possible. builtin_return_address() gives an address. When building a kernel with CONFIG_KALLSYMS, emit the actual function name not the address. 
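For illustration, a minimal before/after of the kind of conversion this makes (the variable names follow the pgtable_32.c hunk below; the example output is made up):

	void *lr = __builtin_return_address(0);

	/* Before: prints a raw kernel text address */
	printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
	       (unsigned long long)p, lr);

	/* After: with CONFIG_KALLSYMS, %pf looks the address up in kallsyms
	 * and prints the caller's name instead; without kallsyms it degrades
	 * to printing the address as before.
	 */
	printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n",
	       (unsigned long long)p, lr);
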
Signed-off-by: Joe Perches Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/pgtable_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 51f87956f8f8..0907f92ce309 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -207,7 +207,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags, */ if (mem_init_done && (p < virt_to_phys(high_memory)) && !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) { - printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n", + printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n", (unsigned long long)p, __builtin_return_address(0)); return NULL; } -- cgit v1.2.3 From 4f8cf36f48b4648a5231e9fc8e49faea377246f4 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 28 Feb 2012 13:44:58 +1100 Subject: powerpc: Remove legacy iSeries bits from assembly files This removes the various bits of assembly in the kernel entry, exception handling and SLB management code that were specific to running under the legacy iSeries hypervisor which is no longer supported. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/slb_low.S | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index ef653dc95b65..b9ee79ce2200 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -217,21 +217,6 @@ slb_finish_load: * free slot first but that took too long. Unfortunately we * dont have any LRU information to help us choose a slot. */ -#ifdef CONFIG_PPC_ISERIES -BEGIN_FW_FTR_SECTION - /* - * On iSeries, the "bolted" stack segment can be cast out on - * shared processor switch so we need to check for a miss on - * it and restore it to the right slot. - */ - ld r9,PACAKSAVE(r13) - clrrdi r9,r9,28 - clrrdi r3,r3,28 - li r10,SLB_NUM_BOLTED-1 /* Stack goes in last bolted slot */ - cmpld r9,r3 - beq 3f -END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) -#endif /* CONFIG_PPC_ISERIES */ 7: ld r10,PACASTABRR(r13) addi r10,r10,1 @@ -282,7 +267,6 @@ _GLOBAL(slb_compare_rr_to_size) /* * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. - * We assume legacy iSeries will never have 1T segments. * * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9 */ -- cgit v1.2.3 From a546498f3bf9aac311c66f965186373aee2ca0b0 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 7 Mar 2012 16:48:45 +1100 Subject: powerpc: Call do_page_fault() with interrupts off We currently turn interrupts back to their previous state before calling do_page_fault(). This can be annoying when debugging as a bad fault will potentially have lost some processor state before getting into the debugger. We also end up calling some generic code with interrupts enabled such as notify_page_fault() with interrupts enabled, which could be unexpected. This changes our code to behave more like other architectures, and make the assembly entry code call into do_page_faults() with interrupts disabled. They are conditionally re-enabled from within do_page_fault() in the same spot x86 does it. While there, add the might_sleep() test in the case of a successful trylock of the mmap semaphore, again like x86. Also fix a bug in the existing assembly where r12 (_MSR) could get clobbered by C calls (the DTL accounting in the exception common macro and DISABLE_INTS) in some cases. 
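On the C side the change boils down to two small additions near the top of do_page_fault(). A condensed sketch (the wrapper function name here is made up; the real hunks are in the fault.c diff below):

	static int page_fault_prologue_sketch(struct pt_regs *regs,
					      struct mm_struct *mm)
	{
		/* Entry code now calls in with interrupts disabled, so the
		 * C code decides when to turn them back on, at the same
		 * point x86 does.
		 */
		if (!arch_irq_disabled_regs(regs))
			local_irq_enable();

		if (in_atomic() || mm == NULL)
			return SIGSEGV;

		if (!down_read_trylock(&mm->mmap_sem)) {
			if (!user_mode(regs) &&
			    !search_exception_tables(regs->nip))
				return SIGSEGV;
			down_read(&mm->mmap_sem);
		} else {
			/* A successful trylock skips the might_sleep() that
			 * down_read() would have done, so call it explicitly
			 * to keep the sleeping-while-atomic debug check.
			 */
			might_sleep();
		}

		return 0;	/* continue with find_vma()/handle_mm_fault() */
	}
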
Signed-off-by: Benjamin Herrenschmidt --- v2. Add the r12 clobber fix --- arch/powerpc/mm/fault.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 2f0d1b032a89..7e890065cf39 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -179,6 +179,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, } #endif + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + if (in_atomic() || mm == NULL) { if (!user_mode(regs)) return SIGSEGV; @@ -213,6 +217,13 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, goto bad_area_nosemaphore; down_read(&mm->mmap_sem); + } else { + /* + * The above down_read_trylock() might have succeeded in + * which case we'll have missed the might_sleep() from + * down_read(): + */ + might_sleep(); } vma = find_vma(mm, address); -- cgit v1.2.3 From 9be72573a80648866ed0045db22d97c6e160a540 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Thu, 1 Mar 2012 18:14:45 +1100 Subject: powerpc: Add support for page fault retry and fatal signals Other architectures such as x86 and ARM have been growing new support for features like retrying page faults after dropping the mm semaphore to break contention, or being able to return from a stuck page fault when a SIGKILL is pending. This refactors our implementation of do_page_fault() to move the error handling out of line in a way similar to x86 and adds support for those two features. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/fault.c | 170 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 120 insertions(+), 50 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 7e890065cf39..19f2f9498b27 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -105,6 +105,82 @@ static int store_updates_sp(struct pt_regs *regs) } return 0; } +/* + * do_page_fault error handling helpers + */ + +#define MM_FAULT_RETURN 0 +#define MM_FAULT_CONTINUE -1 +#define MM_FAULT_ERR(sig) (sig) + +static int out_of_memory(struct pt_regs *regs) +{ + /* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. + */ + up_read(¤t->mm->mmap_sem); + if (!user_mode(regs)) + return MM_FAULT_ERR(SIGKILL); + pagefault_out_of_memory(); + return MM_FAULT_RETURN; +} + +static int do_sigbus(struct pt_regs *regs, unsigned long address) +{ + siginfo_t info; + + up_read(¤t->mm->mmap_sem); + + if (user_mode(regs)) { + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_ADRERR; + info.si_addr = (void __user *)address; + force_sig_info(SIGBUS, &info, current); + return MM_FAULT_RETURN; + } + return MM_FAULT_ERR(SIGBUS); +} + +static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) +{ + /* + * Pagefault was interrupted by SIGKILL. We have no reason to + * continue the pagefault. + */ + if (fatal_signal_pending(current)) { + /* + * If we have retry set, the mmap semaphore will have + * alrady been released in __lock_page_or_retry(). Else + * we release it now. 
+ */ + if (!(fault & VM_FAULT_RETRY)) + up_read(¤t->mm->mmap_sem); + /* Coming from kernel, we need to deal with uaccess fixups */ + if (user_mode(regs)) + return MM_FAULT_RETURN; + return MM_FAULT_ERR(SIGKILL); + } + + /* No fault: be happy */ + if (!(fault & VM_FAULT_ERROR)) + return MM_FAULT_CONTINUE; + + /* Out of memory */ + if (fault & VM_FAULT_OOM) + return out_of_memory(regs); + + /* Bus error. x86 handles HWPOISON here, we'll add this if/when + * we support the feature in HW + */ + if (fault & VM_FAULT_SIGBUS) + return do_sigbus(regs, addr); + + /* We don't understand the fault code, this is fatal */ + BUG(); + return MM_FAULT_CONTINUE; +} /* * For 600- and 800-family processors, the error_code parameter is DSISR @@ -124,11 +200,12 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, { struct vm_area_struct * vma; struct mm_struct *mm = current->mm; - siginfo_t info; + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; int code = SEGV_MAPERR; - int is_write = 0, ret; + int is_write = 0; int trap = TRAP(regs); int is_exec = trap == 0x400; + int fault; #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) /* @@ -145,6 +222,9 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, is_write = error_code & ESR_DST; #endif /* CONFIG_4xx || CONFIG_BOOKE */ + if (is_write) + flags |= FAULT_FLAG_WRITE; + #ifdef CONFIG_PPC_ICSWX /* * we need to do this early because this "data storage @@ -152,13 +232,11 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, * look at it */ if (error_code & ICSWX_DSI_UCT) { - int ret; - - ret = acop_handle_fault(regs, address, error_code); - if (ret) - return ret; + int rc = acop_handle_fault(regs, address, error_code); + if (rc) + return rc; } -#endif +#endif /* CONFIG_PPC_ICSWX */ if (notify_page_fault(regs)) return 0; @@ -216,6 +294,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, if (!user_mode(regs) && !search_exception_tables(regs->nip)) goto bad_area_nosemaphore; +retry: down_read(&mm->mmap_sem); } else { /* @@ -338,30 +417,43 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); - if (unlikely(ret & VM_FAULT_ERROR)) { - if (ret & VM_FAULT_OOM) - goto out_of_memory; - else if (ret & VM_FAULT_SIGBUS) - goto do_sigbus; - BUG(); + fault = handle_mm_fault(mm, vma, address, flags); + if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { + int rc = mm_fault_error(regs, address, fault); + if (rc >= MM_FAULT_RETURN) + return rc; } - if (ret & VM_FAULT_MAJOR) { - current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, - regs, address); + + /* + * Major/minor page fault accounting is only done on the + * initial attempt. If we go through a retry, it is extremely + * likely that the page will be found in page cache at that point. 
+ */ + if (flags & FAULT_FLAG_ALLOW_RETRY) { + if (fault & VM_FAULT_MAJOR) { + current->maj_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, + regs, address); #ifdef CONFIG_PPC_SMLPAR - if (firmware_has_feature(FW_FEATURE_CMO)) { - preempt_disable(); - get_lppaca()->page_ins += (1 << PAGE_FACTOR); - preempt_enable(); + if (firmware_has_feature(FW_FEATURE_CMO)) { + preempt_disable(); + get_lppaca()->page_ins += (1 << PAGE_FACTOR); + preempt_enable(); + } +#endif /* CONFIG_PPC_SMLPAR */ + } else { + current->min_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, + regs, address); + } + if (fault & VM_FAULT_RETRY) { + /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk + * of starvation. */ + flags &= ~FAULT_FLAG_ALLOW_RETRY; + goto retry; } -#endif - } else { - current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, - regs, address); } + up_read(&mm->mmap_sem); return 0; @@ -382,28 +474,6 @@ bad_area_nosemaphore: return SIGSEGV; -/* - * We ran out of memory, or some other thing happened to us that made - * us unable to handle the page fault gracefully. - */ -out_of_memory: - up_read(&mm->mmap_sem); - if (!user_mode(regs)) - return SIGKILL; - pagefault_out_of_memory(); - return 0; - -do_sigbus: - up_read(&mm->mmap_sem); - if (user_mode(regs)) { - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void __user *)address; - force_sig_info(SIGBUS, &info, current); - return 0; - } - return SIGBUS; } /* -- cgit v1.2.3 From f0b8b3417d836f89d873f6d5de43d54f02cb11e2 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Thu, 5 Jan 2012 12:37:16 -0600 Subject: powerpc/fsl-booke: Fixup calc_cam_sz to support MMU v2 The registers that describe size supported by TLB are different on MMU v2 as well as we support power of two page sizes. For now we continue to assume that FSL variable size array supports all page sizes up to the maximum one reported in TLB1PS. Signed-off-by: Kumar Gala --- arch/powerpc/mm/fsl_booke_mmu.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index 66a6fd38e9cd..07ba45b0f07c 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -149,12 +149,19 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys, unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, phys_addr_t phys) { - unsigned int camsize = __ilog2(ram) & ~1U; - unsigned int align = __ffs(virt | phys) & ~1U; - unsigned long max_cam = (mfspr(SPRN_TLB1CFG) >> 16) & 0xf; - - /* Convert (4^max) kB to (2^max) bytes */ - max_cam = max_cam * 2 + 10; + unsigned int camsize = __ilog2(ram); + unsigned int align = __ffs(virt | phys); + unsigned long max_cam; + + if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { + /* Convert (4^max) kB to (2^max) bytes */ + max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10; + camsize &= ~1U; + align &= ~1U; + } else { + /* Convert (2^max) kB to (2^max) bytes */ + max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10; + } if (camsize > align) camsize = align; -- cgit v1.2.3 From f5339277eb8d3aed37f12a27988366f68ab68930 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 15 Mar 2012 18:18:00 +0000 Subject: powerpc: Remove FW_FEATURE ISERIES from arch code This is no longer selectable, so just remove all the dependent code. 
Signed-off-by: Stephen Rothwell Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hash_utils_64.c | 9 +++------ arch/powerpc/mm/slb.c | 6 ------ arch/powerpc/mm/stab.c | 9 --------- 3 files changed, 3 insertions(+), 21 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index b534bbac3f82..3e8c37a4e395 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -56,6 +56,7 @@ #include #include #include +#include #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) @@ -756,12 +757,9 @@ void __init early_init_mmu(void) */ htab_initialize(); - /* Initialize stab / SLB management except on iSeries - */ + /* Initialize stab / SLB management */ if (mmu_has_feature(MMU_FTR_SLB)) slb_initialize(); - else if (!firmware_has_feature(FW_FEATURE_ISERIES)) - stab_initialize(get_paca()->stab_real); } #ifdef CONFIG_SMP @@ -772,8 +770,7 @@ void __cpuinit early_init_mmu_secondary(void) mtspr(SPRN_SDR1, _SDR1); /* Initialize STAB/SLB. We use a virtual address as it works - * in real mode on pSeries and we want a virtual address on - * iSeries anyway + * in real mode on pSeries. */ if (mmu_has_feature(MMU_FTR_SLB)) slb_initialize(); diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index e22276cb67a4..a538c80db2df 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -307,11 +306,6 @@ void slb_initialize(void) get_paca()->stab_rr = SLB_NUM_BOLTED; - /* On iSeries the bolted entries have already been set up by - * the hypervisor from the lparMap data in head.S */ - if (firmware_has_feature(FW_FEATURE_ISERIES)) - return; - lflags = SLB_VSID_KERNEL | linear_llp; vflags = SLB_VSID_KERNEL | vmalloc_llp; diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 41e31642a86a..9106ebb118f5 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c @@ -21,8 +21,6 @@ #include #include #include -#include -#include struct stab_entry { unsigned long esid_data; @@ -285,12 +283,5 @@ void stab_initialize(unsigned long stab) /* Set ASR */ stabreal = get_paca()->stab_real | 0x1ul; -#ifdef CONFIG_PPC_ISERIES - if (firmware_has_feature(FW_FEATURE_ISERIES)) { - HvCall1(HvCallBaseSetASR, stabreal); - return; - } -#endif /* CONFIG_PPC_ISERIES */ - mtspr(SPRN_ASR, stabreal); } -- cgit v1.2.3