author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-06 03:16:12 +0900
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-06 03:16:12 +0900
commit		5f3d2f2e1a63679cf1c4a4210f2f1cc2f335bef6 (patch)
tree		9189bd6c81fe5f982a7ae45d2f3d900176658509 /arch/powerpc/mm
parent		283dbd82055eb70ff3b469f812d9c695f18c9641 (diff)
parent		d900bd7366463fd96a907b2c212242e2b68b27d8 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Benjamin Herrenschmidt:
 "Some highlights in addition to the usual batch of fixes:

  - 64TB address space support for 64-bit processes by Aneesh Kumar

  - Gavin Shan did a major cleanup & re-organization of our EEH support
    code (IBM fancy PCI error handling & recovery infrastructure) which
    paves the way for supporting different platform backends, along
    with some rework of the PCIe code for the PowerNV platform in order
    to remove home made resource allocations and instead use the
    generic code (which is possible after some small improvements to it
    done by Gavin).

  - Uprobes support by Ananth N Mavinakayanahalli

  - A pile of embedded updates from Freescale folks, including new SoC
    and board supports, more KVM stuff including preparing for 64-bit
    BookE KVM support, ePAPR 1.1 updates, etc..."

Fixup trivial conflicts in drivers/scsi/ipr.c

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (146 commits)
  powerpc/iommu: Fix multiple issues with IOMMU pools code
  powerpc: Fix VMX fix for memcpy case
  driver/mtd:IFC NAND:Initialise internal SRAM before any write
  powerpc/fsl-pci: use 'Header Type' to identify PCIE mode
  powerpc/eeh: Don't release eeh_mutex in eeh_phb_pe_get
  powerpc: Remove tlb batching hack for nighthawk
  powerpc: Set paca->data_offset = 0 for boot cpu
  powerpc/perf: Sample only if SIAR-Valid bit is set in P7+
  powerpc/fsl-pci: fix warning when CONFIG_SWIOTLB is disabled
  powerpc/mpc85xx: Update interrupt handling for IFC controller
  powerpc/85xx: Enable USB support in p1023rds_defconfig
  powerpc/smp: Do not disable IPI interrupts during suspend
  powerpc/eeh: Fix crash on converting OF node to edev
  powerpc/eeh: Lock module while handling EEH event
  powerpc/kprobe: Don't emulate store when kprobe stwu r1
  powerpc/kprobe: Complete kprobe and migrate exception frame
  powerpc/kprobe: Introduce a new thread flag
  powerpc: Remove unused __get_user64() and __put_user64()
  powerpc/eeh: Global mutex to protect PE tree
  powerpc/eeh: Remove EEH PE for normal PCI hotplug
  ...
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/fault.c                 1
-rw-r--r--  arch/powerpc/mm/hash_low_64.S          97
-rw-r--r--  arch/powerpc/mm/hash_native_64.c      192
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c        48
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c   15
-rw-r--r--  arch/powerpc/mm/init_64.c               1
-rw-r--r--  arch/powerpc/mm/mem.c                   5
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c   10
-rw-r--r--  arch/powerpc/mm/pgtable_64.c           13
-rw-r--r--  arch/powerpc/mm/slb_low.S              62
-rw-r--r--  arch/powerpc/mm/slice.c               112
-rw-r--r--  arch/powerpc/mm/stab.c                  3
-rw-r--r--  arch/powerpc/mm/subpage-prot.c          6
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c           11
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S          18
15 files changed, 357 insertions, 237 deletions
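
The recurring change in every file below is the same: the hash MMU code stops
passing a full virtual address (va) around and passes a virtual page number
(vpn) instead, so that addresses wider than 64 bits (needed for the 64TB
address space) still fit in one unsigned long. As a rough illustration of
what the assembly and C below compute, here is a hypothetical C sketch of the
conversion, assuming the SID_SHIFT (28 for 256M segments), SID_SHIFT_1T (40)
and VPN_SHIFT (12) values this patch uses; hpt_vpn_sketch is a made-up name,
not the kernel's hpt_vpn():

	#define SID_SHIFT     28	/* 256M segments */
	#define SID_SHIFT_1T  40	/* 1T segments */
	#define VPN_SHIFT     12	/* smallest supported page shift */

	/* Hypothetical stand-in for hpt_vpn(): pack (vsid, segment
	 * offset) into a virtual page number. */
	static unsigned long hpt_vpn_sketch(unsigned long ea,
					    unsigned long vsid,
					    int sid_shift)
	{
		unsigned long seg_off = ea & ((1UL << sid_shift) - 1);

		return (vsid << (sid_shift - VPN_SHIFT)) |
			(seg_off >> VPN_SHIFT);
	}

With a 256M segment, e.g. segment offset 0x1234000 and a made-up vsid of
0xabcd, this yields vpn 0xabcd1234: the vsid lands in the high bits, the page
index within the segment in the low bits.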
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index e5f028b5794e..5495ebe983a2 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -133,6 +133,7 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address)
up_read(&current->mm->mmap_sem);
if (user_mode(regs)) {
+ current->thread.trap_nr = BUS_ADRERR;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 602aeb06d298..56585086413a 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -63,7 +63,7 @@ _GLOBAL(__hash_page_4K)
/* Save non-volatile registers.
* r31 will hold "old PTE"
* r30 is "new PTE"
- * r29 is "va"
+ * r29 is vpn
* r28 is a hash value
* r27 is hashtab mask (maybe dynamic patched instead ?)
*/
@@ -111,10 +111,10 @@ BEGIN_FTR_SECTION
cmpdi r9,0 /* check segment size */
bne 3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
- /* Calc va and put it in r29 */
- rldicr r29,r5,28,63-28
- rldicl r3,r3,0,36
- or r29,r3,r29
+ /* Calc vpn and put it in r29 */
+ sldi r29,r5,SID_SHIFT - VPN_SHIFT
+ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
+ or r29,r28,r29
/* Calculate hash value for primary slot and store it in r28 */
rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -122,14 +122,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
xor r28,r5,r0
b 4f
-3: /* Calc VA and hash in r29 and r28 for 1T segment */
- sldi r29,r5,40 /* vsid << 40 */
- clrldi r3,r3,24 /* ea & 0xffffffffff */
+3: /* Calc vpn and put it in r29 */
+ sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
+ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
+ or r29,r28,r29
+
+ /*
+ * calculate hash value for primary slot and
+ * store it in r28 for 1T segment
+ */
rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
clrldi r5,r5,40 /* vsid & 0xffffff */
rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
xor r28,r28,r5
- or r29,r3,r29 /* VA */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
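
For reference, a hypothetical C transcription of the three 1T-segment hash
instructions above, with the masks taken from their inline comments (a sketch
only, not the kernel's hpt_hash()):

	/* Sketch of the 1T primary hash computed above in r28, assuming
	 * r5 = vsid and r3 = ea as in the surrounding asm. */
	static unsigned long hash_1t_sketch(unsigned long vsid,
					    unsigned long ea)
	{
		unsigned long hash;

		hash  = (vsid << 25) & 0x7fffffffffUL; /* rldic  r28,r5,25,25   */
		hash ^= vsid & 0xffffffUL;             /* clrldi r5,r5,40       */
		hash ^= (ea >> 12) & 0xfffffffUL;      /* rldicl r0,r3,64-12,36 */
		return hash;
	}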
@@ -185,7 +190,7 @@ htab_insert_pte:
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
- mr r4,r29 /* Retrieve va */
+ mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -208,7 +213,7 @@ _GLOBAL(htab_call_hpte_insert1)
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
- mr r4,r29 /* Retrieve va */
+ mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -278,7 +283,7 @@ htab_modify_pte:
add r3,r0,r3 /* add slot idx */
/* Call ppc_md.hpte_updatepp */
- mr r5,r29 /* va */
+ mr r5,r29 /* vpn */
li r6,MMU_PAGE_4K /* page size */
ld r7,STK_PARAM(R9)(r1) /* segment size */
ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@@ -339,7 +344,7 @@ _GLOBAL(__hash_page_4K)
/* Save non-volatile registers.
* r31 will hold "old PTE"
* r30 is "new PTE"
- * r29 is "va"
+ * r29 is vpn
* r28 is a hash value
* r27 is hashtab mask (maybe dynamic patched instead ?)
* r26 is the hidx mask
@@ -394,10 +399,14 @@ BEGIN_FTR_SECTION
cmpdi r9,0 /* check segment size */
bne 3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
- /* Calc va and put it in r29 */
- rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */
- rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */
- or r29,r3,r29 /* r29 = va */
+ /* Calc vpn and put it in r29 */
+ sldi r29,r5,SID_SHIFT - VPN_SHIFT
+ /*
+ * clrldi r3,r3,64 - SID_SHIFT --> ea & 0xfffffff
+ * srdi r28,r3,VPN_SHIFT
+ */
+ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
+ or r29,r28,r29
/* Calculate hash value for primary slot and store it in r28 */
rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -405,14 +414,23 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
xor r28,r5,r0
b 4f
-3: /* Calc VA and hash in r29 and r28 for 1T segment */
- sldi r29,r5,40 /* vsid << 40 */
- clrldi r3,r3,24 /* ea & 0xffffffffff */
+3: /* Calc vpn and put it in r29 */
+ sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
+ /*
+ * clrldi r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff
+ * srdi r28,r3,VPN_SHIFT
+ */
+ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
+ or r29,r28,r29
+
+ /*
+ * Calculate hash value for primary slot and
+ * store it in r28 for 1T segment
+ */
rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
clrldi r5,r5,40 /* vsid & 0xffffff */
rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
xor r28,r28,r5
- or r29,r3,r29 /* VA */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@@ -488,7 +506,7 @@ htab_special_pfn:
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
- mr r4,r29 /* Retrieve va */
+ mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -515,7 +533,7 @@ _GLOBAL(htab_call_hpte_insert1)
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
- mr r4,r29 /* Retrieve va */
+ mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -547,7 +565,7 @@ _GLOBAL(htab_call_hpte_remove)
* useless now that the segment has been switched to 4k pages.
*/
htab_inval_old_hpte:
- mr r3,r29 /* virtual addr */
+ mr r3,r29 /* vpn */
mr r4,r31 /* PTE.pte */
li r5,0 /* PTE.hidx */
li r6,MMU_PAGE_64K /* psize */
@@ -620,7 +638,7 @@ htab_modify_pte:
add r3,r0,r3 /* add slot idx */
/* Call ppc_md.hpte_updatepp */
- mr r5,r29 /* va */
+ mr r5,r29 /* vpn */
li r6,MMU_PAGE_4K /* page size */
ld r7,STK_PARAM(R9)(r1) /* segment size */
ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@@ -676,7 +694,7 @@ _GLOBAL(__hash_page_64K)
/* Save non-volatile registers.
* r31 will hold "old PTE"
* r30 is "new PTE"
- * r29 is "va"
+ * r29 is vpn
* r28 is a hash value
* r27 is hashtab mask (maybe dynamic patched instead ?)
*/
@@ -729,10 +747,10 @@ BEGIN_FTR_SECTION
cmpdi r9,0 /* check segment size */
bne 3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
- /* Calc va and put it in r29 */
- rldicr r29,r5,28,63-28
- rldicl r3,r3,0,36
- or r29,r3,r29
+ /* Calc vpn and put it in r29 */
+ sldi r29,r5,SID_SHIFT - VPN_SHIFT
+ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
+ or r29,r28,r29
/* Calculate hash value for primary slot and store it in r28 */
rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -740,14 +758,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
xor r28,r5,r0
b 4f
-3: /* Calc VA and hash in r29 and r28 for 1T segment */
- sldi r29,r5,40 /* vsid << 40 */
- clrldi r3,r3,24 /* ea & 0xffffffffff */
+3: /* Calc vpn and put it in r29 */
+ sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
+ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
+ or r29,r28,r29
+
+ /*
+ * calculate hash value for primary slot and
+ * store it in r28 for 1T segment
+ */
rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
clrldi r5,r5,40 /* vsid & 0xffffff */
rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */
xor r28,r28,r5
- or r29,r3,r29 /* VA */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@@ -806,7 +829,7 @@ ht64_insert_pte:
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
- mr r4,r29 /* Retrieve va */
+ mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_64K
ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -829,7 +852,7 @@ _GLOBAL(ht64_call_hpte_insert1)
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
- mr r4,r29 /* Retrieve va */
+ mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_64K
ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -899,7 +922,7 @@ ht64_modify_pte:
add r3,r0,r3 /* add slot idx */
/* Call ppc_md.hpte_updatepp */
- mr r5,r29 /* va */
+ mr r5,r29 /* vpn */
li r6,MMU_PAGE_64K
ld r7,STK_PARAM(R9)(r1) /* segment size */
ld r8,STK_PARAM(R8)(r1) /* get "local" param */
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 90039bc64119..ffc1e00f7a22 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -14,10 +14,10 @@
#include <linux/spinlock.h>
#include <linux/bitops.h>
+#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>
-#include <asm/abs_addr.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -39,22 +39,35 @@
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
-static inline void __tlbie(unsigned long va, int psize, int ssize)
+static inline void __tlbie(unsigned long vpn, int psize, int ssize)
{
+ unsigned long va;
unsigned int penc;
- /* clear top 16 bits, non SLS segment */
+ /*
+ * We need 14 to 65 bits of va for a tlbie of 4K page
+ * With vpn we ignore the lower VPN_SHIFT bits already.
+ * And top two bits are already ignored because we can
+ * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
+ * of 12.
+ */
+ va = vpn << VPN_SHIFT;
+ /*
+ * clear top 16 bits of 64bit va, non SLS segment
+ * Older versions of the architecture (2.02 and earlier) require the
+ * masking of the top 16 bits.
+ */
va &= ~(0xffffULL << 48);
switch (psize) {
case MMU_PAGE_4K:
- va &= ~0xffful;
va |= ssize << 8;
asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
default:
+ /* We need 14 to 14 + i bits of va */
penc = mmu_psize_defs[psize].penc;
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
va |= penc << 12;
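
To make the 76-bit remark above concrete: with VPN_SHIFT = 12, a 64-bit vpn
names 2^64 pages of 4K, i.e. a 76-bit va, so `vpn << VPN_SHIFT` silently
drops the top 12 bits, which the tlbie instruction ignores in any case. A
hypothetical sketch of the operand construction (not kernel code):

	#define VPN_SHIFT 12

	/* Hypothetical illustration of how __tlbie() above rebuilds its
	 * (truncated) va operand from a vpn. */
	static unsigned long tlbie_va(unsigned long vpn)
	{
		unsigned long va = vpn << VPN_SHIFT; /* bits 64..75 of the
						      * notional 76-bit va
						      * are truncated here */

		va &= ~(0xffffUL << 48);	/* arch 2.02 and earlier
						 * want the top 16 bits
						 * cleared */
		return va;
	}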
@@ -67,21 +80,28 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
}
}
-static inline void __tlbiel(unsigned long va, int psize, int ssize)
+static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
{
+ unsigned long va;
unsigned int penc;
- /* clear top 16 bits, non SLS segment */
+ /* VPN_SHIFT can be at most 12 */
+ va = vpn << VPN_SHIFT;
+ /*
+ * clear top 16 bits of 64 bit va, non SLS segment
+ * Older versions of the architecture (2.02 and earlier) require the
+ * masking of the top 16 bits.
+ */
va &= ~(0xffffULL << 48);
switch (psize) {
case MMU_PAGE_4K:
- va &= ~0xffful;
va |= ssize << 8;
asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
: : "r"(va) : "memory");
break;
default:
+ /* We need 14 to 14 + i bits of va */
penc = mmu_psize_defs[psize].penc;
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
va |= penc << 12;
@@ -94,7 +114,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
}
-static inline void tlbie(unsigned long va, int psize, int ssize, int local)
+static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
{
unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -105,10 +125,10 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync": : :"memory");
if (use_local) {
- __tlbiel(va, psize, ssize);
+ __tlbiel(vpn, psize, ssize);
asm volatile("ptesync": : :"memory");
} else {
- __tlbie(va, psize, ssize);
+ __tlbie(vpn, psize, ssize);
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)
@@ -134,7 +154,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
clear_bit_unlock(HPTE_LOCK_BIT, word);
}
-static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
+static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
unsigned long pa, unsigned long rflags,
unsigned long vflags, int psize, int ssize)
{
@@ -143,9 +163,9 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
int i;
if (!(vflags & HPTE_V_BOLTED)) {
- DBG_LOW(" insert(group=%lx, va=%016lx, pa=%016lx,"
+ DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
" rflags=%lx, vflags=%lx, psize=%d)\n",
- hpte_group, va, pa, rflags, vflags, psize);
+ hpte_group, vpn, pa, rflags, vflags, psize);
}
for (i = 0; i < HPTES_PER_GROUP; i++) {
@@ -163,7 +183,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
if (i == HPTES_PER_GROUP)
return -1;
- hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
+ hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize) | rflags;
if (!(vflags & HPTE_V_BOLTED)) {
@@ -225,17 +245,17 @@ static long native_hpte_remove(unsigned long hpte_group)
}
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
- unsigned long va, int psize, int ssize,
+ unsigned long vpn, int psize, int ssize,
int local)
{
struct hash_pte *hptep = htab_address + slot;
unsigned long hpte_v, want_v;
int ret = 0;
- want_v = hpte_encode_v(va, psize, ssize);
+ want_v = hpte_encode_v(vpn, psize, ssize);
- DBG_LOW(" update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
- va, want_v & HPTE_V_AVPN, slot, newpp);
+ DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
+ vpn, want_v & HPTE_V_AVPN, slot, newpp);
native_lock_hpte(hptep);
@@ -254,12 +274,12 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
native_unlock_hpte(hptep);
/* Ensure it is out of the tlb too. */
- tlbie(va, psize, ssize, local);
+ tlbie(vpn, psize, ssize, local);
return ret;
}
-static long native_hpte_find(unsigned long va, int psize, int ssize)
+static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
struct hash_pte *hptep;
unsigned long hash;
@@ -267,8 +287,8 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
long slot;
unsigned long want_v, hpte_v;
- hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
- want_v = hpte_encode_v(va, psize, ssize);
+ hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
+ want_v = hpte_encode_v(vpn, psize, ssize);
/* Bolted mappings are only ever in the primary group */
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -295,14 +315,15 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
int psize, int ssize)
{
- unsigned long vsid, va;
+ unsigned long vpn;
+ unsigned long vsid;
long slot;
struct hash_pte *hptep;
vsid = get_kernel_vsid(ea, ssize);
- va = hpt_va(ea, vsid, ssize);
+ vpn = hpt_vpn(ea, vsid, ssize);
- slot = native_hpte_find(va, psize, ssize);
+ slot = native_hpte_find(vpn, psize, ssize);
if (slot == -1)
panic("could not find page to bolt\n");
hptep = htab_address + slot;
@@ -312,10 +333,10 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
(newpp & (HPTE_R_PP | HPTE_R_N));
/* Ensure it is out of the tlb too. */
- tlbie(va, psize, ssize, 0);
+ tlbie(vpn, psize, ssize, 0);
}
-static void native_hpte_invalidate(unsigned long slot, unsigned long va,
+static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
int psize, int ssize, int local)
{
struct hash_pte *hptep = htab_address + slot;
@@ -325,9 +346,9 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
local_irq_save(flags);
- DBG_LOW(" invalidate(va=%016lx, hash: %x)\n", va, slot);
+ DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
- want_v = hpte_encode_v(va, psize, ssize);
+ want_v = hpte_encode_v(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = hptep->v;
@@ -339,7 +360,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
hptep->v = 0;
/* Invalidate the TLB */
- tlbie(va, psize, ssize, local);
+ tlbie(vpn, psize, ssize, local);
local_irq_restore(flags);
}
@@ -349,11 +370,12 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
- int *psize, int *ssize, unsigned long *va)
+ int *psize, int *ssize, unsigned long *vpn)
{
+ unsigned long avpn, pteg, vpi;
unsigned long hpte_r = hpte->r;
unsigned long hpte_v = hpte->v;
- unsigned long avpn;
+ unsigned long vsid, seg_off;
int i, size, shift, penc;
if (!(hpte_v & HPTE_V_LARGE))
@@ -380,32 +402,38 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
}
/* This works for all page sizes, and for 256M and 1T segments */
+ *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
shift = mmu_psize_defs[size].shift;
- avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;
-
- if (shift < 23) {
- unsigned long vpi, vsid, pteg;
- pteg = slot / HPTES_PER_GROUP;
- if (hpte_v & HPTE_V_SECONDARY)
- pteg = ~pteg;
- switch (hpte_v >> HPTE_V_SSIZE_SHIFT) {
- case MMU_SEGSIZE_256M:
- vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
- break;
- case MMU_SEGSIZE_1T:
- vsid = avpn >> 40;
+ avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
+ pteg = slot / HPTES_PER_GROUP;
+ if (hpte_v & HPTE_V_SECONDARY)
+ pteg = ~pteg;
+
+ switch (*ssize) {
+ case MMU_SEGSIZE_256M:
+ /* We only have 28 - 23 bits of seg_off in avpn */
+ seg_off = (avpn & 0x1f) << 23;
+ vsid = avpn >> 5;
+ /* We can find more bits from the pteg value */
+ if (shift < 23) {
+ vpi = (vsid ^ pteg) & htab_hash_mask;
+ seg_off |= vpi << shift;
+ }
+ *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
+ break;
+ case MMU_SEGSIZE_1T:
+ /* We only have 40 - 23 bits of seg_off in avpn */
+ seg_off = (avpn & 0x1ffff) << 23;
+ vsid = avpn >> 17;
+ if (shift < 23) {
vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
- break;
- default:
- avpn = vpi = size = 0;
+ seg_off |= vpi << shift;
}
- avpn |= (vpi << mmu_psize_defs[size].shift);
+ *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
+ break;
+ default:
+ *vpn = size = 0;
}
-
- *va = avpn;
*psize = size;
- *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
}
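
The 256M arm above simply inverts the AVPN packing: the low 5 bits of the
avpn hold the top 5 of the 28-bit segment offset (bits 23..27), everything
above is the vsid, and the pteg index supplies the remaining offset bits when
the page shift is under 23. A hypothetical round-trip sketch, assuming
SID_SHIFT = 28 and VPN_SHIFT = 12 as elsewhere in this patch:

	#define SID_SHIFT 28
	#define VPN_SHIFT 12

	/* Hypothetical sketch of the 256M decode arm: recover vsid and
	 * the avpn-resident part of seg_off, then rebuild the vpn. */
	static unsigned long decode_256m(unsigned long avpn)
	{
		unsigned long seg_off = (avpn & 0x1f) << 23; /* 28-23 bits */
		unsigned long vsid = avpn >> 5;

		/* when shift < 23 the missing seg_off bits come from
		 * the pteg, as in the code above */
		return vsid << (SID_SHIFT - VPN_SHIFT) |
			seg_off >> VPN_SHIFT;
	}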
@@ -418,9 +446,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 */
static void native_hpte_clear(void)
{
+ unsigned long vpn = 0;
unsigned long slot, slots, flags;
struct hash_pte *hptep = htab_address;
- unsigned long hpte_v, va;
+ unsigned long hpte_v;
unsigned long pteg_count;
int psize, ssize;
@@ -448,9 +477,9 @@ static void native_hpte_clear(void)
* already hold the native_tlbie_lock.
*/
if (hpte_v & HPTE_V_VALID) {
- hpte_decode(hptep, slot, &psize, &ssize, &va);
+ hpte_decode(hptep, slot, &psize, &ssize, &vpn);
hptep->v = 0;
- __tlbie(va, psize, ssize);
+ __tlbie(vpn, psize, ssize);
}
}
@@ -465,7 +494,8 @@ static void native_hpte_clear(void)
*/
static void native_flush_hash_range(unsigned long number, int local)
{
- unsigned long va, hash, index, hidx, shift, slot;
+ unsigned long vpn;
+ unsigned long hash, index, hidx, shift, slot;
struct hash_pte *hptep;
unsigned long hpte_v;
unsigned long want_v;
@@ -479,18 +509,18 @@ static void native_flush_hash_range(unsigned long number, int local)
local_irq_save(flags);
for (i = 0; i < number; i++) {
- va = batch->vaddr[i];
+ vpn = batch->vpn[i];
pte = batch->pte[i];
- pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
- hash = hpt_hash(va, shift, ssize);
+ pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
+ hash = hpt_hash(vpn, shift, ssize);
hidx = __rpte_to_hidx(pte, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
hptep = htab_address + slot;
- want_v = hpte_encode_v(va, psize, ssize);
+ want_v = hpte_encode_v(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = hptep->v;
if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -505,12 +535,12 @@ static void native_flush_hash_range(unsigned long number, int local)
mmu_psize_defs[psize].tlbiel && local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
- va = batch->vaddr[i];
+ vpn = batch->vpn[i];
pte = batch->pte[i];
- pte_iterate_hashed_subpages(pte, psize, va, index,
- shift) {
- __tlbiel(va, psize, ssize);
+ pte_iterate_hashed_subpages(pte, psize,
+ vpn, index, shift) {
+ __tlbiel(vpn, psize, ssize);
} pte_iterate_hashed_end();
}
asm volatile("ptesync":::"memory");
@@ -522,12 +552,12 @@ static void native_flush_hash_range(unsigned long number, int local)
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
- va = batch->vaddr[i];
+ vpn = batch->vpn[i];
pte = batch->pte[i];
- pte_iterate_hashed_subpages(pte, psize, va, index,
- shift) {
- __tlbie(va, psize, ssize);
+ pte_iterate_hashed_subpages(pte, psize,
+ vpn, index, shift) {
+ __tlbie(vpn, psize, ssize);
} pte_iterate_hashed_end();
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
@@ -539,29 +569,6 @@ static void native_flush_hash_range(unsigned long number, int local)
local_irq_restore(flags);
}
-#ifdef CONFIG_PPC_PSERIES
-/* Disable TLB batching on nighthawk */
-static inline int tlb_batching_enabled(void)
-{
- struct device_node *root = of_find_node_by_path("/");
- int enabled = 1;
-
- if (root) {
- const char *model = of_get_property(root, "model", NULL);
- if (model && !strcmp(model, "IBM,9076-N81"))
- enabled = 0;
- of_node_put(root);
- }
-
- return enabled;
-}
-#else
-static inline int tlb_batching_enabled(void)
-{
- return 1;
-}
-#endif
-
void __init hpte_init_native(void)
{
ppc_md.hpte_invalidate = native_hpte_invalidate;
@@ -570,6 +577,5 @@ void __init hpte_init_native(void)
ppc_md.hpte_insert = native_hpte_insert;
ppc_md.hpte_remove = native_hpte_remove;
ppc_md.hpte_clear_all = native_hpte_clear;
- if (tlb_batching_enabled())
- ppc_md.flush_hash_range = native_flush_hash_range;
+ ppc_md.flush_hash_range = native_flush_hash_range;
}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 377e5cbedbbb..3a292be2e079 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -43,7 +43,6 @@
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
-#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
@@ -192,18 +191,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
vaddr += step, paddr += step) {
unsigned long hash, hpteg;
unsigned long vsid = get_kernel_vsid(vaddr, ssize);
- unsigned long va = hpt_va(vaddr, vsid, ssize);
+ unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
unsigned long tprot = prot;
/* Make kernel text executable */
if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N;
- hash = hpt_hash(va, shift, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
BUG_ON(!ppc_md.hpte_insert);
- ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
+ ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
HPTE_V_BOLTED, psize, ssize);
if (ret < 0)
@@ -651,7 +650,7 @@ static void __init htab_initialize(void)
DBG("Hash table allocated at %lx, size: %lx\n", table,
htab_size_bytes);
- htab_address = abs_to_virt(table);
+ htab_address = __va(table);
/* htab absolute addr + encoded htabsize */
_SDR1 = table + __ilog2(pteg_count) - 11;
@@ -804,16 +803,19 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
#ifdef CONFIG_PPC_MM_SLICES
unsigned int get_paca_psize(unsigned long addr)
{
- unsigned long index, slices;
+ u64 lpsizes;
+ unsigned char *hpsizes;
+ unsigned long index, mask_index;
if (addr < SLICE_LOW_TOP) {
- slices = get_paca()->context.low_slices_psize;
+ lpsizes = get_paca()->context.low_slices_psize;
index = GET_LOW_SLICE_INDEX(addr);
- } else {
- slices = get_paca()->context.high_slices_psize;
- index = GET_HIGH_SLICE_INDEX(addr);
+ return (lpsizes >> (index * 4)) & 0xF;
}
- return (slices >> (index * 4)) & 0xF;
+ hpsizes = get_paca()->context.high_slices_psize;
+ index = GET_HIGH_SLICE_INDEX(addr);
+ mask_index = index & 0x1;
+ return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
}
#else
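
The reworked lookup reflects the new slice encoding for 64TB address spaces:
low slices keep a 4-bit page-size index per 256MB slice in a single u64,
while high slices move to a byte array packing two 4-bit entries per byte. A
hypothetical helper showing the nibble extraction (a sketch of the expression
above, not a kernel API):

	/* Hypothetical sketch: fetch the 4-bit page-size index of high
	 * slice 'index' from an array packing two entries per byte. */
	static unsigned int high_slice_psize(const unsigned char *hpsizes,
					     unsigned long index)
	{
		unsigned long mask_index = index & 0x1; /* low or high nibble */

		return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
	}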
@@ -1153,21 +1155,21 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
/* WARNING: This is called from hash_low_64.S, if you change this prototype,
* do not forget to update the assembly call site !
*/
-void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
+void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
int local)
{
unsigned long hash, index, shift, hidx, slot;
- DBG_LOW("flush_hash_page(va=%016lx)\n", va);
- pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
- hash = hpt_hash(va, shift, ssize);
+ DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
+ pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
+ hash = hpt_hash(vpn, shift, ssize);
hidx = __rpte_to_hidx(pte, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
- ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
+ ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
} pte_iterate_hashed_end();
}
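
The slot arithmetic in this loop is the same pattern used throughout the
patch: the per-subpage hidx picks the primary or secondary bucket and the
slot within the 8-entry group. A hypothetical sketch, with the constants
inlined from their kernel definitions as I understand them (treat the literal
values as assumptions):

	/* Hypothetical sketch of the slot lookup in the loop above. */
	static unsigned long find_slot(unsigned long hash,
				       unsigned long hidx,
				       unsigned long htab_hash_mask)
	{
		unsigned long slot;

		if (hidx & 0x8)		/* assumed _PTEIDX_SECONDARY */
			hash = ~hash;
		slot = (hash & htab_hash_mask) * 8; /* HPTES_PER_GROUP */
		slot += hidx & 0x7;	/* assumed _PTEIDX_GROUP_IX */
		return slot;
	}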
@@ -1181,7 +1183,7 @@ void flush_hash_range(unsigned long number, int local)
&__get_cpu_var(ppc64_tlb_batch);
for (i = 0; i < number; i++)
- flush_hash_page(batch->vaddr[i], batch->pte[i],
+ flush_hash_page(batch->vpn[i], batch->pte[i],
batch->psize, batch->ssize, local);
}
}
@@ -1208,14 +1210,14 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
unsigned long hash, hpteg;
unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
- unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
+ unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
int ret;
- hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
+ hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
- ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
+ ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
mode, HPTE_V_BOLTED,
mmu_linear_psize, mmu_kernel_ssize);
BUG_ON (ret < 0);
@@ -1229,9 +1231,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
unsigned long hash, hidx, slot;
unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
- unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
+ unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
- hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
+ hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
spin_lock(&linear_map_hash_lock);
BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
hidx = linear_map_hash_slots[lmi] & 0x7f;
@@ -1241,7 +1243,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
- ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
+ ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0);
}
void kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index cc5c273086cf..cecad348f604 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -18,14 +18,15 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, int local, int ssize,
unsigned int shift, unsigned int mmu_psize)
{
+ unsigned long vpn;
unsigned long old_pte, new_pte;
- unsigned long va, rflags, pa, sz;
+ unsigned long rflags, pa, sz;
long slot;
BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
/* Search the Linux page table for a match with va */
- va = hpt_va(ea, vsid, ssize);
+ vpn = hpt_vpn(ea, vsid, ssize);
/* At this point, we have a pte (old_pte) which can be used to build
* or update an HPTE. There are 2 cases:
@@ -69,19 +70,19 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
/* There MIGHT be an HPTE for this pte */
unsigned long hash, slot;
- hash = hpt_hash(va, shift, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
if (old_pte & _PAGE_F_SECOND)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & _PAGE_F_GIX) >> 12;
- if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
+ if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
ssize, local) == -1)
old_pte &= ~_PAGE_HPTEFLAGS;
}
if (likely(!(old_pte & _PAGE_HASHPTE))) {
- unsigned long hash = hpt_hash(va, shift, ssize);
+