From 2bf1071a8d50928a4ae366bb3108833166c2b70c Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Thu, 5 Jul 2018 18:47:00 +1000
Subject: powerpc/64s: Remove POWER9 DD1 support

POWER9 DD1 was never a product. It is no longer supported by upstream
firmware, and it is not effectively supported in Linux due to lack of
testing.

Signed-off-by: Nicholas Piggin
Reviewed-by: Michael Ellerman
[mpe: Remove arch_make_huge_pte() entirely]
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/hash_utils_64.c        | 30 -----------------
 arch/powerpc/mm/hugetlbpage.c          |  9 ++---
 arch/powerpc/mm/mmu_context_book3s64.c | 12 ++-----
 arch/powerpc/mm/pgtable-radix.c        | 60 ++--------------------------------
 arch/powerpc/mm/tlb-radix.c            | 18 ----------
 5 files changed, 7 insertions(+), 122 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 8318716e5075..5a72e980e25a 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -808,31 +808,6 @@ int hash__remove_section_mapping(unsigned long start, unsigned long end)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-static void update_hid_for_hash(void)
-{
-	unsigned long hid0;
-	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */
-
-	asm volatile("ptesync": : :"memory");
-	/* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(0), "i"(0), "i"(2), "r"(0) : "memory");
-	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
-	trace_tlbie(0, 0, rb, 0, 2, 0, 0);
-
-	/*
-	 * now switch the HID
-	 */
-	hid0 = mfspr(SPRN_HID0);
-	hid0 &= ~HID0_POWER9_RADIX;
-	mtspr(SPRN_HID0, hid0);
-	asm volatile("isync": : :"memory");
-
-	/* Wait for it to happen */
-	while ((mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
-		cpu_relax();
-}
-
 static void __init hash_init_partition_table(phys_addr_t hash_table,
 					     unsigned long htab_size)
 {
@@ -845,8 +820,6 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
 	htab_size = __ilog2(htab_size) - 18;
 	mmu_partition_table_set_entry(0, hash_table | htab_size, 0);
 	pr_info("Partition table %p\n", partition_tb);
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		update_hid_for_hash();
 }
 
 static void __init htab_initialize(void)
@@ -1077,9 +1050,6 @@ void hash__early_init_mmu_secondary(void)
 	/* Initialize hash table for that CPU */
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
 
-		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-			update_hid_for_hash();
-
 		if (!cpu_has_feature(CPU_FTR_ARCH_300))
 			mtspr(SPRN_SDR1, _SDR1);
 		else
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7c5f479c5c00..ec7538a802f9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -620,15 +620,12 @@ static int __init add_huge_page_size(unsigned long long size)
 	 * firmware we only add hugetlb support for page sizes that can be
 	 * supported by linux page table layout.
 	 * For now we have
-	 * Radix: 2M
+	 * Radix: 2M and 1G
 	 * Hash: 16M and 16G
 	 */
 	if (radix_enabled()) {
-		if (mmu_psize != MMU_PAGE_2M) {
-			if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
-			    (mmu_psize != MMU_PAGE_1G))
-				return -EINVAL;
-		}
+		if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
+			return -EINVAL;
 	} else {
 		if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
 			return -EINVAL;
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index f3d4b4a0e561..39e9ef0eb78b 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -273,15 +273,7 @@ void arch_exit_mmap(struct mm_struct *mm)
 #ifdef CONFIG_PPC_RADIX_MMU
 void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 {
-
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-		isync();
-		mtspr(SPRN_PID, next->context.id);
-		isync();
-		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
-	} else {
-		mtspr(SPRN_PID, next->context.id);
-		isync();
-	}
+	mtspr(SPRN_PID, next->context.id);
+	isync();
 }
 #endif
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 96f68c5aa1f5..bba168d02235 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -226,16 +226,6 @@ void radix__mark_rodata_ro(void)
 {
 	unsigned long start, end;
 
-	/*
-	 * mark_rodata_ro() will mark itself as !writable at some point.
-	 * Due to DD1 workaround in radix__pte_update(), we'll end up with
-	 * an invalid pte and the system will crash quite severly.
-	 */
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-		pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
-		return;
-	}
-
 	start = (unsigned long)_stext;
 	end = (unsigned long)__init_begin;
 
@@ -533,35 +523,6 @@ found:
 	return;
 }
 
-static void update_hid_for_radix(void)
-{
-	unsigned long hid0;
-	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */
-
-	asm volatile("ptesync": : :"memory");
-	/* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
-	/* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
-	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
-	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
-	trace_tlbie(0, 0, rb, 0, 2, 1, 1);
-
-	/*
-	 * now switch the HID
-	 */
-	hid0 = mfspr(SPRN_HID0);
-	hid0 |= HID0_POWER9_RADIX;
-	mtspr(SPRN_HID0, hid0);
-	asm volatile("isync": : :"memory");
-
-	/* Wait for it to happen */
-	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
-		cpu_relax();
-}
-
 static void radix_init_amor(void)
 {
 	/*
@@ -576,22 +537,12 @@ static void radix_init_amor(void)
 
 static void radix_init_iamr(void)
 {
-	unsigned long iamr;
-
-	/*
-	 * The IAMR should set to 0 on DD1.
-	 */
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		iamr = 0;
-	else
-		iamr = (1ul << 62);
-
 	/*
 	 * Radix always uses key0 of the IAMR to determine if an access is
 	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
 	 * fetch.
 	 */
-	mtspr(SPRN_IAMR, iamr);
+	mtspr(SPRN_IAMR, (1ul << 62));
 }
 
 void __init radix__early_init_mmu(void)
@@ -644,8 +595,6 @@ void __init radix__early_init_mmu(void)
 
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
 		radix_init_native();
-		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-			update_hid_for_radix();
 		lpcr = mfspr(SPRN_LPCR);
 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
 		radix_init_partition_table();
@@ -671,10 +620,6 @@ void radix__early_init_mmu_secondary(void)
 	 * update partition table control register and UPRT
 	 */
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-
-		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-			update_hid_for_radix();
-
 		lpcr = mfspr(SPRN_LPCR);
 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
 
@@ -1095,8 +1040,7 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
 	 * To avoid NMMU hang while relaxing access, we need mark
 	 * the pte invalid in between.
 	 */
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
-	    atomic_read(&mm->context.copros) > 0) {
+	if (atomic_read(&mm->context.copros) > 0) {
 		unsigned long old_pte, new_pte;
 
 		old_pte = __radix_pte_update(ptep, ~0, 0);
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 67a6e86d3e7e..902767b8a9c1 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -994,24 +994,6 @@ void radix__flush_tlb_all(void)
 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
-void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
-				 unsigned long address)
-{
-	/*
-	 * We track page size in pte only for DD1, So we can
-	 * call this only on DD1.
-	 */
-	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-		VM_WARN_ON(1);
-		return;
-	}
-
-	if (old_pte & R_PAGE_LARGE)
-		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
-	else
-		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
-}
-
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
 {
-- 
cgit v1.2.3