Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/book3s32/tlb.c          |  2 +-
-rw-r--r--  arch/powerpc/mm/book3s64/hash_pgtable.c |  4 ++--
-rw-r--r--  arch/powerpc/mm/book3s64/subpage_prot.c |  2 +-
-rw-r--r--  arch/powerpc/mm/fault.c                 | 10 +++++-----
-rw-r--r--  arch/powerpc/mm/pgtable.c               |  2 +-
5 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
index 157f98f6aea9..b6c7427daa6f 100644
--- a/arch/powerpc/mm/book3s32/tlb.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -129,7 +129,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 
 	/*
 	 * It is safe to go down the mm's list of vmas when called
-	 * from dup_mmap, holding mmap_sem. It would also be safe from
+	 * from dup_mmap, holding mmap_lock. It would also be safe from
 	 * unmap_region or exit_mmap, but not from vmtruncate on SMP -
 	 * but it seems dup_mmap is the only SMP case which gets here.
 	 */
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 00af58cc8714..2a99167afbaf 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -237,7 +237,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
 	 * to hugepage, we first clear the pmd, then invalidate all
 	 * the PTE entries. The assumption here is that any low level
 	 * page fault will see a none pmd and take the slow path that
-	 * will wait on mmap_sem. But we could very well be in a
+	 * will wait on mmap_lock. But we could very well be in a
 	 * hash_page with local ptep pointer value. Such a hash page
 	 * can result in adding new HPTE entries for normal subpages.
 	 * That means we could be modifying the page content as we
@@ -251,7 +251,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
 	 * Now invalidate the hpte entries in the range
 	 * covered by pmd. This make sure we take a
 	 * fault and will find the pmd as none, which will
-	 * result in a major fault which takes mmap_sem and
+	 * result in a major fault which takes mmap_lock and
 	 * hence wait for collapse to complete. Without this
 	 * the __collapse_huge_page_copy can result in copying
 	 * the old content.
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index e814d34bf7f4..60c6ea16a972 100644
--- a/arch/powerpc/mm/book3s64/subpage_prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -225,7 +225,7 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
 	if (!spt) {
 		/*
 		 * Allocate subpage prot table if not already done.
-		 * Do this with mmap_sem held
+		 * Do this with mmap_lock held
 		 */
 		spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
 		if (!spt) {
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ff3653e67c7b..641fc5f3d7dd 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -138,7 +138,7 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
 	 * 2. T1   : set AMR to deny access to pkey=4, touches, page
 	 * 3. T1   : faults...
 	 * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
-	 * 5. T1   : enters fault handler, takes mmap_sem, etc...
+	 * 5. T1   : enters fault handler, takes mmap_lock, etc...
 	 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 	 *	     faulted on a pte with its pkey=4.
 	 */
@@ -525,9 +525,9 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 	/*
-	 * We want to do this outside mmap_sem, because reading code around nip
+	 * We want to do this outside mmap_lock, because reading code around nip
 	 * can result in fault, which will cause a deadlock when called with
-	 * mmap_sem held
+	 * mmap_lock held
 	 */
 	if (is_user)
 		flags |= FAULT_FLAG_USER;
@@ -539,7 +539,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
 	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
-	 * erroneous fault occurring in a code path which already holds mmap_sem
+	 * erroneous fault occurring in a code path which already holds mmap_lock
 	 * we will deadlock attempting to validate the fault against the
 	 * address space.  Luckily the kernel only validly references user
 	 * space from well defined areas of code, which are listed in the
@@ -615,7 +615,7 @@ good_area:
 		return user_mode(regs) ? 0 : SIGBUS;
 
 	/*
-	 * Handle the retry right now, the mmap_sem has been released in that
+	 * Handle the retry right now, the mmap_lock has been released in that
 	 * case.
 	 */
 	if (unlikely(fault & VM_FAULT_RETRY)) {
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index cea5b4e25a24..45a0556089e8 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -306,7 +306,7 @@ void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 	pmd = pmd_offset(pud, addr);
 	/*
 	 * khugepaged to collapse normal pages to hugepage, first set
-	 * pmd to none to force page fault/gup to take mmap_sem. After
+	 * pmd is set to none, we do a pte_clear which does this assertion
+	 * pmd to none to force page fault/gup to take mmap_lock. After
 	 * so if we find pmd none, return.
 	 */
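
Context for the rename above: this patch only touches comments. The underlying rwsem was converted to the mmap locking API, and the mm_struct field itself renamed from mmap_sem to mmap_lock, by the rest of the same series. As a minimal sketch of what a caller looks like under the new API (addr_is_mapped() is a hypothetical helper, not part of this patch), the mmap_read_lock()/mmap_read_unlock() wrappers from <linux/mmap_lock.h> stand in for the old down_read(&mm->mmap_sem)/up_read(&mm->mmap_sem) pairs:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Hypothetical helper, for illustration only: tests whether addr falls
 * inside some VMA. The VMA lookup must run under the mmap lock. */
static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	vma = find_vma(mm, addr);	/* first VMA with vm_end > addr */
	mapped = vma && vma->vm_start <= addr;
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */

	return mapped;
}

Writers use mmap_write_lock()/mmap_write_unlock() the same way, which is why comments can now say "mmap_lock" without naming the lock's implementation.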