author: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2017-03-22 09:06:58 +0530
committer: Michael Ellerman <mpe@ellerman.id.au>  2017-04-01 21:12:20 +1100
commit: 957b778a166e32e242a33fdab693ffb256a19cbd
tree: e4ffce7347a62e55ac2cd99b3efdb2e62c7cf070 /arch/powerpc/mm
parent: f6eedbba7a26fdaee9ea8121336dc86236c136c7
powerpc/mm: Add addr_limit to mm_context and use it to derive max slice index
In the followup patch, we will increase the slice array size to handle the 512TB range, but will limit the max addr to 128TB. Avoid doing unnecessary computation and avoid doing slice-mask related operations above the address limit.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
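As a rough sketch of how the new loop bound is derived (an illustration only, not part of this patch: the 1TB high-slice size, a SLICE_HIGH_SHIFT of 40, and the standalone program below are assumptions), GET_HIGH_SLICE_INDEX() shifts the address limit down by the high-slice shift, so a 128TB limit keeps the slice-mask loops to 128 high slices instead of the full array sized for 512TB:

/*
 * Illustration only -- SLICE_HIGH_SHIFT of 40 (1TB high slices) is an
 * assumption, and this is a userspace program, not kernel code.
 */
#include <stdio.h>

#define SLICE_HIGH_SHIFT            40
#define GET_HIGH_SLICE_INDEX(addr)  ((addr) >> SLICE_HIGH_SHIFT)

int main(void)
{
        unsigned long limit_128tb = 1UL << 47;  /* 128TB address limit */
        unsigned long limit_512tb = 1UL << 49;  /* 512TB address limit */

        /* Loop bound used in place of SLICE_NUM_HIGH */
        printf("128TB -> %lu high slices\n", GET_HIGH_SLICE_INDEX(limit_128tb)); /* 128 */
        printf("512TB -> %lu high slices\n", GET_HIGH_SLICE_INDEX(limit_512tb)); /* 512 */
        return 0;
}

With that bound, slice_mask_for_free(), slice_mask_for_size() and slice_convert() never touch slice-mask bits for addresses above mm->context.addr_limit.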
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/mmu_context_book3s64.c   7
-rw-r--r--  arch/powerpc/mm/slice.c                 20
2 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index e5fde156e11d..fd0bc6db2dcd 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -79,6 +79,13 @@ static int hash__init_new_context(struct mm_struct *mm)
return index;
/*
+ * We do switch_slb() early in fork, even before we setup the
+ * mm->context.addr_limit. Default to max task size so that we copy the
+ * default values to paca which will help us to handle slb miss early.
+ */
+ mm->context.addr_limit = TASK_SIZE_USER64;
+
+ /*
* The old code would re-promote on fork, we don't do that when using
* slices as it could cause problem promoting slices that have been
* forced down to 4K.
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 95e5a20b1b6a..ded96edac817 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -136,7 +136,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
if (mm->task_size <= SLICE_LOW_TOP)
return;
- for (i = 0; i < SLICE_NUM_HIGH; i++)
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
if (!slice_high_has_vma(mm, i))
__set_bit(i, ret->high_slices);
}
@@ -157,7 +157,7 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
ret->low_slices |= 1u << i;
hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < SLICE_NUM_HIGH; i++) {
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
mask_index = i & 0x1;
index = i >> 1;
if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
@@ -165,15 +165,17 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
}
}
-static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
+static int slice_check_fit(struct mm_struct *mm,
+ struct slice_mask mask, struct slice_mask available)
{
DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+ unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit);
bitmap_and(result, mask.high_slices,
- available.high_slices, SLICE_NUM_HIGH);
+ available.high_slices, slice_count);
return (mask.low_slices & available.low_slices) == mask.low_slices &&
- bitmap_equal(result, mask.high_slices, SLICE_NUM_HIGH);
+ bitmap_equal(result, mask.high_slices, slice_count);
}
static void slice_flush_segments(void *parm)
@@ -217,7 +219,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
mm->context.low_slices_psize = lpsizes;
hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < SLICE_NUM_HIGH; i++) {
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
mask_index = i & 0x1;
index = i >> 1;
if (test_bit(i, mask.high_slices))
@@ -484,7 +486,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* Check if we fit in the good mask. If we do, we just return,
* nothing else to do
*/
- if (slice_check_fit(mask, good_mask)) {
+ if (slice_check_fit(mm, mask, good_mask)) {
slice_dbg(" fits good !\n");
return addr;
}
@@ -509,7 +511,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
slice_or_mask(&potential_mask, &good_mask);
slice_print_mask(" potential", potential_mask);
- if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
+ if ((addr != 0 || fixed) && slice_check_fit(mm, mask, potential_mask)) {
slice_dbg(" fits potential !\n");
goto convert;
}
@@ -734,6 +736,6 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
slice_print_mask(" mask", mask);
slice_print_mask(" available", available);
#endif
- return !slice_check_fit(mask, available);
+ return !slice_check_fit(mm, mask, available);
}
#endif
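For readers skimming the slice_check_fit() change above, the shape of the bounded comparison can be sketched in isolation. This is a hypothetical userspace approximation using single-word masks and a made-up check_fit() helper, not the kernel's bitmap_and()/bitmap_equal() implementation:

/*
 * Hypothetical single-word approximation of the bounded fit check; the
 * kernel version operates on DECLARE_BITMAP() arrays via bitmap_and()
 * and bitmap_equal().
 */
#include <stdbool.h>
#include <stdio.h>

static bool check_fit(unsigned long mask, unsigned long available,
                      unsigned long slice_count)
{
        /* Only bits below the address limit participate in the check. */
        unsigned long relevant = (slice_count >= 64) ? ~0UL
                                                     : (1UL << slice_count) - 1;

        return ((mask & available) & relevant) == (mask & relevant);
}

int main(void)
{
        /* Requested slices 0-3 are available; bits above the limit are ignored. */
        printf("%d\n", check_fit(0x0fUL, 0xf00000000000000fUL, 8)); /* 1 */
        /* Requested slice 4 is not available. */
        printf("%d\n", check_fit(0x1fUL, 0x0fUL, 8));               /* 0 */
        return 0;
}

The reason slice_check_fit() now takes mm is simply to reach mm->context.addr_limit, so the comparison stops at the address limit rather than at SLICE_NUM_HIGH.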