 arch/s390/mm/vmem.c | 45 ++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 42 insertions(+), 3 deletions(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index df361bbacda1..70ebfc7958a6 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -74,7 +74,22 @@ static void vmem_pte_free(unsigned long *table)
#define PAGE_UNUSED 0xFD
-static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
+/*
+ * The unused vmemmap range, which was not yet memset(PAGE_UNUSED),
+ * ranges from unused_pmd_start to the next PMD_SIZE boundary.
+ */
+static unsigned long unused_pmd_start;
+
+static void vmemmap_flush_unused_pmd(void)
+{
+	if (!unused_pmd_start)
+		return;
+	memset(__va(unused_pmd_start), PAGE_UNUSED,
+	       ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
+	unused_pmd_start = 0;
+}
+
+static void __vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
@@ -85,18 +100,41 @@ static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
	memset(__va(start), 0, sizeof(struct page));
}
+static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
+{
+	/*
+	 * We only optimize if the new used range directly follows the
+	 * previously unused range (especially when populating consecutive
+	 * sections).
+	 */
+	if (unused_pmd_start == start) {
+		unused_pmd_start = end;
+		if (likely(IS_ALIGNED(unused_pmd_start, PMD_SIZE)))
+			unused_pmd_start = 0;
+		return;
+	}
+	vmemmap_flush_unused_pmd();
+	__vmemmap_use_sub_pmd(start, end);
+}
+
static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
+	vmemmap_flush_unused_pmd();
+
	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
-	vmemmap_use_sub_pmd(start, end);
+	__vmemmap_use_sub_pmd(start, end);
	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset(page, PAGE_UNUSED, start - __pa(page));
+	/*
+	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap
+	 * of consecutive sections. Instead, remember where the unused range
+	 * of the last populated PMD starts, so the memset can be deferred
+	 * or skipped entirely.
+	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
-		memset(__va(end), PAGE_UNUSED, __pa(page) + PMD_SIZE - end);
+		unused_pmd_start = end;
}
/* Returns true if the PMD is completely unused and can be freed. */
@@ -104,6 +142,7 @@ static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
+	vmemmap_flush_unused_pmd();
	memset(__va(start), PAGE_UNUSED, end - start);
	return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE);
}
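
For illustration, here is a minimal userspace sketch of the technique this patch implements: instead of memset()ing the unused tail of a partially used vmemmap PMD right away, the start of that tail is remembered in unused_pmd_start, and when the next section's range continues exactly there, the memset is skipped altogether. This is not the kernel code; PMD_SIZE is shrunk to 4 KiB, a static array stands in for the vmemmap, byte offsets stand in for physical addresses, and the helpers (use_new_sub_pmd, use_sub_pmd, flush_unused_pmd, memsets) are simplified stand-ins for the vmemmap_* functions above.

/*
 * Userspace sketch of the deferred-memset idea. Assumptions: PMD_SIZE
 * shrunk to 4 KiB, a static array instead of the real vmemmap, byte
 * offsets instead of physical addresses.
 */
#include <stdio.h>
#include <string.h>

#define PMD_SIZE	4096UL	/* stand-in for the real PMD size */
#define PAGE_UNUSED	0xFD
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

static unsigned char memmap[4 * PMD_SIZE];	/* fake vmemmap backing */
static unsigned long unused_pmd_start;		/* 0 == nothing deferred */
static unsigned long memsets;			/* PAGE_UNUSED memsets done */

/* Deferred memset(PAGE_UNUSED) of the remembered tail, as in the patch. */
static void flush_unused_pmd(void)
{
	if (!unused_pmd_start)
		return;
	memset(memmap + unused_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
	memsets++;
	unused_pmd_start = 0;
}

/* Mark [start, end) used in an already-populated PMD. */
static void use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * Fast path: the new range directly follows the deferred one, so
	 * the deferred memset never has to happen for it at all.
	 */
	if (unused_pmd_start == start) {
		unused_pmd_start = IS_ALIGNED(end, PMD_SIZE) ? 0 : end;
		return;
	}
	flush_unused_pmd();
	memset(memmap + start, 0, end - start);	/* clear PAGE_UNUSED marks */
}

/* Mark [start, end) used in a freshly populated PMD. */
static void use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long base = start & ~(PMD_SIZE - 1);

	flush_unused_pmd();
	if (!IS_ALIGNED(start, PMD_SIZE)) {
		memset(memmap + base, PAGE_UNUSED, start - base);
		memsets++;
	}
	/* Defer marking the tail: a consecutive section may claim it. */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_pmd_start = end;
}

int main(void)
{
	unsigned long sec = PMD_SIZE / 2, start;

	/* Populate four consecutive half-PMD "sections". */
	for (start = 0; start < 4 * sec; start += sec) {
		if (IS_ALIGNED(start, PMD_SIZE))
			use_new_sub_pmd(start, start + sec);
		else
			use_sub_pmd(start, start + sec);
	}
	printf("PAGE_UNUSED memsets: %lu\n", memsets);
	return 0;
}

For four consecutive half-PMD sections the sketch prints 0, i.e. the deferred memset never runs; without the optimization (the removed memset(__va(end), PAGE_UNUSED, ...) line above), the unused tail of each partially filled PMD would have been written once per section add.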