-rw-r--r--  include/linux/page-isolation.h |  4
-rw-r--r--  mm/debug.c                     | 10
-rw-r--r--  mm/page_alloc.c                | 23
-rw-r--r--  mm/page_isolation.c            | 11
4 files changed, 31 insertions(+), 17 deletions(-)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 148e65a9c606..572458016331 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,8 +33,8 @@ static inline bool is_migrate_isolate(int migratetype)
#define MEMORY_OFFLINE 0x1
#define REPORT_FAILURE 0x2
-bool has_unmovable_pages(struct zone *zone, struct page *page, int migratetype,
- int flags);
+struct page *has_unmovable_pages(struct zone *zone, struct page *page,
+ int migratetype, int flags);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
int migratetype, int *num_movable);
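
The prototype change above turns a yes/no answer into "first offender or NULL". A minimal user-space C sketch of that calling convention; find_unmovable() and this struct page are illustrative stand-ins, not kernel API:

    #include <stddef.h>
    #include <stdio.h>

    struct page { unsigned long flags; };

    #define PAGE_UNMOVABLE 0x1UL

    /* Return the first offending page, or NULL if the range is clean. */
    static struct page *find_unmovable(struct page *pages, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            if (pages[i].flags & PAGE_UNMOVABLE)
                return &pages[i];
        return NULL;
    }

    int main(void)
    {
        struct page block[4] = { {0}, {0}, {PAGE_UNMOVABLE}, {0} };
        struct page *bad = find_unmovable(block, 4);

        /* The pointer doubles as the boolean result. */
        if (bad)
            printf("unmovable page at index %td\n", bad - block);
        return 0;
    }

Returning the offender directly is what lets the caller defer the diagnostic instead of having the callee print it.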
diff --git a/mm/debug.c b/mm/debug.c
index 4a9ded0d3504..ecccd9f17801 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -46,6 +46,13 @@ void __dump_page(struct page *page, const char *reason)
{
struct address_space *mapping;
bool page_poisoned = PagePoisoned(page);
+ /*
+ * Accessing the pageblock without the zone lock. It could change to
+ * "isolate" again in the meantime, but since we are just dumping the
+ * state for debugging, it should be fine to accept a bit of
+ * inaccuracy here due to racing.
+ */
+ bool page_cma = is_migrate_cma_page(page);
int mapcount;
char *type = "";
@@ -92,7 +99,8 @@ void __dump_page(struct page *page, const char *reason)
}
BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
- pr_warn("%sflags: %#lx(%pGp)\n", type, page->flags, &page->flags);
+ pr_warn("%sflags: %#lx(%pGp)%s\n", type, page->flags, &page->flags,
+ page_cma ? " CMA" : "");
hex_only:
print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
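
The new comment documents a deliberate lockless read. Below is a hedged user-space analogue of "racy but fine for debugging"; the variable and values are invented, and a C11 relaxed atomic is used so the unlocked read is well-defined outside the kernel:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int migratetype;  /* normally updated under a lock */

    static void dump_state(unsigned long flags)
    {
        /*
         * Lockless read: the value may be stale by the time it is
         * printed, which is acceptable for a debug dump.
         */
        int mt = atomic_load_explicit(&migratetype, memory_order_relaxed);

        /* same "%s" trick as __dump_page(): append " CMA" only when set */
        printf("flags: %#lx%s\n", flags, mt == 1 ? " CMA" : "");
    }

    int main(void)
    {
        atomic_store_explicit(&migratetype, 1, memory_order_relaxed);
        dump_state(0x200000UL);  /* prints "flags: 0x200000 CMA" */
        return 0;
    }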
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3aec60ddd6d6..2a1a816c7992 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8185,13 +8185,17 @@ void *__init alloc_large_system_hash(const char *tablename,
* MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
* check without lock_page also may miss some movable non-lru pages at
* race condition. So you can't expect this function should be exact.
+ *
+ * Returns a page without holding a reference. If the caller wants to
+ * dereference that page (e.g., dumping), it has to make sure that it
+ * cannot get removed (e.g., via memory unplug) concurrently.
+ *
*/
-bool has_unmovable_pages(struct zone *zone, struct page *page, int migratetype,
- int flags)
+struct page *has_unmovable_pages(struct zone *zone, struct page *page,
+ int migratetype, int flags)
{
unsigned long iter = 0;
unsigned long pfn = page_to_pfn(page);
- const char *reason = "unmovable page";
/*
* TODO we could make this much more efficient by not checking every
@@ -8208,9 +8212,8 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int migratetype,
* so consider them movable here.
*/
if (is_migrate_cma(migratetype))
- return false;
+ return NULL;
- reason = "CMA page";
goto unmovable;
}
@@ -8285,12 +8288,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int migratetype,
*/
goto unmovable;
}
- return false;
+ return NULL;
unmovable:
WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
- if (flags & REPORT_FAILURE)
- dump_page(pfn_to_page(pfn + iter), reason);
- return true;
+ return pfn_to_page(pfn + iter);
}
#ifdef CONFIG_CONTIG_ALLOC
@@ -8694,10 +8695,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
BUG_ON(!PageBuddy(page));
order = page_order(page);
offlined_pages += 1 << order;
-#ifdef CONFIG_DEBUG_VM
- pr_info("remove from free list %lx %d %lx\n",
- pfn, 1 << order, end_pfn);
-#endif
del_page_from_free_area(page, &zone->free_area[order]);
pfn += (1 << order);
}
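
The kernel-doc added above is the key contract: the page is returned without a reference, so it may only be dereferenced while something else keeps it alive. A user-space sketch of the same weak contract, assuming an invented lookup()/table_lock pair:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static char table[4][16] = { "alpha", "beta", "gamma", "delta" };

    /*
     * Returns a pointer into the table without taking a reference; the
     * caller must keep table_lock held (or otherwise guarantee the entry
     * cannot vanish) for as long as it dereferences the result.
     */
    static char *lookup(const char *name)
    {
        for (int i = 0; i < 4; i++)
            if (strcmp(table[i], name) == 0)
                return table[i];
        return NULL;
    }

    int main(void)
    {
        pthread_mutex_lock(&table_lock);
        char *entry = lookup("beta");
        if (entry)
            printf("found: %s\n", entry);  /* safe: lock still held */
        pthread_mutex_unlock(&table_lock);
        return 0;
    }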
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 1f8b9dfecbe8..e70586523ca3 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -17,6 +17,7 @@
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
+ struct page *unmovable = NULL;
struct zone *zone;
unsigned long flags;
int ret = -EBUSY;
@@ -37,7 +38,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
* FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
* We just check MOVABLE pages.
*/
- if (!has_unmovable_pages(zone, page, migratetype, isol_flags)) {
+ unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
+ if (!unmovable) {
unsigned long nr_pages;
int mt = get_pageblock_migratetype(page);
@@ -54,6 +56,13 @@ out:
spin_unlock_irqrestore(&zone->lock, flags);
if (!ret)
drain_all_pages(zone);
+ else if ((isol_flags & REPORT_FAILURE) && unmovable)
+ /*
+ * Calling printk() with zone->lock held is guaranteed to trigger
+ * a lockdep splat, so defer the dump until the lock is dropped.
+ */
+ dump_page(unmovable, "unmovable page");
+
return ret;
}
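
The restructured error path follows a common pattern: record the failure under the lock, report it only after the lock is dropped. A compilable sketch of that shape; try_isolate() and blocker are invented for illustration:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int blocker = 42;  /* protected by lock; never freed */

    static int try_isolate(void)
    {
        int *offender = NULL;
        int ret = -1;

        pthread_mutex_lock(&lock);
        if (blocker == 0)
            ret = 0;
        else
            offender = &blocker;  /* remember it; don't print here */
        pthread_mutex_unlock(&lock);

        /*
         * Report outside the critical section, as the patch does with
         * dump_page(). Dereferencing offender here is only safe because
         * nothing frees blocker concurrently, the same caveat the
         * has_unmovable_pages() comment spells out.
         */
        if (ret && offender)
            fprintf(stderr, "isolation failed, blocker = %d\n", *offender);
        return ret;
    }

    int main(void)
    {
        return try_isolate() ? 1 : 0;
    }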