Diffstat (limited to 'mm')
57 files changed, 7798 insertions, 4697 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 3e2daef3c946..c0837845c17c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -440,6 +440,14 @@ choice
 endchoice

 #
+# We don't deposit page tables on file THP mapping,
+# but Power makes use of them to address MMU quirk.
+#
+config TRANSPARENT_HUGE_PAGECACHE
+	def_bool y
+	depends on TRANSPARENT_HUGEPAGE && !PPC
+
+#
 # UP and nommu archs use km based percpu allocator
 #
 config NEED_PER_CPU_KM
@@ -673,7 +681,7 @@ config IDLE_PAGE_TRACKING
 	  See Documentation/vm/idle_page_tracking.txt for more details.

 config ZONE_DEVICE
-	bool "Device memory (pmem, etc...) hotplug support" if EXPERT
+	bool "Device memory (pmem, etc...) hotplug support"
 	depends on MEMORY_HOTPLUG
 	depends on MEMORY_HOTREMOVE
 	depends on SPARSEMEM_VMEMMAP
diff --git a/mm/Makefile b/mm/Makefile
index 78c6f7dedb83..fc059666c760 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -74,7 +74,7 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_MEMTEST)		+= memtest.o
 obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
-obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o
+obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o
 obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
 obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o
 obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o
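The new TRANSPARENT_HUGE_PAGECACHE symbol above (together with khugepaged becoming a separate object file) gates huge pages in the page cache independently of anonymous THP; it is off on powerpc because file-THP mappings carry no deposited page tables for the MMU-quirk workaround to consume. A minimal sketch of how dependent code can key off the option at compile time, using the kernel's IS_ENABLED() macro; the wrapper name is made up for illustration:

	/* Sketch: guard file-THP paths on the new Kconfig symbol.
	 * has_huge_pagecache() is a hypothetical helper, not from the patch. */
	static inline bool has_huge_pagecache(void)
	{
		return IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE);
	}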
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index ed173b8ae8f2..efe237742074 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -947,24 +947,24 @@ long congestion_wait(int sync, long timeout)
 EXPORT_SYMBOL(congestion_wait);

 /**
- * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
- * @zone: A zone to check if it is heavily congested
+ * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
+ * @pgdat: A pgdat to check if it is heavily congested
  * @sync: SYNC or ASYNC IO
  * @timeout: timeout in jiffies
  *
  * In the event of a congested backing_dev (any backing_dev) and the given
- * @zone has experienced recent congestion, this waits for up to @timeout
+ * @pgdat has experienced recent congestion, this waits for up to @timeout
  * jiffies for either a BDI to exit congestion of the given @sync queue
  * or a write to complete.
  *
- * In the absence of zone congestion, cond_resched() is called to yield
+ * In the absence of pgdat congestion, cond_resched() is called to yield
  * the processor if necessary but otherwise does not sleep.
  *
  * The return value is 0 if the sleep is for the full timeout. Otherwise,
  * it is the number of jiffies that were still remaining when the function
  * returned. return_value == timeout implies the function did not sleep.
  */
-long wait_iff_congested(struct zone *zone, int sync, long timeout)
+long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
 {
 	long ret;
 	unsigned long start = jiffies;
@@ -973,12 +973,13 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)

 	/*
 	 * If there is no congestion, or heavy congestion is not being
-	 * encountered in the current zone, yield if necessary instead
+	 * encountered in the current pgdat, yield if necessary instead
 	 * of sleeping on the congestion queue
 	 */
 	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
-	    !test_bit(ZONE_CONGESTED, &zone->flags)) {
+	    !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
 		cond_resched();
+		/* In case we scheduled, work out time remaining */
 		ret = timeout - (jiffies - start);
 		if (ret < 0)
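With wait_iff_congested() reworked above to take a pglist_data instead of a zone, congestion state follows the LRU to the node level and reclaim callers pass the pgdat they are scanning. A minimal sketch of the new call pattern, assuming a reclaim path that already holds a pgdat pointer:

	/* Sketch: throttle a reclaimer for up to HZ/10 jiffies when the node
	 * is marked PGDAT_CONGESTED; the return value is the jiffies left. */
	long remaining = wait_iff_congested(pgdat, BLK_RW_ASYNC, HZ/10);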
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 57b3e9bd6bc5..da91df50ba31 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -70,7 +70,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 		 */
 		if (trylock_page(page)) {
 #ifdef CONFIG_BALLOON_COMPACTION
-			if (!PagePrivate(page)) {
+			if (PageIsolated(page)) {
 				/* raced with isolation */
 				unlock_page(page);
 				continue;
@@ -106,110 +106,50 @@ EXPORT_SYMBOL_GPL(balloon_page_dequeue);

 #ifdef CONFIG_BALLOON_COMPACTION

-static inline void __isolate_balloon_page(struct page *page)
+bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
+
 {
 	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
 	unsigned long flags;

 	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
-	ClearPagePrivate(page);
 	list_del(&page->lru);
 	b_dev_info->isolated_pages++;
 	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+
+	return true;
 }

-static inline void __putback_balloon_page(struct page *page)
+void balloon_page_putback(struct page *page)
 {
 	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
 	unsigned long flags;

 	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
-	SetPagePrivate(page);
 	list_add(&page->lru, &b_dev_info->pages);
 	b_dev_info->isolated_pages--;
 	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 }

-/* __isolate_lru_page() counterpart for a ballooned page */
-bool balloon_page_isolate(struct page *page)
-{
-	/*
-	 * Avoid burning cycles with pages that are yet under __free_pages(),
-	 * or just got freed under us.
-	 *
-	 * In case we 'win' a race for a balloon page being freed under us and
-	 * raise its refcount preventing __free_pages() from doing its job
-	 * the put_page() at the end of this block will take care of
-	 * release this page, thus avoiding a nasty leakage.
-	 */
-	if (likely(get_page_unless_zero(page))) {
-		/*
-		 * As balloon pages are not isolated from LRU lists, concurrent
-		 * compaction threads can race against page migration functions
-		 * as well as race against the balloon driver releasing a page.
-		 *
-		 * In order to avoid having an already isolated balloon page
-		 * being (wrongly) re-isolated while it is under migration,
-		 * or to avoid attempting to isolate pages being released by
-		 * the balloon driver, lets be sure we have the page lock
-		 * before proceeding with the balloon page isolation steps.
-		 */
-		if (likely(trylock_page(page))) {
-			/*
-			 * A ballooned page, by default, has PagePrivate set.
-			 * Prevent concurrent compaction threads from isolating
-			 * an already isolated balloon page by clearing it.
-			 */
-			if (balloon_page_movable(page)) {
-				__isolate_balloon_page(page);
-				unlock_page(page);
-				return true;
-			}
-			unlock_page(page);
-		}
-		put_page(page);
-	}
-	return false;
-}
-
-/* putback_lru_page() counterpart for a ballooned page */
-void balloon_page_putback(struct page *page)
-{
-	/*
-	 * 'lock_page()' stabilizes the page and prevents races against
-	 * concurrent isolation threads attempting to re-isolate it.
-	 */
-	lock_page(page);
-
-	if (__is_movable_balloon_page(page)) {
-		__putback_balloon_page(page);
-		/* drop the extra ref count taken for page isolation */
-		put_page(page);
-	} else {
-		WARN_ON(1);
-		dump_page(page, "not movable balloon page");
-	}
-	unlock_page(page);
-}

 /* move_to_new_page() counterpart for a ballooned page */
-int balloon_page_migrate(struct page *newpage,
-			 struct page *page, enum migrate_mode mode)
+int balloon_page_migrate(struct address_space *mapping,
+		struct page *newpage, struct page *page,
+		enum migrate_mode mode)
 {
 	struct balloon_dev_info *balloon = balloon_page_device(page);
-	int rc = -EAGAIN;

 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

-	if (WARN_ON(!__is_movable_balloon_page(page))) {
-		dump_page(page, "not movable balloon page");
-		return rc;
-	}
+	return balloon->migratepage(balloon, newpage, page, mode);
+}

-	if (balloon && balloon->migratepage)
-		rc = balloon->migratepage(balloon, newpage, page, mode);
+const struct address_space_operations balloon_aops = {
+	.migratepage = balloon_page_migrate,
+	.isolate_page = balloon_page_isolate,
+	.putback_page = balloon_page_putback,
+};
+EXPORT_SYMBOL_GPL(balloon_aops);

-	return rc;
-}
 #endif /* CONFIG_BALLOON_COMPACTION */
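The balloon_compaction.c rewrite above drops the PagePrivate-based isolate/putback helpers in favour of the generic movable-page callbacks, packaged as balloon_aops. A balloon driver now only supplies its migratepage hook and points a driver-owned inode's mapping at balloon_aops; a rough sketch of that wiring, where vb and my_migratepage are illustrative names and the inode setup is abbreviated:

	/* Sketch: hooking a balloon driver into the generic callbacks. */
	balloon_devinfo_init(&vb->b_dev_info);
	vb->b_dev_info.migratepage = my_migratepage;	/* driver-specific copy */
	vb->b_dev_info.inode = inode;			/* driver-owned inode */
	inode->i_mapping->a_ops = &balloon_aops;	/* isolate/putback/migrate */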
diff --git a/mm/compaction.c b/mm/compaction.c
index 79bfe0e06907..9affb2908304 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -15,11 +15,11 @@
 #include <linux/backing-dev.h>
 #include <linux/sysctl.h>
 #include <linux/sysfs.h>
-#include <linux/balloon_compaction.h>
 #include <linux/page-isolation.h>
 #include <linux/kasan.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/page_owner.h>
 #include "internal.h"

 #ifdef CONFIG_COMPACTION
@@ -65,13 +65,27 @@ static unsigned long release_freepages(struct list_head *freelist)

 static void map_pages(struct list_head *list)
 {
-	struct page *page;
+	unsigned int i, order, nr_pages;
+	struct page *page, *next;
+	LIST_HEAD(tmp_list);
+
+	list_for_each_entry_safe(page, next, list, lru) {
+		list_del(&page->lru);
+
+		order = page_private(page);
+		nr_pages = 1 << order;

-	list_for_each_entry(page, list, lru) {
-		arch_alloc_page(page, 0);
-		kernel_map_pages(page, 1, 1);
-		kasan_alloc_pages(page, 0);
+		post_alloc_hook(page, order, __GFP_MOVABLE);
+		if (order)
+			split_page(page, order);
+
+		for (i = 0; i < nr_pages; i++) {
+			list_add(&page->lru, &tmp_list);
+			page++;
+		}
 	}
+
+	list_splice(&tmp_list, list);
 }

 static inline bool migrate_async_suitable(int migratetype)
@@ -81,6 +95,44 @@ static inline bool migrate_async_suitable(int migratetype)

 #ifdef CONFIG_COMPACTION

+int PageMovable(struct page *page)
+{
+	struct address_space *mapping;
+
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	if (!__PageMovable(page))
+		return 0;
+
+	mapping = page_mapping(page);
+	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
+		return 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(PageMovable);
+
+void __SetPageMovable(struct page *page, struct address_space *mapping)
+{
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
+	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
+}
+EXPORT_SYMBOL(__SetPageMovable);
+
+void __ClearPageMovable(struct page *page)
+{
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageMovable(page), page);
+	/*
+	 * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE
+	 * flag so that VM can catch up released page by driver after isolation.
+	 * With it, VM migration doesn't try to put it back.
+	 */
+	page->mapping = (void *)((unsigned long)page->mapping &
+				PAGE_MAPPING_MOVABLE);
+}
+EXPORT_SYMBOL(__ClearPageMovable);
+
 /* Do not skip compaction more than 64 times */
 #define COMPACT_MAX_DEFER_SHIFT 6

@@ -279,7 +331,7 @@ static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
 {
 	if (cc->mode == MIGRATE_ASYNC) {
 		if (!spin_trylock_irqsave(lock, *flags)) {
-			cc->contended = COMPACT_CONTENDED_LOCK;
+			cc->contended = true;
 			return false;
 		}
 	} else {
@@ -313,13 +365,13 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
 	}

 	if (fatal_signal_pending(current)) {
-		cc->contended = COMPACT_CONTENDED_SCHED;
+		cc->contended = true;
 		return true;
 	}

 	if (need_resched()) {
 		if (cc->mode == MIGRATE_ASYNC) {
-			cc->contended = COMPACT_CONTENDED_SCHED;
+			cc->contended = true;
 			return true;
 		}
 		cond_resched();
@@ -342,7 +394,7 @@ static inline bool compact_should_abort(struct compact_control *cc)
 	/* async compaction aborts if contended */
 	if (need_resched()) {
 		if (cc->mode == MIGRATE_ASYNC) {
-			cc->contended = COMPACT_CONTENDED_SCHED;
+			cc->contended = true;
 			return true;
 		}
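PageMovable(), __SetPageMovable() and __ClearPageMovable() above establish the non-LRU movable page protocol: a driver tags a locked page with an address_space whose a_ops carry isolate_page/migratepage/putback_page, and the migration scanner then handles the page much like an LRU page. A rough sketch of the driver side, where my_aops and the three callbacks are placeholder names rather than anything defined by this patch:

	/* Sketch: registering driver-owned pages as non-LRU movable. */
	static const struct address_space_operations my_aops = {
		.isolate_page	= my_isolate,	/* called with page locked */
		.migratepage	= my_migrate,	/* copy data, move references */
		.putback_page	= my_putback,	/* undo isolation on failure */
	};

	lock_page(page);
	__SetPageMovable(page, mapping);	/* mapping->a_ops == &my_aops */
	unlock_page(page);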
@@ -368,12 +420,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	unsigned long flags = 0;
 	bool locked = false;
 	unsigned long blockpfn = *start_pfn;
+	unsigned int order;

 	cursor = pfn_to_page(blockpfn);

 	/* Isolate free pages. */
 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
-		int isolated, i;
+		int isolated;
 		struct page *page = cursor;

 		/*
@@ -439,17 +492,17 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 			goto isolate_fail;
 		}

-		/* Found a free page, break it into order-0 pages */
-		isolated = split_free_page(page);
+		/* Found a free page, will break it into order-0 pages */
+		order = page_order(page);
+		isolated = __isolate_free_page(page, order);
 		if (!isolated)
 			break;
+		set_page_private(page, order);

 		total_isolated += isolated;
 		cc->nr_freepages += isolated;
-		for (i = 0; i < isolated; i++) {
-			list_add(&page->lru, freelist);
-			page++;
-		}
+		list_add_tail(&page->lru, freelist);
+
 		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
 			blockpfn += isolated;
 			break;
@@ -568,7 +621,7 @@ isolate_freepages_range(struct compact_control *cc,
 		 */
 	}

-	/* split_free_page does not map the pages */
+	/* __isolate_free_page() does not map the pages */
 	map_pages(&freelist);

 	if (pfn < end_pfn) {
@@ -593,8 +646,8 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
 	list_for_each_entry(page, &cc->migratepages, lru)
 		count[!!page_is_file_cache(page)]++;

-	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
-	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, count[0]);
+	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, count[1]);
 }

 /* Similar to reclaim, but different enough that they don't share logic */
@@ -602,12 +655,12 @@ static bool too_many_isolated(struct zone *zone)
 {
 	unsigned long active, inactive, isolated;

-	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
-			zone_page_state(zone, NR_INACTIVE_ANON);
-	active = zone_page_state(zone, NR_ACTIVE_FILE) +
-			zone_page_state(zone, NR_ACTIVE_ANON);
-	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
-			zone_page_state(zone, NR_ISOLATED_ANON);
+	inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
+			node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
+	active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
+			node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
+	isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
+			node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);

 	return isolated > (inactive + active) / 2;
 }
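The isolate_freepages_block() changes above pair with the new map_pages(): rather than splitting a high-order free page while zone->lock is held, the scanner records the order in page_private() and queues the compound page once; map_pages() later re-prepares it via post_alloc_hook() and splits it outside the lock. A sketch of the round trip for a single buddy page, paraphrasing the two hunks:

	/* Sketch: deferred split for one order-3 buddy page. */
	unsigned int order = page_order(page);		/* e.g. 3 */
	int isolated = __isolate_free_page(page, order);/* 8 base pages, or 0 */

	if (isolated) {
		set_page_private(page, order);	/* map_pages() reads this back */
		list_add_tail(&page->lru, freelist);	/* one entry, not 8 */
	}
	/* ...later, outside zone->lock, map_pages() effectively runs:
	 *	post_alloc_hook(page, order, __GFP_MOVABLE);
	 *	split_page(page, order);	-> 8 order-0 pages
	 */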
@@ -670,7 +723,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,

 	/* Time to isolate some pages for migration */
 	for (; low_pfn < end_pfn; low_pfn++) {
-		bool is_lru;

 		if (skip_on_failure && low_pfn >= next_skip_pfn) {
 			/*
@@ -700,7 +752,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 * if contended.
 		 */
 		if (!(low_pfn % SWAP_CLUSTER_MAX)
-		    && compact_unlock_should_abort(&zone->lru_lock, flags,
+		    && compact_unlock_should_abort(zone_lru_lock(zone), flags,
 								&locked, cc))
 			break;

@@ -733,21 +785,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		}

 		/*
-		 * Check may be lockless but that's ok as we recheck later.
-		 * It's possible to migrate LRU pages and balloon pages
-		 * Skip any other type of page
-		 */
-		is_lru = PageLRU(page);
-		if (!is_lru) {
-			if (unlikely(balloon_page_movable(page))) {
-				if (balloon_page_isolate(page)) {
-					/* Successfully isolated */
-					goto isolate_success;
-				}
-			}
-		}
-
-		/*
 		 * Regardless of being on LRU, compound pages such as THP and
 		 * hugetlbfs are not to be compacted. We can potentially save
 		 * a lot of iterations if we skip them at once. The check is
@@ -763,8 +800,30 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			goto isolate_fail;
 		}

-		if (!is_lru)
+		/*
+		 * Check may be lockless but that's ok as we recheck later.
+		 * It's possible to migrate LRU and non-lru movable pages.
+		 * Skip any other type of page
+		 */
+		if (!PageLRU(page)) {
+			/*
+			 * __PageMovable can return false positive so we need
+			 * to verify it under page_lock.
+			 */
+			if (unlikely(__PageMovable(page)) &&
+					!PageIsolated(page)) {
+				if (locked) {
+					spin_unlock_irqrestore(zone_lru_lock(zone),
+									flags);
+					locked = false;
+				}
+
+				if (isolate_movable_page(page, isolate_mode))
+					goto isolate_success;
+			}
+
 			goto isolate_fail;
+		}

 		/*
 		 * Migration will fail if an anonymous page is pinned in memory,
@@ -777,7 +836,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,

 		/* If we already hold the lock, we can skip some rechecking */
 		if (!locked) {
-			locked = compact_trylock_irqsave(&zone->lru_lock,
+			locked = compact_trylock_irqsave(zone_lru_lock(zone),
 								&flags, cc);
 			if (!locked)
 				break;
@@ -797,7 +856,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			}
 		}

-		lruvec = mem_cgroup_page_lruvec(page, zone);
+		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);

 		/* Try isolate the page */
 		if (__isolate_lru_page(page, isolate_mode) != 0)
@@ -840,7 +899,7 @@ isolate_fail:
 	 */
 	if (nr_isolated) {
 		if (locked) {
-			spin_unlock_irqrestore(&zone->lru_lock, flags);
+			spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 			locked = false;
 		}
 		acct_isolated(zone, cc);
@@ -868,7 +927,7 @@ isolate_fail:
 		low_pfn = end_pfn;

 	if (locked)
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
+		spin_unlock_irqrestore(zone_lru_lock(zone), flags);

 	/*
 	 * Update the pageblock-skip information and cached scanner pfn,
@@ -1009,8 +1068,6 @@ static void isolate_freepages(struct compact_control *cc)
 				block_end_pfn = block_start_pfn,
 				block_start_pfn -= pageblock_nr_pages,
 				isolate_start_pfn = block_start_pfn) {
-		unsigned long isolated;
-
 		/*
 		 * This can iterate a massively long zone without finding any
 		 * suitable migration targets, so periodically check if we need
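The &zone->lru_lock references above all become zone_lru_lock(zone): with LRU lists and their statistics moved to the node, each zone's LRU lock is really its node's lock, which is also why mem_cgroup_page_lruvec() now takes zone->zone_pgdat. The accessor comes from the node-lru part of this series and is essentially a one-line indirection; a sketch of the helper as defined alongside these hunks:

	static inline spinlock_t *zone_lru_lock(struct zone *zone)
	{
		return &zone->zone_pgdat->lru_lock;
	}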
@@ -1034,40 +1091,34 @@ static void isolate_freepages(struct compact_control *cc)
 			continue;

 		/* Found a block suitable for isolating free pages from. */
-		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
-					block_end_pfn, freelist, false);
-		/* If isolation failed early, do not continue needlessly */
-		if (!isolated && isolate_start_pfn < block_end_pfn &&
-		    cc->nr_migratepages > cc->nr_freepages)
-			break;
+		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+					freelist, false);

 		/*
-		 * If we isolated enough freepages, or aborted due to async
-		 * compaction being contended, terminate the loop.
-		 * Remember where the free scanner should restart next time,
-		 * which is where isolate_freepages_block() left off.
-		 * But if it scanned the whole pageblock, isolate_start_pfn
-		 * now points at block_end_pfn, which is the start of the next
-		 * pageblock.
-		 * In that case we will however want to restart at the start
-		 * of the previous pageblock.
+		 * If we isolated enough freepages, or aborted due to lock
+		 * contention, terminate.
 		 */
 		if ((cc->nr_freepages >= cc->nr_migratepages)
 							|| cc->contended) {
-			if (isolate_start_pfn >= block_end_pfn)
+			if (isolate_start_pfn >= block_end_pfn) {
+				/*
+				 * Restart at previous pageblock if more
+				 * freepages can be isolated next time.
+				 */
 				isolate_start_pfn =
 					block_start_pfn - pageblock_nr_pages;
+			}
 			break;
-		} else {
+		} else if (isolate_start_pfn < block_end_pfn) {
 			/*
-			 * isolate_freepages_block() should not terminate
-			 * prematurely unless contended, or isolated enough
+			 * If isolation failed early, do not continue
+			 * needlessly.
 			 */
-			VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+			break;
 		}
 	}

-	/* split_free_page does not map the pages */
+	/* __isolate_free_page() does not map the pages */
 	map_pages(freelist);

 	/*
@@ -1149,7 +1200,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	struct page *page;
 	const isolate_mode_t isolate_mode =
 		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
-		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);
+		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);

 	/*
 	 * Start at where we last stopped, or beginning of the zone as
@@ -1568,14 +1619,11 @@ out:
 	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
 				cc->free_pfn, end_pfn, sync, ret);

-	if (ret == COMPACT_CONTENDED)
-		ret = COMPACT_PARTIAL;
-
 	return ret;
 }

 static enum compact_result compact_zone_order(struct zone *zone, int order,
-		gfp_t gfp_mask, enum migrate_mode mode, int *contended,
+		gfp_t gfp_mask, enum compact_priority prio,
 		unsigned int alloc_flags, int classzone_idx)
 {
 	enum compact_result ret;
@@ -1585,7 +1633,8 @@ static enum compact_result co