Diffstat (limited to 'mm')
71 files changed, 4398 insertions, 1829 deletions
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index afcc550877ff..79d0fd13b5b3 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -90,3 +90,9 @@ config DEBUG_PAGE_REF
 	  careful when enabling this feature because it adds about 30 KB to the
 	  kernel code. However the runtime performance overhead is virtually
 	  nil until the tracepoints are actually enabled.
+
+config DEBUG_RODATA_TEST
+	bool "Testcase for the marking rodata read-only"
+	depends on STRICT_KERNEL_RWX
+	---help---
+	  This option enables a testcase for the setting rodata read-only.
diff --git a/mm/Makefile b/mm/Makefile
index 295bd7a9f76b..026f6a828a50 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -23,8 +23,10 @@ KCOV_INSTRUMENT_vmstat.o := n
 
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
-			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
-			   vmalloc.o pagewalk.o pgtable-generic.o
+			   mlock.o mmap.o mprotect.o mremap.o msync.o \
+			   page_vma_mapped.o pagewalk.o pgtable-generic.o \
+			   rmap.o vmalloc.o
+
 
 ifdef CONFIG_CROSS_MEMORY_ATTACH
 mmu-$(CONFIG_MMU)	+= process_vm_access.o
@@ -35,7 +37,7 @@ obj-y			:= filemap.o mempool.o oom_kill.o \
 			   readahead.o swap.o truncate.o vmscan.o shmem.o \
 			   util.o mmzone.o vmstat.o backing-dev.o \
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
-			   compaction.o vmacache.o \
+			   compaction.o vmacache.o swap_slots.o \
 			   interval_tree.o list_lru.o workingset.o \
 			   debug.o $(mmu-y)
 
@@ -83,6 +85,7 @@ obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
 obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
 obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
 obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
+obj-$(CONFIG_DEBUG_RODATA_TEST) += rodata_test.o
 obj-$(CONFIG_PAGE_OWNER) += page_owner.o
 obj-$(CONFIG_CLEANCACHE) += cleancache.o
 obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
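Note: the new DEBUG_RODATA_TEST option wires mm/rodata_test.o into the build wherever STRICT_KERNEL_RWX is available. The premise of such a test is that a write to a variable placed in .rodata must fault once mark_rodata_ro() has run. The sketch below illustrates that style of check only; it is not the literal contents of mm/rodata_test.c, and the variable name is made up.

/* Illustrative sketch only -- not the actual mm/rodata_test.c. */
#include <linux/kernel.h>
#include <linux/uaccess.h>

static const int rodata_test_data = 0xC3;	/* placed in .rodata */

static void rodata_write_check(void)
{
	int zero = 0;

	/*
	 * probe_kernel_write() returns a negative error when the target
	 * page is mapped read-only, which is exactly what we expect
	 * after mark_rodata_ro() has run.
	 */
	if (probe_kernel_write((void *)&rodata_test_data,
			       &zero, sizeof(zero)))
		pr_info("rodata_test: write to .rodata correctly faulted\n");
	else
		pr_err("rodata_test: .rodata is writable!\n");
}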
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 3bfed5ab2475..6d861d090e9f 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -237,6 +237,7 @@ static __init int bdi_class_init(void)
 
 	bdi_class->dev_groups = bdi_dev_groups;
 	bdi_debug_init();
+
 	return 0;
 }
 postcore_initcall(bdi_class_init);
@@ -410,8 +411,8 @@ retry:
 	while (*node != NULL) {
 		parent = *node;
-		congested = container_of(parent, struct bdi_writeback_congested,
-					 rb_node);
+		congested = rb_entry(parent, struct bdi_writeback_congested,
+				     rb_node);
 		if (congested->blkcg_id < blkcg_id)
 			node = &parent->rb_left;
 		else if (congested->blkcg_id > blkcg_id)
@@ -758,15 +759,20 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 	if (!bdi->wb_congested)
 		return -ENOMEM;
 
+	atomic_set(&bdi->wb_congested->refcnt, 1);
+
 	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
 	if (err) {
-		kfree(bdi->wb_congested);
+		wb_congested_put(bdi->wb_congested);
 		return err;
 	}
 	return 0;
 }
 
-static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
+static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
+{
+	wb_congested_put(bdi->wb_congested);
+}
 
 #endif	/* CONFIG_CGROUP_WRITEBACK */
@@ -776,6 +782,7 @@ int bdi_init(struct backing_dev_info *bdi)
 
 	bdi->dev = NULL;
 
+	kref_init(&bdi->refcnt);
 	bdi->min_ratio = 0;
 	bdi->max_ratio = 100;
 	bdi->max_prop_frac = FPROP_FRAC_BASE;
@@ -791,6 +798,22 @@ int bdi_init(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_init);
 
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
+{
+	struct backing_dev_info *bdi;
+
+	bdi = kmalloc_node(sizeof(struct backing_dev_info),
+			   gfp_mask | __GFP_ZERO, node_id);
+	if (!bdi)
+		return NULL;
+
+	if (bdi_init(bdi)) {
+		kfree(bdi);
+		return NULL;
+	}
+	return bdi;
+}
+
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...)
 {
@@ -871,12 +894,26 @@ void bdi_unregister(struct backing_dev_info *bdi)
 	}
 }
 
-void bdi_exit(struct backing_dev_info *bdi)
+static void bdi_exit(struct backing_dev_info *bdi)
 {
 	WARN_ON_ONCE(bdi->dev);
 	wb_exit(&bdi->wb);
 }
 
+static void release_bdi(struct kref *ref)
+{
+	struct backing_dev_info *bdi =
+		container_of(ref, struct backing_dev_info, refcnt);
+
+	bdi_exit(bdi);
+	kfree(bdi);
+}
+
+void bdi_put(struct backing_dev_info *bdi)
+{
+	kref_put(&bdi->refcnt, release_bdi);
+}
+
 void bdi_destroy(struct backing_dev_info *bdi)
 {
 	bdi_unregister(bdi);
diff --git a/mm/bootmem.c b/mm/bootmem.c
index e8a55a3c9feb..9fedb27c6451 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -53,7 +53,7 @@ early_param("bootmem_debug", bootmem_debug_setup);
 
 static unsigned long __init bootmap_bytes(unsigned long pages)
 {
-	unsigned long bytes = DIV_ROUND_UP(pages, 8);
+	unsigned long bytes = DIV_ROUND_UP(pages, BITS_PER_BYTE);
 
 	return ALIGN(bytes, sizeof(long));
 }
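Note: with bdi_alloc_node() and bdi_put(), a backing_dev_info can now be allocated dynamically and lives until its last kref is dropped, at which point release_bdi() runs bdi_exit() and kfree(). A hedged sketch of the pattern a caller would follow; the setup/teardown helpers and the "mydev%d" name are hypothetical, only the bdi_* calls come from the diff above.

/* Sketch only: my_dev_setup()/my_dev_teardown() are illustrative. */
static struct backing_dev_info *my_bdi;

static int my_dev_setup(void)
{
	int err;

	my_bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!my_bdi)
		return -ENOMEM;

	err = bdi_register(my_bdi, NULL, "mydev%d", 0);
	if (err) {
		bdi_put(my_bdi);	/* drops the kref_init() reference */
		return err;
	}
	return 0;
}

static void my_dev_teardown(void)
{
	bdi_unregister(my_bdi);
	bdi_put(my_bdi);	/* last ref: release_bdi() -> bdi_exit() + kfree() */
}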
diff --git a/mm/cma.c b/mm/cma.c
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -235,18 +235,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	phys_addr_t highmem_start;
 	int ret = 0;
 
-#ifdef CONFIG_X86
 	/*
-	 * high_memory isn't direct mapped memory so retrieving its physical
-	 * address isn't appropriate. But it would be useful to check the
-	 * physical address of the highmem boundary so it's justifiable to get
-	 * the physical address from it. On x86 there is a validation check for
-	 * this case, so the following workaround is needed to avoid it.
+	 * We can't use __pa(high_memory) directly, since high_memory
+	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
+	 * complain. Find the boundary by adding one to the last valid
+	 * address.
 	 */
-	highmem_start = __pa_nodebug(high_memory);
-#else
-	highmem_start = __pa(high_memory);
-#endif
+	highmem_start = __pa(high_memory - 1) + 1;
 
 	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
 		__func__, &size, &base, &limit, &alignment);
@@ -353,6 +348,32 @@ err:
 	return ret;
 }
 
+#ifdef CONFIG_CMA_DEBUG
+static void cma_debug_show_areas(struct cma *cma)
+{
+	unsigned long next_zero_bit, next_set_bit;
+	unsigned long start = 0;
+	unsigned int nr_zero, nr_total = 0;
+
+	mutex_lock(&cma->lock);
+	pr_info("number of available pages: ");
+	for (;;) {
+		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
+		if (next_zero_bit >= cma->count)
+			break;
+		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
+		nr_zero = next_set_bit - next_zero_bit;
+		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
+		nr_total += nr_zero;
+		start = next_zero_bit + nr_zero;
+	}
+	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
+	mutex_unlock(&cma->lock);
+}
+#else
+static inline void cma_debug_show_areas(struct cma *cma) { }
+#endif
+
 /**
  * cma_alloc() - allocate pages from contiguous area
  * @cma:   Contiguous memory region for which the allocation is performed.
@@ -362,14 +383,15 @@ err:
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
-struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
+struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
+		       gfp_t gfp_mask)
 {
 	unsigned long mask, offset;
 	unsigned long pfn = -1;
 	unsigned long start = 0;
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
 	struct page *page = NULL;
-	int ret;
+	int ret = -ENOMEM;
 
 	if (!cma || !cma->count)
 		return NULL;
@@ -407,7 +429,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 		mutex_lock(&cma_mutex);
-		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
+					 gfp_mask);
 		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
@@ -426,6 +449,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
 
 	trace_cma_alloc(pfn, page, count, align);
 
+	if (ret) {
+		pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
+			__func__, count, ret);
+		cma_debug_show_areas(cma);
+	}
+
 	pr_debug("%s(): returned %p\n", __func__, page);
 	return page;
 }
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index f8e4b60db167..ffc0c3d0ae64 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -138,7 +138,7 @@ static int cma_alloc_mem(struct cma *cma, int count)
 	if (!mem)
 		return -ENOMEM;
 
-	p = cma_alloc(cma, count, 0);
+	p = cma_alloc(cma, count, 0, GFP_KERNEL);
 	if (!p) {
 		kfree(mem);
 		return -ENOMEM;
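Note: cma_alloc() now takes the caller's gfp_mask and threads it through to alloc_contig_range(), and a failed allocation dumps the free-run layout of the area via the new cma_debug_show_areas(). A minimal caller under the new signature, mirroring the cma_debug.c conversion above; my_cma and nr_pages are placeholders for a real CMA area and request size.

/* Sketch only: my_cma and nr_pages are placeholders. */
struct page *page;

page = cma_alloc(my_cma, nr_pages, 0 /* align order */, GFP_KERNEL);
if (!page)
	return -ENOMEM;	/* cma_alloc() has already logged the free runs */

/* ... use the pages ... */

cma_release(my_cma, page, nr_pages);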
diff --git a/mm/compaction.c b/mm/compaction.c
index 949198d01260..81e1eaa2a2cf 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -12,6 +12,7 @@
 #include <linux/migrate.h>
 #include <linux/compaction.h>
 #include <linux/mm_inline.h>
+#include <linux/sched/signal.h>
 #include <linux/backing-dev.h>
 #include <linux/sysctl.h>
 #include <linux/sysfs.h>
@@ -548,7 +549,7 @@ isolate_fail:
 	if (blockpfn == end_pfn)
 		update_pageblock_skip(cc, valid_page, total_isolated, false);
 
-	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
+	cc->total_free_scanned += nr_scanned;
 	if (total_isolated)
 		count_compact_events(COMPACTISOLATED, total_isolated);
 	return total_isolated;
@@ -802,7 +803,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 				locked = false;
 			}
 
-			if (isolate_movable_page(page, isolate_mode))
+			if (!isolate_movable_page(page, isolate_mode))
 				goto isolate_success;
 		}
 
@@ -931,7 +932,7 @@ isolate_fail:
 	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
 						nr_scanned, nr_isolated);
 
-	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
+	cc->total_migrate_scanned += nr_scanned;
 	if (nr_isolated)
 		count_compact_events(COMPACTISOLATED, nr_isolated);
 
@@ -1631,6 +1632,9 @@ out:
 		zone->compact_cached_free_pfn = free_pfn;
 	}
 
+	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
+	count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
+
 	trace_mm_compaction_end(start_pfn, cc->migrate_pfn, cc->free_pfn,
 				end_pfn, sync, ret);
 
@@ -1645,6 +1649,8 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 	struct compact_control cc = {
 		.nr_freepages = 0,
 		.nr_migratepages = 0,
+		.total_migrate_scanned = 0,
+		.total_free_scanned = 0,
 		.order = order,
 		.gfp_mask = gfp_mask,
 		.zone = zone,
@@ -1757,6 +1763,8 @@ static void compact_node(int nid)
 	struct compact_control cc = {
 		.order = -1,
+		.total_migrate_scanned = 0,
+		.total_free_scanned = 0,
 		.mode = MIGRATE_SYNC,
 		.ignore_skip_hint = true,
 		.whole_zone = true,
@@ -1883,6 +1891,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 	struct zone *zone;
 	struct compact_control cc = {
 		.order = pgdat->kcompactd_max_order,
+		.total_migrate_scanned = 0,
+		.total_free_scanned = 0,
 		.classzone_idx = pgdat->kcompactd_classzone_idx,
 		.mode = MIGRATE_SYNC_LIGHT,
 		.ignore_skip_hint = true,
@@ -1891,7 +1901,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 	};
 	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
 							cc.classzone_idx);
-	count_vm_event(KCOMPACTD_WAKE);
+	count_compact_event(KCOMPACTD_WAKE);
 
 	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
 		int status;
@@ -1909,6 +1919,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 
 		cc.nr_freepages = 0;
 		cc.nr_migratepages = 0;
+		cc.total_migrate_scanned = 0;
+		cc.total_free_scanned = 0;
 		cc.zone = zone;
 		INIT_LIST_HEAD(&cc.freepages);
 		INIT_LIST_HEAD(&cc.migratepages);
@@ -1927,6 +1939,11 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 			defer_compaction(zone, cc.order);
 		}
 
+		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
+				     cc.total_migrate_scanned);
+		count_compact_events(KCOMPACTD_FREE_SCANNED,
+				     cc.total_free_scanned);
+
 		VM_BUG_ON(!list_empty(&cc.freepages));
 		VM_BUG_ON(!list_empty(&cc.migratepages));
 	}
@@ -1950,6 +1967,13 @@ void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
 	if (pgdat->kcompactd_max_order < order)
 		pgdat->kcompactd_max_order = order;
 
+	/*
+	 * Pairs with implicit barrier in wait_event_freezable()
+	 * such that wakeups are not missed in the lockless
+	 * waitqueue_active() call.
+	 */
+	smp_acquire__after_ctrl_dep();
+
 	if (pgdat->kcompactd_classzone_idx > classzone_idx)
 		pgdat->kcompactd_classzone_idx = classzone_idx;
diff --git a/mm/dmapool.c b/mm/dmapool.c
index abcbfe86c25a..4d90a64b2fdc 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -93,7 +93,7 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 		spin_unlock_irq(&pool->lock);
 
 		/* per-pool info, no real statistics yet */
-		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
+		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
 				 pool->name, blocks,
 				 pages * (pool->allocation / pool->size),
 				 pool->size, pages);
@@ -434,11 +434,11 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 		spin_unlock_irqrestore(&pool->lock, flags);
 		if (pool->dev)
 			dev_err(pool->dev,
-				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
-				pool->name, vaddr, (unsigned long long)dma);
+				"dma_pool_free %s, %p (bad vaddr)/%pad\n",
+				pool->name, vaddr, &dma);
 		else
-			pr_err("dma_pool_free %s, %p (bad vaddr)/%Lx\n",
-			       pool->name, vaddr, (unsigned long long)dma);
+			pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
+			       pool->name, vaddr, &dma);
 		return;
 	}
 	{
@@ -450,11 +450,11 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 		}
 		spin_unlock_irqrestore(&pool->lock, flags);
 		if (pool->dev)
-			dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
-				pool->name, (unsigned long long)dma);
+			dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
+				pool->name, &dma);
 		else
-			pr_err("dma_pool_free %s, dma %Lx already free\n",
-			       pool->name, (unsigned long long)dma);
+			pr_err("dma_pool_free %s, dma %pad already free\n",
+			       pool->name, &dma);
 		return;
 	}
 }
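Note: the dmapool.c conversion replaces casts to unsigned long long with the %pad printk specifier, which prints a dma_addr_t at its correct width on every architecture and, unusually, takes a pointer to the value rather than the value itself. Illustrative use; dev and the buffer here are placeholders.

/* Sketch: %pad dereferences its argument, so pass &dma. */
dma_addr_t dma;
void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);

if (cpu)
	dev_info(dev, "coherent buffer mapped at %pad\n", &dma);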
diff --git a/mm/filemap.c b/mm/filemap.c
index 3f9afded581b..1694623a6289 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -13,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/dax.h>
 #include <linux/fs.h>
+#include <linux/sched/signal.h>
 #include <linux/uaccess.h>
 #include <linux/capability.h>
 #include <linux/kernel_stat.h>
@@ -788,7 +789,7 @@ static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
 	return autoremove_wake_function(wait, mode, sync, key);
 }
 
-void wake_up_page_bit(struct page *page, int bit_nr)
+static void wake_up_page_bit(struct page *page, int bit_nr)
 {
 	wait_queue_head_t *q = page_waitqueue(page);
 	struct wait_page_key key;
@@ -821,7 +822,13 @@ void wake_up_page_bit(struct page *page, int bit_nr)
 	}
 	spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(wake_up_page_bit);
+
+static void wake_up_page(struct page *page, int bit)
+{
+	if (!PageWaiters(page))
+		return;
+	wake_up_page_bit(page, bit);
+}
 
 static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 		struct page *page, int bit_nr, int state, bool lock)
@@ -1002,9 +1009,12 @@ void page_endio(struct page *page, bool is_write, int err)
 		unlock_page(page);
 	} else {
 		if (err) {
+			struct address_space *mapping;
+
 			SetPageError(page);
-			if (page->mapping)
-				mapping_set_error(page->mapping, err);
+			mapping = page_mapping(page);
+			if (mapping)
+				mapping_set_error(mapping, err);
 		}
 		end_page_writeback(page);
 	}
@@ -1013,7 +1023,7 @@ EXPORT_SYMBOL_GPL(page_endio);
 
 /**
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
- * @page: the page to lock
+ * @__page: the page to lock
 */
 void __lock_page(struct page *__page)
 {
@@ -2163,7 +2173,6 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
 
 /**
 * filemap_fault - read in file data for page fault handling
- * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
@@ -2185,10 +2194,10 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
 *
 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 */
-int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int filemap_fault(struct vm_fault *vmf)
 {
 	int error;
-	struct file *file = vma->vm_file;
+	struct file *file = vmf->vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
 	struct file_ra_state *ra = &file->f_ra;
 	struct inode *inode = mapping->host;
@@ -2210,12 +2219,12 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		 * We found the page, so try async readahead before
 		 * waiting for the lock.
 		 */
-		do_async_mmap_readahead(vma, ra, file, page, offset);
+		do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
 	} else if (!page) {
 		/* No page in the page cache at all */
-		do_sync_mmap_readahead(vma, ra, file, offset);
+		do_sync_mmap_readahead(vmf->vma, ra, file, offset);
 		count_vm_event(PGMAJFAULT);
-		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+		mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
 		ret = VM_FAULT_MAJOR;
retry_find:
 		page = find_get_page(mapping, offset);
@@ -2223,7 +2232,7 @@ retry_find:
 		goto no_cached_page;
 	}
 
-	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
+	if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
 		put_page(page);
 		return ret | VM_FAULT_RETRY;
 	}
@@ -2390,14 +2399,14 @@ next:
 }
 EXPORT_SYMBOL(filemap_map_pages);
 
-int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+int filemap_page_mkwrite(struct vm_fault *vmf)
 {
 	struct page *page = vmf->page;
-	struct inode *inode = file_inode(vma->vm_file);
+	struct inode *inode = file_inode(vmf->vma->vm_file);
 	int ret = VM_FAULT_LOCKED;
 
 	sb_start_pagefault(inode->i_sb);
-	file_update_time(vma->vm_file);
+	file_update_time(vmf->vma->vm_file);
 	lock_page(page);
 	if (page->mapping != inode->i_mapping) {
 		unlock_page(page);
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -10,7 +10,7 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <linux/rwsem.h>
 #include <linux/hugetlb.h>
 
@@ -253,6 +253,13 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags);
 	}
+	if (pud_devmap(*pud)) {
+		ptl = pud_lock(mm, pud);
+		page = follow_devmap_pud(vma, address, pud, flags);
+		spin_unlock(ptl);
+		if (page)
+			return page;
+	}
 	if (unlikely(pud_bad(*pud)))
 		return no_page_table(vma, flags);
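Note: the filemap.c changes are part of the tree-wide conversion of the ->fault and ->page_mkwrite callbacks from (vma, vmf) to a single struct vm_fault *, with the VMA now reachable as vmf->vma. Under that convention a filesystem or driver handler looks roughly like the sketch below; my_fault and the ops table name are hypothetical, while filemap_fault, filemap_map_pages, and filemap_page_mkwrite are the real helpers converted above.

/* Sketch of a fault handler under the single-argument convention. */
static int my_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;	/* vma now lives in vmf */
	struct file *file = vma->vm_file;

	/* ... per-driver work for the page at vmf->pgoff, if any ... */
	return filemap_fault(vmf);	/* or fill vmf->page and return 0 */
}

static const struct vm_operations_struct my_vm_ops = {
	.fault		= my_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};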