From 0e3c9a2284f5417f196e327c254d0b84c9ee8929 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 1 Jun 2010 11:08:43 +0200
Subject: Revert "writeback: fix WB_SYNC_NONE writeback from umount"

This reverts commit e913fc825dc685a444cb4c1d0f9d32f372f59861.

We are investigating a hang associated with the WB_SYNC_NONE changes,
so revert them for now.

Conflicts:

    fs/fs-writeback.c
    mm/page-writeback.c

Signed-off-by: Jens Axboe
---
 mm/page-writeback.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b289310e2c89..5fa63bdf52e4 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -597,7 +597,7 @@ static void balance_dirty_pages(struct address_space *mapping,
         (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
                    + global_page_state(NR_UNSTABLE_NFS))
                       > background_thresh)))
-        bdi_start_writeback(bdi, NULL, 0, 0);
+        bdi_start_writeback(bdi, NULL, 0);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -707,7 +707,7 @@ void laptop_mode_timer_fn(unsigned long data)
      */
     if (bdi_has_dirty_io(&q->backing_dev_info))
-        bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages, 0);
+        bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages);
 }
 
 /*
-- cgit v1.2.3

From af5a30d8cfcfc561336f982b06345d6b815e0bb3 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Thu, 3 Jun 2010 22:01:46 +1000
Subject: fix truncate inode time modification breakage

mtime and ctime should be changed only if the file size has actually
changed.  The patches changing ext2 and tmpfs from vmtruncate to the new
truncate sequence caused regressions where they always update the
timestamps.

There are some strange cases in POSIX where truncate(2) must not update
times unless the size has actually changed; see 6e656be89.

This area is still rather buggy, in different ways, in a lot of
filesystems and needs a cleanup and audit (ideally the vfs would provide
a simple attribute or call to tell filesystems exactly which attributes
to change).  But coming up with the best solution will take a while and
is not appropriate for -rc anyway.  So fix the recent regression for now.

Signed-off-by: Nick Piggin
Signed-off-by: Al Viro
---
 mm/shmem.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/shmem.c b/mm/shmem.c
index 7e5030ae18ff..f65f84062db5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -764,10 +764,11 @@ done2:
 static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 {
     struct inode *inode = dentry->d_inode;
+    loff_t newsize = attr->ia_size;
     int error;
 
-    if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
-        loff_t newsize = attr->ia_size;
+    if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)
+        && newsize != inode->i_size) {
         struct page *page = NULL;
 
         if (newsize < inode->i_size) {
-- cgit v1.2.3
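
As an illustration of the rule the shmem fix above restores (not part of the
patch; the test-file path and printed messages are made up for this sketch),
a small userspace program can check that a same-size truncate leaves the
timestamps alone while a size-changing truncate updates them:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        /* Hypothetical test path -- point it at a file on tmpfs. */
        const char *path = "/tmp/trunc-times-test";
        struct stat before, after;

        int fd = open(path, O_CREAT | O_RDWR | O_TRUNC, 0600);
        if (fd < 0 || write(fd, "hello", 5) != 5)
            return 1;
        fsync(fd);

        stat(path, &before);
        sleep(1);                  /* let the timestamp tick over */

        truncate(path, 5);         /* same size: mtime/ctime must not change */
        stat(path, &after);
        printf("same-size truncate changed mtime: %s\n",
               before.st_mtime != after.st_mtime ? "yes (bug)" : "no");

        truncate(path, 2);         /* smaller size: mtime/ctime must change */
        stat(path, &after);
        printf("shrinking truncate changed mtime: %s\n",
               before.st_mtime != after.st_mtime ? "yes" : "no (bug)");

        close(fd);
        unlink(path);
        return 0;
    }

Run it with the test file on a tmpfs mount to exercise the shmem path this
patch touches; per the changelog above, the unpatched code updated the
timestamps even in the same-size case.
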
From bb21c7ce18eff8e6e7877ca1d06c6db719376e3c Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Fri, 4 Jun 2010 14:15:05 -0700
Subject: vmscan: fix do_try_to_free_pages() return value when priority==0 reclaim failure

Greg Thelen reported that Johannes's recent stack diet patch makes the
kernel hang.  His test case is the following:

    mount -t cgroup none /cgroups -o memory
    mkdir /cgroups/cg1
    echo $$ > /cgroups/cg1/tasks
    dd bs=1024 count=1024 if=/dev/null of=/data/foo
    echo $$ > /cgroups/tasks
    echo 1 > /cgroups/cg1/memory.force_empty

In fact, this try-hard-before-OOM logic has been broken ever since the
following two-year-old patch:

    commit a41f24ea9fd6169b147c53c2392e2887cc1d9247
    Author: Nishanth Aravamudan
    Date:   Tue Apr 29 00:58:25 2008 -0700

        page allocator: smarter retry of costly-order allocations

The original intention was "return success if the system has shrinkable
zones even though the priority==0 reclaim failed".  But the patch above
changed it to "return nr_reclaimed if ...", overlooking that nr_reclaimed
may be 0 when priority==0 reclaim fails.

Johannes's patch 0aeb2339e54e ("vmscan: remove all_unreclaimable scan
control") then made things worse: originally a priority==0 reclaim failure
on memcg returned 0, but that patch changed it to return 1, which
completely confused memcg.

This patch fixes it completely.

Reported-by: Greg Thelen
Signed-off-by: KOSAKI Motohiro
Acked-by: Johannes Weiner
Acked-by: KAMEZAWA Hiroyuki
Tested-by: Greg Thelen
Acked-by: Balbir Singh
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/vmscan.c | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

(limited to 'mm')

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 915dceb487c1..9c7e57cc63a3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1724,13 +1724,13 @@ static void shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static int shrink_zones(int priority, struct zonelist *zonelist,
+static bool shrink_zones(int priority, struct zonelist *zonelist,
                 struct scan_control *sc)
 {
     enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
     struct zoneref *z;
     struct zone *zone;
-    int progress = 0;
+    bool all_unreclaimable = true;
 
     for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
                     sc->nodemask) {
@@ -1757,9 +1757,9 @@ static int shrink_zones(int priority, struct zonelist *zonelist,
         }
 
         shrink_zone(priority, zone, sc);
-        progress = 1;
+        all_unreclaimable = false;
     }
-    return progress;
+    return all_unreclaimable;
 }
 
 /*
@@ -1782,7 +1782,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                     struct scan_control *sc)
 {
     int priority;
-    unsigned long ret = 0;
+    bool all_unreclaimable;
     unsigned long total_scanned = 0;
     struct reclaim_state *reclaim_state = current->reclaim_state;
     unsigned long lru_pages = 0;
@@ -1813,7 +1813,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
         sc->nr_scanned = 0;
         if (!priority)
             disable_swap_token();
-        ret = shrink_zones(priority, zonelist, sc);
+        all_unreclaimable = shrink_zones(priority, zonelist, sc);
         /*
          * Don't shrink slabs when reclaiming memory from
          * over limit cgroups
@@ -1826,10 +1826,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
             }
         }
         total_scanned += sc->nr_scanned;
-        if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
-            ret = sc->nr_reclaimed;
+        if (sc->nr_reclaimed >= sc->nr_to_reclaim)
             goto out;
-        }
 
         /*
          * Try to write back as many pages as we just scanned. This
@@ -1849,9 +1847,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
             priority < DEF_PRIORITY - 2)
             congestion_wait(BLK_RW_ASYNC, HZ/10);
     }
-    /* top priority shrink_zones still had more to do? don't OOM, then */
-    if (ret && scanning_global_lru(sc))
-        ret = sc->nr_reclaimed;
+
 out:
     /*
      * Now that we've scanned all the zones at this priority level, note
@@ -1877,7 +1873,14 @@ out:
     delayacct_freepages_end();
     put_mems_allowed();
 
-    return ret;
+    if (sc->nr_reclaimed)
+        return sc->nr_reclaimed;
+
+    /* top priority shrink_zones still had more to do? don't OOM, then */
+    if (scanning_global_lru(sc) && !all_unreclaimable)
+        return 1;
+
+    return 0;
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-- cgit v1.2.3
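
As a side note, the return-value contract the patch establishes for
do_try_to_free_pages() can be summarised in a tiny userspace model (not
kernel code; the struct and function here are invented purely for
illustration): report progress as the number of reclaimed pages, return 1
only for global reclaim that still saw reclaimable zones, and return 0 for
a genuine failure such as the memcg force_empty case above.

    #include <assert.h>
    #include <stdbool.h>

    struct scan_result {
        unsigned long nr_reclaimed;  /* pages actually freed */
        bool all_unreclaimable;      /* every zone was skipped as unreclaimable */
        bool global_lru;             /* global reclaim, as opposed to memcg reclaim */
    };

    static unsigned long reclaim_return_value(const struct scan_result *r)
    {
        if (r->nr_reclaimed)
            return r->nr_reclaimed;                  /* real progress */
        if (r->global_lru && !r->all_unreclaimable)
            return 1;                                /* don't OOM yet */
        return 0;                                    /* genuine failure */
    }

    int main(void)
    {
        /* Progress is reported as the number of reclaimed pages. */
        struct scan_result progress = { 128, false, true };
        assert(reclaim_return_value(&progress) == 128);

        /* Global reclaim that freed nothing but still saw reclaimable
         * zones returns 1, so the page allocator does not OOM prematurely. */
        struct scan_result global_stall = { 0, false, true };
        assert(reclaim_return_value(&global_stall) == 1);

        /* memcg reclaim that freed nothing must report failure (0), not 1,
         * otherwise memory.force_empty retries forever -- the reported hang. */
        struct scan_result memcg_fail = { 0, false, false };
        assert(reclaim_return_value(&memcg_fail) == 0);

        return 0;
    }

The last case is the one described in the changelog: a 0 return lets the
memcg side see the failure instead of looping on a bogus "success".
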
From 0b5649278e39a068aaf91399941bab1b4a4a3cc2 Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Wed, 9 Jun 2010 10:37:18 +1000
Subject: writeback: pay attention to wbc->nr_to_write in write_cache_pages

If a filesystem writes more than one page in ->writepage, write_cache_pages
fails to notice this and continues to attempt writeback when wbc->nr_to_write
has gone negative - this trace was captured from XFS:

    wbc_writeback_start: towrt=1024
    wbc_writepage: towrt=1024
    wbc_writepage: towrt=0
    wbc_writepage: towrt=-1
    wbc_writepage: towrt=-5
    wbc_writepage: towrt=-21
    wbc_writepage: towrt=-85

This has adverse effects on filesystem writeback behaviour.
write_cache_pages() needs to terminate after a certain number of pages are
written, not after a certain number of calls to ->writepage are made.

This is a regression introduced by 17bc6c30cf6bfffd816bdc53682dd46fc34a2cf4
("vfs: Add no_nrwrite_index_update writeback control flag"), but it cannot
be reverted directly because of subsequent bug fixes that have gone in on
top of it.

Signed-off-by: Dave Chinner
Reviewed-by: Christoph Hellwig
Signed-off-by: Linus Torvalds
---
 mm/page-writeback.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

(limited to 'mm')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5fa63bdf52e4..b3dbb8040ed5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -835,7 +835,6 @@ int write_cache_pages(struct address_space *mapping,
     pgoff_t done_index;
     int cycled;
     int range_whole = 0;
-    long nr_to_write = wbc->nr_to_write;
 
     pagevec_init(&pvec, 0);
     if (wbc->range_cyclic) {
@@ -935,11 +934,10 @@ continue_unlock:
                     done = 1;
                     break;
                 }
-             }
+            }
 
-            if (nr_to_write > 0) {
-                nr_to_write--;
-                if (nr_to_write == 0 &&
+            if (wbc->nr_to_write > 0) {
+                if (--wbc->nr_to_write == 0 &&
                     wbc->sync_mode == WB_SYNC_NONE) {
                     /*
                      * We stop writing back only if we are
@@ -970,11 +968,8 @@ continue_unlock:
         end = writeback_index - 1;
         goto retry;
     }
-    if (!wbc->no_nrwrite_index_update) {
-        if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
-            mapping->writeback_index = done_index;
-        wbc->nr_to_write = nr_to_write;
-    }
+    if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+        mapping->writeback_index = done_index;
 
     return ret;
 }
-- cgit v1.2.3
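
To make the accounting problem concrete, here is a small userspace model
(not the kernel code: writeback_control is stripped down to one field and
the cluster size is arbitrary) contrasting the old per-call counting with
the fixed accounting against the shared wbc->nr_to_write budget:

    #include <stdio.h>

    /* Toy stand-in for struct writeback_control: only the shared budget. */
    struct writeback_control {
        long nr_to_write;
    };

    /* Models an XFS-style ->writepage that clusters adjacent dirty pages:
     * one call writes a whole cluster and charges it to the wbc. */
    static void writepage_cluster(struct writeback_control *wbc, long cluster)
    {
        wbc->nr_to_write -= cluster;
    }

    int main(void)
    {
        const long cluster = 16;

        /* Old behaviour: a private copy is decremented once per call, so
         * ->writepage keeps being called long after the shared budget is
         * spent and wbc->nr_to_write goes far negative (towrt=-85 ...). */
        struct writeback_control old_wbc = { .nr_to_write = 1024 };
        long local = 1024, old_calls = 0;
        while (local > 0) {
            writepage_cluster(&old_wbc, cluster);
            local--;
            old_calls++;
        }

        /* Fixed behaviour: terminate on the shared wbc->nr_to_write, i.e.
         * after roughly nr_to_write written *pages*, not nr_to_write calls. */
        struct writeback_control new_wbc = { .nr_to_write = 1024 };
        long new_calls = 0;
        while (new_wbc.nr_to_write > 0) {
            writepage_cluster(&new_wbc, cluster);
            new_calls++;
        }

        printf("old: %ld calls, budget ends at %ld\n", old_calls, old_wbc.nr_to_write);
        printf("new: %ld calls, budget ends at %ld\n", new_calls, new_wbc.nr_to_write);
        return 0;
    }

The first loop mirrors the trace above: the filesystem has long since spent
the 1024-page budget, but the caller keeps invoking ->writepage because its
private counter knows nothing about it.
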
From d87815cb2090e07b0b0b2d73dc9740706e92c80c Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Wed, 9 Jun 2010 10:37:20 +1000
Subject: writeback: limit write_cache_pages integrity scanning to current EOF

sync can currently take a really long time if a concurrent writer is
extending a file.  The problem is that the dirty pages on the address
space grow in the same direction as write_cache_pages scans, so if the
writer keeps ahead of writeback, the writeback will not terminate until
the writer stops adding dirty pages.

For a data integrity sync, we only need to write the pages that were
dirty at the time we start the writeback, so we can stop scanning once
we get to the page that was at the end of the file at the time the scan
started.

This prevents operations like copying a large file from stalling sync
indefinitely, as writeback will not write back pages that were dirtied
after the sync was started.  This does not impact the existing integrity
guarantees, as any dirty page (old or new) within the EOF range at the
start of the scan will still be captured.

This patch will not prevent sync from blocking on large writes into
holes.  That requires more complex intervention, while this patch only
addresses the common append case of this sync holdoff.

Signed-off-by: Dave Chinner
Reviewed-by: Christoph Hellwig
Signed-off-by: Linus Torvalds
---
 mm/page-writeback.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

(limited to 'mm')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b3dbb8040ed5..bbd396ac9546 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -851,7 +851,22 @@ int write_cache_pages(struct address_space *mapping,
         if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
             range_whole = 1;
         cycled = 1; /* ignore range_cyclic tests */
+
+        /*
+         * If this is a data integrity sync, cap the writeback to the
+         * current end of file. Any extension to the file that occurs
+         * after this is a new write and we don't need to write those
+         * pages out to fulfil our data integrity requirements. If we
+         * try to write them out, we can get stuck in this scan until
+         * the concurrent writer stops adding dirty pages and extending
+         * EOF.
+         */
+        if (wbc->sync_mode == WB_SYNC_ALL &&
+            wbc->range_end == LLONG_MAX) {
+            end = i_size_read(mapping->host) >> PAGE_CACHE_SHIFT;
+        }
     }
+
 retry:
     done_index = index;
     while (!done && (index <= end)) {
-- cgit v1.2.3
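
Finally, the termination argument for this patch in a minimal userspace
sketch (not the kernel loop; the page counts are arbitrary): snapshotting
EOF before the scan bounds the work even though the file keeps growing
underneath it.

    #include <stdio.h>

    int main(void)
    {
        long eof_pages = 1000;      /* pages in the file when sync starts */
        long end = eof_pages - 1;   /* snapshot: index of the last page at scan start */
        long written = 0;
        long index;

        for (index = 0; index <= end; index++) {
            written++;              /* "write back" the page at index */
            eof_pages++;            /* a concurrent writer keeps appending */
        }

        /* The scan stops after the 1000 pages that existed when it began,
         * even though the file has doubled in size meanwhile.  Chasing the
         * live eof_pages instead of the snapshot would never terminate
         * while the writer keeps going. */
        printf("written=%ld pages, file is now %ld pages\n", written, eof_pages);
        return 0;
    }
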