Diffstat (limited to 'mm/memory-failure.c')
 mm/memory-failure.c | 132
 1 file changed, 65 insertions(+), 67 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index a92874a98b60..3ac807c1f4f3 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -985,26 +985,73 @@ static int __get_hwpoison_page(struct page *page)
 	return 0;
 }
 
-static int get_hwpoison_page(struct page *p)
+/*
+ * Safely get reference count of an arbitrary page.
+ *
+ * Returns 0 for a free page, 1 for an in-use page,
+ * -EIO for a page-type we cannot handle and -EBUSY if we raced with an
+ * allocation.
+ * We only incremented refcount in case the page was already in-use and it
+ * is a known type we can handle.
+ */
+static int get_any_page(struct page *p, unsigned long flags)
 {
-	int ret;
-	bool drained = false;
+	int ret = 0, pass = 0;
+	bool count_increased = false;
 
-retry:
-	ret = __get_hwpoison_page(p);
-	if (!ret && !is_free_buddy_page(p) && !page_count(p) && !drained) {
-		/*
-		 * The page might be in a pcplist, so try to drain those
-		 * and see if we are lucky.
-		 */
-		drain_all_pages(page_zone(p));
-		drained = true;
-		goto retry;
+	if (flags & MF_COUNT_INCREASED)
+		count_increased = true;
+
+try_again:
+	if (!count_increased && !__get_hwpoison_page(p)) {
+		if (page_count(p)) {
+			/* We raced with an allocation, retry. */
+			if (pass++ < 3)
+				goto try_again;
+			ret = -EBUSY;
+		} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
+			/* We raced with put_page, retry. */
+			if (pass++ < 3)
+				goto try_again;
+			ret = -EIO;
+		}
+	} else {
+		if (PageHuge(p) || PageLRU(p) || __PageMovable(p)) {
+			ret = 1;
+		} else {
+			/*
+			 * A page we cannot handle. Check whether we can turn
+			 * it into something we can handle.
+			 */
+			if (pass++ < 3) {
+				put_page(p);
+				shake_page(p, 1);
+				count_increased = false;
+				goto try_again;
+			}
+			put_page(p);
+			ret = -EIO;
+		}
 	}
 
 	return ret;
 }
 
+static int get_hwpoison_page(struct page *p, unsigned long flags,
+			     enum mf_flags ctxt)
+{
+	int ret;
+
+	zone_pcp_disable(page_zone(p));
+	if (ctxt == MF_SOFT_OFFLINE)
+		ret = get_any_page(p, flags);
+	else
+		ret = __get_hwpoison_page(p);
+	zone_pcp_enable(page_zone(p));
+
+	return ret;
+}
+
 /*
  * Do all that is necessary to remove user space mappings. Unmap
  * the pages and send SIGBUS to the processes if the data was dirty.
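
The hunk above changes the calling convention for taking a reference on a possibly-poisoned page, so a short caller-side sketch may help. The function soft_offline_example() below is purely illustrative and not part of this patch; it only shows how a soft-offline style caller would consume the new get_hwpoison_page() signature, where passing MF_SOFT_OFFLINE as the context routes the lookup through get_any_page() and its documented return values.

/* Illustrative sketch only -- not part of this patch. */
static int soft_offline_example(struct page *page, unsigned long flags)
{
	int ret = get_hwpoison_page(page, flags, MF_SOFT_OFFLINE);

	if (ret < 0)	/* -EIO: page type we cannot handle, -EBUSY: lost a race */
		return ret;
	if (!ret)	/* 0: the page is free, no reference was taken */
		return 0;

	/*
	 * ret == 1: we now hold a reference on a huge, LRU or __PageMovable
	 * page. A real caller would isolate and migrate it here; this
	 * sketch simply drops the reference again.
	 */
	put_page(page);
	return 0;
}
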
@@ -1185,7 +1232,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
 
 	num_poisoned_pages_inc();
 
-	if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
+	if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p, flags, 0)) {
 		/*
 		 * Check "filter hit" and "race with other subpage."
 		 */
@@ -1387,7 +1434,7 @@ try_again:
 	 * In fact it's dangerous to directly bump up page count from 0,
 	 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
 	 */
-	if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
+	if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p, flags, 0)) {
 		if (is_free_buddy_page(p)) {
 			if (take_page_off_buddy(p)) {
 				page_ref_inc(p);
@@ -1630,6 +1677,7 @@ int unpoison_memory(unsigned long pfn)
 	struct page *page;
 	struct page *p;
 	int freeit = 0;
+	unsigned long flags = 0;
 	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
 					DEFAULT_RATELIMIT_BURST);
 
@@ -1674,7 +1722,7 @@ int unpoison_memory(unsigned long pfn)
 		return 0;
 	}
 
-	if (!get_hwpoison_page(p)) {
+	if (!get_hwpoison_page(p, flags, 0)) {
 		if (TestClearPageHWPoison(p))
 			num_poisoned_pages_dec();
 		unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
@@ -1705,56 +1753,6 @@ int unpoison_memory(unsigned long pfn)
 }
 EXPORT_SYMBOL(unpoison_memory);
 
-/*
- * Safely get reference count of an arbitrary page.
- * Returns 0 for a free page, 1 for an in-use page, -EIO for a page-type we
- * cannot handle and -EBUSY if we raced with an allocation.
- * We only incremented refcount in case the page was already in-use and it is
- * a known type we can handle.
- */
-static int get_any_page(struct page *p, int flags)
-{
-	int ret = 0, pass = 0;
-	bool count_increased = false;
-
-	if (flags & MF_COUNT_INCREASED)
-		count_increased = true;
-
-try_again:
-	if (!count_increased && !get_hwpoison_page(p)) {
-		if (page_count(p)) {
-			/* We raced with an allocation, retry. */
-			if (pass++ < 3)
-				goto try_again;
-			ret = -EBUSY;
-		} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
-			/* We raced with put_page, retry. */
-			if (pass++ < 3)
-				goto try_again;
-			ret = -EIO;
-		}
-	} else {
-		if (PageHuge(p) || PageLRU(p) || __PageMovable(p)) {
-			ret = 1;
-		} else {
-			/*
-			 * A page we cannot handle. Check whether we can turn
-			 * it into something we can handle.
-			 */
-			if (pass++ < 3) {
-				put_page(p);
-				shake_page(p, 1);
-				count_increased = false;
-				goto try_again;
-			}
-			put_page(p);
-			ret = -EIO;
-		}
-	}
-
-	return ret;
-}
-
 static bool isolate_page(struct page *page, struct list_head *pagelist)
 {
 	bool isolated = false;
@@ -1928,7 +1926,7 @@ int soft_offline_page(unsigned long pfn, int flags)
 
 retry:
 	get_online_mems();
-	ret = get_any_page(page, flags);
+	ret = get_hwpoison_page(page, flags, MF_SOFT_OFFLINE);
 	put_online_mems();
 
 	if (ret > 0) {