From 7af446a841a264a1a9675001005b29ce01d1fc57 Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Fri, 28 May 2010 09:29:17 +0900
Subject: HWPOISON, hugetlb: enable error handling path for hugepage

This patch just enables the handling path. The real containment and
recovery operations will be implemented in the following patches.

Dependency:
  "hugetlb, rmap: add reverse mapping for hugepage."

Signed-off-by: Naoya Horiguchi
Cc: Andi Kleen
Cc: Andrew Morton
Acked-by: Fengguang Wu
Signed-off-by: Andi Kleen
---
 mm/memory-failure.c | 39 ++++++++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 17 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 620b0b4..1ec68c8 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -45,6 +45,7 @@
 #include <linux/page-isolation.h>
 #include <linux/suspend.h>
 #include <linux/slab.h>
+#include <linux/hugetlb.h>
 #include "internal.h"
 
 int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -837,6 +838,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	int ret;
 	int i;
 	int kill = 1;
+	struct page *hpage = compound_head(p);
 
 	if (PageReserved(p) || PageSlab(p))
 		return SWAP_SUCCESS;
@@ -845,10 +847,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * This check implies we don't kill processes if their pages
 	 * are in the swap cache early. Those are always late kills.
 	 */
-	if (!page_mapped(p))
+	if (!page_mapped(hpage))
 		return SWAP_SUCCESS;
 
-	if (PageCompound(p) || PageKsm(p))
+	if (PageKsm(p))
 		return SWAP_FAIL;
 
 	if (PageSwapCache(p)) {
@@ -863,10 +865,11 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * XXX: the dirty test could be racy: set_page_dirty() may not always
 	 * be called inside page lock (it's recommended but not enforced).
 	 */
-	mapping = page_mapping(p);
-	if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) {
-		if (page_mkclean(p)) {
-			SetPageDirty(p);
+	mapping = page_mapping(hpage);
+	if (!PageDirty(hpage) && mapping &&
+	    mapping_cap_writeback_dirty(mapping)) {
+		if (page_mkclean(hpage)) {
+			SetPageDirty(hpage);
 		} else {
 			kill = 0;
 			ttu |= TTU_IGNORE_HWPOISON;
@@ -885,14 +888,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * there's nothing that can be done.
 	 */
 	if (kill)
-		collect_procs(p, &tokill);
+		collect_procs(hpage, &tokill);
 
 	/*
 	 * try_to_unmap can fail temporarily due to races.
 	 * Try a few times (RED-PEN better strategy?)
 	 */
 	for (i = 0; i < N_UNMAP_TRIES; i++) {
-		ret = try_to_unmap(p, ttu);
+		ret = try_to_unmap(hpage, ttu);
 		if (ret == SWAP_SUCCESS)
 			break;
 		pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn, ret);
@@ -900,7 +903,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 
 	if (ret != SWAP_SUCCESS)
 		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
-				pfn, page_mapcount(p));
+				pfn, page_mapcount(hpage));
 
 	/*
 	 * Now that the dirty bit has been propagated to the
@@ -911,7 +914,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * use a more force-full uncatchable kill to prevent
 	 * any accesses to the poisoned memory.
 	 */
-	kill_procs_ao(&tokill, !!PageDirty(p), trapno,
+	kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
 		      ret != SWAP_SUCCESS, pfn);
 
 	return ret;
@@ -921,6 +924,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 {
 	struct page_state *ps;
 	struct page *p;
+	struct page *hpage;
 	int res;
 
 	if (!sysctl_memory_failure_recovery)
@@ -934,6 +938,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	}
 
 	p = pfn_to_page(pfn);
+	hpage = compound_head(p);
 	if (TestSetPageHWPoison(p)) {
 		printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
 		return 0;
@@ -953,7 +958,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
 	 */
 	if (!(flags & MF_COUNT_INCREASED) &&
-		!get_page_unless_zero(compound_head(p))) {
+		!get_page_unless_zero(hpage)) {
 		if (is_free_buddy_page(p)) {
 			action_result(pfn, "free buddy", DELAYED);
 			return 0;
@@ -971,9 +976,9 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	 * The check (unnecessarily) ignores LRU pages being isolated and
 	 * walked by the page reclaim code, however that's not a big loss.
 	 */
-	if (!PageLRU(p))
+	if (!PageLRU(p) && !PageHuge(p))
 		shake_page(p, 0);
-	if (!PageLRU(p)) {
+	if (!PageLRU(p) && !PageHuge(p)) {
 		/*
 		 * shake_page could have turned it free.
 		 */
@@ -991,7 +996,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	 * It's very difficult to mess with pages currently under IO
 	 * and in many cases impossible, so we just avoid it here.
 	 */
-	lock_page_nosync(p);
+	lock_page_nosync(hpage);
 
 	/*
 	 * unpoison always clear PG_hwpoison inside page lock
@@ -1004,8 +1009,8 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	if (hwpoison_filter(p)) {
 		if (TestClearPageHWPoison(p))
 			atomic_long_dec(&mce_bad_pages);
-		unlock_page(p);
-		put_page(p);
+		unlock_page(hpage);
+		put_page(hpage);
 		return 0;
 	}
 
@@ -1038,7 +1043,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 		}
 	}
 out:
-	unlock_page(p);
+	unlock_page(hpage);
 	return res;
 }
 EXPORT_SYMBOL_GPL(__memory_failure);
--
cgit v1.1
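
The common thread in this patch is redirecting state operations (locking,
refcounting, unmapping) from the raw error page to its head page. As a rough
standalone illustration of why a single head lookup suffices (not kernel code:
the kernel's compound_head() follows the tail page's back-pointer to its head,
whereas this sketch relies on the pfn alignment of buddy-allocated hugepages,
and the order value is an assumed 2MB/order-9 example):

  /* Standalone sketch: resolve an error pfn to its hugepage head pfn. */
  #include <stdio.h>

  #define HPAGE_ORDER 9	/* assumed order-9 (2MB) hugepage: 512 base pages */

  static unsigned long compound_head_pfn(unsigned long pfn)
  {
  	/* A buddy-allocated hugepage is pfn-aligned to its order, so
  	 * masking the low bits yields the head page's pfn. */
  	return pfn & ~((1UL << HPAGE_ORDER) - 1);
  }

  int main(void)
  {
  	unsigned long bad_pfn = 0x12345;	/* error lands on a tail page */
  	unsigned long head = compound_head_pfn(bad_pfn);

  	printf("error pfn %#lx -> head pfn %#lx (offset %lu)\n",
  	       bad_pfn, head, bad_pfn - head);
  	return 0;
  }

Operating on the head page is what lets one page lock and one refcount cover
the whole 2^order region that hugetlb manages as a unit.
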
From 7013febc8940960eaaba039bac0f80910f679ce1 Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Fri, 28 May 2010 09:29:18 +0900
Subject: HWPOISON, hugetlb: set/clear PG_hwpoison bits on hugepage

To avoid a race between concurrent memory errors on the same hugepage,
we atomically test and set the PG_hwpoison bit on the head page.
All pages in the error hugepage are considered hwpoisoned for now,
so set and clear all PG_hwpoison bits in the hugepage with the page
lock of the head page held.

Dependency:
  "HWPOISON, hugetlb: enable error handling path for hugepage"

Signed-off-by: Naoya Horiguchi
Cc: Andrew Morton
Acked-by: Fengguang Wu
Signed-off-by: Andi Kleen
---
 mm/memory-failure.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1ec68c8..fee648b 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -920,6 +920,22 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	return ret;
 }
 
+static void set_page_hwpoison_huge_page(struct page *hpage)
+{
+	int i;
+	int nr_pages = 1 << compound_order(hpage);
+	for (i = 0; i < nr_pages; i++)
+		SetPageHWPoison(hpage + i);
+}
+
+static void clear_page_hwpoison_huge_page(struct page *hpage)
+{
+	int i;
+	int nr_pages = 1 << compound_order(hpage);
+	for (i = 0; i < nr_pages; i++)
+		ClearPageHWPoison(hpage + i);
+}
+
 int __memory_failure(unsigned long pfn, int trapno, int flags)
 {
 	struct page_state *ps;
@@ -1014,6 +1030,26 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 		return 0;
 	}
 
+	/*
+	 * For error on the tail page, we should set PG_hwpoison
+	 * on the head page to show that the hugepage is hwpoisoned
+	 */
+	if (PageTail(p) && TestSetPageHWPoison(hpage)) {
+		action_result(pfn, "hugepage already hardware poisoned",
+			IGNORED);
+		unlock_page(hpage);
+		put_page(hpage);
+		return 0;
+	}
+	/*
+	 * Set PG_hwpoison on all pages in an error hugepage,
+	 * because containment is done in hugepage unit for now.
+	 * Since we have done TestSetPageHWPoison() for the head page with
+	 * page lock held, we can safely set PG_hwpoison bits on tail pages.
+	 */
+	if (PageHuge(p))
+		set_page_hwpoison_huge_page(hpage);
+
 	wait_on_page_writeback(p);
 
 	/*
@@ -1118,6 +1154,8 @@ int unpoison_memory(unsigned long pfn)
 		atomic_long_dec(&mce_bad_pages);
 		freeit = 1;
 	}
+	if (PageHuge(p))
+		clear_page_hwpoison_huge_page(page);
 	unlock_page(page);
 
 	put_page(page);
--
cgit v1.1
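
The TestSetPageHWPoison()-on-the-head-page rule above is what serializes two
racing error handlers: only the handler that flips the head bit proceeds to
mark the tails. A minimal userspace model of that protocol (illustrative only;
the atomic_bool array and atomic_exchange() stand in for per-page PG_hwpoison
bits and the kernel's test-and-set bitop, and order 9 is an assumption):

  /* Standalone sketch: one test-and-set on the head decides the winner. */
  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  #define NR_SUBPAGES (1 << 9)	/* assumed order-9 hugepage */

  static atomic_bool hwpoison[NR_SUBPAGES];	/* [0] is the head page */

  /* Returns false if another handler already poisoned this hugepage. */
  static bool poison_huge_page(void)
  {
  	if (atomic_exchange(&hwpoison[0], true))	/* head page wins race */
  		return false;
  	for (int i = 1; i < NR_SUBPAGES; i++)	/* then mark the tails */
  		atomic_store(&hwpoison[i], true);
  	return true;
  }

  int main(void)
  {
  	printf("first handler:  %s\n", poison_huge_page() ? "proceeds" : "backs off");
  	printf("second handler: %s\n", poison_huge_page() ? "proceeds" : "backs off");
  	return 0;
  }

Marking the tails needs no further atomicity because, as the patch comment
says, the loser already backed off and the page lock excludes unpoison.
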
From c9fbdd5f131440981b124883656ea21fb12cde4a Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Fri, 28 May 2010 09:29:19 +0900
Subject: HWPOISON, hugetlb: maintain mce_bad_pages in handling hugepage error

For now all pages in the error hugepage are considered hwpoisoned,
so count all of them in mce_bad_pages.

Dependency:
  "HWPOISON, hugetlb: enable error handling path for hugepage"

Signed-off-by: Naoya Horiguchi
Cc: Andrew Morton
Acked-by: Fengguang Wu
Signed-off-by: Andi Kleen
---
 mm/memory-failure.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index fee648b..473f15a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -942,6 +942,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	struct page *p;
 	struct page *hpage;
 	int res;
+	unsigned int nr_pages;
 
 	if (!sysctl_memory_failure_recovery)
 		panic("Memory failure from trap %d on page %lx", trapno, pfn);
@@ -960,7 +961,8 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 		return 0;
 	}
 
-	atomic_long_add(1, &mce_bad_pages);
+	nr_pages = 1 << compound_order(hpage);
+	atomic_long_add(nr_pages, &mce_bad_pages);
 
 	/*
 	 * We need/can do nothing about count=0 pages.
@@ -1024,7 +1026,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	}
 	if (hwpoison_filter(p)) {
 		if (TestClearPageHWPoison(p))
-			atomic_long_dec(&mce_bad_pages);
+			atomic_long_sub(nr_pages, &mce_bad_pages);
 		unlock_page(hpage);
 		put_page(hpage);
 		return 0;
@@ -1123,6 +1125,7 @@ int unpoison_memory(unsigned long pfn)
 	struct page *page;
 	struct page *p;
 	int freeit = 0;
+	unsigned int nr_pages;
 
 	if (!pfn_valid(pfn))
 		return -ENXIO;
@@ -1135,9 +1138,11 @@ int unpoison_memory(unsigned long pfn)
 		return 0;
 	}
 
+	nr_pages = 1 << compound_order(page);
+
 	if (!get_page_unless_zero(page)) {
 		if (TestClearPageHWPoison(p))
-			atomic_long_dec(&mce_bad_pages);
+			atomic_long_sub(nr_pages, &mce_bad_pages);
 		pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);
 		return 0;
 	}
@@ -1149,9 +1154,9 @@ int unpoison_memory(unsigned long pfn)
 	 * the PG_hwpoison page will be caught and isolated on the entrance to
 	 * the free buddy page pool.
 	 */
-	if (TestClearPageHWPoison(p)) {
+	if (TestClearPageHWPoison(page)) {
 		pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
-		atomic_long_dec(&mce_bad_pages);
+		atomic_long_sub(nr_pages, &mce_bad_pages);
 		freeit = 1;
 	}
 	if (PageHuge(p))
--
cgit v1.1
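
The accounting change is pure arithmetic: every path that previously moved
mce_bad_pages by one now moves it by 1 << compound_order(), which is 1 for a
base page, so the non-huge behavior is unchanged and poison/unpoison stay
balanced. A tiny standalone model (illustrative; the counter and the order
value are stand-ins for the kernel's atomic_long_t and compound_order()):

  /* Standalone sketch: symmetric hugepage-unit accounting. */
  #include <stdio.h>

  static long mce_bad_pages;	/* stands in for the kernel counter */

  static unsigned int nr_subpages(unsigned int order)
  {
  	return 1u << order;	/* 1 for a base page, 512 for order 9 */
  }

  int main(void)
  {
  	unsigned int order = 9;	/* assumed 2MB hugepage */

  	mce_bad_pages += nr_subpages(order);	/* as in __memory_failure() */
  	printf("after poison:   %ld\n", mce_bad_pages);

  	mce_bad_pages -= nr_subpages(order);	/* as in unpoison_memory() */
  	printf("after unpoison: %ld\n", mce_bad_pages);
  	return 0;
  }
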
From 93f70f900da36fbc19c13c2aa04b2e468c8d00fb Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Fri, 28 May 2010 09:29:20 +0900
Subject: HWPOISON, hugetlb: isolate corrupted hugepage

If the error hugepage is not in use, we can fully recover from the
error by dequeuing it from the freelist, so return RECOVERED.
Otherwise, whether we can recover depends on user processes,
so return DELAYED.

Dependency:
  "HWPOISON, hugetlb: enable error handling path for hugepage"

Signed-off-by: Naoya Horiguchi
Cc: Andrew Morton
Acked-by: Fengguang Wu
Signed-off-by: Andi Kleen
---
 mm/memory-failure.c | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 473f15a..d0b420a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -690,17 +690,29 @@ static int me_swapcache_clean(struct page *p, unsigned long pfn)
 /*
  * Huge pages. Needs work.
  * Issues:
- * No rmap support so we cannot find the original mapper. In theory could walk
- * all MMs and look for the mappings, but that would be non atomic and racy.
- * Need rmap for hugepages for this. Alternatively we could employ a heuristic,
- * like just walking the current process and hoping it has it mapped (that
- * should be usually true for the common "shared database cache" case)
- * Should handle free huge pages and dequeue them too, but this needs to
- * handle huge page accounting correctly.
+ * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
+ *   To narrow down kill region to one page, we need to break up pmd.
+ * - To support soft-offlining for hugepage, we need to support hugepage
+ *   migration.
  */
 static int me_huge_page(struct page *p, unsigned long pfn)
 {
-	return FAILED;
+	struct page *hpage = compound_head(p);
+	/*
+	 * We can safely recover from error on free or reserved (i.e.
+	 * not in-use) hugepage by dequeuing it from freelist.
+	 * To check whether a hugepage is in-use or not, we can't use
+	 * page->lru because it can be used in other hugepage operations,
+	 * such as __unmap_hugepage_range() and gather_surplus_pages().
+	 * So instead we use page_mapping() and PageAnon().
+	 * We assume that this function is called with page lock held,
+	 * so there is no race between isolation and mapping/unmapping.
+	 */
+	if (!(page_mapping(hpage) || PageAnon(hpage))) {
+		__isolate_hwpoisoned_huge_page(hpage);
+		return RECOVERED;
+	}
+	return DELAYED;
 }
 
 /*
--
cgit v1.1
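
The in-use test in me_huge_page() reduces to "neither file-backed nor
anonymous means free, hence safe to pull off the freelist". A standalone
sketch of that decision (illustrative only; struct hpage and its fields are
invented stand-ins for the real struct page's mapping and anon checks):

  /* Standalone sketch: free hugepages recover, in-use ones are delayed. */
  #include <stdbool.h>
  #include <stdio.h>

  enum outcome { RECOVERED, DELAYED };

  struct hpage {
  	void *mapping;	/* non-NULL: file-backed, i.e. in use */
  	bool anon;	/* true: anonymous, i.e. in use */
  };

  static enum outcome me_huge_page_sketch(struct hpage *hp)
  {
  	if (!(hp->mapping || hp->anon)) {
  		/* the kernel would dequeue it from the freelist here */
  		return RECOVERED;
  	}
  	return DELAYED;
  }

  int main(void)
  {
  	struct hpage free_page = { NULL, false };
  	struct hpage mapped_page = { (void *)1, false };

  	printf("free hugepage:   %s\n",
  	       me_huge_page_sketch(&free_page) == RECOVERED ? "RECOVERED" : "DELAYED");
  	printf("in-use hugepage: %s\n",
  	       me_huge_page_sketch(&mapped_page) == RECOVERED ? "RECOVERED" : "DELAYED");
  	return 0;
  }

Avoiding page->lru as the freeness signal is the key design choice: the list
linkage is transiently reused by other hugetlb operations, while the
mapping/anon pair only changes under the page lock this handler already holds.
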