| author | Ingo Molnar <mingo@elte.hu> | 2009-11-17 07:51:02 +0100 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@elte.hu> | 2009-11-17 07:51:07 +0100 |
| commit | 99f4c9de2b707795acb215e2e94df7ea266042b5 (patch) | |
| tree | 6123682a8e0d880feebb690a55b3fb442ef3dee8 /mm | |
| parent | 62ad33f67003b9a7b6013f0511579b9805e11626 (diff) | |
| parent | 156171c71a0dc4bce12b4408bb1591f8fe32dc1a (diff) | |
Merge commit 'v2.6.32-rc7' into core/iommu
Merge reason: Add fixes we'll depend on.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/highmem.c | 17 |
| -rw-r--r-- | mm/ksm.c | 1 |
| -rw-r--r-- | mm/migrate.c | 2 |
| -rw-r--r-- | mm/page_alloc.c | 4 |
4 files changed, 16 insertions, 8 deletions
diff --git a/mm/highmem.c b/mm/highmem.c
index 25878cc..9c1e627 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -426,16 +426,21 @@ void __init page_address_init(void)
 
 void debug_kmap_atomic(enum km_type type)
 {
-        static unsigned warn_count = 10;
+        static int warn_count = 10;
 
-        if (unlikely(warn_count == 0))
+        if (unlikely(warn_count < 0))
                 return;
 
         if (unlikely(in_interrupt())) {
-                if (in_irq()) {
+                if (in_nmi()) {
+                        if (type != KM_NMI && type != KM_NMI_PTE) {
+                                WARN_ON(1);
+                                warn_count--;
+                        }
+                } else if (in_irq()) {
                         if (type != KM_IRQ0 && type != KM_IRQ1 &&
                             type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
-                            type != KM_BOUNCE_READ) {
+                            type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
                                 WARN_ON(1);
                                 warn_count--;
                         }
@@ -452,7 +457,9 @@ void debug_kmap_atomic(enum km_type type)
         }
 
         if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-                        type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
+                        type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
+                        type == KM_IRQ_PTE || type == KM_NMI ||
+                        type == KM_NMI_PTE ) {
                 if (!irqs_disabled()) {
                         WARN_ON(1);
                         warn_count--;
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1012,6 +1012,7 @@ static struct rmap_item *unstable_tree_search_insert(struct page *page,
                 struct rmap_item *tree_rmap_item;
                 int ret;
 
+                cond_resched();
                 tree_rmap_item = rb_entry(*new, struct rmap_item, node);
                 page2[0] = get_mergeable_page(tree_rmap_item);
                 if (!page2[0])
diff --git a/mm/migrate.c b/mm/migrate.c
index 1a4bf48..7dbcb22 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -602,7 +602,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
         struct page *newpage = get_new_page(page, private, &result);
         int rcu_locked = 0;
         int charge = 0;
-        struct mem_cgroup *mem;
+        struct mem_cgroup *mem = NULL;
 
         if (!newpage)
                 return -ENOMEM;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cdcedf6..2bc2ac6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1769,7 +1769,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
                  * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
                  */
                 alloc_flags &= ~ALLOC_CPUSET;
-        } else if (unlikely(rt_task(p)))
+        } else if (unlikely(rt_task(p)) && !in_interrupt())
                 alloc_flags |= ALLOC_HARDER;
 
         if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
@@ -1817,9 +1817,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
                 goto nopage;
 
+restart:
         wake_all_kswapd(order, zonelist, high_zoneidx);
 
-restart:
         /*
          * OK, we're below the kswapd watermark and have kicked background
          * reclaim. Now things get more complex, so set up alloc_flags according
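
Of the fixes pulled in above, the mm/highmem.c counter change is the subtlest: debug_kmap_atomic() is not serialized, so it can be re-entered from a nested interrupt or run concurrently on another CPU between the warn_count check and the decrement, pushing the counter past zero. With an unsigned counter compared against zero, that underflow wraps around and the warning rate limit never engages again; a signed counter checked with "< 0" stays shut off. A minimal user-space sketch of the difference follows (illustrative only, not kernel code):

/* Illustrative only: why mm/highmem.c switches warn_count from
 * "static unsigned ... == 0" to "static int ... < 0". */
#include <stdio.h>

int main(void)
{
        unsigned old_count = 0; /* old scheme: unsigned, stop when == 0 */
        int new_count = 0;      /* new scheme: int, stop when < 0 */

        /* A lost race decrements both counters past their limit once. */
        old_count--;    /* wraps to UINT_MAX: the "== 0" cut-off is skipped */
        new_count--;    /* becomes -1: the "< 0" cut-off keeps suppressing */

        printf("unsigned counter after underflow: %u\n", old_count);
        printf("signed counter after underflow:   %d\n", new_count);
        return 0;
}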