author		Seth, Rohit <rohit.seth@intel.com>	2005-10-29 18:15:47 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-29 21:40:35 -0700
commit		ba56e91c940146e99ac694c4c7cd7f2b4aaa565d (patch)
tree		5254f1ce8e2f39ccb58a74d0ff0120817b3b5c05 /mm
parent		fcdae29aa7a5c79f245110f6680afdc1858d3626 (diff)
[PATCH] mm: page_alloc: increase size of per-cpu-pages
Increase the page allocator's per-cpu magazines from 1/4MB to 1/2MB.

Over 100+ runs of a workload, the difference in the mean is about 2%, and the best results for both sizes are almost the same. However, the maximum variation in results with 1/2MB is only 2.2%, whereas with 1/4MB it is 12%.

Signed-off-by: Rohit Seth <rohit.seth@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
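For scale, a worked example (assuming 4KB pages, which the patch itself does not state): for a large zone the cap now allows 512KB / 4KB = 128 pages instead of 64 before the /4 scaling in zone_batchsize(), so the computed batch rises from (1 << fls(16 + 8)) - 1 = 31 pages under the old 1/4MB cap to 1 << fls(32 + 16) = 64 pages under the new 1/2MB cap.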
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	24
1 file changed, 12 insertions, 12 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 94c864e..f799217 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1721,29 +1721,29 @@ static int __devinit zone_batchsize(struct zone *zone)
 	/*
 	 * The per-cpu-pages pools are set to around 1000th of the
-	 * size of the zone. But no more than 1/4 of a meg - there's
-	 * no point in going beyond the size of L2 cache.
+	 * size of the zone. But no more than 1/2 of a meg.
 	 *
 	 * OK, so we don't know how big the cache is. So guess.
 	 */
 	batch = zone->present_pages / 1024;
-	if (batch * PAGE_SIZE > 256 * 1024)
-		batch = (256 * 1024) / PAGE_SIZE;
+	if (batch * PAGE_SIZE > 512 * 1024)
+		batch = (512 * 1024) / PAGE_SIZE;
 	batch /= 4;		/* We effectively *= 4 below */
 	if (batch < 1)
 		batch = 1;
 
 	/*
-	 * Clamp the batch to a 2^n - 1 value. Having a power
-	 * of 2 value was found to be more likely to have
-	 * suboptimal cache aliasing properties in some cases.
+	 * We will be trying to allocate bigger chunks of contiguous
+	 * memory of the order of fls(batch). This should result in
+	 * better cache coloring.
 	 *
-	 * For example if 2 tasks are alternately allocating
-	 * batches of pages, one task can end up with a lot
-	 * of pages of one half of the possible page colors
-	 * and the other with pages of the other colors.
+	 * A sanity check also to ensure that batch is still in limits.
 	 */
-	batch = (1 << fls(batch + batch/2)) - 1;
+	batch = (1 << fls(batch + batch/2));
+
+	if (fls(batch) >= (PAGE_SHIFT + MAX_ORDER - 2))
+		batch = PAGE_SHIFT + ((MAX_ORDER - 1 - PAGE_SHIFT)/2);
+
 	return batch;
 }
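For readers who want to check the new arithmetic outside the kernel, here is a minimal user-space sketch of the updated zone_batchsize() computation. It is not kernel code: fls_sketch(), batchsize_sketch(), PAGE_SIZE_BYTES, PAGE_SHIFT_VAL, MAX_ORDER_VAL and the 1GB example zone are all assumed stand-ins for a typical 4KB-page configuration; the kernel takes the real values from its own configuration.

#include <stdio.h>

/* Assumed values for a typical 4KB-page configuration. */
#define PAGE_SIZE_BYTES 4096UL
#define PAGE_SHIFT_VAL  12
#define MAX_ORDER_VAL   11

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_sketch(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static unsigned long batchsize_sketch(unsigned long present_pages)
{
	unsigned long batch;

	batch = present_pages / 1024;
	if (batch * PAGE_SIZE_BYTES > 512 * 1024)
		batch = (512 * 1024) / PAGE_SIZE_BYTES;
	batch /= 4;	/* the kernel effectively multiplies by 4 again later */
	if (batch < 1)
		batch = 1;

	/* Round up to the next power of two, as the patch now does. */
	batch = 1UL << fls_sketch(batch + batch / 2);

	/* Sanity clamp mirroring the new check in the patch. */
	if (fls_sketch(batch) >= PAGE_SHIFT_VAL + MAX_ORDER_VAL - 2)
		batch = PAGE_SHIFT_VAL + (MAX_ORDER_VAL - 1 - PAGE_SHIFT_VAL) / 2;

	return batch;
}

int main(void)
{
	/* Example: a zone holding 1GB of 4KB pages (262144 present pages). */
	printf("batch = %lu\n", batchsize_sketch(262144));
	return 0;
}

Compiled with an ordinary C compiler, this prints batch = 64 for the example zone; the same zone under the old 1/4MB cap and 2^n - 1 clamp would have yielded 31.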