author		Mel Gorman <mel@csn.ul.ie>	2009-06-16 15:32:04 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 19:47:33 -0700
commit		728ec980fb9fa2d65d9e05444079a53615985e7b (patch)
tree		d98dca98cd46fc28a871135cc9bd95168e4667b3 /mm/page_alloc.c
parent		a56f57ff94c25d5d80def06f3ed8fe7f99147762 (diff)
page allocator: inline __rmqueue_smallest()
Inline __rmqueue_smallest by altering the flow very slightly so that there is
only one call site. Because there is only one call site, this function can
then be inlined without causing text bloat. On an x86-based config, this
patch reduces text by 16 bytes.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	20
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 94f33e2..04713f6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -661,7 +661,8 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  * Go through the free lists for the given migratetype and remove
  * the smallest available page from the freelists
  */
-static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
+static inline
+struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 						int migratetype)
 {
 	unsigned int current_order;
@@ -831,8 +832,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 		}
 	}
 
-	/* Use MIGRATE_RESERVE rather than fail an allocation */
-	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
+	return NULL;
 }
 
 /*
@@ -844,11 +844,23 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
 {
 	struct page *page;
 
+retry_reserve:
 	page = __rmqueue_smallest(zone, order, migratetype);
 
-	if (unlikely(!page))
+	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
 		page = __rmqueue_fallback(zone, order, migratetype);
 
+		/*
+		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
+		 * is used because __rmqueue_smallest is an inline function
+		 * and we want just one call site
+		 */
+		if (!page) {
+			migratetype = MIGRATE_RESERVE;
+			goto retry_reserve;
+		}
+	}
+
 	return page;
 }
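
The patch's effect is easier to see outside diff context. Below is a minimal
standalone sketch (plain C, not kernel code; the toy_* names and the page
counters are invented for illustration) of the control flow the patch creates:
__rmqueue_smallest keeps a single call site, so marking it inline duplicates
nothing, and the MIGRATE_RESERVE fallback loops back to that one call site
with a goto instead of calling the function from a second location.

#include <stdio.h>

enum migratetype { MIGRATE_MOVABLE, MIGRATE_RESERVE };

/* Toy stand-ins for the real per-zone freelists: pretend only
 * the reserve pool still has a page. */
static int movable_pages;		/* empty */
static int reserve_pages = 1;

/* One call site below means the compiler can inline this body
 * without duplicating it -- the point of the patch. */
static inline int toy_rmqueue_smallest(int migratetype)
{
	if (migratetype == MIGRATE_MOVABLE && movable_pages > 0) {
		movable_pages--;
		return 1;
	}
	if (migratetype == MIGRATE_RESERVE && reserve_pages > 0) {
		reserve_pages--;
		return 1;
	}
	return 0;	/* no page of this type available */
}

static int toy_rmqueue_fallback(int migratetype)
{
	(void)migratetype;
	return 0;	/* assume every fallback list is empty too */
}

static int toy_rmqueue(int migratetype)
{
	int page;

retry_reserve:
	page = toy_rmqueue_smallest(migratetype);

	if (!page && migratetype != MIGRATE_RESERVE) {
		page = toy_rmqueue_fallback(migratetype);
		/* Use the reserve rather than fail: loop back to the
		 * single toy_rmqueue_smallest call site via goto. */
		if (!page) {
			migratetype = MIGRATE_RESERVE;
			goto retry_reserve;
		}
	}
	return page;
}

int main(void)
{
	/* Movable list and fallback are empty, so the request is
	 * satisfied from MIGRATE_RESERVE on the second pass. */
	printf("got page: %d\n", toy_rmqueue(MIGRATE_MOVABLE));
	return 0;
}

Before the patch, __rmqueue_fallback itself called
__rmqueue_smallest(zone, order, MIGRATE_RESERVE) as its last resort, giving
the function two call sites; moving that fallback into __rmqueue as a goto
is what makes the inline safe from text bloat.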