path: root/include/linux/jbd2.h
author     Theodore Ts'o <tytso@mit.edu>    2009-12-07 10:36:20 -0500
committer  Theodore Ts'o <tytso@mit.edu>    2009-12-07 10:36:20 -0500
commit   d2eecb03936878ec574ade5532fa83df7d75dde7 (patch)
tree     f7ef8bd29096ba6c073308cd3b674857be1545d4 /include/linux/jbd2.h
parent   f8ec9d6837241865cf99bed97bb99f4399fd5a03 (diff)
ext4: Use slab allocator for sub-page sized allocations
Now that SLUB seems to be fixed so that it respects the requested alignment, use kmem_cache_alloc() as the allocator if the block size of the buffer heads to be allocated is less than the page size. Previously, we were using a 16k page on a Power system for each buffer, even when the file system was using a 1k or 4k block size.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
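The header hunk below only removes the inline, page-based helpers; the replacement definitions move out of line into fs/jbd2/journal.c. As a minimal sketch of the approach, assuming a get_slab() helper that returns a per-size kmem_cache (that helper and its cache array are illustrative names, not necessarily the exact symbols in the patch; a sketch of the cache setup follows the diff), the allocator takes sub-page buffers from a slab cache and keeps the page allocator for everything else:

    /* Sketch of the out-of-line replacements (fs/jbd2/journal.c).
     * get_slab() is an assumed helper returning a kmem_cache whose
     * object size and alignment both equal the block size. */
    #include <linux/slab.h>
    #include <linux/mm.h>

    static struct kmem_cache *get_slab(size_t size);    /* assumed helper */

    void *jbd2_alloc(size_t size, gfp_t flags)
    {
            BUG_ON(size & (size - 1));      /* block sizes are powers of two */

            if (size < PAGE_SIZE)
                    /* sub-page: a size-aligned slab object, not a whole page */
                    return kmem_cache_alloc(get_slab(size), flags);

            /* page-sized and larger: the old page-allocator path */
            return (void *)__get_free_pages(flags, get_order(size));
    }

    void jbd2_free(void *ptr, size_t size)
    {
            if (size < PAGE_SIZE)
                    kmem_cache_free(get_slab(size), ptr);
            else
                    free_pages((unsigned long)ptr, get_order(size));
    }

This is what removes the waste described above: a 1k block on a 16k-page Power machine now consumes a 1k slab object instead of a full page.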
Diffstat (limited to 'include/linux/jbd2.h')
-rw-r--r--  include/linux/jbd2.h  11
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 638ce45..8ada2a1 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -69,15 +69,8 @@ extern u8 jbd2_journal_enable_debug;
#define jbd_debug(f, a...) /**/
#endif

-static inline void *jbd2_alloc(size_t size, gfp_t flags)
-{
- return (void *)__get_free_pages(flags, get_order(size));
-}
-
-static inline void jbd2_free(void *ptr, size_t size)
-{
- free_pages((unsigned long)ptr, get_order(size));
-};
+extern void *jbd2_alloc(size_t size, gfp_t flags);
+extern void jbd2_free(void *ptr, size_t size);

#define JBD2_MIN_JOURNAL_BLOCKS 1024
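The alignment caveat in the commit message is the crux of the change: journal buffers must be aligned to their block size, which the page allocator gave for free. With slab, that guarantee has to be requested when each cache is created. A hedged sketch of such a per-size cache lookup (the jbd2_slab[] array, the cache names, and the lazy creation shown here are assumptions for illustration; real code would also serialize cache creation):

    /* Illustrative per-size caches; names and layout are assumed. */
    #include <linux/log2.h>

    static struct kmem_cache *jbd2_slab[4];
    static const char *jbd2_slab_names[4] = {
            "jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k"
    };

    static struct kmem_cache *get_slab(size_t size)
    {
            int i = ilog2(size) - 10;       /* 1k -> 0, 2k -> 1, ... */

            if (!jbd2_slab[i])
                    /* align == size is the point: SLUB must hand back
                     * block-aligned objects for these buffers */
                    jbd2_slab[i] = kmem_cache_create(jbd2_slab_names[i],
                                                     size, size, 0, NULL);
            return jbd2_slab[i];
    }

Passing the block size as both the object size and the alignment to kmem_cache_create() is what makes the alignment check in jbd2_alloc() hold; this is the SLUB behavior the commit message says had to be fixed before this change was safe.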