author		Christoph Lameter <clameter@sgi.com>	2007-05-16 22:10:57 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-17 05:23:04 -0700
commit		a35afb830f8d71ec211531aeb9a621b09a2efb39
tree		198280081e1f8b2f6c450742a5075cc7904a3d58 /mm
parent		5577bd8a85c8b7643a241789b14fafa9c8a6c7db
Remove SLAB_CTOR_CONSTRUCTOR
SLAB_CTOR_CONSTRUCTOR is always specified. No point in checking it.
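
For context, a minimal sketch of the pattern this patch removes; the struct foo object and its constructors are hypothetical stand-ins, not code from this commit. Before the change, slab constructors guarded their work behind a SLAB_CTOR_CONSTRUCTOR test; since the allocators set that flag on every constructor call, the test always passed and the body can simply run unconditionally:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	/* Hypothetical slab-allocated object. */
	struct foo {
		spinlock_t lock;
		struct list_head list;
	};

	/* Before: the ctor tested a flag that was always set. */
	static void foo_ctor_old(void *data, struct kmem_cache *cachep,
				 unsigned long flags)
	{
		if (flags & SLAB_CTOR_CONSTRUCTOR) {
			struct foo *f = data;

			spin_lock_init(&f->lock);
			INIT_LIST_HEAD(&f->list);
		}
	}

	/* After: the same ctor with the dead test dropped. */
	static void foo_ctor(void *data, struct kmem_cache *cachep,
			     unsigned long flags)
	{
		struct foo *f = data;

		spin_lock_init(&f->lock);
		INIT_LIST_HEAD(&f->list);
	}

The hunks below apply exactly this transformation to the five mm/ files listed in the diffstat.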
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Steven French <sfrench@us.ibm.com>
Cc: Michael Halcrow <mhalcrow@us.ibm.com>
Cc: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Dave Kleikamp <shaggy@austin.ibm.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Anton Altaparmakov <aia21@cantab.net>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jan Kara <jack@ucw.cz>
Cc: David Chinner <dgc@sgi.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/rmap.c	8
-rw-r--r--	mm/shmem.c	8
-rw-r--r--	mm/slab.c	12
-rw-r--r--	mm/slob.c	2
-rw-r--r--	mm/slub.c	2
5 files changed, 13 insertions, 19 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -162,12 +162,10 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
 			  unsigned long flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		struct anon_vma *anon_vma = data;
+	struct anon_vma *anon_vma = data;
 
-		spin_lock_init(&anon_vma->lock);
-		INIT_LIST_HEAD(&anon_vma->head);
-	}
+	spin_lock_init(&anon_vma->lock);
+	INIT_LIST_HEAD(&anon_vma->head);
 }
 
 void __init anon_vma_init(void)
diff --git a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2358,13 +2358,11 @@ static void init_once(void *foo, struct kmem_cache *cachep,
 {
 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&p->vfs_inode);
+	inode_init_once(&p->vfs_inode);
 #ifdef CONFIG_TMPFS_POSIX_ACL
-		p->i_acl = NULL;
-		p->i_default_acl = NULL;
+	p->i_acl = NULL;
+	p->i_default_acl = NULL;
 #endif
-	}
 }
 
 static int init_inodecache(void)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2610,7 +2610,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 }
 
 static void cache_init_objs(struct kmem_cache *cachep,
-			    struct slab *slabp, unsigned long ctor_flags)
+			    struct slab *slabp)
 {
 	int i;
 
@@ -2634,7 +2634,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
 			cachep->ctor(objp + obj_offset(cachep), cachep,
-				     ctor_flags);
+				     0);
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2650,7 +2650,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 					 cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
-			cachep->ctor(objp, cachep, ctor_flags);
+			cachep->ctor(objp, cachep, 0);
 #endif
 		slab_bufctl(slabp)[i] = i + 1;
 	}
@@ -2739,7 +2739,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	struct slab *slabp;
 	size_t offset;
 	gfp_t local_flags;
-	unsigned long ctor_flags;
 	struct kmem_list3 *l3;
 
 	/*
@@ -2748,7 +2747,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	 */
 	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
-	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
@@ -2793,7 +2791,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
-	cache_init_objs(cachep, slabp, ctor_flags);
+	cache_init_objs(cachep, slabp);
 
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
@@ -3077,7 +3075,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
-		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
+		cachep->ctor(objp, cachep, 0);
 #if ARCH_SLAB_MINALIGN
 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -327,7 +327,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
 		b = (void *)__get_free_pages(flags, get_order(c->size));
 
 	if (c->ctor)
-		c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
+		c->ctor(b, c, 0);
 
 	return b;
 }
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -994,7 +994,7 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 	}
 
 	if (unlikely(s->ctor))
-		s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
+		s->ctor(object, s, 0);
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)