Diffstat (limited to 'lib')
-rw-r--r--  lib/.gitignore        |   6
-rw-r--r--  lib/Kconfig.debug     |  20
-rw-r--r--  lib/bitmap.c          |  22
-rw-r--r--  lib/genalloc.c        | 190
-rw-r--r--  lib/klist.c           |   2
-rw-r--r--  lib/list_debug.c      |  33
-rw-r--r--  lib/plist.c           |   7
-rw-r--r--  lib/raid6/.gitignore  |   4
-rw-r--r--  lib/ts_fsm.c          |   2
9 files changed, 170 insertions(+), 116 deletions(-)
diff --git a/lib/.gitignore b/lib/.gitignore
deleted file mode 100644
index 3bef1ea..0000000
--- a/lib/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Generated files
-#
-gen_crc32table
-crc32table.h
-
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dd373c8..26af27a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -10,6 +10,23 @@ config PRINTK_TIME
 	  in kernel startup. Or add printk.time=1 at boot-time. See
 	  Documentation/kernel-parameters.txt
 
+config PRINTK_CPU_ID
+	bool "Show cpu id on printks"
+	depends on PRINTK
+	help
+	  Selecting this option causes current printk cpu id to be
+	  included in printk output. Or add printk.cpu=1 at boot-time.
+	  See Documentation/kernel-parameters.txt
+
+config PRINTK_PID
+	bool "Show pid on printks"
+	depends on PRINTK
+	help
+	  Selecting this option causes current task's pid to be
+	  included in printk output. Or add printk.pid=1 at boot-time.
+	  This allows you to see the running process with kernel log.
+	  See Documentation/kernel-parameters.txt
+
 config DEFAULT_MESSAGE_LOGLEVEL
 	int "Default message log level (1-7)"
 	range 1 7
@@ -667,8 +684,9 @@ config DEBUG_LOCKING_API_SELFTESTS
 	  mutexes and rwsems.
 
 config STACKTRACE
-	bool
+	bool "Stacktrace"
 	depends on STACKTRACE_SUPPORT
+	default y
 
 config DEBUG_STACK_USAGE
 	bool "Stack utilization instrumentation"
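Both new options only decorate the log prefix, and, as their help texts note, the same behaviour can be switched on at boot time instead of at build time. A purely illustrative kernel command line (the console= part is an assumption; only the printk.* switches come from the help texts above):

    console=ttyS0,115200 printk.time=1 printk.cpu=1 printk.pid=1
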
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 3f3b681..cf12bb8 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -315,30 +315,32 @@ void bitmap_clear(unsigned long *map, int start, int nr)
 }
 EXPORT_SYMBOL(bitmap_clear);
 
-/*
+/**
  * bitmap_find_next_zero_area - find a contiguous aligned zero area
  * @map: The address to base the search on
  * @size: The bitmap size in bits
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @align_mask: Alignment mask for zero area
+ * @align_offset: Alignment offset for zero area.
  *
  * The @align_mask should be one less than a power of 2; the effect is that
- * the bit offset of all zero areas this function finds is multiples of that
- * power of 2. A @align_mask of 0 means no alignment is required.
+ * the bit offset of all zero areas this function finds plus @align_offset
+ * is multiple of that power of 2.
  */
-unsigned long bitmap_find_next_zero_area(unsigned long *map,
-					 unsigned long size,
-					 unsigned long start,
-					 unsigned int nr,
-					 unsigned long align_mask)
+unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+					     unsigned long size,
+					     unsigned long start,
+					     unsigned int nr,
+					     unsigned long align_mask,
+					     unsigned long align_offset)
 {
 	unsigned long index, end, i;
 again:
 	index = find_next_zero_bit(map, size, start);
 
 	/* Align allocation */
-	index = __ALIGN_MASK(index, align_mask);
+	index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
 
 	end = index + nr;
 	if (end > size)
@@ -350,7 +352,7 @@ again:
 	}
 	return index;
 }
-EXPORT_SYMBOL(bitmap_find_next_zero_area);
+EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
 
 /*
  * Bitmap printing & parsing functions: first version by Bill Irwin,
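The new @align_offset argument shifts the alignment constraint: it is the returned bit index plus @align_offset that ends up being a multiple of align_mask + 1. This matters when bit 0 of the map does not itself sit on an aligned boundary, which is exactly why the genalloc rework below passes chunk->start here. A minimal sketch with made-up values, assuming only the function added above:

	#include <linux/bitmap.h>

	/*
	 * Sketch only: the map covers a region whose first bit corresponds
	 * to "external" position 3, so align_offset = 3 keeps the result
	 * aligned in that external numbering, not within the map itself.
	 */
	static unsigned long find_two_bits_aligned4(unsigned long *map,
						    unsigned long nbits)
	{
		return bitmap_find_next_zero_area_off(map, nbits,
						      0,	/* start        */
						      2,	/* nr           */
						      4 - 1,	/* align_mask   */
						      3);	/* align_offset */
	}
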
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 577ddf8..b41dd90 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -16,23 +16,46 @@
 #include <linux/genalloc.h>
 
+/* General purpose special memory pool descriptor. */
+struct gen_pool {
+	rwlock_t lock;			/* protects chunks list */
+	struct list_head chunks;	/* list of chunks in this pool */
+	unsigned order;			/* minimum allocation order */
+};
+
+/* General purpose special memory pool chunk descriptor. */
+struct gen_pool_chunk {
+	spinlock_t lock;		/* protects bits */
+	struct list_head next_chunk;	/* next chunk in pool */
+	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
+	unsigned long start;		/* start of memory chunk */
+	unsigned long size;		/* number of bits */
+	unsigned long bits[0];		/* bitmap for allocating memory chunk */
+};
+
+
 /**
- * gen_pool_create - create a new special memory pool
- * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
- * @nid: node id of the node the pool structure should be allocated on, or -1
+ * gen_pool_create() - create a new special memory pool
+ * @order:	Log base 2 of number of bytes each bitmap bit
+ *		represents.
+ * @nid:	Node id of the node the pool structure should be allocated
+ *		on, or -1.  This will be also used for other allocations.
  *
  * Create a new special memory pool that can be used to manage special purpose
  * memory not managed by the regular kmalloc/kfree interface.
  */
-struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
+struct gen_pool *__must_check gen_pool_create(unsigned order, int nid)
 {
 	struct gen_pool *pool;
 
-	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
-	if (pool != NULL) {
+	if (WARN_ON(order >= BITS_PER_LONG))
+		return NULL;
+
+	pool = kmalloc_node(sizeof *pool, GFP_KERNEL, nid);
+	if (pool) {
 		rwlock_init(&pool->lock);
 		INIT_LIST_HEAD(&pool->chunks);
-		pool->min_alloc_order = min_alloc_order;
+		pool->order = order;
 	}
 	return pool;
 }
@@ -40,33 +63,41 @@ EXPORT_SYMBOL(gen_pool_create);
 
 /**
  * gen_pool_add_virt - add a new chunk of special memory to the pool
- * @pool: pool to add new memory chunk to
- * @virt: virtual starting address of memory chunk to add to pool
- * @phys: physical starting address of memory chunk to add to pool
- * @size: size in bytes of the memory chunk to add to pool
- * @nid: node id of the node the chunk structure and bitmap should be
- *       allocated on, or -1
+ * @pool: Pool to add new memory chunk to
+ * @virt: Virtual starting address of memory chunk to add to pool
+ * @phys: Physical starting address of memory chunk to add to pool
+ * @size: Size in bytes of the memory chunk to add to pool
+ * @nid: Node id of the node the chunk structure and bitmap should be
+ *       allocated on, or -1
  *
  * Add a new chunk of special memory to the specified pool.
  *
 * Returns 0 on success or a -ve errno on failure.
 */
-int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
-		 size_t size, int nid)
+int __must_check
+gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
+		  size_t size, int nid)
 {
 	struct gen_pool_chunk *chunk;
-	int nbits = size >> pool->min_alloc_order;
-	int nbytes = sizeof(struct gen_pool_chunk) +
-				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
+	size_t nbytes;
 
-	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
-	if (unlikely(chunk == NULL))
+	if (WARN_ON(!virt || virt + size < virt ||
+		    (virt & ((1 << pool->order) - 1))))
+		return -EINVAL;
+
+	size = size >> pool->order;
+	if (WARN_ON(!size))
+		return -EINVAL;
+
+	nbytes = sizeof *chunk + BITS_TO_LONGS(size) * sizeof *chunk->bits;
+	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
+	if (!chunk)
 		return -ENOMEM;
 
 	spin_lock_init(&chunk->lock);
+	chunk->start = virt >> pool->order;
+	chunk->size = size;
 	chunk->phys_addr = phys;
-	chunk->start_addr = virt;
-	chunk->end_addr = virt + size;
 
 	write_lock(&pool->lock);
 	list_add(&chunk->next_chunk, &pool->chunks);
@@ -90,10 +121,12 @@ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
 
 	read_lock(&pool->lock);
 	list_for_each(_chunk, &pool->chunks) {
+		unsigned long start_addr;
 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
 
-		if (addr >= chunk->start_addr && addr < chunk->end_addr)
-			return chunk->phys_addr + addr - chunk->start_addr;
+		start_addr = chunk->start << pool->order;
+		if (addr >= start_addr && addr < start_addr + chunk->size)
+			return chunk->phys_addr + addr - start_addr;
 	}
 	read_unlock(&pool->lock);
 
@@ -102,115 +135,116 @@ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
 EXPORT_SYMBOL(gen_pool_virt_to_phys);
 
 /**
- * gen_pool_destroy - destroy a special memory pool
- * @pool: pool to destroy
+ * gen_pool_destroy() - destroy a special memory pool
+ * @pool: Pool to destroy.
  *
  * Destroy the specified special memory pool. Verifies that there are no
  * outstanding allocations.
 */
 void gen_pool_destroy(struct gen_pool *pool)
 {
-	struct list_head *_chunk, *_next_chunk;
 	struct gen_pool_chunk *chunk;
-	int order = pool->min_alloc_order;
-	int bit, end_bit;
-
+	int bit;
 
-	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+	while (!list_empty(&pool->chunks)) {
+		chunk = list_entry(pool->chunks.next, struct gen_pool_chunk,
+				   next_chunk);
 		list_del(&chunk->next_chunk);
 
-		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
-		bit = find_next_bit(chunk->bits, end_bit, 0);
-		BUG_ON(bit < end_bit);
+		bit = find_next_bit(chunk->bits, chunk->size, 0);
+		BUG_ON(bit < chunk->size);
 
 		kfree(chunk);
 	}
 	kfree(pool);
-	return;
 }
 EXPORT_SYMBOL(gen_pool_destroy);
 
 /**
- * gen_pool_alloc - allocate special memory from the pool
- * @pool: pool to allocate from
- * @size: number of bytes to allocate from the pool
+ * gen_pool_alloc_aligned() - allocate special memory from the pool
+ * @pool: Pool to allocate from.
+ * @size: Number of bytes to allocate from the pool.
+ * @alignment_order: Order the allocated space should be
+ *		     aligned to (eg. 20 means allocated space
+ *		     must be aligned to 1MiB).
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.
 */
-unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+unsigned long __must_check
+gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
+		       unsigned alignment_order)
 {
-	struct list_head *_chunk;
+	unsigned long addr, align_mask = 0, flags, start;
 	struct gen_pool_chunk *chunk;
-	unsigned long addr, flags;
-	int order = pool->min_alloc_order;
-	int nbits, start_bit, end_bit;
 
 	if (size == 0)
 		return 0;
 
-	nbits = (size + (1UL << order) - 1) >> order;
+	if (alignment_order > pool->order)
+		align_mask = (1 << (alignment_order - pool->order)) - 1;
 
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+	size = (size + (1UL << pool->order) - 1) >> pool->order;
 
-		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+	read_lock(&pool->lock);
+	list_for_each_entry(chunk, &pool->chunks, next_chunk) {
+		if (chunk->size < size)
+			continue;
 
 		spin_lock_irqsave(&chunk->lock, flags);
-		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
-						nbits, 0);
-		if (start_bit >= end_bit) {
+		start = bitmap_find_next_zero_area_off(chunk->bits, chunk->size,
+						       0, size, align_mask,
+						       chunk->start);
+		if (start >= chunk->size) {
 			spin_unlock_irqrestore(&chunk->lock, flags);
 			continue;
 		}
 
-		addr = chunk->start_addr + ((unsigned long)start_bit << order);
-
-		bitmap_set(chunk->bits, start_bit, nbits);
+		bitmap_set(chunk->bits, start, size);
 		spin_unlock_irqrestore(&chunk->lock, flags);
-		read_unlock(&pool->lock);
-		return addr;
+		addr = (chunk->start + start) << pool->order;
+		goto done;
 	}
+
+	addr = 0;
+done:
 	read_unlock(&pool->lock);
-	return 0;
+	return addr;
 }
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_alloc_aligned);
 
 /**
- * gen_pool_free - free allocated special memory back to the pool
- * @pool: pool to free to
- * @addr: starting address of memory to free back to pool
- * @size: size in bytes of memory to free
+ * gen_pool_free() - free allocated special memory back to the pool
+ * @pool: Pool to free to.
+ * @addr: Starting address of memory to free back to pool.
+ * @size: Size in bytes of memory to free.
  *
  * Free previously allocated special memory back to the specified pool.
 */
 void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 {
-	struct list_head *_chunk;
 	struct gen_pool_chunk *chunk;
 	unsigned long flags;
-	int order = pool->min_alloc_order;
-	int bit, nbits;
 
-	nbits = (size + (1UL << order) - 1) >> order;
+	if (!size)
+		return;
 
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+	addr = addr >> pool->order;
+	size = (size + (1UL << pool->order) - 1) >> pool->order;
 
-		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
-			BUG_ON(addr + size > chunk->end_addr);
+	BUG_ON(addr + size < addr);
+
+	read_lock(&pool->lock);
+	list_for_each_entry(chunk, &pool->chunks, next_chunk)
+		if (addr >= chunk->start &&
+		    addr + size <= chunk->start + chunk->size) {
 			spin_lock_irqsave(&chunk->lock, flags);
-			bit = (addr - chunk->start_addr) >> order;
-			while (nbits--)
-				__clear_bit(bit++, chunk->bits);
+			bitmap_clear(chunk->bits, addr - chunk->start, size);
 			spin_unlock_irqrestore(&chunk->lock, flags);
-			break;
+			goto done;
 		}
-	}
-	BUG_ON(nbits > 0);
+	BUG_ON(1);
+done:
 	read_unlock(&pool->lock);
 }
 EXPORT_SYMBOL(gen_pool_free);
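Putting the reworked genalloc interface together, a caller creates a pool with a minimum-allocation order, hands it one or more chunks, and then allocates and frees from it. The following is only a hedged sketch: the SRAM addresses, sizes and error handling are invented, while the calls, their argument order and return conventions come from the kernel-doc in the hunks above.

	#include <linux/errno.h>
	#include <linux/genalloc.h>

	static int example_sram_pool(unsigned long sram_virt, phys_addr_t sram_phys,
				     int nid)
	{
		struct gen_pool *pool;
		unsigned long block;
		int ret;

		/* order = 5: every bitmap bit represents 32 bytes */
		pool = gen_pool_create(5, nid);
		if (!pool)
			return -ENOMEM;

		/* hand one 64 KiB chunk to the pool; sram_virt must be 32-byte aligned */
		ret = gen_pool_add_virt(pool, sram_virt, sram_phys, 64 * 1024, nid);
		if (ret) {
			gen_pool_destroy(pool);
			return ret;
		}

		/* 1 KiB block, aligned to 1 KiB (alignment_order = 10); 0 means failure */
		block = gen_pool_alloc_aligned(pool, 1024, 10);
		if (block) {
			/* ... use the block, e.g. via gen_pool_virt_to_phys() ... */
			gen_pool_free(pool, block, 1024);
		}

		gen_pool_destroy(pool);	/* only valid once all allocations are freed */
		return block ? 0 : -ENOMEM;
	}
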
diff --git a/lib/klist.c b/lib/klist.c
index 573d606..d0b4b18 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -193,10 +193,10 @@ static void klist_release(struct kref *kref)
 		if (waiter->node != n)
 			continue;
 
+		list_del(&waiter->list);
 		waiter->woken = 1;
 		mb();
 		wake_up_process(waiter->process);
-		list_del(&waiter->list);
 	}
 	spin_unlock(&klist_remove_lock);
 	knode_set_klist(n, NULL);
diff --git a/lib/list_debug.c b/lib/list_debug.c
index b8029a5..50cac9e 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -9,6 +9,12 @@
 #include <linux/module.h>
 #include <linux/list.h>
 
+#ifdef CONFIG_SEC_DEBUG_LIST_CORRUPTION
+static int list_debug = 0x00000100UL;
+#else
+static int list_debug;
+#endif
+
 /*
  * Insert a new entry between two known consecutive entries.
  *
@@ -20,14 +26,20 @@ void __list_add(struct list_head *new,
 			      struct list_head *prev,
 			      struct list_head *next)
 {
-	WARN(next->prev != prev,
-		"list_add corruption. next->prev should be "
-		"prev (%p), but was %p. (next=%p).\n",
-		prev, next->prev, next);
-	WARN(prev->next != next,
-		"list_add corruption. prev->next should be "
-		"next (%p), but was %p. (prev=%p).\n",
-		next, prev->next, prev);
+	if (list_debug)
+		BUG_ON(next->prev != prev);
+	else
+		WARN(next->prev != prev,
+			"list_add corruption. next->prev should be "
+			"prev (%p), but was %p. (next=%p).\n",
+			prev, next->prev, next);
+	if (list_debug)
+		BUG_ON(prev->next != next);
+	else
+		WARN(prev->next != next,
+			"list_add corruption. prev->next should be "
+			"next (%p), but was %p. (prev=%p).\n",
+			next, prev->next, prev);
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
@@ -53,8 +65,11 @@ void __list_del_entry(struct list_head *entry)
 		"but was %p\n", entry, prev->next) ||
 	    WARN(next->prev != entry,
 		"list_del corruption. next->prev should be %p, "
-		"but was %p\n", entry, next->prev))
+		"but was %p\n", entry, next->prev)) {
+		if (list_debug)
+			BUG();
 		return;
+	}
 
 	__list_del(prev, next);
 }
diff --git a/lib/plist.c b/lib/plist.c
index 0ae7e64..a0a4da4 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -56,11 +56,6 @@ static void plist_check_list(struct list_head *top)
 
 static void plist_check_head(struct plist_head *head)
 {
-	WARN_ON(head != &test_head && !head->rawlock && !head->spinlock);
-	if (head->rawlock)
-		WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
-	if (head->spinlock)
-		WARN_ON_SMP(!spin_is_locked(head->spinlock));
 	if (!plist_head_empty(head))
 		plist_check_list(&plist_first(head)->prio_list);
 	plist_check_list(&head->node_list);
@@ -180,7 +175,7 @@ static int __init plist_test(void)
 	unsigned int r = local_clock();
 
 	printk(KERN_INFO "start plist test\n");
-	plist_head_init(&test_head, NULL);
+	plist_head_init(&test_head);
 	for (i = 0; i < ARRAY_SIZE(test_node); i++)
 		plist_node_init(test_node + i, 0);
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
deleted file mode 100644
index 162beca..0000000
--- a/lib/raid6/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-mktables
-altivec*.c
-int*.c
-tables.c
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 5696a35..1f8dfea 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -141,7 +141,7 @@ static unsigned int fsm_find(struct ts_config *conf, struct ts_state *state)
 	struct ts_fsm_token *cur = NULL, *next;
 	unsigned int match_start, block_idx = 0, tok_idx;
 	unsigned block_len = 0, strict, consumed = state->offset;
-	const u8 *data;
+	const u8 *data = NULL;
 
 #define GET_NEXT_BLOCK()				\
 ({	consumed += block_idx;				\
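For the lib/list_debug.c change, the only behavioural difference is how a detected corruption is reported: with CONFIG_SEC_DEBUG_LIST_CORRUPTION set, list_debug is non-zero and the existing WARN paths escalate to BUG(). A hypothetical snippet of the kind of misuse those checks catch (illustrative only, and it assumes CONFIG_DEBUG_LIST so that these debug versions of the list helpers are the ones compiled in):

	#include <linux/list.h>

	static LIST_HEAD(demo_list);

	struct demo_item {
		struct list_head node;
	};

	static void demo_double_del(struct demo_item *it)
	{
		list_add(&it->node, &demo_list);
		list_del(&it->node);	/* poisons it->node.next/prev          */
		list_del(&it->node);	/* __list_del_entry() sees the poison: */
					/* WARN by default, BUG() when         */
					/* list_debug is set                   */
	}
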