Diffstat (limited to 'mm')
-rw-r--r-- | mm/filemap.c     | 21
-rw-r--r-- | mm/filemap_xip.c | 22
-rw-r--r-- | mm/hugetlb.c     |  7
-rw-r--r-- | mm/memory.c      | 13
-rw-r--r-- | mm/mmap.c        |  9
-rw-r--r-- | mm/page_alloc.c  |  2
-rw-r--r-- | mm/rmap.c        | 24
-rw-r--r-- | mm/shmem.c       | 42
-rw-r--r-- | mm/slab.c        | 33
-rw-r--r-- | mm/slub.c        | 37
-rw-r--r-- | mm/vmstat.c      |  2
11 files changed, 86 insertions, 126 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index edb1b0b..c6ebd9f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1245,26 +1245,6 @@ int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long o
         return written;
 }
 
-ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
-                        size_t count, read_actor_t actor, void *target)
-{
-        read_descriptor_t desc;
-
-        if (!count)
-                return 0;
-
-        desc.written = 0;
-        desc.count = count;
-        desc.arg.data = target;
-        desc.error = 0;
-
-        do_generic_file_read(in_file, ppos, &desc, actor);
-        if (desc.written)
-                return desc.written;
-        return desc.error;
-}
-EXPORT_SYMBOL(generic_file_sendfile);
-
 static ssize_t
 do_readahead(struct address_space *mapping, struct file *filp,
              unsigned long index, unsigned long nr)
@@ -1786,7 +1766,6 @@ retry:
         page = __read_cache_page(mapping, index, filler, data);
         if (IS_ERR(page))
                 return page;
-        mark_page_accessed(page);
         if (PageUptodate(page))
                 goto out;
 
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index fa360e5..65ffc32 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -159,28 +159,6 @@ xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 }
 EXPORT_SYMBOL_GPL(xip_file_read);
 
-ssize_t
-xip_file_sendfile(struct file *in_file, loff_t *ppos,
-             size_t count, read_actor_t actor, void *target)
-{
-        read_descriptor_t desc;
-
-        if (!count)
-                return 0;
-
-        desc.written = 0;
-        desc.count = count;
-        desc.arg.data = target;
-        desc.error = 0;
-
-        do_xip_mapping_read(in_file->f_mapping, &in_file->f_ra, in_file,
-                            ppos, &desc, actor);
-        if (desc.written)
-                return desc.written;
-        return desc.error;
-}
-EXPORT_SYMBOL_GPL(xip_file_sendfile);
-
 /*
  * __xip_unmap is invoked from xip_unmap and
  * xip_write
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eb7180d..a45d1f0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -326,9 +326,10 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
         pte_t entry;
 
         entry = pte_mkwrite(pte_mkdirty(*ptep));
-        ptep_set_access_flags(vma, address, ptep, entry, 1);
-        update_mmu_cache(vma, address, entry);
-        lazy_mmu_prot_update(entry);
+        if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+                update_mmu_cache(vma, address, entry);
+                lazy_mmu_prot_update(entry);
+        }
 }
 
 
diff --git a/mm/memory.c b/mm/memory.c
index cb94488..f64cbf9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1691,9 +1691,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 flush_cache_page(vma, address, pte_pfn(orig_pte));
                 entry = pte_mkyoung(orig_pte);
                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-                ptep_set_access_flags(vma, address, page_table, entry, 1);
-                update_mmu_cache(vma, address, entry);
-                lazy_mmu_prot_update(entry);
+                if (ptep_set_access_flags(vma, address, page_table, entry,1)) {
+                        update_mmu_cache(vma, address, entry);
+                        lazy_mmu_prot_update(entry);
+                }
                 ret |= VM_FAULT_WRITE;
                 goto unlock;
         }
@@ -2525,10 +2526,9 @@ static inline int handle_pte_fault(struct mm_struct *mm,
                 pte_t *pte, pmd_t *pmd, int write_access)
 {
         pte_t entry;
-        pte_t old_entry;
         spinlock_t *ptl;
 
-        old_entry = entry = *pte;
+        entry = *pte;
         if (!pte_present(entry)) {
                 if (pte_none(entry)) {
                         if (vma->vm_ops) {
@@ -2561,8 +2561,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
                         entry = pte_mkdirty(entry);
         }
         entry = pte_mkyoung(entry);
-        if (!pte_same(old_entry, entry)) {
-                ptep_set_access_flags(vma, address, pte, entry, write_access);
+        if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
                 update_mmu_cache(vma, address, entry);
                 lazy_mmu_prot_update(entry);
         } else {
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1536,9 +1536,14 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
          * vma->vm_start/vm_end cannot change under us because the caller
          * is required to hold the mmap_sem in read mode.  We need the
          * anon_vma lock to serialize against concurrent expand_stacks.
+         * Also guard against wrapping around to address 0.
          */
-        address += 4 + PAGE_SIZE - 1;
-        address &= PAGE_MASK;
+        if (address < PAGE_ALIGN(address+4))
+                address = PAGE_ALIGN(address+4);
+        else {
+                anon_vma_unlock(vma);
+                return -ENOMEM;
+        }
         error = 0;
 
         /* Somebody else might have raced and expanded it already */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bd8e335..05ace44 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1968,7 +1968,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
         memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
-static int __cpuinit zone_batchsize(struct zone *zone)
+static int __devinit zone_batchsize(struct zone *zone)
 {
         int batch;
 
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -53,24 +53,6 @@
 
 struct kmem_cache *anon_vma_cachep;
 
-static inline void validate_anon_vma(struct vm_area_struct *find_vma)
-{
-#ifdef CONFIG_DEBUG_VM
-        struct anon_vma *anon_vma = find_vma->anon_vma;
-        struct vm_area_struct *vma;
-        unsigned int mapcount = 0;
-        int found = 0;
-
-        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-                mapcount++;
-                BUG_ON(mapcount > 100000);
-                if (vma == find_vma)
-                        found = 1;
-        }
-        BUG_ON(!found);
-#endif
-}
-
 /* This must be called under the mmap_sem. */
 int anon_vma_prepare(struct vm_area_struct *vma)
 {
@@ -121,10 +103,8 @@ void __anon_vma_link(struct vm_area_struct *vma)
 {
         struct anon_vma *anon_vma = vma->anon_vma;
 
-        if (anon_vma) {
+        if (anon_vma)
                 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
-                validate_anon_vma(vma);
-        }
 }
 
 void anon_vma_link(struct vm_area_struct *vma)
@@ -134,7 +114,6 @@ void anon_vma_link(struct vm_area_struct *vma)
         if (anon_vma) {
                 spin_lock(&anon_vma->lock);
                 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
-                validate_anon_vma(vma);
                 spin_unlock(&anon_vma->lock);
         }
 }
@@ -148,7 +127,6 @@ void anon_vma_unlink(struct vm_area_struct *vma)
                 return;
 
         spin_lock(&anon_vma->lock);
-        validate_anon_vma(vma);
         list_del(&vma->anon_vma_node);
 
         /* We must garbage collect the anon_vma if it's empty */
diff --git a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1100,9 +1100,9 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
          * Normally, filepage is NULL on entry, and either found
          * uptodate immediately, or allocated and zeroed, or read
          * in under swappage, which is then assigned to filepage.
-         * But shmem_prepare_write passes in a locked filepage,
-         * which may be found not uptodate by other callers too,
-         * and may need to be copied from the swappage read in.
+         * But shmem_readpage and shmem_prepare_write pass in a locked
+         * filepage, which may be found not uptodate by other callers
+         * too, and may need to be copied from the swappage read in.
          */
 repeat:
         if (!filepage)
@@ -1485,9 +1485,18 @@ static const struct inode_operations shmem_symlink_inode_operations;
 static const struct inode_operations shmem_symlink_inline_operations;
 
 /*
- * Normally tmpfs makes no use of shmem_prepare_write, but it
- * lets a tmpfs file be used read-write below the loop driver.
+ * Normally tmpfs avoids the use of shmem_readpage and shmem_prepare_write;
+ * but providing them allows a tmpfs file to be used for splice, sendfile, and
+ * below the loop driver, in the generic fashion that many filesystems support.
  */
+static int shmem_readpage(struct file *file, struct page *page)
+{
+        struct inode *inode = page->mapping->host;
+        int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
+        unlock_page(page);
+        return error;
+}
+
 static int shmem_prepare_write(struct file *file, struct page *page,
                         unsigned offset, unsigned to)
 {
@@ -1711,25 +1720,6 @@ static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count
         return desc.error;
 }
 
-static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
-                        size_t count, read_actor_t actor, void *target)
-{
-        read_descriptor_t desc;
-
-        if (!count)
-                return 0;
-
-        desc.written = 0;
-        desc.count = count;
-        desc.arg.data = target;
-        desc.error = 0;
-
-        do_shmem_file_read(in_file, ppos, &desc, actor);
-        if (desc.written)
-                return desc.written;
-        return desc.error;
-}
-
 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -2386,6 +2376,7 @@ static const struct address_space_operations shmem_aops = {
         .writepage      = shmem_writepage,
         .set_page_dirty = __set_page_dirty_no_writeback,
 #ifdef CONFIG_TMPFS
+        .readpage       = shmem_readpage,
         .prepare_write  = shmem_prepare_write,
         .commit_write   = simple_commit_write,
 #endif
@@ -2399,7 +2390,8 @@ static const struct file_operations shmem_file_operations = {
         .read           = shmem_file_read,
         .write          = shmem_file_write,
         .fsync          = simple_sync_file,
-        .sendfile       = shmem_file_sendfile,
+        .splice_read    = generic_file_splice_read,
+        .splice_write   = generic_file_splice_write,
 #endif
 };
 
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -137,6 +137,7 @@
 
 /* Shouldn't this be in a header file somewhere? */
 #define BYTES_PER_WORD          sizeof(void *)
+#define REDZONE_ALIGN           max(BYTES_PER_WORD, __alignof__(unsigned long long))
 
 #ifndef cache_line_size
 #define cache_line_size()       L1_CACHE_BYTES
@@ -547,7 +548,7 @@ static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
         if (cachep->flags & SLAB_STORE_USER)
                 return (unsigned long long *)(objp + cachep->buffer_size -
                                               sizeof(unsigned long long) -
-                                              BYTES_PER_WORD);
+                                              REDZONE_ALIGN);
         return (unsigned long long *) (objp + cachep->buffer_size -
                                        sizeof(unsigned long long));
 }
@@ -774,7 +775,6 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
          */
         BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
-        WARN_ON_ONCE(size == 0);
         while (size > csizep->cs_size)
                 csizep++;
 
@@ -2179,7 +2179,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
          * above the next power of two: caches with object sizes just above a
          * power of two have a significant amount of internal fragmentation.
          */
-        if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
+        if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
+                                                2 * sizeof(unsigned long long)))
                 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
         if (!(flags & SLAB_DESTROY_BY_RCU))
                 flags |= SLAB_POISON;
@@ -2220,12 +2221,20 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         }
 
         /*
-         * Redzoning and user store require word alignment. Note this will be
-         * overridden by architecture or caller mandated alignment if either
-         * is greater than BYTES_PER_WORD.
+         * Redzoning and user store require word alignment or possibly larger.
+         * Note this will be overridden by architecture or caller mandated
+         * alignment if either is greater than BYTES_PER_WORD.
          */
-        if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
-                ralign = __alignof__(unsigned long long);
+        if (flags & SLAB_STORE_USER)
+                ralign = BYTES_PER_WORD;
+
+        if (flags & SLAB_RED_ZONE) {
+                ralign = REDZONE_ALIGN;
+                /* If redzoning, ensure that the second redzone is suitably
+                 * aligned, by adjusting the object size accordingly. */
+                size += REDZONE_ALIGN - 1;
+                size &= ~(REDZONE_ALIGN - 1);
+        }
 
         /* 2) arch mandated alignment */
         if (ralign < ARCH_SLAB_MINALIGN) {
@@ -2262,9 +2271,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         }
         if (flags & SLAB_STORE_USER) {
                 /* user store requires one word storage behind the end of
-                 * the real object.
+                 * the real object. But if the second red zone needs to be
+                 * aligned to 64 bits, we must allow that much space.
                  */
-                size += BYTES_PER_WORD;
+                if (flags & SLAB_RED_ZONE)
+                        size += REDZONE_ALIGN;
+                else
+                        size += BYTES_PER_WORD;
         }
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
         if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1798,8 +1798,6 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
         BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
 
         page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node);
-        /* new_slab() disables interupts */
-        local_irq_enable();
 
         BUG_ON(!page);
         n = page->freelist;
@@ -1811,6 +1809,12 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
         init_kmem_cache_node(n);
         atomic_long_inc(&n->nr_slabs);
         add_partial(n, page);
+
+        /*
+         * new_slab() disables interupts. If we do not reenable interrupts here
+         * then bootup would continue with interrupts disabled.
+         */
+        local_irq_enable();
         return n;
 }
 
@@ -2016,7 +2020,6 @@ error:
                         s->offset, flags);
         return 0;
 }
-EXPORT_SYMBOL(kmem_cache_open);
 
 /*
  * Check if a given pointer is valid
@@ -2436,6 +2439,7 @@ EXPORT_SYMBOL(krealloc);
 void __init kmem_cache_init(void)
 {
         int i;
+        int caches = 0;
 
 #ifdef CONFIG_NUMA
         /*
@@ -2446,20 +2450,29 @@ void __init kmem_cache_init(void)
         create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
                 sizeof(struct kmem_cache_node), GFP_KERNEL);
         kmalloc_caches[0].refcount = -1;
+        caches++;
 #endif
 
         /* Able to allocate the per node structures */
         slab_state = PARTIAL;
 
         /* Caches that are not of the two-to-the-power-of size */
-        create_kmalloc_cache(&kmalloc_caches[1],
+        if (KMALLOC_MIN_SIZE <= 64) {
+                create_kmalloc_cache(&kmalloc_caches[1],
                                 "kmalloc-96", 96, GFP_KERNEL);
-        create_kmalloc_cache(&kmalloc_caches[2],
+                caches++;
+        }
+        if (KMALLOC_MIN_SIZE <= 128) {
+                create_kmalloc_cache(&kmalloc_caches[2],
                                 "kmalloc-192", 192, GFP_KERNEL);
+                caches++;
+        }
 
-        for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+        for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
                 create_kmalloc_cache(&kmalloc_caches[i],
                         "kmalloc", 1 << i, GFP_KERNEL);
+                caches++;
+        }
 
         slab_state = UP;
 
@@ -2476,8 +2489,8 @@ void __init kmem_cache_init(void)
                 nr_cpu_ids * sizeof(struct page *);
 
         printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
-                " Processors=%d, Nodes=%d\n",
-                KMALLOC_SHIFT_HIGH, cache_line_size(),
+                " CPUs=%d, Nodes=%d\n",
+                caches, cache_line_size(),
                 slub_min_order, slub_max_order, slub_min_objects,
                 nr_cpu_ids, nr_node_ids);
 }
@@ -2867,7 +2880,7 @@ static int alloc_loc_track(struct loc_track *t, unsigned long max)
 
         order = get_order(sizeof(struct location) * max);
 
-        l = (void *)__get_free_pages(GFP_KERNEL, order);
+        l = (void *)__get_free_pages(GFP_ATOMIC, order);
         if (!l)
                 return 0;
 
@@ -3032,13 +3045,15 @@ static int list_locations(struct kmem_cache *s, char *buf,
                         n += sprintf(buf + n, " pid=%ld",
                                 l->min_pid);
 
-                if (num_online_cpus() > 1 && !cpus_empty(l->cpus)) {
+                if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
+                                n < PAGE_SIZE - 60) {
                         n += sprintf(buf + n, " cpus=");
                         n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
                                         l->cpus);
                 }
 
-                if (num_online_nodes() > 1 && !nodes_empty(l->nodes)) {
+                if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
+                                n < PAGE_SIZE - 60) {
                         n += sprintf(buf + n, " nodes=");
                         n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
                                         l->nodes);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 3825429..eceaf49 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -477,8 +477,8 @@ const struct seq_operations fragmentation_op = {
 static const char * const vmstat_text[] = {
         /* Zoned VM counters */
         "nr_free_pages",
-        "nr_active",
         "nr_inactive",
+        "nr_active",
         "nr_anon_pages",
         "nr_mapped",
         "nr_file_pages",
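
The expand_upwards() change in mm/mmap.c replaces the open-coded round-up (address += 4 + PAGE_SIZE - 1; address &= PAGE_MASK) with a comparison against PAGE_ALIGN(address + 4), so a stack sitting just below the top of the address space fails with -ENOMEM instead of silently wrapping to 0. The following standalone sketch is illustrative only and not part of the patch; it uses local stand-ins for PAGE_SIZE, PAGE_MASK and PAGE_ALIGN with an assumed 4 KiB page size to show the wrap and the guard:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        /* An address a few bytes below the top of the address space. */
        unsigned long address = (unsigned long)-8;

        /* The old arithmetic overflows and then aligns the wrapped value. */
        unsigned long old_way = (address + 4 + PAGE_SIZE - 1) & PAGE_MASK;
        printf("old arithmetic: %#lx -> %#lx\n", address, old_way);

        /* The new check notices the wrap because the aligned result is
         * no longer above the original address. */
        if (address < PAGE_ALIGN(address + 4))
                printf("new check: expand to %#lx\n", PAGE_ALIGN(address + 4));
        else
                printf("new check: would wrap past 0, return -ENOMEM\n");

        return 0;
}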
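
The mm/shmem.c hunks drop shmem_file_sendfile() and instead give tmpfs a .readpage method plus generic_file_splice_read/generic_file_splice_write, routing it through the generic splice machinery. As a rough userspace illustration of what that path is used for (again not part of the patch; the file name /dev/shm/example and the 4096-byte count are arbitrary assumptions), a tmpfs file can be spliced into a pipe without copying through a userspace buffer:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int pipefd[2];
        int fd = open("/dev/shm/example", O_RDONLY);
        ssize_t n;

        if (fd < 0 || pipe(pipefd) < 0) {
                perror("setup");
                return 1;
        }

        /* Move up to 4096 bytes from the tmpfs file into the pipe; the
         * kernel services this through the file's splice_read path. */
        n = splice(fd, NULL, pipefd[1], NULL, 4096, SPLICE_F_MOVE);
        if (n < 0)
                perror("splice");
        else
                printf("spliced %zd bytes\n", n);

        close(fd);
        close(pipefd[0]);
        close(pipefd[1]);
        return 0;
}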
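
The mm/slab.c hunks introduce REDZONE_ALIGN and pad the object size so the second red zone (and the user-store word behind it) lands on a boundary suitable for an unsigned long long. The padding is the usual power-of-two round-up; a toy sketch, assuming REDZONE_ALIGN works out to 8 as it would on most configurations, shows the effect on a few object sizes:

#include <stdio.h>

/* Assumed value of max(BYTES_PER_WORD, __alignof__(unsigned long long)). */
#define REDZONE_ALIGN 8UL

static unsigned long round_up_object(unsigned long size)
{
        /* Same idiom as the patch: round size up to a REDZONE_ALIGN multiple. */
        size += REDZONE_ALIGN - 1;
        size &= ~(REDZONE_ALIGN - 1);
        return size;
}

int main(void)
{
        unsigned long sizes[] = { 1, 7, 8, 9, 20, 100 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("%3lu -> %3lu\n", sizes[i], round_up_object(sizes[i]));
        return 0;
}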