From 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@ppc970.osdl.org>
Date: Sat, 16 Apr 2005 15:20:36 -0700
Subject: Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
---
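
A note on the accounting in task_mem() below: on nommu kernels there is
no page-granular VM, so every size is taken with kobjsize() on the
kernel allocation backing the object - the vm_list_struct, the
vm_area_struct, and the region's buffer at vm_start. "Slack" is the
allocator's over-allocation: what kobjsize() reports for the backing
buffer minus the bytes the region actually spans. Given the sprintf()
format string in the diff, the emitted text looks roughly like this
(byte counts are illustrative, not real output):

	Mem:	  123456 bytes
	Slack:	    1024 bytes
	Shared:	   65536 bytes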
 fs/proc/task_nommu.c | 164 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 164 insertions(+)
 create mode 100644 fs/proc/task_nommu.c

(limited to 'fs/proc/task_nommu.c')

diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
new file mode 100644
index 0000000..8f68827
--- /dev/null
+++ b/fs/proc/task_nommu.c
@@ -0,0 +1,164 @@
+
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/seq_file.h>
+#include "internal.h"
+
+/*
+ * Logic: we've got two memory sums for each process, "shared", and
+ * "non-shared". Shared memory may get counted more than once, for
+ * each process that owns it. Non-shared memory is counted
+ * accurately.
+ */
+char *task_mem(struct mm_struct *mm, char *buffer)
+{
+	struct vm_list_struct *vml;
+	unsigned long bytes = 0, sbytes = 0, slack = 0;
+
+	down_read(&mm->mmap_sem);
+	for (vml = mm->context.vmlist; vml; vml = vml->next) {
+		if (!vml->vma)
+			continue;
+
+		bytes += kobjsize(vml);
+		if (atomic_read(&mm->mm_count) > 1 ||
+		    atomic_read(&vml->vma->vm_usage) > 1
+		    ) {
+			sbytes += kobjsize((void *) vml->vma->vm_start);
+			sbytes += kobjsize(vml->vma);
+		} else {
+			bytes += kobjsize((void *) vml->vma->vm_start);
+			bytes += kobjsize(vml->vma);
+			slack += kobjsize((void *) vml->vma->vm_start) -
+				(vml->vma->vm_end - vml->vma->vm_start);
+		}
+	}
+
+	if (atomic_read(&mm->mm_count) > 1)
+		sbytes += kobjsize(mm);
+	else
+		bytes += kobjsize(mm);
+
+	if (current->fs && atomic_read(&current->fs->count) > 1)
+		sbytes += kobjsize(current->fs);
+	else
+		bytes += kobjsize(current->fs);
+
+	if (current->files && atomic_read(&current->files->count) > 1)
+		sbytes += kobjsize(current->files);
+	else
+		bytes += kobjsize(current->files);
+
+	if (current->sighand && atomic_read(&current->sighand->count) > 1)
+		sbytes += kobjsize(current->sighand);
+	else
+		bytes += kobjsize(current->sighand);
+
+	bytes += kobjsize(current); /* includes kernel stack */
+
+	buffer += sprintf(buffer,
+		"Mem:\t%8lu bytes\n"
+		"Slack:\t%8lu bytes\n"
+		"Shared:\t%8lu bytes\n",
+		bytes, slack, sbytes);
+
+	up_read(&mm->mmap_sem);
+	return buffer;
+}
+
+unsigned long task_vsize(struct mm_struct *mm)
+{
+	struct vm_list_struct *tbp;
+	unsigned long vsize = 0;
+
+	down_read(&mm->mmap_sem);
+	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
+		if (tbp->vma)
+			vsize += kobjsize((void *) tbp->vma->vm_start);
+	}
+	up_read(&mm->mmap_sem);
+	return vsize;
+}
+
+int task_statm(struct mm_struct *mm, int *shared, int *text,
+	       int *data, int *resident)
+{
+	struct vm_list_struct *tbp;
+	int size = kobjsize(mm);
+
+	down_read(&mm->mmap_sem);
+	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
+		size += kobjsize(tbp);
+		if (tbp->vma) {
+			size += kobjsize(tbp->vma);
+			size += kobjsize((void *) tbp->vma->vm_start);
+		}
+	}
+
+	size += (*text = mm->end_code - mm->start_code);
+	size += (*data = mm->start_stack - mm->start_data);
+	up_read(&mm->mmap_sem);
+	*resident = size;
+	return size;
+}
+
+int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+{
+	struct vm_list_struct *vml;
+	struct vm_area_struct *vma;
+	struct task_struct *task = proc_task(inode);
+	struct mm_struct *mm = get_task_mm(task);
+	int result = -ENOENT;
+
+	if (!mm)
+		goto out;
+	down_read(&mm->mmap_sem);
+
+	vml = mm->context.vmlist;
+	vma = NULL;
+	while (vml) {
+		if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
+			vma = vml->vma;
+			break;
+		}
+		vml = vml->next;
+	}
+
+	if (vma) {
+		*mnt = mntget(vma->vm_file->f_vfsmnt);
+		*dentry = dget(vma->vm_file->f_dentry);
+		result = 0;
+	}
+
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+out:
+	return result;
+}
+
+/*
+ * Albert D. Cahalan suggested faking entries for the traditional
+ * sections here. This might be worth investigating.
+ */
+static int show_map(struct seq_file *m, void *v)
+{
+	return 0;
+}
+static void *m_start(struct seq_file *m, loff_t *pos)
+{
+	return NULL;
+}
+static void m_stop(struct seq_file *m, void *v)
+{
+}
+static void *m_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return NULL;
+}
+struct seq_operations proc_pid_maps_op = {
+	.start	= m_start,
+	.next	= m_next,
+	.stop	= m_stop,
+	.show	= show_map
+};
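
The stubbed seq_operations table above keeps /proc/<pid>/maps present
but empty on nommu. For context, a seq_operations table is normally
handed to the seq_file core from a file's open method. A minimal sketch
against the 2.6-era seq_file API follows; maps_open and maps_fops are
illustrative names here (the real hookup lives in fs/proc/base.c, not
in this patch):

	#include <linux/fs.h>
	#include <linux/seq_file.h>

	/* open: bind this file to the proc_pid_maps_op iterator */
	static int maps_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &proc_pid_maps_op);
	}

	/* read/llseek/release are the stock seq_file helpers */
	static struct file_operations maps_fops = {
		.open		= maps_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= seq_release,
	};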
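
task_statm() in the patch supplies the numbers behind /proc/<pid>/statm,
which in 2.6-era kernels is a single line of seven space-separated
counts: size, resident, shared, text, lib, data, dt. A minimal
userspace sketch that reads them back (the path and field order are
standard /proc conventions, not something this commit defines):

	#include <stdio.h>

	int main(void)
	{
		unsigned long size, resident, shared, text, lib, data, dt;
		FILE *f = fopen("/proc/self/statm", "r");

		if (!f)
			return 1;
		/* statm is one line of seven counters */
		if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu", &size, &resident,
			   &shared, &text, &lib, &data, &dt) == 7)
			printf("size=%lu resident=%lu text=%lu data=%lu\n",
			       size, resident, text, data);
		fclose(f);
		return 0;
	}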