task_nommu.c

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared" and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
char *task_mem(struct mm_struct *mm, char *buffer)
{
	struct vm_list_struct *vml;
	unsigned long bytes = 0, sbytes = 0, slack = 0;

	down_read(&mm->mmap_sem);
	for (vml = mm->context.vmlist; vml; vml = vml->next) {
		if (!vml->vma)
			continue;

		bytes += kobjsize(vml);
		if (atomic_read(&mm->mm_count) > 1 ||
		    atomic_read(&vml->vma->vm_usage) > 1
		    ) {
			sbytes += kobjsize((void *) vml->vma->vm_start);
			sbytes += kobjsize(vml->vma);
		} else {
			bytes += kobjsize((void *) vml->vma->vm_start);
			bytes += kobjsize(vml->vma);
			slack += kobjsize((void *) vml->vma->vm_start) -
				(vml->vma->vm_end - vml->vma->vm_start);
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && atomic_read(&current->fs->count) > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	buffer += sprintf(buffer,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
	return buffer;
}
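
/*
 * Report the total address space size for the process: the sum of the
 * backing allocations for each region on the VMA list (kobjsize()
 * returns the size of the underlying allocation, which may be larger
 * than vm_end - vm_start).
 */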
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_list_struct *tbp;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		if (tbp->vma)
			vsize += kobjsize((void *) tbp->vma->vm_start);
	}
	up_read(&mm->mmap_sem);
	return vsize;
}
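
/*
 * Fill in the statm-style counters for /proc/<pid>/statm. Note that
 * *shared is never written here; the caller is expected to zero it
 * beforehand.
 */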
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	struct vm_list_struct *tbp;
	int size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		size += kobjsize(tbp);
		if (tbp->vma) {
			size += kobjsize(tbp->vma);
			size += kobjsize((void *) tbp->vma->vm_start);
		}
	}

	size += (*text = mm->end_code - mm->start_code);
	size += (*data = mm->start_stack - mm->start_data);
	up_read(&mm->mmap_sem);
	*resident = size;
	return size;
}
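
/*
 * Resolve /proc/<pid>/exe: find the file-backed VMA carrying
 * VM_EXECUTABLE and return extra references on its mount and dentry.
 */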
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_list_struct *vml;
	struct vm_area_struct *vma;
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct *mm = NULL;
	int result = -ENOENT;

	/* get_proc_task() may return NULL for a dead task, and it takes a
	 * reference that must be dropped once we hold the mm */
	if (task) {
		mm = get_task_mm(task);
		put_task_struct(task);
	}
	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	vml = mm->context.vmlist;
	vma = NULL;
	while (vml) {
		if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
			vma = vml->vma;
			break;
		}
		vml = vml->next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_path.mnt);
		*dentry = dget(vma->vm_file->f_path.dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_vml)
{
	struct vm_list_struct *vml = _vml;
	return nommu_vma_show(m, vml->vma);
}
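
/*
 * seq_file start op: pin the task and its mm, take mmap_sem for
 * reading, and return the *pos'th element of the VMA list.
 */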
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_list_struct *vml;
	struct mm_struct *mm;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = get_task_mm(priv->task);
	if (!mm) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	down_read(&mm->mmap_sem);

	/* start from the Nth VMA */
	for (vml = mm->context.vmlist; vml; vml = vml->next)
		if (n-- == 0)
			return vml;
	return NULL;
}
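
/*
 * seq_file stop op: release the lock and the references taken in
 * m_start().
 */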
static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (priv->task) {
		struct mm_struct *mm = priv->task->mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
		put_task_struct(priv->task);
	}
}
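
/*
 * seq_file next op: advance the position and step to the next list
 * element, if any.
 */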
static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
{
	struct vm_list_struct *vml = _vml;

	(*pos)++;
	return vml ? vml->next : NULL;
}
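
/* iterator operations backing the seq_file for /proc/<pid>/maps */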
static struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};
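
/*
 * Open handler: allocate the per-file private record and note the pid;
 * the task itself is only pinned for the duration of each traversal by
 * m_start()/m_stop().
 */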
static int maps_open(struct inode *inode, struct file *file)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, &proc_pid_maps_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}
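
/* hooked up to the <pid>/maps dirent by the /proc core */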
const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
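
/*
 * Usage sketch (hypothetical, userspace side): the plumbing above backs
 * ordinary reads of /proc/<pid>/maps on a !MMU kernel, with one output
 * line per VMA as formatted by nommu_vma_show(). A consumer needs only
 * standard file I/O, e.g.:
 *
 *	FILE *f = fopen("/proc/self/maps", "r");
 *	char line[256];
 *
 *	if (f) {
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *	}
 */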