/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
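
/*
 * mlock_fixup - apply the VM_LOCKED bit in @newflags to [start, end) of @vma.
 *
 * First tries to merge the range into a neighbouring VMA with matching
 * flags; failing that, splits @vma so that exactly [start, end) carries
 * the new flags.  When locking (and the VMA is not VM_IO), the pages are
 * faulted in through make_pages_present(), and mm->locked_vm is adjusted
 * either way.  Must be called with mmap_sem held for writing.
 */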
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int pages;
	int ret = 0;

	if (newflags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, make_pages_present below will bring it back.
	 */
	vma->vm_flags = newflags;

	/*
	 * Keep track of amount of locked VM.
	 */
	pages = (end - start) >> PAGE_SHIFT;
	if (newflags & VM_LOCKED) {
		pages = -pages;
		if (!(newflags & VM_IO))
			ret = make_pages_present(start, end);
	}

	mm->locked_vm -= pages;
out:
	if (ret == -ENOMEM)
		ret = -EAGAIN;
	return ret;
}
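
/*
 * do_mlock - set or clear VM_LOCKED across every VMA in [start, start + len).
 *
 * Walks the VMA list, clamping each mlock_fixup() call to the requested
 * range.  A hole (unmapped gap) anywhere in the range yields -ENOMEM.
 * The caller must hold mmap_sem for writing.
 */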
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
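
/*
 * The mlock(2) entry point.  The range is rounded out to page boundaries
 * and the prospective new total of locked pages is checked against
 * RLIMIT_MEMLOCK; CAP_IPC_LOCK bypasses the limit.
 *
 * Illustrative userspace sketch (not part of this file; buf and BUF_SIZE
 * are placeholders):
 *
 *	if (mlock(buf, BUF_SIZE) != 0)
 *		perror("mlock");	(fails with e.g. ENOMEM over the limit)
 */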
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}
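
/*
 * The munlock(2) entry point.  Mirrors sys_mlock() but needs no limit
 * check, since unlocking only ever shrinks mm->locked_vm.
 */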
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
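
/*
 * do_mlockall - back end for mlockall(2) and munlockall(2).
 *
 * MCL_FUTURE is recorded in mm->def_flags so that future mappings are
 * created locked; MCL_CURRENT walks every existing VMA.  Calling with
 * flags == 0 (the munlockall() case) undoes both.
 */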
static int do_mlockall(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}
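
/*
 * The mlockall(2) entry point.  Note that with MCL_CURRENT the *entire*
 * address space (total_vm) is checked against RLIMIT_MEMLOCK, since all
 * of it is about to become locked.
 */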
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	down_write(&current->mm->mmap_sem);

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}
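
/*
 * The munlockall(2) entry point: do_mlockall(0) clears VM_LOCKED on all
 * current VMAs and drops the MCL_FUTURE default for new mappings.
 */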
asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);
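
/*
 * user_shm_lock - charge @size bytes of locked shm to @user.
 *
 * Returns 1 and takes a reference on @user if the new total fits within
 * RLIMIT_MEMLOCK (or the caller has CAP_IPC_LOCK), 0 otherwise.
 */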
int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
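
/*
 * user_shm_unlock - undo user_shm_lock(): uncharge the pages and drop the
 * user_struct reference taken when the segment was locked.
 */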
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}