/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids init_shm_ids;

#define shm_ids(ns)	(*((ns)->ids[IPC_SHM_IDS]))

#define shm_lock(ns, id)		\
	((struct shmid_kernel*)ipc_lock(&shm_ids(ns),id))
#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)
#define shm_get(ns, id)			\
	((struct shmid_kernel*)ipc_get(&shm_ids(ns),id))
#define shm_buildid(ns, id, seq)	\
	ipc_buildid(&shm_ids(ns), id, seq)

static int newseg (struct ipc_namespace *ns, key_t key,
		int shmflg, size_t size);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

static void __ipc_init __shm_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
	ns->ids[IPC_SHM_IDS] = ids;
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(ids, 1);
}
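
/*
 * Called with shm_ids.mutex and the segment lock held: if the segment
 * still has attaches, only mark it SHM_DEST and hide its key so it can
 * no longer be looked up; otherwise destroy it immediately.
 */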
static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	if (shp->shm_nattch){
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
int shm_init_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids;

	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
	if (ids == NULL)
		return -ENOMEM;

	__shm_init_ns(ns, ids);
	return 0;
}

void shm_exit_ns(struct ipc_namespace *ns)
{
	int i;
	struct shmid_kernel *shp;

	mutex_lock(&shm_ids(ns).mutex);
	for (i = 0; i <= shm_ids(ns).max_id; i++) {
		shp = shm_lock(ns, i);
		if (shp == NULL)
			continue;

		do_shm_rmid(ns, shp);
	}
	mutex_unlock(&shm_ids(ns).mutex);

	ipc_fini_ids(ns->ids[IPC_SHM_IDS]);
	kfree(ns->ids[IPC_SHM_IDS]);
	ns->ids[IPC_SHM_IDS] = NULL;
}
#endif

void __init shm_init (void)
{
	__shm_init_ns(&init_ipc_ns, &init_shm_ids);
	ipc_init_proc_interface("sysvipc/shm",
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline int shm_checkid(struct ipc_namespace *ns,
		struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids(ns), &s->shm_perm, id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(struct ipc_namespace *ns, int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids(ns), id);
}

static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(!shp);
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.mutex locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
						shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file * file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	mutex_lock(&shm_ids(ns).mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(!shp);
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids(ns).mutex);
}
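
/*
 * Page-fault handler for an attached segment: forward the fault to the
 * nopage handler of the underlying shmem (or hugetlbfs) file.
 */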
static struct page *shm_nopage(struct vm_area_struct *vma,
			       unsigned long address, int *type)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->nopage(vma, address, type);
}

#ifdef CONFIG_NUMA
int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

struct mempolicy *shm_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else
		pol = vma->vm_policy;
	return pol;
}
#endif
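
/*
 * mmap callback for the per-attach file: map the backing file, then
 * interpose shm_vm_ops (saving the backing file's vm_ops) so attach
 * statistics are updated when the vma is opened and closed.
 */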
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);
	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int (*fsync) (struct file *, struct dentry *, int datasync);
	struct shm_file_data *sfd = shm_file_data(file);
	int ret = -EINVAL;

	fsync = sfd->file->f_op->fsync;
	if (fsync)
		ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
	return ret;
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}

int is_file_shm_hugepages(struct file *file)
{
	int ret = 0;

	if (file->f_op == &shm_file_operations) {
		struct shm_file_data *sfd;
		sfd = shm_file_data(file);
		ret = is_file_hugepages(sfd->file);
	}
	return ret;
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shm_nopage,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
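
/*
 * Create a new shared memory segment.  Called with shm_ids.mutex held;
 * returns the new segment identifier or a negative error code.
 */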
static int newseg (struct ipc_namespace *ns, key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_zero_setup takes care of mlock user accounting */
		file = hugetlb_zero_setup(size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if  ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		sprintf (name, "SYSV%08x", key);
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(ns, shp);
	if(id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(ns, id, shp->shm_perm.seq);
	shp->shm_file = file;

	ns->shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}
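
/*
 * shmget(2): find the segment matching @key, or create a new one,
 * honouring IPC_CREAT/IPC_EXCL, and return its identifier.
 */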
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	mutex_lock(&shm_ids(ns).mutex);
	if (key == IPC_PRIVATE) {
		err = newseg(ns, key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids(ns), key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(ns, key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(ns, id);
		BUG_ON(shp==NULL);
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(ns, id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	mutex_unlock(&shm_ids(ns).mutex);

	return err;
}
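
/*
 * Copy a shmid64_ds to user space, converting to the old shmid_ds
 * layout when the caller used the IPC_OLD interface.
 */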
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};
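
/*
 * Pull the uid/gid/mode needed for IPC_SET out of either the new
 * (IPC_64) or the old user-space layout.
 */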
static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
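
/*
 * Copy the shminfo limits to user space, clamping shmmax to INT_MAX
 * for the old layout.
 */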
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
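
/*
 * Sum resident and swapped pages over all segments in the namespace.
 * Called with shm_ids.mutex held (see SHM_INFO in sys_shmctl).
 */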
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
			 unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids(ns).max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(ns, i);
		if(!shp)
			continue;

		inode = shp->shm_file->f_path.dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}
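
/*
 * shmctl(2): handles IPC_INFO, SHM_INFO, SHM_STAT/IPC_STAT, SHM_LOCK,
 * SHM_UNLOCK, IPC_RMID and IPC_SET.
 */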
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo,0,sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err= shm_ids(ns).max_id;
		if(err<0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info,0,sizeof(shm_info));
		mutex_lock(&shm_ids(ns).mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids(ns).max_id;
		mutex_unlock(&shm_ids(ns).mutex);
		if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(ns, shmid);
		if(shp==NULL) {
			err = -EINVAL;
			goto out;
		} else if(cmd==SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids(ns).max_id)
				goto out_unlock;
			result = shm_buildid(ns, shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(ns, shp,shmid);
			if(err)
				goto out_unlock;
			result = 0;
		}
		err=-EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if(copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(ns, shmid);
		if(shp==NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(ns, shp,shmid);
		if(err)
			goto out_unlock;

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if(cmd==SHM_LOCK) {
			struct user_struct * user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 *	We cannot simply remove the file. The SVID states
		 *	that the block remains until the last person
		 *	detaches from it, then is deleted. A shmat() on
		 *	an RMID segment is legal in older Linux and if
		 *	we change it apps break...
		 *
		 *	Instead we set a destroyed flag, and then blow
		 *	the name away when the usage hits zero.
		 */
		mutex_lock(&shm_ids(ns).mutex);
		shp = shm_lock(ns, shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(ns, shp, shmid);
		if(err)
			goto out_unlock_up;

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err=-EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		do_shm_rmid(ns, shp);
		mutex_unlock(&shm_ids(ns).mutex);
		goto out;
	}

	case IPC_SET:
	{
		if (copy_shmid_from_user (&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		mutex_lock(&shm_ids(ns).mutex);
		shp = shm_lock(ns, shmid);
		err=-EINVAL;
		if(shp==NULL)
			goto out_up;
		err = shm_checkid(ns, shp,shmid);
		if(err)
			goto out_unlock_up;
		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;
		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
		if (err)
			goto out_unlock_up;
		err=-EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	mutex_unlock(&shm_ids(ns).mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	mode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock(ns, shmid);
	if(shp == NULL)
		goto out;

	err = shm_checkid(ns, shp,shmid);
	if (err)
		goto out_unlock;

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path.dentry = dget(shp->shm_file->f_path.dentry);
	path.mnt = mntget(shp->shm_file->f_path.mnt);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_path;

	err = -ENOMEM;
	file = get_empty_filp();
	if (!file)
		goto out_free;

	file->f_op = &shm_file_operations;
	file->private_data = sfd;
	file->f_path = path;
	file->f_mapping = shp->shm_file->f_mapping;
	file->f_mode = f_mode;
	sfd->id = shp->id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap (file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);
	fput(file);

out_nattch:
	mutex_lock(&shm_ids(ns).mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(!shp);
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids(ns).mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_path:
	dput(path.dentry);
	mntput(path.mnt);
	goto out_nattch;
}
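
/*
 * shmat(2): the attach address returned by do_shmat() can look like a
 * negative error value, so force_successful_syscall_return() keeps the
 * syscall exit path from treating it as one.
 */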
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif