// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
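
/*
 * Worked example (illustrative, not from the original source): on a 64-bit
 * machine sizeof(void *) == 8, so ~(size_t)0 / sizeof(void *) is about 2^61,
 * which exceeds INT_MAX; __const_min() therefore picks INT_MAX (0x7fffffff),
 * and ANDing with -BITS_PER_LONG (== ~63 in two's complement) rounds down to
 * the nearest multiple of 64, giving sysctl_nr_open_max == 0x7fffffc0.
 */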

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 *
 * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
 * by that "1024/sizeof(ptr)" before, we already know there are sufficient
 * clear low bits. Clang seems to realize that, gcc ends up being confused.
 *
 * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
 * let's consider it documentation (and maybe a test-case for gcc to improve
 * its code generation ;)
 */
static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	nr = ALIGN(nr, BITS_PER_LONG);
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}
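
/*
 * Worked example of the sizing above (illustrative, not from the original
 * source): with 8-byte pointers, 1024 / sizeof(struct file *) == 128.  A
 * request for nr == 256 becomes 256/128 == 2, roundup_pow_of_two(2 + 1) == 4,
 * then 4 * 128 == 512 slots; ALIGN(512, 64) leaves it at 512.  So the fd
 * array grows in 1KiB, 2KiB, 4KiB... chunks rather than one slot at a time.
 */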

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all __fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in __fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}
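
/*
 * Illustrative note (not from the original source): full_fds_bits is a
 * second-level bitmap with one bit per word of open_fds.  On a 64-bit
 * machine, once fds 0..63 are all open, word 0 of open_fds is all ones
 * (so !~fdt->open_fds[0] above is true), bit 0 of full_fds_bits gets set,
 * and find_next_fd() below can skip those 64 fds with a single bit test.
 */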

static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * 'max_fds' will normally already be properly aligned, but it
 * turns out that in the close_range() -> __close_range() ->
 * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end
 * up having a 'max_fds' value that isn't already aligned.
 *
 * Rather than make close_range() have to worry about this,
 * just make that BITS_PER_LONG alignment be part of a sane
 * fdtable size. Because that's really what it is.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
	unsigned int count;

	count = count_open_files(fdt);
	if (max_fds < NR_OPEN_DEFAULT)
		max_fds = NR_OPEN_DEFAULT;
	return ALIGN(min(count, max_fds), BITS_PER_LONG);
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, max_fds);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have grown a new, bigger fd table in the meantime,
		 * and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, max_fds);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return __alloc_fd(current->files, 0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);
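
/*
 * Typical caller pattern (illustrative sketch, not part of the original
 * source): reserve an fd first, then bind a struct file to it, releasing
 * the reservation on failure:
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("[example]", &example_fops, NULL, O_RDWR);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 *
 * "example_fops" and the anon inode name are hypothetical; the
 * get_unused_fd_flags()/fd_install()/put_unused_fd() calls are the ones
 * defined in this file (anon_inode_getfile() lives in fs/anon_inodes.c).
 */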

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */
void __fd_install(struct files_struct *files, unsigned int fd,
		struct file *file)
{
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

/*
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */
void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);

static struct file *pick_file(struct files_struct *files, unsigned fd)
{
	struct file *file = NULL;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);

out_unlock:
	spin_unlock(&files->file_lock);
	return file;
}

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;

	file = pick_file(files, fd);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(__close_fd); /* for ksys_close() */

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE_* flags (currently only CLOSE_RANGE_UNSHARE)
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	unsigned int cur_max;
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~CLOSE_RANGE_UNSHARE)
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	rcu_read_lock();
	cur_max = files_fdtable(cur_fds)->max_fds;
	rcu_read_unlock();

	/* cap to last valid index into fdtable */
	cur_max--;

	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the requested range is greater than the current maximum,
		 * we're closing everything so only copy all file descriptors
		 * beneath the lowest file descriptor.
		 */
		if (max_fd >= cur_max)
			max_unshare_fds = fd;

		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}

	max_fd = min(max_fd, cur_max);
	while (fd <= max_fd) {
		struct file *file;

		file = pick_file(cur_fds, fd++);
		if (!file)
			continue;

		filp_close(file, cur_fds);
		cond_resched();
	}

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
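
/*
 * Illustrative userspace use of the syscall built on __close_range() (a
 * sketch, not from the original source): a daemon that wants to drop every
 * inherited descriptor above stderr can call
 *
 *	close_range(3, ~0U, 0);
 *
 * max_fd is capped to the last valid fdtable index above, so passing
 * UINT_MAX means "everything from fd 3 up".
 */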

/*
 * variant of __close_fd that gets a ref on the file for later fput.
 * The caller must ensure that filp_close() is called on the file,
 * followed by an fput().
 */
int __close_fd_get_file(unsigned int fd, struct file **res)
{
	struct files_struct *files = current->files;
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	get_file(file);
	*res = file;
	return 0;

out_unlock:
	spin_unlock(&files->file_lock);
	*res = NULL;
	return -ENOENT;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static inline struct file *__fget_files_rcu(struct files_struct *files,
	unsigned int fd, fmode_t mask, unsigned int refs)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;

		if (unlikely(fd >= fdt->max_fds))
			return NULL;

		fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
		file = rcu_dereference_raw(*fdentry);
		if (unlikely(!file))
			return NULL;

		if (unlikely(file->f_mode & mask))
			return NULL;

		/*
		 * Ok, we have a file pointer. However, because we do
		 * this all locklessly under RCU, we may be racing with
		 * that file being closed.
		 *
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero,
		 *      and get_file_rcu_many() fails. Just try
		 *      again:
		 */
		if (unlikely(!get_file_rcu_many(file, refs)))
			continue;

		/*
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our refs and try again.
		 */
		if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
		    unlikely(rcu_dereference_raw(*fdentry) != file)) {
			fput_many(file, refs);
			continue;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask, unsigned int refs)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask, refs);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask,
				  unsigned int refs)
{
	return __fget_files(current->files, fd, mask, refs);
}

struct file *fget_many(unsigned int fd, unsigned int refs)
{
	return __fget(fd, FMODE_PATH, refs);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH, 1);
}
EXPORT_SYMBOL(fget);
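
/*
 * Typical use of fget() (illustrative sketch, not part of the original
 * source): resolve an fd to a struct file for the duration of a syscall
 * and drop the reference when done:
 *
 *	struct file *file = fget(fd);
 *	if (!file)
 *		return -EBADF;
 *	... use file ...
 *	fput(file);
 *
 * Note that fget() rejects O_PATH descriptors (FMODE_PATH is in the mask);
 * fget_raw() below accepts them.
 */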

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0, 1);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0, 1);
	task_unlock(task);

	return file;
}

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask, 1);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */
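
/*
 * Illustrative note on the encoding (not from the original source): struct
 * file is at least 4-byte aligned, so the two low bits of the word returned
 * by __fget_light()/__fdget_pos() are free to carry the FDPUT_FPUT and
 * FDPUT_POS_UNLOCK flags; v & ~3 recovers the pointer, as done above.
 */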

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;

	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * __receive_fd() - Install received file into file descriptor table
 *
 * @fd: fd to install into (if negative, a new fd will be allocated)
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns newly installed fd or -ve on error.
 */
int __receive_fd(int fd, struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	if (fd < 0) {
		new_fd = get_unused_fd_flags(o_flags);
		if (new_fd < 0)
			return new_fd;
	} else {
		new_fd = fd;
	}

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			if (fd < 0)
				put_unused_fd(new_fd);
			return error;
		}
	}

	if (fd < 0) {
		fd_install(new_fd, get_file(file));
	} else {
		error = replace_fd(new_fd, file, o_flags);
		if (error)
			return error;
	}

	/* Bump the sock usage counts, if any. */
	__receive_sock(file);
	return new_fd;
}
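
/*
 * Usage sketch (illustrative, not from the original source): a caller that
 * has obtained a struct file from an SCM_RIGHTS message and wants it at a
 * fresh descriptor with close-on-exec set would do
 *
 *	int new_fd = __receive_fd(-1, file, NULL, O_CLOEXEC);
 *
 * while passing fd >= 0 instead atomically replaces that existing
 * descriptor via replace_fd() above.
 */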

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
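
/*
 * Usage sketch for iterate_fd() (illustrative, not part of the original
 * source; "match_file" and its use are hypothetical):
 *
 *	static int match_file(const void *p, struct file *file, unsigned fd)
 *	{
 *		return file == p ? fd + 1 : 0;	// nonzero stops the walk
 *	}
 *
 *	// returns fd + 1 if 'file' is installed in 'files', else 0
 *	int slot = iterate_fd(files, 0, match_file, file);
 *
 * The callback runs under files->file_lock, so it must not sleep.
 */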