pid.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, costs a scan of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>

struct pid init_struct_pid = {
	.count		= REFCOUNT_INIT(1),
	.tasks		= {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level		= 0,
	.numbers	= { {
		.nr		= 0,
		.ns		= &init_pid_ns,
	}, }
};

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = KREF_INIT(2),
	.idr = IDR_INIT(init_pid_ns.idr),
	.pid_allocated = PIDNS_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock).
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if (refcount_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);
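
/*
 * Illustrative sketch, not part of the original file: get_pid() and
 * put_pid() pair up as a plain refcount round-trip. A hypothetical
 * caller that needs a task's pid to stay valid outside an RCU
 * read-side section would do something like:
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);	// takes a reference
 *	// ... use pid; it cannot be freed while the reference is held ...
 *	put_pid(pid);						// drop it again
 */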

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper, wake up the reaper. The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
		      size_t set_tid_size)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	/*
	 * set_tid_size contains the size of the set_tid array. Starting at
	 * the most nested currently active PID namespace it tells alloc_pid()
	 * which PID to set for a process in that most nested PID namespace
	 * up to set_tid_size PID namespaces. It does not have to set the PID
	 * for a process in all nested PID namespaces but set_tid_size must
	 * never be greater than the current ns->level + 1.
	 */
	if (set_tid_size > ns->level + 1)
		return ERR_PTR(-EINVAL);

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;

	for (i = ns->level; i >= 0; i--) {
		int tid = 0;

		if (set_tid_size) {
			tid = set_tid[ns->level - i];

			retval = -EINVAL;
			if (tid < 1 || tid >= pid_max)
				goto out_free;
			/*
			 * Also fail if a PID != 1 is requested and
			 * no PID 1 exists.
			 */
			if (tid != 1 && !tmp->child_reaper)
				goto out_free;
			retval = -EPERM;
			if (!checkpoint_restore_ns_capable(tmp->user_ns))
				goto out_free;
			set_tid_size--;
		}

		idr_preload(GFP_KERNEL);
		spin_lock_irq(&pidmap_lock);

		if (tid) {
			nr = idr_alloc(&tmp->idr, NULL, tid,
				       tid + 1, GFP_ATOMIC);
			/*
			 * If ENOSPC is returned it means that the PID is
			 * already in use. Return EEXIST in that case.
			 */
			if (nr == -ENOSPC)
				nr = -EEXIST;
		} else {
			int pid_min = 1;
			/*
			 * init really needs pid 1, but after reaching the
			 * maximum wrap back to RESERVED_PIDS
			 */
			if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
				pid_min = RESERVED_PIDS;

			/*
			 * Store a null pointer so find_pid_ns does not find
			 * a partially initialized PID (see below).
			 */
			nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
					      pid_max, GFP_ATOMIC);
		}
		spin_unlock_irq(&pidmap_lock);
		idr_preload_end();

		if (nr < 0) {
			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	/*
	 * ENOMEM is not the most obvious choice especially for the case
	 * where the child subreaper has already exited and the pid
	 * namespace denies the creation of any new processes. But ENOMEM
	 * is what we have exposed to userspace for a long time and it is
	 * documented behavior for pid namespaces. So we can't easily
	 * change it even if there were an error code better suited.
	 */
	retval = -ENOMEM;

	get_pid_ns(ns);
	refcount_set(&pid->count, 1);
	spin_lock_init(&pid->lock);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	init_waitqueue_head(&pid->wait_pidfd);
	INIT_HLIST_HEAD(&pid->inodes);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->pid_allocated & PIDNS_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	spin_lock_irq(&pidmap_lock);
	while (++i <= ns->level) {
		upid = pid->numbers + i;
		idr_remove(&upid->ns->idr, upid->nr);
	}

	/* On failure to allocate the first pid, reset the state */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	spin_unlock_irq(&pidmap_lock);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
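
/*
 * Userspace sketch, not part of the original file: the set_tid/set_tid_size
 * pair above is fed from clone3(). Assuming a kernel with clone3() support
 * and CAP_CHECKPOINT_RESTORE over the affected pid namespaces, a restorer
 * could ask for a specific PID in the innermost namespace like this:
 *
 *	pid_t want = 1234;			// hypothetical target PID
 *	struct clone_args args = {
 *		.set_tid	= (__u64)(uintptr_t)&want,
 *		.set_tid_size	= 1,		// only the innermost namespace
 *	};
 *	pid_t child = syscall(__NR_clone3, &args, sizeof(args));
 */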

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
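
/*
 * Illustrative sketch, not part of the original file: the pid returned by
 * find_vpid()/find_pid_ns() is not reference-counted, so it is only stable
 * inside an RCU read-side critical section unless the caller pins it:
 *
 *	rcu_read_lock();
 *	struct pid *pid = get_pid(find_vpid(nr));
 *	rcu_read_unlock();
 *	// pid stays valid until the matching put_pid()
 *
 * (find_get_pid() below wraps exactly this pattern.)
 */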

static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
	return (type == PIDTYPE_PID) ?
		&task->thread_pid :
		&task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = *task_pid_ptr(task, type);
	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid **pid_ptr = task_pid_ptr(task, type);
	struct pid *pid;
	int tmp;

	pid = *pid_ptr;

	hlist_del_rcu(&task->pid_links[type]);
	*pid_ptr = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (pid_has_task(pid, tmp))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

void exchange_tids(struct task_struct *left, struct task_struct *right)
{
	struct pid *pid1 = left->thread_pid;
	struct pid *pid2 = right->thread_pid;
	struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
	struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];

	/* Swap the single entry tid lists */
	hlists_swap_heads_rcu(head1, head2);

	/* Swap the per task_struct pid */
	rcu_assign_pointer(left->thread_pid, pid2);
	rcu_assign_pointer(right->thread_pid, pid1);

	/* Swap the cached value */
	WRITE_ONCE(left->pid, pid_nr(pid2));
	WRITE_ONCE(right->pid, pid_nr(pid1));
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			enum pid_type type)
{
	if (type == PIDTYPE_PID)
		new->thread_pid = old->thread_pid;
	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_task_by_vpid);

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;

	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();

	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);
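
/*
 * Illustrative sketch, not part of the original file: one struct pid carries
 * one number per namespace level, so translating between namespaces is just
 * a matter of which upid gets read:
 *
 *	pid_t global = pid_nr_ns(pid, &init_pid_ns);	// number at level 0
 *	pid_t local  = pid_vnr(pid);	// number in current's active namespace
 */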

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}
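
/*
 * Illustrative sketch, not part of the original file: a /proc-style walk
 * resumes from the last number seen, relying on find_ge_pid() returning
 * the next allocated pid at or above nr (under rcu_read_lock()):
 *
 *	int nr = 0;
 *	struct pid *pid;
 *
 *	while ((pid = find_ge_pid(nr, ns)) != NULL) {
 *		// ... emit this pid ...
 *		nr = pid_nr_ns(pid, ns) + 1;	// continue past it
 *	}
 */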

struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
{
	struct fd f;
	struct pid *pid;

	f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);

	pid = pidfd_pid(f.file);
	if (!IS_ERR(pid)) {
		get_pid(pid);
		*flags = f.file->f_flags;
	}

	fdput(f);
	return pid;
}

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:   struct pid that the pidfd will reference
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid, unsigned int flags)
{
	int fd;

	fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
			      flags | O_RDWR | O_CLOEXEC);
	if (fd < 0)
		put_pid(pid);

	return fd;
}

/**
 * pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the process identified by @pid. Currently, the process identified by
 * @pid must be a thread-group leader. This restriction currently exists
 * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot
 * be used with CLONE_THREAD) and pidfd polling (only supports thread group
 * leaders).
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
	int fd;
	struct pid *p;

	if (flags & ~PIDFD_NONBLOCK)
		return -EINVAL;

	if (pid <= 0)
		return -EINVAL;

	p = find_get_pid(pid);
	if (!p)
		return -ESRCH;

	if (pid_has_task(p, PIDTYPE_TGID))
		fd = pidfd_create(p, flags);
	else
		fd = -EINVAL;

	put_pid(p);
	return fd;
}
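
/*
 * Userspace sketch, not part of the original file: a minimal pidfd_open()
 * caller, assuming a libc that exposes SYS_pidfd_open. The returned fd can
 * be polled; it signals POLLIN once the (thread-group leader) process exits:
 *
 *	#include <poll.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// returns when the process has exited
 *	close(pidfd);
 */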

void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}

static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
	struct file *file;
	int ret;

	ret = down_read_killable(&task->signal->exec_update_lock);
	if (ret)
		return ERR_PTR(ret);

	if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
		file = fget_task(task, fd);
	else
		file = ERR_PTR(-EPERM);

	up_read(&task->signal->exec_update_lock);

	return file ?: ERR_PTR(-EBADF);
}

static int pidfd_getfd(struct pid *pid, int fd)
{
	struct task_struct *task;
	struct file *file;
	int ret;

	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task)
		return -ESRCH;

	file = __pidfd_fget(task, fd);
	put_task_struct(task);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = receive_fd(file, O_CLOEXEC);
	fput(file);

	return ret;
}

/**
 * sys_pidfd_getfd() - Get a file descriptor from another process
 *
 * @pidfd: the pidfd file descriptor of the process
 * @fd:    the file descriptor number to get
 * @flags: flags on how to get the fd (reserved)
 *
 * This syscall gets a copy of a file descriptor from another process
 * based on the pidfd and the file descriptor number. It requires that
 * the calling process has the ability to ptrace the process represented
 * by the pidfd. The process which is having its file descriptor copied
 * is otherwise unaffected.
 *
 * Return: On success, a cloexec file descriptor is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
		unsigned int, flags)
{
	struct pid *pid;
	struct fd f;
	int ret;

	/* flags is currently unused - make sure it's unset */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	pid = pidfd_pid(f.file);
	if (IS_ERR(pid))
		ret = PTR_ERR(pid);
	else
		ret = pidfd_getfd(pid, fd);

	fdput(f);
	return ret;
}
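
/*
 * Userspace sketch, not part of the original file: copying a descriptor out
 * of a ptrace-accessible target, assuming a libc that exposes the raw
 * SYS_pidfd_open/SYS_pidfd_getfd syscall numbers:
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	int fd = syscall(SYS_pidfd_getfd, pidfd, target_fd, 0);
 *	// fd is now a local O_CLOEXEC duplicate of the target's target_fd
 */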