umh.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * umh - the kernel usermode helper
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>

#include <trace/events/module.h>

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}
static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);

	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away
	 * or the caller used UMH_NO_WAIT.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}
/*
 * This is the task which runs the usermode application
 */
static int call_usermodehelper_exec_async(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Initial kernel threads share their FS with init, in order to
	 * get the init root directory. But we've now created a new
	 * thread that is going to execve a user process and has its own
	 * 'struct fs_struct'. Reset umask to the default.
	 */
	current->fs->umask = 0022;

	/*
	 * Our parent (unbound workqueue) runs with elevated scheduling
	 * priority. Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto out;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto out;
		}
	}

	commit_creds(new);

	retval = kernel_execve(sub_info->path,
			       (const char *const *)sub_info->argv,
			       (const char *const *)sub_info->envp);
out:
	sub_info->retval = retval;
	/*
	 * call_usermodehelper_exec_sync() will call umh_complete
	 * if UMH_WAIT_PROC.
	 */
	if (!(sub_info->wait & UMH_WAIT_PROC))
		umh_complete(sub_info);
	if (!retval)
		return 0;
	do_exit(0);
}
/* Handles UMH_WAIT_PROC. */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
	pid_t pid;

	/* If SIGCLD is ignored do_wait won't populate the status. */
	kernel_sigaction(SIGCHLD, SIG_DFL);
	pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
	if (pid < 0)
		sub_info->retval = pid;
	else
		kernel_wait(pid, &sub_info->retval);

	/* Restore default kernel sig handler */
	kernel_sigaction(SIGCHLD, SIG_IGN);
	umh_complete(sub_info);
}
/*
 * We need to create the usermodehelper kernel thread from a task that is
 * affine to an optimized set of CPUs (or nohz housekeeping ones) so that it
 * inherits the widest possible affinity, irrespective of call_usermodehelper()
 * callers that may have a reduced affinity (e.g. per-cpu workqueues). We don't
 * want usermodehelper targets to contend for a busy CPU.
 *
 * Unbound workqueues provide such wide affinity and allow blocking on
 * UMH_WAIT_PROC requests without blocking pending requests (up to some limit).
 *
 * Besides, workqueues provide the privilege level that the caller might not
 * have to perform the usermodehelper request.
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);

	if (sub_info->wait & UMH_WAIT_PROC) {
		call_usermodehelper_exec_sync(sub_info);
	} else {
		pid_t pid;

		/*
		 * Use CLONE_PARENT to reparent it to kthreadd; we do not
		 * want to pollute current->children, and we need a parent
		 * that always ignores SIGCHLD to ensure auto-reaping.
		 */
		pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
				    CLONE_PARENT | SIGCHLD);
		if (pid < 0) {
			sub_info->retval = pid;
			umh_complete(sub_info);
		}
	}
}
/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
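
/*
 * Illustrative sketch (not part of the original file): code that must not
 * race with suspend/hibernation typically brackets its helper invocation
 * with this read lock, roughly:
 *
 *	ret = usermodehelper_read_trylock();
 *	if (ret)
 *		return ret;	// -EAGAIN: helpers are currently disabled
 *	ret = call_usermodehelper(path, argv, envp, UMH_WAIT_EXEC);
 *	usermodehelper_read_unlock();
 *	return ret;
 */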
/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wakeup tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
				    atomic_read(&running_helpers) == 0,
				    RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}
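
/*
 * Illustrative sketch (not part of the original file): a subsystem that needs
 * to quiesce usermode helpers (the freeze/thaw path is the main user) could
 * pair these primitives roughly as follows:
 *
 *	if (__usermodehelper_disable(UMH_DISABLED))
 *		return -EAGAIN;	// helpers still running after the timeout
 *	// ... no new helpers can be started here ...
 *	__usermodehelper_set_disable_depth(UMH_ENABLED);
 */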
static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 *
 * The init function is used to customize the helper process prior to
 * exec.  A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is about to
 * be freed.  This can be used for freeing the argv and envp.  The cleanup
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv,
		char **envp, gfp_t gfp_mask,
		int (*init)(struct subprocess_info *info, struct cred *new),
		void (*cleanup)(struct subprocess_info *info),
		void *data)
{
	struct subprocess_info *sub_info;

	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);

#ifdef CONFIG_STATIC_USERMODEHELPER
	sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH;
#else
	sub_info->path = path;
#endif
	sub_info->argv = argv;
	sub_info->envp = envp;

	sub_info->cleanup = cleanup;
	sub_info->init = init;
	sub_info->data = data;
out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);
/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        When UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of system workqueues
 * (i.e. it runs with full root capabilities and optimized affinity).
 *
 * Note: a successful return value does not guarantee the helper was called at
 * all.  You can't rely on sub_info->{init,cleanup} being called even for
 * UMH_WAIT_* wait modes, as STATIC_USERMODEHELPER_PATH="" turns all helpers
 * into a successful no-op.
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	if (!sub_info->path) {
		call_usermodehelper_freeinfo(sub_info);
		return -EINVAL;
	}
	helper_lock();
	if (usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	/*
	 * If there is no binary for us to call, then just return and get out of
	 * here.  This allows us to set STATIC_USERMODEHELPER_PATH to "" and
	 * disable all call_usermodehelper() calls.
	 */
	if (strlen(sub_info->path) == 0)
		goto out;

	/*
	 * Set the completion pointer only if there is a waiter.
	 * This makes it possible to use umh_complete to free
	 * the data structure in case of UMH_NO_WAIT.
	 */
	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
	sub_info->wait = wait;

	queue_work(system_unbound_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);
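
/*
 * Illustrative sketch (not part of the original file): a typical caller pairs
 * call_usermodehelper_setup() with call_usermodehelper_exec(), using the
 * cleanup callback to release any argv it allocated.  The helper path and the
 * example_cleanup()/example_run_helper() names below are hypothetical.
 *
 *	static void example_cleanup(struct subprocess_info *info)
 *	{
 *		kfree(info->argv);
 *	}
 *
 *	static int example_run_helper(char **argv)
 *	{
 *		static char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };
 *		struct subprocess_info *info;
 *
 *		info = call_usermodehelper_setup(argv[0], argv, envp,
 *						 GFP_KERNEL, NULL,
 *						 example_cleanup, NULL);
 *		if (!info)
 *			return -ENOMEM;
 *		// exec frees info (and calls example_cleanup) in all cases
 *		return call_usermodehelper_exec(info, UMH_WAIT_EXEC);
 *	}
 */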
/**
 * call_usermodehelper() - prepare and start a usermode application
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @wait: wait for the application to finish and return status.
 *        When UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * This function is equivalent to calling call_usermodehelper_setup() and then
 * call_usermodehelper_exec().
 */
int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
{
	struct subprocess_info *info;
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

	info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
					 NULL, NULL, NULL);
	if (info == NULL)
		return -ENOMEM;

	return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper);
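
/*
 * Illustrative sketch (not part of the original file): the simplest way to
 * run a helper and wait for it to exit.  "/sbin/example-helper" and its
 * arguments are hypothetical; with UMH_WAIT_PROC the return value is the
 * helper's wait status (0 only if it exited successfully).
 *
 *	static int example_call_helper(void)
 *	{
 *		char *argv[] = { "/sbin/example-helper", "--check", NULL };
 *		static char *envp[] = { "HOME=/",
 *					"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
 *					NULL };
 *
 *		return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
 *	}
 */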
static int proc_cap_handler(struct ctl_table *table, int write,
			    void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace.  Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	if (write) {
		spin_lock(&umh_sysctl_lock);
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
		spin_unlock(&umh_sysctl_lock);
	}

	return 0;
}
struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};
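
/*
 * Illustrative note (not part of the original file): this table is assumed to
 * be registered elsewhere (e.g. from kernel/sysctl.c) under
 * "kernel/usermodehelper", in which case the two capability masks would appear
 * as /proc/sys/kernel/usermodehelper/bset and
 * /proc/sys/kernel/usermodehelper/inheritable, each read and written as
 * _KERNEL_CAPABILITY_U32S unsigned longs (least significant 32 bits first).
 * Per proc_cap_handler() above, writes require both CAP_SETPCAP and
 * CAP_SYS_MODULE and can only drop capabilities, never add them.
 */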