coredump.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>
#include <linux/elf.h>
#include <linux/uaccess.h>

#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

int core_uses_pid;
unsigned int core_pipe_limit;
char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;

struct core_name {
        char *corename;
        int used, size;
};

/* The maximal length of core_pattern is also specified in sysctl.c */

static int expand_corename(struct core_name *cn, int size)
{
        char *corename = krealloc(cn->corename, size, GFP_KERNEL);

        if (!corename)
                return -ENOMEM;

        if (size > core_name_size) /* racy but harmless */
                core_name_size = size;

        cn->size = ksize(corename);
        cn->corename = corename;
        return 0;
}
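
/*
 * cn_vprintf() appends formatted output to cn, growing the buffer and
 * retrying when vsnprintf() reports truncation: vsnprintf() returns the
 * length that *would* have been written, so growing the buffer by
 * (need - free + 1) bytes is exactly enough on the second pass.
 */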
static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
                                     va_list arg)
{
        int free, need;
        va_list arg_copy;

again:
        free = cn->size - cn->used;

        va_copy(arg_copy, arg);
        need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
        va_end(arg_copy);

        if (need < free) {
                cn->used += need;
                return 0;
        }

        if (!expand_corename(cn, cn->size + need - free + 1))
                goto again;

        return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
        va_list arg;
        int ret;

        va_start(arg, fmt);
        ret = cn_vprintf(cn, fmt, arg);
        va_end(arg);

        return ret;
}

static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
        int cur = cn->used;
        va_list arg;
        int ret;

        va_start(arg, fmt);
        ret = cn_vprintf(cn, fmt, arg);
        va_end(arg);

        if (ret == 0) {
                /*
                 * Ensure that this coredump name component can't cause the
                 * resulting corefile path to consist of a ".." or ".".
                 */
                if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
                    (cn->used - cur == 2 && cn->corename[cur] == '.'
                     && cn->corename[cur+1] == '.'))
                        cn->corename[cur] = '!';

                /*
                 * Empty names are fishy and could be used to create a "//" in a
                 * corefile name, causing the coredump to happen one directory
                 * level too high. Enforce that all components of the core
                 * pattern are at least one character long.
                 */
                if (cn->used == cur)
                        ret = cn_printf(cn, "!");
        }

        for (; cur < cn->used; ++cur) {
                if (cn->corename[cur] == '/')
                        cn->corename[cur] = '!';
        }
        return ret;
}
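
/*
 * Escaping example: a comm of "../x" expanded via %e comes out as "..!x",
 * a bare "." becomes "!" and ".." becomes "!.", so no escaped component
 * can name the current or parent directory or embed a path separator.
 */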
static int cn_print_exe_file(struct core_name *cn, bool name_only)
{
        struct file *exe_file;
        char *pathbuf, *path, *ptr;
        int ret;

        exe_file = get_mm_exe_file(current->mm);
        if (!exe_file)
                return cn_esc_printf(cn, "%s (path unknown)", current->comm);

        pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!pathbuf) {
                ret = -ENOMEM;
                goto put_exe_file;
        }

        path = file_path(exe_file, pathbuf, PATH_MAX);
        if (IS_ERR(path)) {
                ret = PTR_ERR(path);
                goto free_buf;
        }

        if (name_only) {
                ptr = strrchr(path, '/');
                if (ptr)
                        path = ptr + 1;
        }
        ret = cn_esc_printf(cn, "%s", path);

free_buf:
        kfree(pathbuf);
put_exe_file:
        fput(exe_file);
        return ret;
}

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
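/*
 * Illustrative expansion (hypothetical values): with
 * core_pattern = "/var/crash/core.%e.%p.%t", a dump of a task named
 * "myapp" with tgid 1234 at Unix time 1600000000 yields
 * "/var/crash/core.myapp.1234.1600000000". A leading '|', as in
 * "|/usr/lib/helper %p %s", makes this function return 1 (ispipe);
 * the rest of the pattern is split on spaces into *argv for the
 * usermode helper, with the split done before template expansion so
 * that an expanded %e or %E containing spaces stays a single argument.
 */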
static int format_corename(struct core_name *cn, struct coredump_params *cprm,
                           size_t **argv, int *argc)
{
        const struct cred *cred = current_cred();
        const char *pat_ptr = core_pattern;
        int ispipe = (*pat_ptr == '|');
        bool was_space = false;
        int pid_in_pattern = 0;
        int err = 0;

        cn->used = 0;
        cn->corename = NULL;
        if (expand_corename(cn, core_name_size))
                return -ENOMEM;
        cn->corename[0] = '\0';

        if (ispipe) {
                int argvs = sizeof(core_pattern) / 2;

                (*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
                if (!(*argv))
                        return -ENOMEM;
                (*argv)[(*argc)++] = 0;
                ++pat_ptr;
                if (!(*pat_ptr))
                        return -ENOMEM;
        }

        /* Repeat as long as we have more pattern to process and more output
           space */
        while (*pat_ptr) {
                /*
                 * Split on spaces before doing template expansion so that
                 * %e and %E don't get split if they have spaces in them
                 */
                if (ispipe) {
                        if (isspace(*pat_ptr)) {
                                if (cn->used != 0)
                                        was_space = true;
                                pat_ptr++;
                                continue;
                        } else if (was_space) {
                                was_space = false;
                                err = cn_printf(cn, "%c", '\0');
                                if (err)
                                        return err;
                                (*argv)[(*argc)++] = cn->used;
                        }
                }
                if (*pat_ptr != '%') {
                        err = cn_printf(cn, "%c", *pat_ptr++);
                } else {
                        switch (*++pat_ptr) {
                        /* single % at the end, drop that */
                        case 0:
                                goto out;
                        /* Double percent, output one percent */
                        case '%':
                                err = cn_printf(cn, "%c", '%');
                                break;
                        /* pid */
                        case 'p':
                                pid_in_pattern = 1;
                                err = cn_printf(cn, "%d",
                                                task_tgid_vnr(current));
                                break;
                        /* global pid */
                        case 'P':
                                err = cn_printf(cn, "%d",
                                                task_tgid_nr(current));
                                break;
                        case 'i':
                                err = cn_printf(cn, "%d",
                                                task_pid_vnr(current));
                                break;
                        case 'I':
                                err = cn_printf(cn, "%d",
                                                task_pid_nr(current));
                                break;
                        /* uid */
                        case 'u':
                                err = cn_printf(cn, "%u",
                                                from_kuid(&init_user_ns,
                                                          cred->uid));
                                break;
                        /* gid */
                        case 'g':
                                err = cn_printf(cn, "%u",
                                                from_kgid(&init_user_ns,
                                                          cred->gid));
                                break;
                        case 'd':
                                err = cn_printf(cn, "%d",
                                                __get_dumpable(cprm->mm_flags));
                                break;
                        /* signal that caused the coredump */
                        case 's':
                                err = cn_printf(cn, "%d",
                                                cprm->siginfo->si_signo);
                                break;
                        /* UNIX time of coredump */
                        case 't': {
                                time64_t time;

                                time = ktime_get_real_seconds();
                                err = cn_printf(cn, "%lld", time);
                                break;
                        }
                        /* hostname */
                        case 'h':
                                down_read(&uts_sem);
                                err = cn_esc_printf(cn, "%s",
                                                    utsname()->nodename);
                                up_read(&uts_sem);
                                break;
                        /* executable, could be changed by prctl PR_SET_NAME etc */
                        case 'e':
                                err = cn_esc_printf(cn, "%s", current->comm);
                                break;
                        /* file name of executable */
                        case 'f':
                                err = cn_print_exe_file(cn, true);
                                break;
                        case 'E':
                                err = cn_print_exe_file(cn, false);
                                break;
                        /* core limit size */
                        case 'c':
                                err = cn_printf(cn, "%lu",
                                                rlimit(RLIMIT_CORE));
                                break;
                        default:
                                break;
                        }
                        ++pat_ptr;
                }

                if (err)
                        return err;
        }

out:
        /* Backward compatibility with core_uses_pid:
         *
         * If core_pattern does not include a %p (as is the default)
         * and core_uses_pid is set, then .%pid will be appended to
         * the filename. Do not do this for piped commands. */
        if (!ispipe && !pid_in_pattern && core_uses_pid) {
                err = cn_printf(cn, ".%d", task_tgid_vnr(current));
                if (err)
                        return err;
        }
        return ispipe;
}

static int zap_process(struct task_struct *start, int exit_code, int flags)
{
        struct task_struct *t;
        int nr = 0;

        /* ignore all signals except SIGKILL, see prepare_signal() */
        start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
        start->signal->group_exit_code = exit_code;
        start->signal->group_stop_count = 0;

        for_each_thread(start, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                if (t != current && t->mm) {
                        sigaddset(&t->pending.signal, SIGKILL);
                        signal_wake_up(t, 1);
                        nr++;
                }
        }

        return nr;
}

static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
                       struct core_state *core_state, int exit_code)
{
        struct task_struct *g, *p;
        unsigned long flags;
        int nr = -EAGAIN;

        spin_lock_irq(&tsk->sighand->siglock);
        if (!signal_group_exit(tsk->signal)) {
                mm->core_state = core_state;
                tsk->signal->group_exit_task = tsk;
                nr = zap_process(tsk, exit_code, 0);
                clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        }
        spin_unlock_irq(&tsk->sighand->siglock);
        if (unlikely(nr < 0))
                return nr;

        tsk->flags |= PF_DUMPCORE;
        if (atomic_read(&mm->mm_users) == nr + 1)
                goto done;
        /*
         * We should find and kill all tasks which use this mm, and we should
         * count them correctly into ->nr_threads. We don't take tasklist
         * lock, but this is safe wrt:
         *
         * fork:
         *      None of sub-threads can fork after zap_process(leader). All
         *      processes which were created before this point should be
         *      visible to zap_threads() because copy_process() adds the new
         *      process to the tail of init_task.tasks list, and lock/unlock
         *      of ->siglock provides a memory barrier.
         *
         * do_exit:
         *      The caller holds mm->mmap_lock. This means that the task which
         *      uses this mm can't pass exit_mm(), so it can't exit or clear
         *      its ->mm.
         *
         * de_thread:
         *      It does list_replace_rcu(&leader->tasks, &current->tasks),
         *      we must see either old or new leader, this does not matter.
         *      However, it can change p->sighand, so lock_task_sighand(p)
         *      must be used. Since p->mm != NULL and we hold ->mmap_lock
         *      it can't fail.
         *
         *      Note also that "g" can be the old leader with ->mm == NULL
         *      and already unhashed and thus removed from ->thread_group.
         *      This is OK, __unhash_process()->list_del_rcu() does not
         *      clear the ->next pointer, we will find the new leader via
         *      next_thread().
         */
        rcu_read_lock();
        for_each_process(g) {
                if (g == tsk->group_leader)
                        continue;
                if (g->flags & PF_KTHREAD)
                        continue;

                for_each_thread(g, p) {
                        if (unlikely(!p->mm))
                                continue;
                        if (unlikely(p->mm == mm)) {
                                lock_task_sighand(p, &flags);
                                nr += zap_process(p, exit_code,
                                                  SIGNAL_GROUP_EXIT);
                                unlock_task_sighand(p, &flags);
                        }
                        break;
                }
        }
        rcu_read_unlock();
done:
        atomic_set(&core_state->nr_threads, nr);
        return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        int core_waiters = -EBUSY;

        init_completion(&core_state->startup);
        core_state->dumper.task = tsk;
        core_state->dumper.next = NULL;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        if (!mm->core_state)
                core_waiters = zap_threads(tsk, mm, core_state, exit_code);
        mmap_write_unlock(mm);

        if (core_waiters > 0) {
                struct core_thread *ptr;

                freezer_do_not_count();
                wait_for_completion(&core_state->startup);
                freezer_count();
                /*
                 * Wait for all the threads to become inactive, so that
                 * all the thread context (extended register state, like
                 * fpu etc) gets copied to the memory.
                 */
                ptr = core_state->dumper.next;
                while (ptr != NULL) {
                        wait_task_inactive(ptr->task, 0);
                        ptr = ptr->next;
                }
        }

        return core_waiters;
}

static void coredump_finish(struct mm_struct *mm, bool core_dumped)
{
        struct core_thread *curr, *next;
        struct task_struct *task;

        spin_lock_irq(&current->sighand->siglock);
        if (core_dumped && !__fatal_signal_pending(current))
                current->signal->group_exit_code |= 0x80;
        current->signal->group_exit_task = NULL;
        current->signal->flags = SIGNAL_GROUP_EXIT;
        spin_unlock_irq(&current->sighand->siglock);

        next = mm->core_state->dumper.next;
        while ((curr = next) != NULL) {
                next = curr->next;
                task = curr->task;
                /*
                 * see exit_mm(), curr->task must not see
                 * ->task == NULL before we read ->next.
                 */
                smp_mb();
                curr->task = NULL;
                wake_up_process(task);
        }

        mm->core_state = NULL;
}

static bool dump_interrupted(void)
{
        /*
         * SIGKILL or freezing() interrupt the coredumping. Perhaps we
         * can do try_to_freeze() and check __fatal_signal_pending(),
         * but then we need to teach dump_write() to restart and clear
         * TIF_SIGPENDING.
         */
        return signal_pending(current);
}

static void wait_for_dump_helpers(struct file *file)
{
        struct pipe_inode_info *pipe = file->private_data;

        pipe_lock(pipe);
        pipe->readers++;
        pipe->writers--;
        wake_up_interruptible_sync(&pipe->rd_wait);
        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        pipe_unlock(pipe);

        /*
         * We actually want wait_event_freezable() but then we need
         * to clear TIF_SIGPENDING and improve dump_interrupted().
         */
        wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);

        pipe_lock(pipe);
        pipe->readers--;
        pipe->writers++;
        pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace. Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process. Returns 0 on success, or a
 * negative errno on failure.
 * Note that it also sets the core limit to 1. This
 * is a special value that we use to trap recursive
 * core dumps
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
        struct file *files[2];
        struct coredump_params *cp = (struct coredump_params *)info->data;
        int err = create_pipe_files(files, 0);

        if (err)
                return err;

        cp->file = files[1];

        err = replace_fd(0, files[0], 0);
        fput(files[0]);
        /* and disallow core files too */
        current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

        return err;
}
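
/*
 * Sketch of a matching userspace helper (hypothetical, not part of the
 * kernel tree): umh_pipe_setup() installs the pipe's read end as fd 0,
 * so a core_pattern of "|/usr/local/bin/core-helper %p" could be
 * consumed along these lines:
 *
 *      // argv[1] is the pid substituted for %p
 *      int main(int argc, char **argv)
 *      {
 *              char buf[4096];
 *              ssize_t n;
 *
 *              while ((n = read(0, buf, sizeof(buf))) > 0)
 *                      ;       // write the core image somewhere safe
 *              return 0;
 *      }
 */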
void do_coredump(const kernel_siginfo_t *siginfo)
{
        struct core_state core_state;
        struct core_name cn;
        struct mm_struct *mm = current->mm;
        struct linux_binfmt *binfmt;
        const struct cred *old_cred;
        struct cred *cred;
        int retval = 0;
        int ispipe;
        size_t *argv = NULL;
        int argc = 0;
        struct files_struct *displaced;
        /* require nonrelative corefile path and be extra careful */
        bool need_suid_safe = false;
        bool core_dumped = false;
        static atomic_t core_dump_count = ATOMIC_INIT(0);
        struct coredump_params cprm = {
                .siginfo = siginfo,
                .regs = signal_pt_regs(),
                .limit = rlimit(RLIMIT_CORE),
                /*
                 * We must use the same mm->flags while dumping core to avoid
                 * inconsistency of bit flags, since this flag is not protected
                 * by any locks.
                 */
                .mm_flags = mm->flags,
        };

        audit_core_dumps(siginfo->si_signo);

        binfmt = mm->binfmt;
        if (!binfmt || !binfmt->core_dump)
                goto fail;
        if (!__get_dumpable(cprm.mm_flags))
                goto fail;

        cred = prepare_creds();
        if (!cred)
                goto fail;
        /*
         * We cannot trust fsuid as being the "true" uid of the process
         * nor do we know its entire history. We only know it was tainted
         * so we dump it as root in mode 2, and only into a controlled
         * environment (pipe handler or fully qualified path).
         */
        if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
                /* Setuid core dump mode */
                cred->fsuid = GLOBAL_ROOT_UID;  /* Dump root private */
                need_suid_safe = true;
        }

        retval = coredump_wait(siginfo->si_signo, &core_state);
        if (retval < 0)
                goto fail_creds;

        old_cred = override_creds(cred);

        ispipe = format_corename(&cn, &cprm, &argv, &argc);

        if (ispipe) {
                int argi;
                int dump_count;
                char **helper_argv;
                struct subprocess_info *sub_info;

                if (ispipe < 0) {
                        printk(KERN_WARNING "format_corename failed\n");
                        printk(KERN_WARNING "Aborting core\n");
                        goto fail_unlock;
                }

                if (cprm.limit == 1) {
                        /* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
                         *
                         * Normally core limits are irrelevant to pipes, since
                         * we're not writing to the file system, but we use
                         * cprm.limit of 1 here as a special value, this is a
                         * consistent way to catch recursive crashes.
                         * We can still crash if the core_pattern binary sets
                         * RLIMIT_CORE = !1, but it runs as root, and can do
                         * lots of stupid things.
                         *
                         * Note that we use task_tgid_vnr here to grab the pid
                         * of the thread group leader. That way we get the
                         * right pid if a thread in a multi-threaded
                         * core_pattern process dies.
                         */
                        printk(KERN_WARNING
                               "Process %d(%s) has RLIMIT_CORE set to 1\n",
                               task_tgid_vnr(current), current->comm);
                        printk(KERN_WARNING "Aborting core\n");
                        goto fail_unlock;
                }
                cprm.limit = RLIM_INFINITY;

                dump_count = atomic_inc_return(&core_dump_count);
                if (core_pipe_limit && (core_pipe_limit < dump_count)) {
                        printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
                               task_tgid_vnr(current), current->comm);
                        printk(KERN_WARNING "Skipping core dump\n");
                        goto fail_dropcount;
                }

                helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
                                            GFP_KERNEL);
                if (!helper_argv) {
                        printk(KERN_WARNING "%s failed to allocate memory\n",
                               __func__);
                        goto fail_dropcount;
                }
                for (argi = 0; argi < argc; argi++)
                        helper_argv[argi] = cn.corename + argv[argi];
                helper_argv[argi] = NULL;

                retval = -ENOMEM;
                sub_info = call_usermodehelper_setup(helper_argv[0],
                                                     helper_argv, NULL,
                                                     GFP_KERNEL,
                                                     umh_pipe_setup,
                                                     NULL, &cprm);
                if (sub_info)
                        retval = call_usermodehelper_exec(sub_info,
                                                          UMH_WAIT_EXEC);

                kfree(helper_argv);
                if (retval) {
                        printk(KERN_INFO "Core dump to |%s pipe failed\n",
                               cn.corename);
                        goto close_fail;
                }
        } else {
                struct inode *inode;
                int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
                                 O_LARGEFILE | O_EXCL;

                if (cprm.limit < binfmt->min_coredump)
                        goto fail_unlock;

                if (need_suid_safe && cn.corename[0] != '/') {
                        printk(KERN_WARNING "Pid %d(%s) can only dump core "\
                                "to fully qualified path!\n",
                                task_tgid_vnr(current), current->comm);
                        printk(KERN_WARNING "Skipping core dump\n");
                        goto fail_unlock;
                }

                /*
                 * Unlink the file if it exists unless this is a SUID
                 * binary - in that case, we're running around with root
                 * privs and don't want to unlink another user's coredump.
                 */
                if (!need_suid_safe) {
                        /*
                         * If it doesn't exist, that's fine. If there's some
                         * other problem, we'll catch it at the filp_open().
                         */
                        do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
                }

                /*
                 * There is a race between unlinking and creating the
                 * file, but if that causes an EEXIST here, that's
                 * fine - another process raced with us while creating
                 * the corefile, and the other process won. To userspace,
                 * what matters is that at least one of the two processes
                 * writes its coredump successfully, not which one.
                 */
                if (need_suid_safe) {
                        /*
                         * Using user namespaces, normal user tasks can change
                         * their current->fs->root to point to arbitrary
                         * directories. Since the intention of the "only dump
                         * with a fully qualified path" rule is to control where
                         * coredumps may be placed using root privileges,
                         * current->fs->root must not be used. Instead, use the
                         * root directory of init_task.
                         */
                        struct path root;

                        task_lock(&init_task);
                        get_fs_root(init_task.fs, &root);
                        task_unlock(&init_task);
                        cprm.file = file_open_root(root.dentry, root.mnt,
                                                   cn.corename, open_flags,
                                                   0600);
                        path_put(&root);
                } else {
                        cprm.file = filp_open(cn.corename, open_flags, 0600);
                }
                if (IS_ERR(cprm.file))
                        goto fail_unlock;

                inode = file_inode(cprm.file);
                if (inode->i_nlink > 1)
                        goto close_fail;
                if (d_unhashed(cprm.file->f_path.dentry))
                        goto close_fail;
                /*
                 * AK: actually i see no reason to not allow this for named
                 * pipes etc, but keep the previous behaviour for now.
                 */
                if (!S_ISREG(inode->i_mode))
                        goto close_fail;
                /*
                 * Don't dump core if the filesystem changed owner or mode
                 * of the file during file creation. This is an issue when
                 * a process dumps core while its cwd is e.g. on a vfat
                 * filesystem.
                 */
                if (!uid_eq(inode->i_uid, current_fsuid()))
                        goto close_fail;
                if ((inode->i_mode & 0677) != 0600)
                        goto close_fail;
                if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
                        goto close_fail;
                if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
                        goto close_fail;
        }

        /* get us an unshared descriptor table; almost always a no-op */
        retval = unshare_files(&displaced);
        if (retval)
                goto close_fail;
        if (displaced)
                put_files_struct(displaced);
        if (!dump_interrupted()) {
                /*
                 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
                 * have this set to NULL.
                 */
                if (!cprm.file) {
                        pr_info("Core dump to |%s disabled\n", cn.corename);
                        goto close_fail;
                }
                file_start_write(cprm.file);
                core_dumped = binfmt->core_dump(&cprm);
                file_end_write(cprm.file);
        }
        if (ispipe && core_pipe_limit)
                wait_for_dump_helpers(cprm.file);
close_fail:
        if (cprm.file)
                filp_close(cprm.file, NULL);
fail_dropcount:
        if (ispipe)
                atomic_dec(&core_dump_count);
fail_unlock:
        kfree(argv);
        kfree(cn.corename);
        coredump_finish(mm, core_dumped);
        revert_creds(old_cred);
fail_creds:
        put_cred(cred);
fail:
        return;
}

/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
        struct file *file = cprm->file;
        loff_t pos = file->f_pos;
        ssize_t n;

        if (cprm->written + nr > cprm->limit)
                return 0;
        if (dump_interrupted())
                return 0;
        n = __kernel_write(file, addr, nr, &pos);
        if (n != nr)
                return 0;
        file->f_pos = pos;
        cprm->written += n;
        cprm->pos += n;

        return 1;
}
EXPORT_SYMBOL(dump_emit);

int dump_skip(struct coredump_params *cprm, size_t nr)
{
        static char zeroes[PAGE_SIZE];
        struct file *file = cprm->file;

        if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
                if (dump_interrupted() ||
                    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
                        return 0;
                cprm->pos += nr;
                return 1;
        } else {
                while (nr > PAGE_SIZE) {
                        if (!dump_emit(cprm, zeroes, PAGE_SIZE))
                                return 0;
                        nr -= PAGE_SIZE;
                }
                return dump_emit(cprm, zeroes, nr);
        }
}
EXPORT_SYMBOL(dump_skip);

#ifdef CONFIG_ELF_CORE
int dump_user_range(struct coredump_params *cprm, unsigned long start,
                    unsigned long len)
{
        unsigned long addr;

        for (addr = start; addr < start + len; addr += PAGE_SIZE) {
                struct page *page;
                int stop;

                /*
                 * To avoid having to allocate page tables for virtual address
                 * ranges that have never been used yet, and also to make it
                 * easy to generate sparse core files, use a helper that returns
                 * NULL when encountering an empty page table entry that would
                 * otherwise have been filled with the zero page.
                 */
                page = get_dump_page(addr);
                if (page) {
                        void *kaddr = kmap(page);

                        stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
                        kunmap(page);
                        put_user_page(page);
                } else {
                        stop = !dump_skip(cprm, PAGE_SIZE);
                }
                if (stop)
                        return 0;
        }
        return 1;
}
#endif

int dump_align(struct coredump_params *cprm, int align)
{
        unsigned mod = cprm->pos & (align - 1);

        if (align & (align - 1))
                return 0;
        return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);
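
/*
 * Illustrative use from a binfmt dumper (hypothetical sketch; the real
 * callers live in fs/binfmt_elf.c and friends): each helper returns
 * nonzero on success and 0 on failure, so writers chain them like
 *
 *      if (!dump_emit(cprm, &ehdr, sizeof(ehdr)))
 *              return 0;
 *      if (!dump_align(cprm, 4))       // align must be a power of two
 *              return 0;
 *      if (!dump_user_range(cprm, m->start, m->dump_size))
 *              return 0;
 */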
/*
 * Ensures that file size is big enough to contain the current file
 * position. This prevents gdb from complaining about a truncated file
 * if the last "write" to the file was dump_skip.
 */
void dump_truncate(struct coredump_params *cprm)
{
        struct file *file = cprm->file;
        loff_t offset;

        if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
                offset = file->f_op->llseek(file, 0, SEEK_CUR);
                if (i_size_read(file->f_mapping->host) < offset)
                        do_truncate(file->f_path.dentry, offset, 0, file);
        }
}
EXPORT_SYMBOL(dump_truncate);

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include the vDSO, vsyscall, and other
 * architecture-specific mappings.
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
        /* Any vsyscall mappings? */
        if (vma == get_gate_vma(vma->vm_mm))
                return true;

        /*
         * Assume that all vmas with a .name op should always be dumped.
         * If this changes, a new vm_ops field can easily be added.
         */
        if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
                return true;

        /*
         * arch_vma_name() returns non-NULL for special architecture mappings,
         * such as vDSO sections.
         */
        if (arch_vma_name(vma))
                return true;

        return false;
}

#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1

/*
 * Decide how much of @vma's contents should be included in a core dump.
 */
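/*
 * The MMF_DUMP_* filter bits tested below live in mm->flags and are
 * adjustable per process through /proc/<pid>/coredump_filter, so the
 * policy applied here is ultimately under userspace control.
 */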
static unsigned long vma_dump_size(struct vm_area_struct *vma,
                                   unsigned long mm_flags)
{
#define FILTER(type)    (mm_flags & (1UL << MMF_DUMP_##type))

        /* always dump the vdso and vsyscall sections */
        if (always_dump_vma(vma))
                goto whole;

        if (vma->vm_flags & VM_DONTDUMP)
                return 0;

        /* support for DAX */
        if (vma_is_dax(vma)) {
                if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
                        goto whole;
                if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
                        goto whole;
                return 0;
        }

        /* Hugetlb memory check */
        if (is_vm_hugetlb_page(vma)) {
                if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
                        goto whole;
                if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
                        goto whole;
                return 0;
        }

        /* Do not dump I/O mapped devices or special mappings */
        if (vma->vm_flags & VM_IO)
                return 0;

        /* By default, dump shared memory if mapped from an anonymous file. */
        if (vma->vm_flags & VM_SHARED) {
                if (file_inode(vma->vm_file)->i_nlink == 0 ?
                    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
                        goto whole;
                return 0;
        }

        /* Dump segments that have been written to. */
        if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE))
                goto whole;
        if (vma->vm_file == NULL)
                return 0;

        if (FILTER(MAPPED_PRIVATE))
                goto whole;

        /*
         * If this is the beginning of an executable file mapping,
         * dump the first page to aid in determining what was mapped here.
         */
        if (FILTER(ELF_HEADERS) &&
            vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
                if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
                        return PAGE_SIZE;

                /*
                 * ELF libraries aren't always executable.
                 * We'll want to check whether the mapping starts with the ELF
                 * magic, but not now - we're holding the mmap lock,
                 * so copy_from_user() doesn't work here.
                 * Use a placeholder instead, and fix it up later in
                 * dump_vma_snapshot().
                 */
                return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
        }

#undef FILTER

        return 0;

whole:
        return vma->vm_end - vma->vm_start;
}

static struct vm_area_struct *first_vma(struct task_struct *tsk,
                                        struct vm_area_struct *gate_vma)
{
        struct vm_area_struct *ret = tsk->mm->mmap;

        if (ret)
                return ret;
        return gate_vma;
}

/*
 * Helper function for iterating across a vma list. It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
                                       struct vm_area_struct *gate_vma)
{
        struct vm_area_struct *ret;

        ret = this_vma->vm_next;
        if (ret)
                return ret;
        if (this_vma == gate_vma)
                return NULL;
        return gate_vma;
}
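
/*
 * Together these yield the traversal used by dump_vma_snapshot() below:
 *
 *      for (vma = first_vma(current, gate_vma); vma != NULL;
 *           vma = next_vma(vma, gate_vma))
 *              ...;
 *
 * i.e. the ordinary ->vm_next chain followed by gate_vma exactly once.
 */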
/*
 * Under the mmap_lock, take a snapshot of relevant information about the
 * task's VMAs.
 */
int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
                      struct core_vma_metadata **vma_meta,
                      size_t *vma_data_size_ptr)
{
        struct vm_area_struct *vma, *gate_vma;
        struct mm_struct *mm = current->mm;
        int i;
        size_t vma_data_size = 0;

        /*
         * Once the stack expansion code is fixed to not change VMA bounds
         * under mmap_lock in read mode, this can be changed to take the
         * mmap_lock in read mode.
         */
        if (mmap_write_lock_killable(mm))
                return -EINTR;

        gate_vma = get_gate_vma(mm);
        *vma_count = mm->map_count + (gate_vma ? 1 : 0);

        *vma_meta = kvmalloc_array(*vma_count, sizeof(**vma_meta), GFP_KERNEL);
        if (!*vma_meta) {
                mmap_write_unlock(mm);
                return -ENOMEM;
        }

        for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
             vma = next_vma(vma, gate_vma), i++) {
                struct core_vma_metadata *m = (*vma_meta) + i;

                m->start = vma->vm_start;
                m->end = vma->vm_end;
                m->flags = vma->vm_flags;
                m->dump_size = vma_dump_size(vma, cprm->mm_flags);
        }

        mmap_write_unlock(mm);

        if (WARN_ON(i != *vma_count)) {
                kvfree(*vma_meta);
                return -EFAULT;
        }

        for (i = 0; i < *vma_count; i++) {
                struct core_vma_metadata *m = (*vma_meta) + i;

                if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) {
                        char elfmag[SELFMAG];

                        if (copy_from_user(elfmag, (void __user *)m->start,
                                           SELFMAG) ||
                            memcmp(elfmag, ELFMAG, SELFMAG) != 0) {
                                m->dump_size = 0;
                        } else {
                                m->dump_size = PAGE_SIZE;
                        }
                }

                vma_data_size += m->dump_size;
        }

        *vma_data_size_ptr = vma_data_size;
        return 0;
}
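
/*
 * The ELF-magic fixup above deliberately runs after mmap_write_unlock():
 * copy_from_user() may fault and, as noted in vma_dump_size(), cannot be
 * used while the mmap lock is held.
 */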