exec.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/kernel_read_file.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(task_rename);

static int bprm_creds_from_file(struct linux_binprm *bprm);

int suid_dumpable = 0;

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
        BUG_ON(!fmt);
        if (WARN_ON(!fmt->load_binary))
                return;
        write_lock(&binfmt_lock);
        insert ? list_add(&fmt->lh, &formats) :
                 list_add_tail(&fmt->lh, &formats);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
        write_lock(&binfmt_lock);
        list_del(&fmt->lh);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
        module_put(fmt->module);
}

bool path_noexec(const struct path *path)
{
        return (path->mnt->mnt_flags & MNT_NOEXEC) ||
               (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}

#ifdef CONFIG_USELIB
/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
        struct linux_binfmt *fmt;
        struct file *file;
        struct filename *tmp = getname(library);
        int error = PTR_ERR(tmp);
        static const struct open_flags uselib_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_READ | MAY_EXEC,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
        };

        if (IS_ERR(tmp))
                goto out;

        file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
        putname(tmp);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto out;

        /*
         * may_open() has already checked for this, so it should be
         * impossible to trip now. But we need to be extra cautious
         * and check again at the very end too.
         */
        error = -EACCES;
        if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
                         path_noexec(&file->f_path)))
                goto exit;

        fsnotify_open(file);

        error = -ENOEXEC;

        read_lock(&binfmt_lock);
        list_for_each_entry(fmt, &formats, lh) {
                if (!fmt->load_shlib)
                        continue;
                if (!try_module_get(fmt->module))
                        continue;
                read_unlock(&binfmt_lock);
                error = fmt->load_shlib(file);
                read_lock(&binfmt_lock);
                put_binfmt(fmt);
                if (error != -ENOEXEC)
                        break;
        }
        read_unlock(&binfmt_lock);
exit:
        fput(file);
out:
        return error;
}
#endif /* #ifdef CONFIG_USELIB */

#ifdef CONFIG_MMU
/*
 * The nascent bprm->mm is not visible until exec_mmap() but it can
 * use a lot of memory, account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
        struct mm_struct *mm = current->mm;
        long diff = (long)(pages - bprm->vma_pages);

        if (!mm || !diff)
                return;

        bprm->vma_pages = pages;
        add_mm_counter(mm, MM_ANONPAGES, diff);
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;
        int ret;
        unsigned int gup_flags = FOLL_FORCE;

#ifdef CONFIG_STACK_GROWSUP
        if (write) {
                ret = expand_downwards(bprm->vma, pos);
                if (ret < 0)
                        return NULL;
        }
#endif

        if (write)
                gup_flags |= FOLL_WRITE;

        /*
         * We are doing an exec().  'current' is the process
         * doing the exec and bprm->mm is the new process's mm.
         */
        ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
                        &page, NULL, NULL);
        if (ret <= 0)
                return NULL;

        if (write)
                acct_arg_size(bprm, vma_pages(bprm->vma));

        return page;
}

static void put_arg_page(struct page *page)
{
        put_page(page);
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
        flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = bprm->mm;

        bprm->vma = vma = vm_area_alloc(mm);
        if (!vma)
                return -ENOMEM;
        vma_set_anonymous(vma);

        if (mmap_write_lock_killable(mm)) {
                err = -EINTR;
                goto err_free;
        }

        /*
         * Place the stack at the largest stack address the architecture
         * supports. Later, we'll move this to an appropriate place. We don't
         * use STACK_TOP because that can depend on attributes which aren't
         * configured yet.
         */
        BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
        vma->vm_end = STACK_TOP_MAX;
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

        err = insert_vm_struct(mm, vma);
        if (err)
                goto err;

        mm->stack_vm = mm->total_vm = 1;
        mmap_write_unlock(mm);
        bprm->p = vma->vm_end - sizeof(void *);
        return 0;
err:
        mmap_write_unlock(mm);
err_free:
        bprm->vma = NULL;
        vm_area_free(vma);
        return err;
}
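
/*
 * For illustration: after __bprm_mm_init() the nascent stack is a single
 * page at the very top of the address space:
 *
 *	vma->vm_end   == STACK_TOP_MAX
 *	vma->vm_start == STACK_TOP_MAX - PAGE_SIZE
 *	bprm->p       == STACK_TOP_MAX - sizeof(void *)
 *
 * copy_strings() then writes argv/envp strings downward from bprm->p,
 * growing the vma one page at a time through get_arg_page(), and
 * setup_arg_pages() later shifts the whole thing to its final address.
 */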
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= MAX_ARG_STRLEN;
}

#else

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;

        page = bprm->page[pos / PAGE_SIZE];
        if (!page && write) {
                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
                if (!page)
                        return NULL;
                bprm->page[pos / PAGE_SIZE] = page;
        }

        return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
        if (bprm->page[i]) {
                __free_page(bprm->page[i]);
                bprm->page[i] = NULL;
        }
}

static void free_arg_pages(struct linux_binprm *bprm)
{
        int i;

        for (i = 0; i < MAX_ARG_PAGES; i++)
                free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
        return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
static int bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct mm_struct *mm = NULL;

        bprm->mm = mm = mm_alloc();
        err = -ENOMEM;
        if (!mm)
                goto err;

        /* Save current stack limit for all calculations made during exec. */
        task_lock(current->group_leader);
        bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
        task_unlock(current->group_leader);

        err = __bprm_mm_init(bprm);
        if (err)
                goto err;

        return 0;

err:
        if (mm) {
                bprm->mm = NULL;
                mmdrop(mm);
        }

        return err;
}

struct user_arg_ptr {
#ifdef CONFIG_COMPAT
        bool is_compat;
#endif
        union {
                const char __user *const __user *native;
#ifdef CONFIG_COMPAT
                const compat_uptr_t __user *compat;
#endif
        } ptr;
};

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
        const char __user *native;

#ifdef CONFIG_COMPAT
        if (unlikely(argv.is_compat)) {
                compat_uptr_t compat;

                if (get_user(compat, argv.ptr.compat + nr))
                        return ERR_PTR(-EFAULT);

                return compat_ptr(compat);
        }
#endif

        if (get_user(native, argv.ptr.native + nr))
                return ERR_PTR(-EFAULT);

        return native;
}
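
/*
 * Example: when a 32-bit process execs on a 64-bit kernel (the compat
 * execve path), argv is an array of 32-bit compat_uptr_t values, so each
 * entry is read at its narrower width and widened with compat_ptr();
 * native callers just read a full-width user pointer.
 */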
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
        int i = 0;

        if (argv.ptr.native != NULL) {
                for (;;) {
                        const char __user *p = get_user_arg_ptr(argv, i);

                        if (!p)
                                break;

                        if (IS_ERR(p))
                                return -EFAULT;

                        if (i >= max)
                                return -E2BIG;
                        ++i;

                        if (fatal_signal_pending(current))
                                return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
        return i;
}

static int count_strings_kernel(const char *const *argv)
{
        int i;

        if (!argv)
                return 0;

        for (i = 0; argv[i]; ++i) {
                if (i >= MAX_ARG_STRINGS)
                        return -E2BIG;
                if (fatal_signal_pending(current))
                        return -ERESTARTNOHAND;
                cond_resched();
        }
        return i;
}

static int bprm_stack_limits(struct linux_binprm *bprm)
{
        unsigned long limit, ptr_size;

        /*
         * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
         * (whichever is smaller) for the argv+env strings.
         * This ensures that:
         *  - the remaining binfmt code will not run out of stack space,
         *  - the program will have a reasonable amount of stack left
         *    to work from.
         */
        limit = _STK_LIM / 4 * 3;
        limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
        /*
         * We've historically supported up to 32 pages (ARG_MAX)
         * of argument strings even with small stacks
         */
        limit = max_t(unsigned long, limit, ARG_MAX);
        /*
         * We must account for the size of all the argv and envp pointers to
         * the argv and envp strings, since they will also take up space in
         * the stack. They aren't stored until much later when we can't
         * signal to the parent that the child has run out of stack space.
         * Instead, calculate it here so it's possible to fail gracefully.
         *
         * In the case of argc = 0, make sure there is space for adding an
         * empty string (which will bump argc to 1), to ensure confused
         * userspace programs don't start processing from argv[1], thinking
         * argc can never be 0, to keep them from walking envp by accident.
         * See do_execveat_common().
         */
        ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *);
        if (limit <= ptr_size)
                return -E2BIG;
        limit -= ptr_size;

        bprm->argmin = bprm->p - limit;
        return 0;
}
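
/*
 * Worked example (assuming 64-bit pointers and RLIMIT_STACK = 8 MiB):
 * limit = min(_STK_LIM / 4 * 3, 8 MiB / 4) = min(6 MiB, 2 MiB) = 2 MiB,
 * well above the historical ARG_MAX floor of 128 KiB. With argc = 1000
 * and envc = 50, ptr_size = 1050 * 8 = 8400 bytes, leaving about
 * 2 MiB - 8400 bytes for the strings themselves; bprm->argmin marks how
 * far down bprm->p may legally move while they are copied.
 */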
/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack. The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
                        struct linux_binprm *bprm)
{
        struct page *kmapped_page = NULL;
        char *kaddr = NULL;
        unsigned long kpos = 0;
        int ret;

        while (argc-- > 0) {
                const char __user *str;
                int len;
                unsigned long pos;

                ret = -EFAULT;
                str = get_user_arg_ptr(argv, argc);
                if (IS_ERR(str))
                        goto out;

                len = strnlen_user(str, MAX_ARG_STRLEN);
                if (!len)
                        goto out;

                ret = -E2BIG;
                if (!valid_arg_len(bprm, len))
                        goto out;

                /* We're going to work our way backwards. */
                pos = bprm->p;
                str += len;
                bprm->p -= len;
#ifdef CONFIG_MMU
                if (bprm->p < bprm->argmin)
                        goto out;
#endif

                while (len > 0) {
                        int offset, bytes_to_copy;

                        if (fatal_signal_pending(current)) {
                                ret = -ERESTARTNOHAND;
                                goto out;
                        }
                        cond_resched();

                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;

                        bytes_to_copy = offset;
                        if (bytes_to_copy > len)
                                bytes_to_copy = len;

                        offset -= bytes_to_copy;
                        pos -= bytes_to_copy;
                        str -= bytes_to_copy;
                        len -= bytes_to_copy;

                        if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
                                struct page *page;

                                page = get_arg_page(bprm, pos, 1);
                                if (!page) {
                                        ret = -E2BIG;
                                        goto out;
                                }

                                if (kmapped_page) {
                                        flush_kernel_dcache_page(kmapped_page);
                                        kunmap(kmapped_page);
                                        put_arg_page(kmapped_page);
                                }
                                kmapped_page = page;
                                kaddr = kmap(kmapped_page);
                                kpos = pos & PAGE_MASK;
                                flush_arg_page(bprm, kpos, kmapped_page);
                        }
                        if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
                                ret = -EFAULT;
                                goto out;
                        }
                }
        }
        ret = 0;
out:
        if (kmapped_page) {
                flush_kernel_dcache_page(kmapped_page);
                kunmap(kmapped_page);
                put_arg_page(kmapped_page);
        }
        return ret;
}

/*
 * Copy an argument/environment string from the kernel to the process's stack.
 */
int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
{
        int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
        unsigned long pos = bprm->p;

        if (len == 0)
                return -EFAULT;
        if (!valid_arg_len(bprm, len))
                return -E2BIG;

        /* We're going to work our way backwards. */
        arg += len;
        bprm->p -= len;
        if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
                return -E2BIG;

        while (len > 0) {
                unsigned int bytes_to_copy = min_t(unsigned int, len,
                                min_not_zero(offset_in_page(pos), PAGE_SIZE));
                struct page *page;
                char *kaddr;

                pos -= bytes_to_copy;
                arg -= bytes_to_copy;
                len -= bytes_to_copy;

                page = get_arg_page(bprm, pos, 1);
                if (!page)
                        return -E2BIG;
                kaddr = kmap_atomic(page);
                flush_arg_page(bprm, pos & PAGE_MASK, page);
                memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
                flush_kernel_dcache_page(page);
                kunmap_atomic(kaddr);
                put_arg_page(page);
        }

        return 0;
}
EXPORT_SYMBOL(copy_string_kernel);
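
/*
 * Usage sketch: while copy_strings() handles user-supplied argv/envp,
 * kernel-generated strings are pushed directly with copy_string_kernel();
 * e.g. do_execveat_common() pushes bprm->filename this way, and
 * binfmt_script pushes the interpreter path parsed from the #! line.
 */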
static int copy_strings_kernel(int argc, const char *const *argv,
                               struct linux_binprm *bprm)
{
        while (argc-- > 0) {
                int ret = copy_string_kernel(argv[argc], bprm);
                if (ret < 0)
                        return ret;
                if (fatal_signal_pending(current))
                        return -ERESTARTNOHAND;
                cond_resched();
        }
        return 0;
}

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long old_start = vma->vm_start;
        unsigned long old_end = vma->vm_end;
        unsigned long length = old_end - old_start;
        unsigned long new_start = old_start - shift;
        unsigned long new_end = old_end - shift;
        struct mmu_gather tlb;

        BUG_ON(new_start > new_end);

        /*
         * ensure there are no vmas between where we want to go
         * and where we are
         */
        if (vma != find_vma(mm, new_start))
                return -EFAULT;

        /*
         * cover the whole range: [new_start, old_end)
         */
        if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
                return -ENOMEM;

        /*
         * move the page tables downwards, on failure we rely on
         * process cleanup to remove whatever mess we made.
         */
        if (length != move_page_tables(vma, old_start,
                                       vma, new_start, length, false))
                return -ENOMEM;

        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, old_start, old_end);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap clear from new_end.
                 */
                free_pgd_range(&tlb, new_end, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to not touch
                 * the address space in [new_end, old_start) some architectures
                 * have constraints on va-space that make this illegal (IA64) -
                 * for the others it's just a little faster.
                 */
                free_pgd_range(&tlb, old_start, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        }
        tlb_finish_mmu(&tlb, old_start, old_end);

        /*
         * Shrink the vma to just the new range. Always succeeds.
         */
        vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

        return 0;
}
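
/*
 * Example of the overlap case: shifting a 3-page stack down by one page
 * gives new_start = old_start - PAGE_SIZE and new_end > old_start, so the
 * old and new ranges overlap. The page tables are moved down in place and
 * only the now-unused tail [new_end, old_end) is freed; in the
 * non-overlapping case the whole old range is cleared from old_start.
 */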
/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
                    unsigned long stack_top,
                    int executable_stack)
{
        unsigned long ret;
        unsigned long stack_shift;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = bprm->vma;
        struct vm_area_struct *prev = NULL;
        unsigned long vm_flags;
        unsigned long stack_base;
        unsigned long stack_size;
        unsigned long stack_expand;
        unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
        /* Limit stack size */
        stack_base = bprm->rlim_stack.rlim_max;

        if (stack_base > STACK_SIZE_MAX)
                stack_base = STACK_SIZE_MAX;

        /* Add space for stack randomization. */
        stack_base += (STACK_RND_MASK << PAGE_SHIFT);

        /* Make sure we didn't let the argument array grow too large. */
        if (vma->vm_end - vma->vm_start > stack_base)
                return -ENOMEM;

        stack_base = PAGE_ALIGN(stack_top - stack_base);

        stack_shift = vma->vm_start - stack_base;
        mm->arg_start = bprm->p - stack_shift;
        bprm->p = vma->vm_end - stack_shift;
#else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);

        if (unlikely(stack_top < mmap_min_addr) ||
            unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
                return -ENOMEM;

        stack_shift = vma->vm_end - stack_top;

        bprm->p -= stack_shift;
        mm->arg_start = bprm->p;
#endif

        if (bprm->loader)
                bprm->loader -= stack_shift;
        bprm->exec -= stack_shift;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        vm_flags = VM_STACK_FLAGS;

        /*
         * Adjust stack execute permissions; explicitly enable for
         * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
         * (arch default) otherwise.
         */
        if (unlikely(executable_stack == EXSTACK_ENABLE_X))
                vm_flags |= VM_EXEC;
        else if (executable_stack == EXSTACK_DISABLE_X)
                vm_flags &= ~VM_EXEC;
        vm_flags |= mm->def_flags;
        vm_flags |= VM_STACK_INCOMPLETE_SETUP;

        ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
                        vm_flags);
        if (ret)
                goto out_unlock;
        BUG_ON(prev != vma);

        if (unlikely(vm_flags & VM_EXEC)) {
                pr_warn_once("process '%pD4' started with executable stack\n",
                             bprm->file);
        }

        /* Move stack pages down in memory. */
        if (stack_shift) {
                ret = shift_arg_pages(vma, stack_shift);
                if (ret)
                        goto out_unlock;
        }

        /* mprotect_fixup is overkill to remove the temporary stack flags */
        vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

        stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
        stack_size = vma->vm_end - vma->vm_start;
        /*
         * Align this down to a page boundary as expand_stack
         * will align it up.
         */
        rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_start + rlim_stack;
        else
                stack_base = vma->vm_end + stack_expand;
#else
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_end - rlim_stack;
        else
                stack_base = vma->vm_start - stack_expand;
#endif
        current->mm->start_stack = bprm->p;
        ret = expand_stack(vma, stack_base);
        if (ret)
                ret = -EFAULT;

out_unlock:
        mmap_write_unlock(mm);
        return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#else

/*
 * Transfer the program arguments and environment from the holding pages
 * onto the stack. The provided stack pointer is adjusted accordingly.
 */
int transfer_args_to_stack(struct linux_binprm *bprm,
                           unsigned long *sp_location)
{
        unsigned long index, stop, sp;
        int ret = 0;

        stop = bprm->p >> PAGE_SHIFT;
        sp = *sp_location;

        for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
                unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
                char *src = kmap(bprm->page[index]) + offset;
                sp -= PAGE_SIZE - offset;
                if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
                        ret = -EFAULT;
                kunmap(bprm->page[index]);
                if (ret)
                        goto out;
        }

        *sp_location = sp;

out:
        return ret;
}
EXPORT_SYMBOL(transfer_args_to_stack);

#endif /* CONFIG_MMU */

static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
        struct file *file;
        int err;
        struct open_flags open_exec_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_EXEC,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
        };

        if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
                return ERR_PTR(-EINVAL);
        if (flags & AT_SYMLINK_NOFOLLOW)
                open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
        if (flags & AT_EMPTY_PATH)
                open_exec_flags.lookup_flags |= LOOKUP_EMPTY;

        file = do_filp_open(fd, name, &open_exec_flags);
        if (IS_ERR(file))
                goto out;

        /*
         * may_open() has already checked for this, so it should be
         * impossible to trip now. But we need to be extra cautious
         * and check again at the very end too.
         */
        err = -EACCES;
        if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
                         path_noexec(&file->f_path)))
                goto exit;

        err = deny_write_access(file);
        if (err)
                goto exit;

        if (name->name[0] != '\0')
                fsnotify_open(file);

out:
        return file;

exit:
        fput(file);
        return ERR_PTR(err);
}

struct file *open_exec(const char *name)
{
        struct filename *filename = getname_kernel(name);
        struct file *f = ERR_CAST(filename);

        if (!IS_ERR(filename)) {
                f = do_open_execat(AT_FDCWD, filename, 0);
                putname(filename);
        }
        return f;
}
EXPORT_SYMBOL(open_exec);

#if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \
    defined(CONFIG_BINFMT_ELF_FDPIC)
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
        ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
        if (res > 0)
                flush_icache_user_range(addr, addr + len);
        return res;
}
EXPORT_SYMBOL(read_code);
#endif

/*
 * Maps the mm_struct mm into the current task struct.
 * On success, this function returns with exec_update_lock
 * held for writing.
 */
static int exec_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk;
        struct mm_struct *old_mm, *active_mm;
        int ret;

        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
        old_mm = current->mm;
        exec_mm_release(tsk, old_mm);
        if (old_mm)
                sync_mm_rss(old_mm);

        ret = down_write_killable(&tsk->signal->exec_update_lock);
        if (ret)
                return ret;

        if (old_mm) {
                /*
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
                 * through with the exec. We must hold mmap_lock around
                 * checking core_state and changing tsk->mm.
                 */
                mmap_read_lock(old_mm);
                if (unlikely(old_mm->core_state)) {
                        mmap_read_unlock(old_mm);
                        up_write(&tsk->signal->exec_update_lock);
                        return -EINTR;
                }
        }

        task_lock(tsk);
        membarrier_exec_mmap(mm);

        local_irq_disable();
        active_mm = tsk->active_mm;
        tsk->active_mm = mm;
        tsk->mm = mm;
        /*
         * This prevents preemption while active_mm is being loaded and
         * it and mm are being updated, which could cause problems for
         * lazy tlb mm refcounting when these are updated by context
         * switches. Not all architectures can handle irqs off over
         * activate_mm yet.
         */
        if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
                local_irq_enable();
        activate_mm(active_mm, mm);
        if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
                local_irq_enable();
        tsk->mm->vmacache_seqnum = 0;
        vmacache_flush(tsk);
        task_unlock(tsk);
        if (old_mm) {
                mmap_read_unlock(old_mm);
                BUG_ON(active_mm != old_mm);
                setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
                mm_update_next_owner(old_mm);
                mmput(old_mm);
                return 0;
        }
        mmdrop(active_mm);
        return 0;
}
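
/*
 * Note on the two exits above: when the exec'ing task had a real user mm,
 * its user reference is dropped with mmput(); a kernel thread calling
 * exec (old_mm == NULL) instead held active_mm only as a lazy-TLB
 * reference, which is what mmdrop() releases.
 */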
static int de_thread(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *oldsighand = tsk->sighand;
        spinlock_t *lock = &oldsighand->siglock;

        if (thread_group_empty(tsk))
                goto no_thread_group;

        /*
         * Kill all other threads in the thread group.
         */
        spin_lock_irq(lock);
        if (signal_group_exit(sig)) {
                /*
                 * Another group action in progress, just
                 * return so that the signal is processed.
                 */
                spin_unlock_irq(lock);
                return -EAGAIN;
        }

        sig->group_exit_task = tsk;
        sig->notify_count = zap_other_threads(tsk);
        if (!thread_group_leader(tsk))
                sig->notify_count--;

        while (sig->notify_count) {
                __set_current_state(TASK_KILLABLE);
                spin_unlock_irq(lock);
                schedule();
                if (__fatal_signal_pending(tsk))
                        goto killed;
                spin_lock_irq(lock);
        }
        spin_unlock_irq(lock);

        /*
         * At this point all other threads have exited, all we have to
         * do is to wait for the thread group leader to become inactive,
         * and to assume its PID:
         */
        if (!thread_group_leader(tsk)) {
                struct task_struct *leader = tsk->group_leader;

                for (;;) {
                        cgroup_threadgroup_change_begin(tsk);
                        write_lock_irq(&tasklist_lock);
                        /*
                         * Do this under tasklist_lock to ensure that
                         * exit_notify() can't miss ->group_exit_task
                         */
                        sig->notify_count = -1;
                        if (likely(leader->exit_state))
                                break;
                        __set_current_state(TASK_KILLABLE);
                        write_unlock_irq(&tasklist_lock);
                        cgroup_threadgroup_change_end(tsk);
                        schedule();
                        if (__fatal_signal_pending(tsk))
                                goto killed;
                }

                /*
                 * The only record we have of the real-time age of a
                 * process, regardless of execs it's done, is start_time.
                 * All the past CPU time is accumulated in signal_struct
                 * from sister threads now dead. But in this non-leader
                 * exec, nothing survives from the original leader thread,
                 * whose birth marks the true age of this process now.
                 * When we take on its identity by switching to its PID, we
                 * also take its birthdate (always earlier than our own).
                 */
                tsk->start_time = leader->start_time;
                tsk->start_boottime = leader->start_boottime;

                BUG_ON(!same_thread_group(leader, tsk));
                /*
                 * An exec() starts a new thread group with the
                 * TGID of the previous thread group. Rehash the
                 * two threads with a switched PID, and release
                 * the former thread group leader:
                 */

                /* Become a process group leader with the old leader's pid.
                 * The old leader becomes a thread of this thread group.
                 */
                exchange_tids(tsk, leader);
                transfer_pid(leader, tsk, PIDTYPE_TGID);
                transfer_pid(leader, tsk, PIDTYPE_PGID);
                transfer_pid(leader, tsk, PIDTYPE_SID);

                list_replace_rcu(&leader->tasks, &tsk->tasks);
                list_replace_init(&leader->sibling, &tsk->sibling);

                tsk->group_leader = tsk;
                leader->group_leader = tsk;

                tsk->exit_signal = SIGCHLD;
                leader->exit_signal = -1;

                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
                leader->exit_state = EXIT_DEAD;

                /*
                 * We are going to release_task()->ptrace_unlink() silently,
                 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
                 * the tracer won't block again waiting for this thread.
                 */
                if (unlikely(leader->ptrace))
                        __wake_up_parent(leader, leader->parent);
                write_unlock_irq(&tasklist_lock);
                cgroup_threadgroup_change_end(tsk);

                release_task(leader);
        }

        sig->group_exit_task = NULL;
        sig->notify_count = 0;

no_thread_group:
        /* we have changed execution domain */
        tsk->exit_signal = SIGCHLD;

        BUG_ON(!thread_group_leader(tsk));
        return 0;

killed:
        /* protects against exit_notify() and __exit_signal() */
        read_lock(&tasklist_lock);
        sig->group_exit_task = NULL;
        sig->notify_count = 0;
        read_unlock(&tasklist_lock);
        return -EAGAIN;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int unshare_sighand(struct task_struct *me)
{
        struct sighand_struct *oldsighand = me->sighand;

        if (refcount_read(&oldsighand->count) != 1) {
                struct sighand_struct *newsighand;
                /*
                 * This ->sighand is shared with the CLONE_SIGHAND
                 * but not CLONE_THREAD task, switch to the new one.
                 */
                newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
                if (!newsighand)
                        return -ENOMEM;

                refcount_set(&newsighand->count, 1);
                memcpy(newsighand->action, oldsighand->action,
                       sizeof(newsighand->action));

                write_lock_irq(&tasklist_lock);
                spin_lock(&oldsighand->siglock);
                rcu_assign_pointer(me->sighand, newsighand);
                spin_unlock(&oldsighand->siglock);
                write_unlock_irq(&tasklist_lock);

                __cleanup_sighand(oldsighand);
        }
        return 0;
}

char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
        task_lock(tsk);
        strncpy(buf, tsk->comm, buf_size);
        task_unlock(tsk);
        return buf;
}
EXPORT_SYMBOL_GPL(__get_task_comm);

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */
void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
        task_lock(tsk);
        trace_task_rename(tsk, buf);
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
        perf_event_comm(tsk, exec);
}

/*
 * Calling this is the point of no return. None of the failures will be
 * seen by userspace since either the process is already taking a fatal
 * signal (via de_thread() or coredump), or will have SEGV raised
 * (after exec_mmap()) by search_binary_handler (see below).
 */
int begin_new_exec(struct linux_binprm * bprm)
{
        struct task_struct *me = current;
        int retval;

        /* Once we are committed compute the creds */
        retval = bprm_creds_from_file(bprm);
        if (retval)
                return retval;

        /*
         * Ensure all future errors are fatal.
         */
        bprm->point_of_no_return = true;

        /*
         * Make this the only thread in the thread group.
         */
        retval = de_thread(me);
        if (retval)
                goto out;

        /*
         * Must be called _before_ exec_mmap() as bprm->mm is
         * not visible until then. This also enables the update
         * to be lockless.
         */
        set_mm_exe_file(bprm->mm, bprm->file);

        /* If the binary is not readable then enforce mm->dumpable=0 */
        would_dump(bprm, bprm->file);
        if (bprm->have_execfd)
                would_dump(bprm, bprm->executable);

        /*
         * Release all of the old mmap stuff
         */
        acct_arg_size(bprm, 0);
        retval = exec_mmap(bprm->mm);
        if (retval)
                goto out;

        bprm->mm = NULL;

#ifdef CONFIG_POSIX_TIMERS
        exit_itimers(me->signal);
        flush_itimer_signals();
#endif

        /*
         * Make the signal table private.
         */
        retval = unshare_sighand(me);
        if (retval)
                goto out_unlock;

        /*
         * Ensure that the uaccess routines can actually operate on userspace
         * pointers:
         */
        force_uaccess_begin();

        me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
                                        PF_NOFREEZE | PF_NO_SETAFFINITY);
        flush_thread();
        me->personality &= ~bprm->per_clear;

        /*
         * We have to apply CLOEXEC before we change whether the process is
         * dumpable (in setup_new_exec) to avoid a race with a process in userspace
         * trying to access the should-be-closed file descriptors of a process
         * undergoing exec(2).
         */
        do_close_on_exec(me->files);

        if (bprm->secureexec) {
                /* Make sure parent cannot signal privileged process. */
                me->pdeath_signal = 0;

                /*
                 * For secureexec, reset the stack limit to sane default to
                 * avoid bad behavior from the prior rlimits. This has to
                 * happen before arch_pick_mmap_layout(), which examines
                 * RLIMIT_STACK, but after the point of no return to avoid
                 * needing to clean up the change on failure.
                 */
                if (bprm->rlim_stack.rlim_cur > _STK_LIM)
                        bprm->rlim_stack.rlim_cur = _STK_LIM;
        }

        me->sas_ss_sp = me->sas_ss_size = 0;

        /*
         * Figure out dumpability. Note that checking only 'current' here is
         * wrong, but userspace depends on it. This should be testing
         * bprm->secureexec instead.
         */
        if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
            !(uid_eq(current_euid(), current_uid()) &&
              gid_eq(current_egid(), current_gid())))
                set_dumpable(current->mm, suid_dumpable);
        else
                set_dumpable(current->mm, SUID_DUMP_USER);

        perf_event_exec();
        __set_task_comm(me, kbasename(bprm->filename), true);

        /* An exec changes our domain. We are no longer part of the thread
           group */
        WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
        flush_signal_handlers(me, 0);

        /*
         * install the new credentials for this executable
         */
        security_bprm_committing_creds(bprm);

        commit_creds(bprm->cred);
        bprm->cred = NULL;

        /*
         * Disable monitoring for regular users
         * when executing setuid binaries. Must
         * wait until new credentials are committed
         * by commit_creds() above
         */
        if (get_dumpable(me->mm) != SUID_DUMP_USER)
                perf_event_exit_task(me);
        /*
         * cred_guard_mutex must be held at least to this point to prevent
         * ptrace_attach() from altering our determination of the task's
         * credentials; any time after this it may be unlocked.
         */
        security_bprm_committed_creds(bprm);

        /* Pass the opened binary to the interpreter. */
        if (bprm->have_execfd) {
                retval = get_unused_fd_flags(0);
                if (retval < 0)
                        goto out_unlock;
                fd_install(retval, bprm->executable);
                bprm->executable = NULL;
                bprm->execfd = retval;
        }
        return 0;

out_unlock:
        up_write(&me->signal->exec_update_lock);
out:
        return retval;
}
EXPORT_SYMBOL(begin_new_exec);

void would_dump(struct linux_binprm *bprm, struct file *file)
{
        struct inode *inode = file_inode(file);
        if (inode_permission(inode, MAY_READ) < 0) {
                struct user_namespace *old, *user_ns;
                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

                /* Ensure mm->user_ns contains the executable */
                user_ns = old = bprm->mm->user_ns;
                while ((user_ns != &init_user_ns) &&
                       !privileged_wrt_inode_uidgid(user_ns, inode))
                        user_ns = user_ns->parent;

                if (old != user_ns) {
                        bprm->mm->user_ns = get_user_ns(user_ns);
                        put_user_ns(old);
                }
        }
}
EXPORT_SYMBOL(would_dump);

void setup_new_exec(struct linux_binprm * bprm)
{
        /* Setup things that can depend upon the personality */
        struct task_struct *me = current;

        arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);

        arch_setup_new_exec();

        /* Set the new mm task size. We have to do that late because it may
         * depend on TIF_32BIT which is only updated in flush_thread() on
         * some architectures like powerpc
         */
        me->mm->task_size = TASK_SIZE;
        up_write(&me->signal->exec_update_lock);
        mutex_unlock(&me->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(setup_new_exec);

/* Runs immediately before start_thread() takes over. */
void finalize_exec(struct linux_binprm *bprm)
{
        /* Store any stack rlimit changes before starting thread. */
        task_lock(current->group_leader);
        current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
        task_unlock(current->group_leader);
}
EXPORT_SYMBOL(finalize_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * setup_new_exec() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred
 * and unlock.
 */
static int prepare_bprm_creds(struct linux_binprm *bprm)
{
        if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
                return -ERESTARTNOINTR;

        bprm->cred = prepare_exec_creds();
        if (likely(bprm->cred))
                return 0;

        mutex_unlock(&current->signal->cred_guard_mutex);
        return -ENOMEM;
}

static void free_bprm(struct linux_binprm *bprm)
{
        if (bprm->mm) {
                acct_arg_size(bprm, 0);
                mmput(bprm->mm);
        }
        free_arg_pages(bprm);
        if (bprm->cred) {
                mutex_unlock(&current->signal->cred_guard_mutex);
                abort_creds(bprm->cred);
        }
        if (bprm->file) {
                allow_write_access(bprm->file);
                fput(bprm->file);
        }
        if (bprm->executable)
                fput(bprm->executable);
        /* If a binfmt changed the interp, free it. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        kfree(bprm->fdpath);
        kfree(bprm);
}

static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
{
        struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
        int retval = -ENOMEM;
        if (!bprm)
                goto out;

        if (fd == AT_FDCWD || filename->name[0] == '/') {
                bprm->filename = filename->name;
        } else {
                if (filename->name[0] == '\0')
                        bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
                else
                        bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
                                                  fd, filename->name);
                if (!bprm->fdpath)
                        goto out_free;

                bprm->filename = bprm->fdpath;
        }
        bprm->interp = bprm->filename;

        retval = bprm_mm_init(bprm);
        if (retval)
                goto out_free;
        return bprm;

out_free:
        free_bprm(bprm);
out:
        return ERR_PTR(retval);
}

int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
{
        /* If a binfmt changed the interp, free it first. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        bprm->interp = kstrdup(interp, GFP_KERNEL);
        if (!bprm->interp)
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(bprm_change_interp);
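
/*
 * Usage sketch: binfmt_script and binfmt_misc call bprm_change_interp()
 * after parsing the #! line (or matching a registered format), so that
 * bprm->interp names the interpreter that will actually run, which later
 * handlers and security hooks can inspect.
 */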
/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH or seccomp thread-sync
 */
static void check_unsafe_exec(struct linux_binprm *bprm)
{
        struct task_struct *p = current, *t;
        unsigned n_fs;

        if (p->ptrace)
                bprm->unsafe |= LSM_UNSAFE_PTRACE;

        /*
         * This isn't strictly necessary, but it makes it harder for LSMs to
         * mess up.
         */
        if (task_no_new_privs(current))
                bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

        t = p;
        n_fs = 1;
        spin_lock(&p->fs->lock);
        rcu_read_lock();
        while_each_thread(p, t) {
                if (t->fs == p->fs)
                        n_fs++;
        }
        rcu_read_unlock();

        if (p->fs->users > n_fs)
                bprm->unsafe |= LSM_UNSAFE_SHARE;
        else
                p->fs->in_exec = 1;
        spin_unlock(&p->fs->lock);
}

static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
{
        /* Handle suid and sgid on files */
        struct inode *inode;
        unsigned int mode;
        kuid_t uid;
        kgid_t gid;

        if (!mnt_may_suid(file->f_path.mnt))
                return;

        if (task_no_new_privs(current))
                return;

        inode = file->f_path.dentry->d_inode;
        mode = READ_ONCE(inode->i_mode);
        if (!(mode & (S_ISUID|S_ISGID)))
                return;

        /* Be careful if suid/sgid is set */
        inode_lock(inode);

        /* Atomically reload mode/uid/gid now that the lock is held. */
        mode = inode->i_mode;
        uid = inode->i_uid;
        gid = inode->i_gid;
        inode_unlock(inode);

        /* We ignore suid/sgid if there are no mappings for them in the ns */
        if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
            !kgid_has_mapping(bprm->cred->user_ns, gid))
                return;

        if (mode & S_ISUID) {
                bprm->per_clear |= PER_CLEAR_ON_SETID;
                bprm->cred->euid = uid;
        }

        if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
                bprm->per_clear |= PER_CLEAR_ON_SETID;
                bprm->cred->egid = gid;
        }
}

/*
 * Compute bprm->cred based upon the final binary.
 */
static int bprm_creds_from_file(struct linux_binprm *bprm)
{
        /* Compute creds based on which file? */
        struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;

        bprm_fill_uid(bprm, file);
        return security_bprm_creds_from_file(bprm, file);
}

/*
 * Fill the binprm structure from the inode.
 * Read the first BINPRM_BUF_SIZE bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
static int prepare_binprm(struct linux_binprm *bprm)
{
        loff_t pos = 0;

        memset(bprm->buf, 0, BINPRM_BUF_SIZE);
        return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
}
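
/*
 * Example: the registered handlers sniff bprm->buf to claim an image;
 * binfmt_script matches a "#!" prefix, while binfmt_elf checks for the
 * "\177ELF" magic in the first four bytes. Zeroing the buffer before the
 * read presumably keeps a short read from leaving stale bytes behind for
 * those checks.
 */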
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
        int ret = 0;
        unsigned long offset;
        char *kaddr;
        struct page *page;

        if (!bprm->argc)
                return 0;

        do {
                offset = bprm->p & ~PAGE_MASK;
                page = get_arg_page(bprm, bprm->p, 0);
                if (!page) {
                        ret = -EFAULT;
                        goto out;
                }
                kaddr = kmap_atomic(page);

                for (; offset < PAGE_SIZE && kaddr[offset];
                                offset++, bprm->p++)
                        ;

                kunmap_atomic(kaddr);
                put_arg_page(page);
        } while (offset == PAGE_SIZE);

        bprm->p++;
        bprm->argc--;
        ret = 0;

out:
        return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
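
/*
 * Usage sketch: binfmt_script calls remove_arg_zero() to drop the original
 * argv[0] before pushing the script path, any #! option argument, and the
 * interpreter name in its place, so the interpreter effectively starts
 * with argv rewritten as "interp [opt-arg] script-path original-args...".
 */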
  1434. #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
  1435. /*
  1436. * cycle the list of binary formats handler, until one recognizes the image
  1437. */
static int search_binary_handler(struct linux_binprm *bprm)
{
	bool need_retry = IS_ENABLED(CONFIG_MODULES);
	struct linux_binfmt *fmt;
	int retval;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		return retval;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
retry:
	read_lock(&binfmt_lock);
	list_for_each_entry(fmt, &formats, lh) {
		if (!try_module_get(fmt->module))
			continue;
		read_unlock(&binfmt_lock);

		retval = fmt->load_binary(bprm);

		read_lock(&binfmt_lock);
		put_binfmt(fmt);
		if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
			read_unlock(&binfmt_lock);
			return retval;
		}
	}
	read_unlock(&binfmt_lock);

	if (need_retry) {
		if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
		    printable(bprm->buf[2]) && printable(bprm->buf[3]))
			return retval;
		if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
			return retval;
		need_retry = false;
		goto retry;
	}

	return retval;
}
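
/*
 * exec_binprm - run the binfmt handlers, following interpreter
 * indirections (e.g. "#!" scripts) until a handler actually loads the
 * image or the nesting limit is hit.  On success, fire the usual exec
 * audit, tracing, and ptrace notifications.
 */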
static int exec_binprm(struct linux_binprm *bprm)
{
	pid_t old_pid, old_vpid;
	int ret, depth;

	/* Need to fetch pid before load_binary changes it */
	old_pid = current->pid;
	rcu_read_lock();
	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
	rcu_read_unlock();

	/* This allows up to 5 levels of binfmt rewrites before failing hard. */
	for (depth = 0;; depth++) {
		struct file *exec;
		if (depth > 5)
			return -ELOOP;

		ret = search_binary_handler(bprm);
		if (ret < 0)
			return ret;
		if (!bprm->interpreter)
			break;

		exec = bprm->file;
		bprm->file = bprm->interpreter;
		bprm->interpreter = NULL;

		allow_write_access(exec);
		if (unlikely(bprm->have_execfd)) {
			if (bprm->executable) {
				fput(exec);
				return -ENOEXEC;
			}
			bprm->executable = exec;
		} else
			fput(exec);
	}

	audit_bprm(bprm);
	trace_sched_process_exec(current, old_pid, bprm);
	ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
	proc_exec_connector(current);
	return 0;
}
/*
 * Common code for the execve() family: open and run a new program
 * on behalf of the current task.
 */
static int bprm_execve(struct linux_binprm *bprm,
		       int fd, struct filename *filename, int flags)
{
	struct file *file;
	struct files_struct *displaced;
	int retval;

	/*
	 * Cancel any io_uring activity across execve
	 */
	io_uring_task_cancel();

	retval = unshare_files(&displaced);
	if (retval)
		return retval;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		goto out_files;

	check_unsafe_exec(bprm);
	current->in_execve = 1;

	file = do_open_execat(fd, filename, flags);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	/*
	 * Record that a name derived from an O_CLOEXEC fd will be
	 * inaccessible after exec.  Relies on having exclusive access to
	 * current->files (due to unshare_files above).
	 */
	if (bprm->fdpath &&
	    close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
		bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;

	/* Set the unchanging part of bprm->cred */
	retval = security_bprm_creds_for_exec(bprm);
	if (retval)
		goto out;

	retval = exec_binprm(bprm);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	rseq_execve(current);
	acct_update_integrals(current);
	task_numa_free(current, false);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	/*
	 * If past the point of no return, ensure the code never
	 * returns to the userspace process.  Use an existing fatal
	 * signal if present, otherwise terminate the process with
	 * SIGSEGV.
	 */
	if (bprm->point_of_no_return && !fatal_signal_pending(current))
		force_sigsegv(SIGSEGV);

out_unmark:
	current->fs->in_exec = 0;
	current->in_execve = 0;

out_files:
	if (displaced)
		reset_files_struct(displaced);

	return retval;
}
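
/*
 * do_execveat_common - shared tail of execve()/execveat(): count and
 * copy the argument and environment strings onto the new stack
 * (filename first, then envp, then argv, leaving bprm->p at the start
 * of the argv strings), then hand off to bprm_execve().
 */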
static int do_execveat_common(int fd, struct filename *filename,
			      struct user_arg_ptr argv,
			      struct user_arg_ptr envp,
			      int flags)
{
	struct linux_binprm *bprm;
	int retval;

	if (IS_ERR(filename))
		return PTR_ERR(filename);

	/*
	 * We move the actual failure in case of RLIMIT_NPROC excess from
	 * set*uid() to execve() because too many poorly written programs
	 * don't check setuid() return code.  Here we additionally recheck
	 * whether NPROC limit is still exceeded.
	 */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;
		goto out_ret;
	}

	/* We're below the limit (still or again), so we don't want to make
	 * further execve() calls fail. */
	current->flags &= ~PF_NPROC_EXCEEDED;

	bprm = alloc_bprm(fd, filename);
	if (IS_ERR(bprm)) {
		retval = PTR_ERR(bprm);
		goto out_ret;
	}

	retval = count(argv, MAX_ARG_STRINGS);
	if (retval == 0)
		pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n",
			     current->comm, bprm->filename);
	if (retval < 0)
		goto out_free;
	bprm->argc = retval;

	retval = count(envp, MAX_ARG_STRINGS);
	if (retval < 0)
		goto out_free;
	bprm->envc = retval;

	retval = bprm_stack_limits(bprm);
	if (retval < 0)
		goto out_free;

	retval = copy_string_kernel(bprm->filename, bprm);
	if (retval < 0)
		goto out_free;
	bprm->exec = bprm->p;

	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out_free;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out_free;

	/*
	 * When argv is empty, add an empty string ("") as argv[0] to
	 * ensure confused userspace programs that start processing
	 * from argv[1] won't end up walking envp.  See also
	 * bprm_stack_limits().
	 */
	if (bprm->argc == 0) {
		retval = copy_string_kernel("", bprm);
		if (retval < 0)
			goto out_free;
		bprm->argc = 1;
	}

	retval = bprm_execve(bprm, fd, filename, flags);
out_free:
	free_bprm(bprm);

out_ret:
	putname(filename);
	return retval;
}
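
/*
 * kernel_execve - exec a program on behalf of in-kernel callers (such
 * as the boot-time init path and usermode helpers).  argv/envp point
 * to kernel memory, hence the *_kernel counting and copy helpers.
 */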
int kernel_execve(const char *kernel_filename,
		  const char *const *argv, const char *const *envp)
{
	struct filename *filename;
	struct linux_binprm *bprm;
	int fd = AT_FDCWD;
	int retval;

	filename = getname_kernel(kernel_filename);
	if (IS_ERR(filename))
		return PTR_ERR(filename);

	bprm = alloc_bprm(fd, filename);
	if (IS_ERR(bprm)) {
		retval = PTR_ERR(bprm);
		goto out_ret;
	}

	retval = count_strings_kernel(argv);
	if (WARN_ON_ONCE(retval == 0))
		retval = -EINVAL;
	if (retval < 0)
		goto out_free;
	bprm->argc = retval;

	retval = count_strings_kernel(envp);
	if (retval < 0)
		goto out_free;
	bprm->envc = retval;

	retval = bprm_stack_limits(bprm);
	if (retval < 0)
		goto out_free;

	retval = copy_string_kernel(bprm->filename, bprm);
	if (retval < 0)
		goto out_free;
	bprm->exec = bprm->p;

	retval = copy_strings_kernel(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out_free;

	retval = copy_strings_kernel(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out_free;

	retval = bprm_execve(bprm, fd, filename, 0);
out_free:
	free_bprm(bprm);
out_ret:
	putname(filename);
	return retval;
}
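
/*
 * Thin wrappers that package native (and, below, compat) user pointers
 * into a struct user_arg_ptr before calling do_execveat_common().
 */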
static int do_execve(struct filename *filename,
		     const char __user *const __user *__argv,
		     const char __user *const __user *__envp)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };
	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int do_execveat(int fd, struct filename *filename,
		       const char __user *const __user *__argv,
		       const char __user *const __user *__envp,
		       int flags)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };

	return do_execveat_common(fd, filename, argv, envp, flags);
}
#ifdef CONFIG_COMPAT
static int compat_do_execve(struct filename *filename,
			    const compat_uptr_t __user *__argv,
			    const compat_uptr_t __user *__envp)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int compat_do_execveat(int fd, struct filename *filename,
			      const compat_uptr_t __user *__argv,
			      const compat_uptr_t __user *__envp,
			      int flags)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execveat_common(fd, filename, argv, envp, flags);
}
#endif
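
/*
 * set_binfmt - associate the current mm with a new binary format
 * handler, taking a module reference on the new one and dropping the
 * reference held on the old one.
 */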
void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);
/*
 * set_dumpable stores the three-value SUID_DUMP_* state
 * (SUID_DUMP_DISABLE/USER/ROOT) into mm->flags atomically.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
		return;

	set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
}
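
/*
 * Syscall entry points.  From userspace, e.g.:
 *
 *	char *argv[] = { "/bin/sh", "-c", "echo hi", NULL };
 *	char *envp[] = { NULL };
 *	execve("/bin/sh", argv, envp);
 */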
SYSCALL_DEFINE3(execve,
		const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp)
{
	return do_execve(getname(filename), argv, envp);
}
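
/*
 * execveat() resolves the program relative to fd; with AT_EMPTY_PATH
 * and an empty filename it executes the file referred to by fd itself,
 * which is how fexecve(3) is implemented in glibc.
 */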
SYSCALL_DEFINE5(execveat,
		int, fd, const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp,
		int, flags)
{
	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

	return do_execveat(fd,
			   getname_flags(filename, lookup_flags, NULL),
			   argv, envp, flags);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
		       const compat_uptr_t __user *, argv,
		       const compat_uptr_t __user *, envp)
{
	return compat_do_execve(getname(filename), argv, envp);
}

COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
		       const char __user *, filename,
		       const compat_uptr_t __user *, argv,
		       const compat_uptr_t __user *, envp,
		       int, flags)
{
	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

	return compat_do_execveat(fd,
				  getname_flags(filename, lookup_flags, NULL),
				  argv, envp, flags);
}
#endif