// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
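
/*
 * Allocate memory for a module image. The allocation is first attempted
 * within the (potentially randomized) module region; if that fails and
 * module PLTs are available, a larger 2 GB window is tried instead, as
 * explained by the comment inside the function.
 */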
void *module_alloc(unsigned long size)
{
        u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
        gfp_t gfp_mask = GFP_KERNEL;
        void *p;

        /* Silence the initial allocation */
        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
                gfp_mask |= __GFP_NOWARN;

        if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
            IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                /* don't exceed the static module region - see below */
                module_alloc_end = MODULES_END;

        p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
                                 module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
                                 NUMA_NO_NODE, __builtin_return_address(0));

        if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
            (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
             (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
              !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
                /*
                 * KASAN without KASAN_VMALLOC can only deal with module
                 * allocations being served from the reserved module region,
                 * since the remainder of the vmalloc region is already
                 * backed by zero shadow pages, and punching holes into it
                 * is non-trivial. Since the module region is not randomized
                 * when KASAN is enabled without KASAN_VMALLOC, it is even
                 * less likely that the module region gets exhausted, so we
                 * can simply omit this fallback in that case.
                 */
                p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
                                module_alloc_base + SZ_2G, GFP_KERNEL,
                                PAGE_KERNEL, 0, NUMA_NO_NODE,
                                __builtin_return_address(0));

        if (p && (kasan_module_alloc(p, size) < 0)) {
                vfree(p);
                return NULL;
        }

        return p;
}
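
/*
 * Resolve a relocation to its raw value: RELOC_OP_ABS yields S + A,
 * RELOC_OP_PREL yields S + A - P, and RELOC_OP_PAGE yields
 * Page(S + A) - Page(P), where "val" already holds S + A and "place" is P
 * (following the terminology of the AArch64 ELF ABI).
 */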
enum aarch64_reloc_op {
        RELOC_OP_NONE,
        RELOC_OP_ABS,
        RELOC_OP_PREL,
        RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
        switch (reloc_op) {
        case RELOC_OP_ABS:
                return val;
        case RELOC_OP_PREL:
                return val - (u64)place;
        case RELOC_OP_PAGE:
                return (val & ~0xfff) - ((u64)place & ~0xfff);
        case RELOC_OP_NONE:
                return 0;
        }

        pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
        return 0;
}
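
/*
 * Apply a data relocation of the given width (16, 32 or 64 bits) at
 * "place", returning -ERANGE if the resolved value does not fit.
 */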
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
        s64 sval = do_reloc(op, place, val);

        /*
         * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
         * relative and absolute relocations as having a range of [-2^15, 2^16)
         * or [-2^31, 2^32), respectively. However, in order to be able to
         * detect overflows reliably, we have to choose whether we interpret
         * such quantities as signed or as unsigned, and stick with it.
         * The way we organize our address space requires a signed
         * interpretation of 32-bit relative references, so let's use that
         * for all R_AARCH64_PRELxx relocations. This means our upper
         * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
         */
        switch (len) {
        case 16:
                *(s16 *)place = sval;
                switch (op) {
                case RELOC_OP_ABS:
                        if (sval < 0 || sval > U16_MAX)
                                return -ERANGE;
                        break;
                case RELOC_OP_PREL:
                        if (sval < S16_MIN || sval > S16_MAX)
                                return -ERANGE;
                        break;
                default:
                        pr_err("Invalid 16-bit data relocation (%d)\n", op);
                        return 0;
                }
                break;
        case 32:
                *(s32 *)place = sval;
                switch (op) {
                case RELOC_OP_ABS:
                        if (sval < 0 || sval > U32_MAX)
                                return -ERANGE;
                        break;
                case RELOC_OP_PREL:
                        if (sval < S32_MIN || sval > S32_MAX)
                                return -ERANGE;
                        break;
                default:
                        pr_err("Invalid 32-bit data relocation (%d)\n", op);
                        return 0;
                }
                break;
        case 64:
                *(s64 *)place = sval;
                break;
        default:
                pr_err("Invalid length (%d) for data relocation\n", len);
                return 0;
        }
        return 0;
}
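
/*
 * MOVW relocations: AARCH64_INSN_IMM_MOVNZ selects between MOVZ and MOVN
 * based on the sign of the resolved value, while AARCH64_INSN_IMM_MOVKZ
 * leaves the opcode alone and only patches the 16-bit immediate field.
 */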
enum aarch64_insn_movw_imm_type {
        AARCH64_INSN_IMM_MOVNZ,
        AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
                           int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
        u64 imm;
        s64 sval;
        u32 insn = le32_to_cpu(*place);

        sval = do_reloc(op, place, val);
        imm = sval >> lsb;

        if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
                /*
                 * For signed MOVW relocations, we have to manipulate the
                 * instruction encoding depending on whether or not the
                 * immediate is less than zero.
                 */
                insn &= ~(3 << 29);
                if (sval >= 0) {
                        /* >=0: Set the instruction to MOVZ (opcode 10b). */
                        insn |= 2 << 29;
                } else {
                        /*
                         * <0: Set the instruction to MOVN (opcode 00b).
                         * Since we've masked the opcode already, we
                         * don't need to do anything other than
                         * inverting the new immediate field.
                         */
                        imm = ~imm;
                }
        }

        /* Update the instruction with the new encoding. */
        insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
        *place = cpu_to_le32(insn);

        if (imm > U16_MAX)
                return -ERANGE;

        return 0;
}
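
/*
 * Patch the immediate field of the instruction at "place" with bits
 * [lsb, lsb + len) of the resolved relocation value, and report -ERANGE
 * if the discarded upper bits are not a plain sign extension.
 */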
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
                          int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
        u64 imm, imm_mask;
        s64 sval;
        u32 insn = le32_to_cpu(*place);

        /* Calculate the relocation value. */
        sval = do_reloc(op, place, val);
        sval >>= lsb;

        /* Extract the value bits and shift them to bit 0. */
        imm_mask = (BIT(lsb + len) - 1) >> lsb;
        imm = sval & imm_mask;

        /* Update the instruction's immediate field. */
        insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
        *place = cpu_to_le32(insn);

        /*
         * Extract the upper value bits (including the sign bit) and
         * shift them to bit 0.
         */
        sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

        /*
         * Overflow has occurred if the upper bits are not all equal to
         * the sign bit of the value.
         */
        if ((u64)(sval + 1) >= 2)
                return -ERANGE;

        return 0;
}
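
/*
 * Handle an ADRP (page-relative) relocation. If the instruction sits at an
 * offset where ADRP must be avoided (see is_forbidden_offset_for_adrp(),
 * part of the Cortex-A53 erratum 843419 workaround), rewrite it as an ADR
 * when the target is close enough, or branch to a veneer otherwise.
 */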
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
                           __le32 *place, u64 val)
{
        u32 insn;

        if (!is_forbidden_offset_for_adrp(place))
                return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
                                      AARCH64_INSN_IMM_ADR);

        /* patch ADRP to ADR if it is in range */
        if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
                            AARCH64_INSN_IMM_ADR)) {
                insn = le32_to_cpu(*place);
                insn &= ~BIT(31);
        } else {
                /* out of range for ADR -> emit a veneer */
                val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
                if (!val)
                        return -ENOEXEC;
                insn = aarch64_insn_gen_branch_imm((u64)place, val,
                                                   AARCH64_INSN_BRANCH_NOLINK);
        }

        *place = cpu_to_le32(insn);
        return 0;
}
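
/*
 * Standard module-loader hook: apply all RELA relocations in section
 * "relsec" against the module's symbol table, using the helpers above.
 */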
int apply_relocate_add(Elf64_Shdr *sechdrs,
                       const char *strtab,
                       unsigned int symindex,
                       unsigned int relsec,
                       struct module *me)
{
        unsigned int i;
        int ovf;
        bool overflow_check;
        Elf64_Sym *sym;
        void *loc;
        u64 val;
        Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                /* loc corresponds to P in the AArch64 ELF document. */
                loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;

                /* sym is the ELF symbol we're referring to. */
                sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
                        + ELF64_R_SYM(rel[i].r_info);

                /* val corresponds to (S + A) in the AArch64 ELF document. */
                val = sym->st_value + rel[i].r_addend;

                /* Check for overflow by default. */
                overflow_check = true;

                /* Perform the static relocation. */
                switch (ELF64_R_TYPE(rel[i].r_info)) {
                /* Null relocations. */
                case R_ARM_NONE:
                case R_AARCH64_NONE:
                        ovf = 0;
                        break;

                /* Data relocations. */
                case R_AARCH64_ABS64:
                        overflow_check = false;
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
                        break;
                case R_AARCH64_ABS32:
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
                        break;
                case R_AARCH64_ABS16:
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
                        break;
                case R_AARCH64_PREL64:
                        overflow_check = false;
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
                        break;
                case R_AARCH64_PREL32:
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
                        break;
                case R_AARCH64_PREL16:
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
                        break;

                /* MOVW instruction relocations. */
                case R_AARCH64_MOVW_UABS_G0_NC:
                        overflow_check = false;
                        fallthrough;
                case R_AARCH64_MOVW_UABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G1_NC:
                        overflow_check = false;
                        fallthrough;
                case R_AARCH64_MOVW_UABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G2_NC:
                        overflow_check = false;
                        fallthrough;
                case R_AARCH64_MOVW_UABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_SABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_SABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_SABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G0_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G0:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G1_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G1:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G2_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G2:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;

                /* Immediate instruction relocations. */
                case R_AARCH64_LD_PREL_LO19:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                                             AARCH64_INSN_IMM_19);
                        break;
                case R_AARCH64_ADR_PREL_LO21:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
                                             AARCH64_INSN_IMM_ADR);
                        break;
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                        overflow_check = false;
                        fallthrough;
                case R_AARCH64_ADR_PREL_PG_HI21:
                        ovf = reloc_insn_adrp(me, sechdrs, loc, val);
                        if (ovf && ovf != -ERANGE)
                                return ovf;
                        break;
                case R_AARCH64_ADD_ABS_LO12_NC:
                case R_AARCH64_LDST8_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST16_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST32_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST64_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST128_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_TSTBR14:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
                                             AARCH64_INSN_IMM_14);
                        break;
                case R_AARCH64_CONDBR19:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                                             AARCH64_INSN_IMM_19);
                        break;
                case R_AARCH64_JUMP26:
                case R_AARCH64_CALL26:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
                                             AARCH64_INSN_IMM_26);

                        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
                            ovf == -ERANGE) {
                                val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
                                if (!val)
                                        return -ENOEXEC;
                                ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
                                                     26, AARCH64_INSN_IMM_26);
                        }
                        break;

                default:
                        pr_err("module %s: unsupported RELA relocation: %llu\n",
                               me->name, ELF64_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }

                if (overflow_check && ovf == -ERANGE)
                        goto overflow;

        }

        return 0;

overflow:
        pr_err("module %s: overflow in relocation type %d val %Lx\n",
               me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
        return -ENOEXEC;
}
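
/* Look up a section header by name via the section-header string table. */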
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
                                    const Elf_Shdr *sechdrs,
                                    const char *name)
{
        const Elf_Shdr *s, *se;
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
                if (strcmp(name, secstrs + s->sh_name) == 0)
                        return s;
        }

        return NULL;
}
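
/*
 * Populate the module's .text.ftrace_trampoline section with PLT entries
 * pointing at the ftrace entry points, so that patched call sites in the
 * module can reach the ftrace code even when it is out of direct branch
 * range. __init_plt() writes a single such entry.
 */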
static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
        *plt = get_plt_entry(addr, plt);
}

static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
                                  const Elf_Shdr *sechdrs,
                                  struct module *mod)
{
#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
        const Elf_Shdr *s;
        struct plt_entry *plts;

        s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
        if (!s)
                return -ENOEXEC;

        plts = (void *)s->sh_addr;

        __init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

        if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
                __init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);

        mod->arch.ftrace_trampolines = plts;
#endif
        return 0;
}
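
/*
 * Final per-module fixups: apply any alternative instruction patches
 * recorded in .altinstructions, then set up the ftrace trampoline PLTs.
 */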
int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
{
        const Elf_Shdr *s;

        s = find_section(hdr, sechdrs, ".altinstructions");
        if (s)
                apply_alternatives_module((void *)s->sh_addr, s->sh_size);

        return module_init_ftrace_plt(hdr, sechdrs, me);
}