module-plts.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sort.h>
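
/*
 * Generate an ADRP/ADD instruction pair that, when executed from 'pc',
 * loads the address 'dst' into register 'reg': ADRP supplies the 4 KB
 * page of the target, and ADD fills in the offset within that page.
 */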
static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
					    enum aarch64_insn_register reg)
{
	u32 adrp, add;

	adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
	add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
					   AARCH64_INSN_VARIANT_64BIT,
					   AARCH64_INSN_ADSB_ADD);

	return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
}
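
/*
 * Generate a complete PLT entry for target 'dst': an ADRP/ADD pair that
 * loads the target address into x16, followed by an indirect 'br x16'.
 */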
struct plt_entry get_plt_entry(u64 dst, void *pc)
{
	struct plt_entry plt;
	static u32 br;

	if (!br)
		br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
						 AARCH64_INSN_BRANCH_NOLINK);

	plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
	plt.br = cpu_to_le32(br);

	return plt;
}
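
/*
 * Check whether two PLT entries branch to the same target. ADRP is
 * PC-relative at 4 KB granularity, so entries placed on different pages
 * may encode the same target with different immediates; the comparison
 * therefore has to take the placement of each entry into account.
 */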
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b)
{
	u64 p, q;

	/*
	 * Check whether both entries refer to the same target:
	 * do the cheapest checks first.
	 * If the 'add' or 'br' opcodes are different, then the target
	 * cannot be the same.
	 */
	if (a->add != b->add || a->br != b->br)
		return false;

	p = ALIGN_DOWN((u64)a, SZ_4K);
	q = ALIGN_DOWN((u64)b, SZ_4K);

	/*
	 * If the 'adrp' opcodes are the same then we just need to check
	 * that they refer to the same 4k region.
	 */
	if (a->adrp == b->adrp && p == q)
		return true;

	return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
	       (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
}
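
/* Does 'loc' fall inside the module's init region? */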
static bool in_init(const struct module *mod, void *loc)
{
	return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
}
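
/*
 * Allocate a PLT entry for an out-of-range branch from 'loc' to the
 * address described by 'sym' and 'rela', using the init PLT section if
 * 'loc' lives in the module's init region and the core PLT section
 * otherwise. Returns the address of the entry, or 0 on overflow.
 */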
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries;
	int j = i - 1;
	u64 val = sym->st_value + rela->r_addend;

	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i++;

	plt[i] = get_plt_entry(val, &plt[i]);

	/*
	 * Check if the entry we just created is a duplicate. Given that the
	 * relocations are sorted, this will be the last entry we allocated,
	 * if one exists.
	 */
	if (j >= 0 && plt_entries_equal(plt + i, plt + j))
		return (u64)&plt[j];

	pltsec->plt_num_entries += i - j;
	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	return (u64)&plt[i];
}

#ifdef CONFIG_ARM64_ERRATUM_843419
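/*
 * Emit a veneer for an ADRP instruction that would otherwise end up at a
 * 4 KB page offset affected by Cortex-A53 erratum 843419: the ADRP/ADD
 * pair is regenerated in the PLT section at a safe offset, followed by a
 * branch back to the instruction following the original ADRP.
 */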
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries++;
	u32 br;
	int rd;

	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i = pltsec->plt_num_entries++;

	/* get the destination register of the ADRP instruction */
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
					  le32_to_cpup((__le32 *)loc));

	br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
					 AARCH64_INSN_BRANCH_NOLINK);

	plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
	plt[i].br = cpu_to_le32(br);

	return (u64)&plt[i];
}
#endif
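
/* Three-way compare: returns -1, 0 or 1 for less than, equal, greater than. */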
#define cmp_3way(a, b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rela(const void *a, const void *b)
{
	const Elf64_Rela *x = a, *y = b;
	int i;

	/* sort by type, symbol index and addend */
	i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
	if (i == 0)
		i = cmp_3way(x->r_addend, y->r_addend);
	return i;
}

static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
	/*
	 * Entries are sorted by type, symbol index and addend. That means
	 * that, if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}
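
/*
 * Count the maximum number of PLT entries a RELA section may require:
 * one for each branch to a symbol in another section (deduplicated where
 * possible), plus, when the erratum 843419 workaround is in effect, one
 * veneer for each ADRP that cannot be kept away from a vulnerable offset
 * by raising the section alignment.
 */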
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
			       Elf64_Word dstidx, Elf_Shdr *dstsec)
{
	unsigned int ret = 0;
	Elf64_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		u64 min_align;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
				break;

			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF64_R_SYM(rela[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero: this allows us to record the PLT
			 * entry address in the symbol table itself, rather than
			 * having to search the list for duplicates each time we
			 * emit one.
			 */
			if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
				ret++;
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADR_PREL_PG_HI21:
			if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
			    !cpus_have_const_cap(ARM64_WORKAROUND_843419))
				break;

			/*
			 * Determine the minimal safe alignment for this ADRP
			 * instruction: the section alignment at which it is
			 * guaranteed not to appear at a vulnerable offset.
			 *
			 * This comes down to finding the least significant zero
			 * bit in bits [11:3] of the section offset, and
			 * increasing the section's alignment so that the
			 * resulting address of this instruction is guaranteed
			 * to equal the offset in that particular bit (as well
			 * as all less significant bits). This ensures that the
			 * address modulo 4 KB != 0xfff8 or 0xfffc (which would
			 * have all ones in bits [11:3])
			 */
			min_align = 2ULL << ffz(rela[i].r_offset | 0x7);

			/*
			 * Allocate veneer space for each ADRP that may appear
			 * at a vulnerable offset nonetheless. At relocation
			 * time, some of these will remain unused since some
			 * ADRP instructions can be patched to ADR instructions
			 * instead.
			 */
			if (min_align > SZ_4K)
				ret++;
			else
				dstsec->sh_addralign = max(dstsec->sh_addralign,
							   min_align);
			break;
		}
	}

	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_843419))
		/*
		 * Add some slack so we can skip PLT slots that may trigger
		 * the erratum due to the placement of the ADRP instruction.
		 */
		ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));

	return ret;
}
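
/*
 * Does this relocation describe a branch to a symbol in another section,
 * i.e., one that may need a PLT entry to extend its reach?
 */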
static bool branch_rela_needs_plt(Elf64_Sym *syms, Elf64_Rela *rela,
				  Elf64_Word dstidx)
{
	Elf64_Sym *s = syms + ELF64_R_SYM(rela->r_info);

	if (s->st_shndx == dstidx)
		return false;

	return ELF64_R_TYPE(rela->r_info) == R_AARCH64_JUMP26 ||
	       ELF64_R_TYPE(rela->r_info) == R_AARCH64_CALL26;
}

/* Group branch PLT relas at the front end of the array. */
static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
				      int numrels, Elf64_Word dstidx)
{
	int i = 0, j = numrels - 1;

	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return 0;

	while (i < j) {
		if (branch_rela_needs_plt(syms, &rela[i], dstidx))
			i++;
		else if (branch_rela_needs_plt(syms, &rela[j], dstidx))
			swap(rela[i], rela[j]);
		else
			j--;
	}

	return i;
}
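
/*
 * Called early during module load, before relocation: size the .plt and
 * .init.plt sections (and the ftrace trampoline section, when present)
 * to accommodate the worst-case number of PLT entries.
 */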
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	bool copy_rela_for_fips140 = false;
	unsigned long core_plts = 0;
	unsigned long init_plts = 0;
	Elf64_Sym *syms = NULL;
	Elf_Shdr *pltsec, *tramp = NULL;
	int i;

	/*
	 * Find the empty .plt section so we can expand it to store the PLT
	 * entries. Record the symtab address as well.
	 */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
			mod->arch.core.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
			mod->arch.init.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name,
				 ".text.ftrace_trampoline"))
			tramp = sechdrs + i;
		else if (sechdrs[i].sh_type == SHT_SYMTAB)
			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
	}

	if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	if (IS_ENABLED(CONFIG_CRYPTO_FIPS140) &&
	    !strcmp(mod->name, "fips140"))
		copy_rela_for_fips140 = true;

	for (i = 0; i < ehdr->e_shnum; i++) {
		Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
		int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
		Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

#ifdef CONFIG_CRYPTO_FIPS140
		if (copy_rela_for_fips140 &&
		    !strcmp(secstrings + dstsec->sh_name, ".rodata")) {
			void *p = kmemdup(rels, numrels * sizeof(Elf64_Rela),
					  GFP_KERNEL);
			if (!p) {
				pr_err("fips140: failed to allocate .rodata RELA buffer\n");
				return -ENOMEM;
			}
			mod->arch.rodata_relocations = p;
			mod->arch.num_rodata_relocations = numrels;
		}
#endif

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

#ifdef CONFIG_CRYPTO_FIPS140
		if (copy_rela_for_fips140 &&
		    !strcmp(secstrings + dstsec->sh_name, ".text")) {
			void *p = kmemdup(rels, numrels * sizeof(Elf64_Rela),
					  GFP_KERNEL);
			if (!p) {
				pr_err("fips140: failed to allocate .text RELA buffer\n");
				return -ENOMEM;
			}
			mod->arch.text_relocations = p;
			mod->arch.num_text_relocations = numrels;
		}
#endif

		/*
		 * sort branch relocations requiring a PLT by type, symbol index
		 * and addend
		 */
		nents = partition_branch_plt_relas(syms, rels, numrels,
						   sechdrs[i].sh_info);
		if (nents)
			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);

		if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
			core_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
		else
			init_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
	}

	pltsec = sechdrs + mod->arch.core.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
	mod->arch.core.plt_num_entries = 0;
	mod->arch.core.plt_max_entries = core_plts;

	pltsec = sechdrs + mod->arch.init.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
	mod->arch.init.plt_num_entries = 0;
	mod->arch.init.plt_max_entries = init_plts;

	if (tramp) {
		tramp->sh_type = SHT_NOBITS;
		tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
		tramp->sh_addralign = __alignof__(struct plt_entry);
		tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
	}

	return 0;
}