// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/sections.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
#define AARCH64_INSN_LSL_12	BIT(22)
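
/*
 * The encoding class of an A64 instruction is determined by its op0 field,
 * insn[28:25], which aarch64_get_insn_class() below uses to index this table.
 */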
static const int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_XPACLRI:
	case AARCH64_INSN_HINT_PACIA_1716:
	case AARCH64_INSN_HINT_PACIB_1716:
	case AARCH64_INSN_HINT_PACIAZ:
	case AARCH64_INSN_HINT_PACIASP:
	case AARCH64_INSN_HINT_PACIBZ:
	case AARCH64_INSN_HINT_PACIBSP:
	case AARCH64_INSN_HINT_BTI:
	case AARCH64_INSN_HINT_BTIC:
	case AARCH64_INSN_HINT_BTIJ:
	case AARCH64_INSN_HINT_BTIJC:
	case AARCH64_INSN_HINT_NOP:
		return true;
	default:
		return false;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

static DEFINE_RAW_SPINLOCK(patch_lock);

static bool is_exit_text(unsigned long addr)
{
	/* discarded with init text/data */
	return system_state < SYSTEM_RUNNING &&
		addr >= (unsigned long)__exittext_begin &&
		addr < (unsigned long)__exittext_end;
}

static bool is_image_text(unsigned long addr)
{
	return core_kernel_text(addr) || is_exit_text(addr);
}
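
/*
 * Kernel and module text may be mapped read-only (STRICT_KERNEL_RWX /
 * STRICT_MODULE_RWX), so it cannot be written through its normal mapping.
 * patch_map() instead creates a temporary writable alias of the target
 * page through a fixmap slot, which patch_unmap() tears down again.
 */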
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool image = is_image_text(uintaddr);
	struct page *page;

	if (image)
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */
	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */
	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_ret_auth(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_br_auth(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_blr_auth(insn) ||
		aarch64_insn_is_bcond(insn);
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		__flush_icache_range((uintptr_t)tp,
				     (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};
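
/*
 * stop_machine() callback: every online CPU enters here, and the last one
 * to arrive performs the patching while the others spin. A second increment
 * of cpu_count (taking it past num_online_cpus()) releases the spinning
 * CPUs, which then execute an ISB to discard any stale instructions they
 * may have prefetched.
 */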
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The last CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
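
/*
 * The ADR/ADRP immediate is the one immediate split across two fields:
 * immlo in insn[30:29] holds the low ADR_IMM_HILOSPLIT bits and immhi in
 * insn[23:5] holds the rest, so it is handled separately from the simple
 * mask/shift table above.
 */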
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}

u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
				 u32 insn)
{
	int shift;

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	return (insn >> shift) & GENMASK(4, 0);
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be a multiple of 4 in the range [-256, 252], got %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be a multiple of 8 in the range [-512, 504], got %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}

u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
		break;
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}

u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
			   enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	u32 insn = aarch64_insn_get_ldadd_value();

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}

u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	/*
	 * STADD is simply encoded as an alias for LDADD with XZR as
	 * the destination register.
	 */
	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
				      value, size);
}

static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}

u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}
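
	/* The hw field, insn[22:21], selects the 16-bit slice: shift / 16. */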
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * MOV (register) is architecturally an alias of ORR (shifted register) where
 * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
 */
u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      enum aarch64_insn_variant variant)
{
	return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
						    src, 0, variant,
						    AARCH64_INSN_LOGIC_ORR);
}

u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
			 enum aarch64_insn_register reg,
			 enum aarch64_insn_adr_type type)
{
	u32 insn;
	s32 offset;

	switch (type) {
	case AARCH64_INSN_ADR_TYPE_ADR:
		insn = aarch64_insn_get_adr_value();
		offset = addr - pc;
		break;
	case AARCH64_INSN_ADR_TYPE_ADRP:
		insn = aarch64_insn_get_adrp_value();
		offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
		break;
	default:
		pr_err("%s: unknown adr encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}
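
	/* The ADR immediate is 21 bits: +/-1MB for ADR, +/-4GB in pages for ADRP. */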
	if (offset < -SZ_1M || offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
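/*
 * The (imm << a) >> b idioms below sign-extend the decoded field from its
 * width (26, 19 or 14 bits) and then scale it by 4, since branch immediates
 * encode a word offset.
 */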
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
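
/*
 * The condition checks below exploit the PSTATE flag layout (N = bit 31,
 * Z = bit 30, C = bit 29, V = bit 28): shifting pstate by a fixed amount
 * lines one flag up with another, so compound conditions such as HI
 * (C && !Z) or GE (N == V) reduce to a single bit test.
 */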
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);		/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);		/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};

static bool range_of_ones(u64 val)
{
	/* Doesn't handle full ones or full zeroes */
	u64 sval = val >> __ffs64(val);

	/* One of Sean Eron Anderson's bithack tricks */
	return ((sval + 1) & (sval)) == 0;
}
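
/*
 * AArch64 "bitmask immediates" (as used by the logical instructions with
 * immediate) are encoded as N:immr:imms, describing a run of ones of a
 * given length, replicated across the register at a power-of-two element
 * size and rotated right by immr. This routine inverts that scheme: it
 * finds the element size, the number of ones, and the rotation for the
 * given value, or fails with AARCH64_BREAK_FAULT if it is not encodable.
 */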
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous range of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}

u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}