// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_ZUCCHINI_ARM_UTILS_H_
#define COMPONENTS_ZUCCHINI_ARM_UTILS_H_

#include <stddef.h>
#include <stdint.h>

#include "base/check_op.h"
#include "components/zucchini/address_translator.h"
#include "components/zucchini/buffer_view.h"

namespace zucchini {

// References:
// * AArch32 (32-bit ARM, AKA ARM32):
//   https://static.docs.arm.com/ddi0406/c/DDI0406C_C_arm_architecture_reference_manual.pdf
// * AArch64 (64-bit ARM):
//   https://static.docs.arm.com/ddi0487/da/DDI0487D_a_armv8_arm.pdf
//
// Definitions (used in Zucchini):
// * |instr_rva|: Instruction RVA: The RVA where an instruction is located. In
//   ARM mode and for AArch64 this is 4-byte aligned; in THUMB2 mode this is
//   2-byte aligned.
// * |code|: Instruction code: ARM instruction code as seen in manual. In ARM
//   mode and for AArch64, this is a 32-bit int. In THUMB2 mode, this may be a
//   16-bit or 32-bit int.
// * |disp|: Displacement: For branch instructions (e.g.: B, BL, BLX, and
//   conditional varieties) this is the value encoded in instruction bytes.
// * PC: Program Counter: In ARM mode this is |instr_rva + 8|; in THUMB2 mode
//   this is |instr_rva + 4|; for AArch64 this is |instr_rva|.
// * |target_rva|: Target RVA: The RVA targeted by a branch instruction.
//
// These are related by:
//   |code| = Fetch(image data at offset(|instr_rva|)).
//   |disp| = Decode(|code|).
//   PC = |instr_rva| + {8 in ARM mode, 4 in THUMB2 mode, 0 for AArch64}.
//   |target_rva| = PC + |disp| (with adjustments; see "BLX complication"
//     below).
//
// Example 1 (ARM mode):
//   00103050: 00 01 02 EA    B       00183458
//   |instr_rva| = 0x00103050 (4-byte aligned).
//   |code| = 0xEA020100 (little endian fetched from data).
//   |disp| = 0x00080400 (decoded from |code| with A24 -> B encoding A1).
//   PC = |instr_rva| + 8 = 0x00103058 (ARM mode).
//   |target_rva| = PC + |disp| = 0x00183458.
//
// Example 2 (THUMB2 mode):
//   001030A2: 00 F0 01 FA    BL      001034A8
//   |instr_rva| = 0x001030A2 (2-byte aligned).
//   |code| = 0xF000FA01 (special THUMB2 mode data fetch).
//   |disp| = 0x00000402 (decoded from |code| with T24 -> BL encoding T1).
//   PC = |instr_rva| + 4 = 0x001030A6 (THUMB2 mode).
//   |target_rva| = PC + |disp| = 0x001034A8.
//
// Example 3 (AArch64):
//   0000000000305070: 03 02 01 14    B       000000000034587C
//   |instr_rva| = 0x00305070 (4-byte aligned, assumed to fit in 32-bit).
//   |code| = 0x14010203 (little endian fetched from data).
//   |disp| = 0x0004080C (decoded from |code| with Immd -> B).
//   PC = |instr_rva| = 0x00305070 (AArch64).
//   |target_rva| = PC + |disp| = 0x0034587C.
//
// BLX complication: BLX transits between ARM mode and THUMB2 mode, and
// branches to an address. Therefore |instr_rva| must align by the "old" mode,
// and |target_rva| must align by the "new" mode. In particular:
// * BLX encoding A2 (ARM -> THUMB2): |instr_rva| is 4-byte aligned with
//   PC = |instr_rva| + 8; |target_rva| is 2-byte aligned, and so |disp| is
//   2-byte aligned.
// * BLX encoding T2 (THUMB2 -> ARM): |instr_rva| is 2-byte aligned with
//   PC = |instr_rva| + 4; |target_rva| is 4-byte aligned. Complication: BLX
//   encoding T2 stores a bit |H| that corresponds to "2" in binary, but |H|
//   must be set to 0. Thus the encoded value is effectively 4-byte aligned. So
//   when computing |target_rva| by adding PC (2-byte aligned) to the stored
//   value (4-byte aligned), the result must be rounded down to the nearest
//   4-byte aligned address.
//
// The last situation creates ambiguity in how |disp| is defined! Alternatives:
// (1) |disp| := |target_rva| - PC: So |code| <-> |disp| for BLX encoding T2
//     requires |instr_rva| % 4 to be determined, and adjustments made.
// (2) |disp| := Value stored in |code|: So |disp| <-> |target_rva| for BLX
//     encoding T2 requires adjustment: |disp| -> |target_rva| needs to round
//     down, whereas |target_rva| -> |disp| needs to round up.
// We adopt (2) to simplify |code| <-> |disp|, since that conversion is the one
// that gets used.
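//
// Worked illustration of convention (2) (hypothetical values, in the style of
// the examples above): a BLX encoding T2 at
//   |instr_rva| = 0x001000A2 (2-byte aligned), PC = |instr_rva| + 4 = 0x001000A6,
//   stored value |disp| = 0x00000400 (4-byte aligned by construction).
//   |disp| -> |target_rva|: round (PC + |disp|) = 0x001004A6 down to 4 bytes,
//     giving 0x001004A4.
//   |target_rva| -> |disp|: 0x001004A4 - PC = 0x3FE, rounded up to 4 bytes,
//     giving 0x00000400.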

using arm_disp_t = int32_t;

// Alignment requirement for |target_rva|, useful for |disp| <-> |target_rva|
// (also requires |instr_rva|). Alignment is determined by parsing |code| in
// *Decode() functions. kArmAlignFail is also defined to indicate parse failure.
// Alignments can be 2 or 4, and these values double as the enum values, so
// |x % align| can be computed as |x & (align - 1)|.
enum ArmAlign : uint32_t {
  kArmAlignFail = 0U,
  kArmAlign2 = 2U,
  kArmAlign4 = 4U,
};
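
// For example (an illustrative note, not from the original header): with
// kArmAlign4, |x % 4| == |x & 3|, so 0x00103057 % 4 == 3, and aligning
// 0x00103057 down to 4 bytes gives 0x00103054.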

// Traits for rel32 address types (technically rel64 for AArch64 -- but we
// assume values are small enough), which form collections of strategies to
// process each rel32 address type.
template <typename ENUM_ADDR_TYPE,
          ENUM_ADDR_TYPE ADDR_TYPE,
          typename CODE_T,
          CODE_T (*FETCH)(ConstBufferView, offset_t),
          void (*STORE)(MutableBufferView, offset_t, CODE_T),
          ArmAlign (*DECODE)(CODE_T, arm_disp_t*),
          bool (*ENCODE)(arm_disp_t, CODE_T*),
          bool (*READ)(rva_t, CODE_T, rva_t*),
          bool (*WRITE)(rva_t, rva_t, CODE_T*)>
class ArmAddrTraits {
 public:
  static constexpr ENUM_ADDR_TYPE addr_type = ADDR_TYPE;
  using code_t = CODE_T;
  static constexpr CODE_T (*Fetch)(ConstBufferView, offset_t) = FETCH;
  static constexpr void (*Store)(MutableBufferView, offset_t, CODE_T) = STORE;
  static constexpr ArmAlign (*Decode)(CODE_T, arm_disp_t*) = DECODE;
  static constexpr bool (*Encode)(arm_disp_t, CODE_T*) = ENCODE;
  static constexpr bool (*Read)(rva_t, CODE_T, rva_t*) = READ;
  static constexpr bool (*Write)(rva_t, rva_t, CODE_T*) = WRITE;
};
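
// Illustrative sketch (not part of the original interface): one way a caller
// might consume an ArmAddrTraits bundle generically. |TRAITS| stands for any
// of the AddrTraits_* aliases defined below; the helper's name is
// hypothetical.
template <class TRAITS>
bool ReadTargetRvaForIllustration(ConstBufferView view,
                                  offset_t offset,
                                  rva_t instr_rva,
                                  rva_t* target_rva) {
  // Fetch the raw instruction code, then translate it to a target RVA using
  // the strategy functions bundled in |TRAITS|.
  typename TRAITS::code_t code = TRAITS::Fetch(view, offset);
  return TRAITS::Read(instr_rva, code, target_rva);
}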

// Given |code16|, the leading 16 bits of a THUMB2 instruction, returns 2 if
// it's from a 16-bit THUMB2 instruction, or 4 if it's from a 32-bit THUMB2
// instruction.
inline int GetThumb2InstructionSize(uint16_t code16) {
  return ((code16 & 0xF000) == 0xF000 || (code16 & 0xF800) == 0xE800) ? 4 : 2;
}
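
// For example (illustrative): 0xF000, the first halfword of a 32-bit BL
// encoding T1, yields 4; 0xE005, a 16-bit B encoding T2, yields 2.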

// A translator for ARM mode and THUMB2 mode with static functions that
// translate among |code|, |disp|, and |target_rva|.
class AArch32Rel32Translator {
 public:
  // Rel32 address types enumeration.
  enum AddrType : uint8_t {
    ADDR_NONE = 0xFF,
    // Naming: Here "A24" represents ARM mode instructions where |code|
    // dedicates 24 bits (including sign bit) to specify |disp|. Similarly,
    // "T8" represents THUMB2 mode instructions with 8 bits for |disp|.
    // Currently only {A24, T8, T11, T20, T24} are defined. These are not to
    // be confused with "B encoding A1", "B encoding T3", etc., which are
    // specific encoding schemes given by the manual for the "B" (or other)
    // instructions (only {A1, A2, T1, T2, T3, T4} are seen).
    ADDR_A24 = 0,
    ADDR_T8,
    ADDR_T11,
    ADDR_T20,
    ADDR_T24,
    NUM_ADDR_TYPE
  };

  AArch32Rel32Translator();
  AArch32Rel32Translator(const AArch32Rel32Translator&) = delete;
  const AArch32Rel32Translator& operator=(const AArch32Rel32Translator&) =
      delete;

  // Fetches the 32-bit ARM instruction |code| at |view[idx]|.
  static inline uint32_t FetchArmCode32(ConstBufferView view, offset_t idx) {
    return view.read<uint32_t>(idx);
  }

  // Fetches the 16-bit THUMB2 instruction |code| at |view[idx]|.
  static inline uint16_t FetchThumb2Code16(ConstBufferView view,
                                           offset_t idx) {
    return view.read<uint16_t>(idx);
  }

  // Fetches the 32-bit THUMB2 instruction |code| at |view[idx]|.
  static inline uint32_t FetchThumb2Code32(ConstBufferView view,
                                           offset_t idx) {
    // By convention, 32-bit THUMB2 instructions are written (as seen later)
    // as:
    //   [byte3, byte2, byte1, byte0].
    // However (assuming little-endian ARM) the in-memory representation is
    //   [byte2, byte3, byte0, byte1].
    return (static_cast<uint32_t>(view.read<uint16_t>(idx)) << 16) |
           view.read<uint16_t>(idx + 2);
  }

  // Stores the 32-bit ARM instruction |code| to |mutable_view[idx]|.
  static inline void StoreArmCode32(MutableBufferView mutable_view,
                                    offset_t idx,
                                    uint32_t code) {
    mutable_view.write<uint32_t>(idx, code);
  }

  // Stores the 16-bit THUMB2 instruction |code| to |mutable_view[idx]|.
  static inline void StoreThumb2Code16(MutableBufferView mutable_view,
                                       offset_t idx,
                                       uint16_t code) {
    mutable_view.write<uint16_t>(idx, code);
  }

  // Stores the 32-bit THUMB2 instruction |code| to |mutable_view[idx]|.
  static inline void StoreThumb2Code32(MutableBufferView mutable_view,
                                       offset_t idx,
                                       uint32_t code) {
    mutable_view.write<uint16_t>(idx, static_cast<uint16_t>(code >> 16));
    mutable_view.write<uint16_t>(idx + 2,
                                 static_cast<uint16_t>(code & 0xFFFF));
  }
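
  // For illustration (hypothetical buffer contents, matching Example 2
  // above): if |view[idx..idx+3]| holds the bytes {0x00, 0xF0, 0x01, 0xFA},
  // then FetchThumb2Code32(view, idx) returns 0xF000FA01, and
  // StoreThumb2Code32(mutable_view, idx, 0xF000FA01) writes the same four
  // bytes back.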

  // The following functions convert |code| (16-bit or 32-bit) from/to |disp|
  // or |target_rva|, for specific branch instruction types.
  // Read*() and Write*() functions convert between |code| and |target_rva|.
  // * Decode*() determines whether |code16/code32| is a branch instruction
  //   of a specific type. If so, then extracts |*disp| and returns the
  //   required ArmAlign. Otherwise returns kArmAlignFail.
  // * Encode*() determines whether |*code16/*code32| is a branch instruction
  //   of a specific type, and whether it can accommodate |disp|. If so, then
  //   re-encodes |*code16/*code32| using |disp|, and returns true. Otherwise
  //   returns false.
  // * Read*() is similar to Decode*(), but on success, extracts |*target_rva|
  //   using |instr_rva| as aid, performs the proper alignment, and returns
  //   true. Otherwise returns false.
  // * Write*() is similar to Encode*(), but takes |target_rva| instead, and
  //   uses |instr_rva| as aid.
  static ArmAlign DecodeA24(uint32_t code32, arm_disp_t* disp);
  static bool EncodeA24(arm_disp_t disp, uint32_t* code32);
  // TODO(huangs): Refactor the Read*() functions: These are identical
  // except for Decode*() and Get*TargetRvaFromDisp().
  static bool ReadA24(rva_t instr_rva, uint32_t code32, rva_t* target_rva);
  static bool WriteA24(rva_t instr_rva, rva_t target_rva, uint32_t* code32);

  static ArmAlign DecodeT8(uint16_t code16, arm_disp_t* disp);
  static bool EncodeT8(arm_disp_t disp, uint16_t* code16);
  static bool ReadT8(rva_t instr_rva, uint16_t code16, rva_t* target_rva);
  static bool WriteT8(rva_t instr_rva, rva_t target_rva, uint16_t* code16);

  static ArmAlign DecodeT11(uint16_t code16, arm_disp_t* disp);
  static bool EncodeT11(arm_disp_t disp, uint16_t* code16);
  static bool ReadT11(rva_t instr_rva, uint16_t code16, rva_t* target_rva);
  static bool WriteT11(rva_t instr_rva, rva_t target_rva, uint16_t* code16);

  static ArmAlign DecodeT20(uint32_t code32, arm_disp_t* disp);
  static bool EncodeT20(arm_disp_t disp, uint32_t* code32);
  static bool ReadT20(rva_t instr_rva, uint32_t code32, rva_t* target_rva);
  static bool WriteT20(rva_t instr_rva, rva_t target_rva, uint32_t* code32);

  static ArmAlign DecodeT24(uint32_t code32, arm_disp_t* disp);
  static bool EncodeT24(arm_disp_t disp, uint32_t* code32);
  static bool ReadT24(rva_t instr_rva, uint32_t code32, rva_t* target_rva);
  static bool WriteT24(rva_t instr_rva, rva_t target_rva, uint32_t* code32);
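
  // For illustration (values taken from Example 2 above; BL uses the T24
  // type): DecodeT24(0xF000FA01, &disp) extracts disp == 0x00000402, and
  // ReadT24(0x001030A2, 0xF000FA01, &target_rva) yields
  // target_rva == 0x001034A8. EncodeT24() and WriteT24() perform the reverse
  // mappings.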

  // Computes |target_rva| from |instr_rva| and |disp| in ARM mode.
  static inline rva_t GetArmTargetRvaFromDisp(rva_t instr_rva,
                                              arm_disp_t disp,
                                              ArmAlign align) {
    rva_t ret = static_cast<rva_t>(instr_rva + 8 + disp);
    // Align down.
    DCHECK_NE(align, kArmAlignFail);
    return ret - (ret & static_cast<rva_t>(align - 1));
  }

  // Computes |target_rva| from |instr_rva| and |disp| in THUMB2 mode.
  static inline rva_t GetThumb2TargetRvaFromDisp(rva_t instr_rva,
                                                 arm_disp_t disp,
                                                 ArmAlign align) {
    rva_t ret = static_cast<rva_t>(instr_rva + 4 + disp);
    // Align down.
    DCHECK_NE(align, kArmAlignFail);
    return ret - (ret & static_cast<rva_t>(align - 1));
  }

  // Computes |disp| from |instr_rva| and |target_rva| in ARM mode.
  static inline arm_disp_t GetArmDispFromTargetRva(rva_t instr_rva,
                                                   rva_t target_rva,
                                                   ArmAlign align) {
    // Assumes that |instr_rva + 8| does not overflow.
    arm_disp_t ret = static_cast<arm_disp_t>(target_rva) -
                     static_cast<arm_disp_t>(instr_rva + 8);
    // Align up.
    DCHECK_NE(align, kArmAlignFail);
    return ret + ((-ret) & static_cast<arm_disp_t>(align - 1));
  }

  // Computes |disp| from |instr_rva| and |target_rva| in THUMB2 mode.
  static inline arm_disp_t GetThumb2DispFromTargetRva(rva_t instr_rva,
                                                      rva_t target_rva,
                                                      ArmAlign align) {
    // Assumes that |instr_rva + 4| does not overflow.
    arm_disp_t ret = static_cast<arm_disp_t>(target_rva) -
                     static_cast<arm_disp_t>(instr_rva + 4);
    // Align up.
    DCHECK_NE(align, kArmAlignFail);
    return ret + ((-ret) & static_cast<arm_disp_t>(align - 1));
  }
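
  // For illustration (hypothetical values; the BLX encoding T2 case with
  // kArmAlign4):
  //   GetThumb2TargetRvaFromDisp(0x001000A2, 0x400, kArmAlign4)
  //     = (0x001000A2 + 4 + 0x400) aligned down to 4 = 0x001004A4.
  //   GetThumb2DispFromTargetRva(0x001000A2, 0x001004A4, kArmAlign4)
  //     = (0x001004A4 - 0x001000A6) aligned up to 4 = 0x400.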

  // Strategies to process each rel32 address type.
  using AddrTraits_A24 = ArmAddrTraits<AddrType,
                                       ADDR_A24,
                                       uint32_t,
                                       FetchArmCode32,
                                       StoreArmCode32,
                                       DecodeA24,
                                       EncodeA24,
                                       ReadA24,
                                       WriteA24>;
  using AddrTraits_T8 = ArmAddrTraits<AddrType,
                                      ADDR_T8,
                                      uint16_t,
                                      FetchThumb2Code16,
                                      StoreThumb2Code16,
                                      DecodeT8,
                                      EncodeT8,
                                      ReadT8,
                                      WriteT8>;
  using AddrTraits_T11 = ArmAddrTraits<AddrType,
                                       ADDR_T11,
                                       uint16_t,
                                       FetchThumb2Code16,
                                       StoreThumb2Code16,
                                       DecodeT11,
                                       EncodeT11,
                                       ReadT11,
                                       WriteT11>;
  using AddrTraits_T20 = ArmAddrTraits<AddrType,
                                       ADDR_T20,
                                       uint32_t,
                                       FetchThumb2Code32,
                                       StoreThumb2Code32,
                                       DecodeT20,
                                       EncodeT20,
                                       ReadT20,
                                       WriteT20>;
  using AddrTraits_T24 = ArmAddrTraits<AddrType,
                                       ADDR_T24,
                                       uint32_t,
                                       FetchThumb2Code32,
                                       StoreThumb2Code32,
                                       DecodeT24,
                                       EncodeT24,
                                       ReadT24,
                                       WriteT24>;
};

// Translator for AArch64, which is simpler than 32-bit ARM. Although pointers
// are 64-bit, displacements are within 32-bit.
class AArch64Rel32Translator {
 public:
  // Rel64 address types enumeration.
  enum AddrType : uint8_t {
    ADDR_NONE = 0xFF,
    ADDR_IMMD14 = 0,
    ADDR_IMMD19,
    ADDR_IMMD26,
    NUM_ADDR_TYPE
  };

  // Although an RVA for a 64-bit architecture can be 64 bits long, we make
  // the bold assumption that for ELF images the RVA stays nicely within
  // 32 bits!
  AArch64Rel32Translator();
  AArch64Rel32Translator(const AArch64Rel32Translator&) = delete;
  const AArch64Rel32Translator& operator=(const AArch64Rel32Translator&) =
      delete;

  static inline uint32_t FetchCode32(ConstBufferView view, offset_t idx) {
    return view.read<uint32_t>(idx);
  }

  static inline void StoreCode32(MutableBufferView mutable_view,
                                 offset_t idx,
                                 uint32_t code) {
    mutable_view.write<uint32_t>(idx, code);
  }

  // Conversion functions for |code32| from/to |disp| or |target_rva|, similar
  // to the counterparts in AArch32Rel32Translator.
  static ArmAlign DecodeImmd14(uint32_t code32, arm_disp_t* disp);
  static bool EncodeImmd14(arm_disp_t disp, uint32_t* code32);
  // TODO(huangs): Refactor the Read*() functions: These are identical
  // except for Decode*().
  static bool ReadImmd14(rva_t instr_rva, uint32_t code32, rva_t* target_rva);
  static bool WriteImmd14(rva_t instr_rva, rva_t target_rva, uint32_t* code32);

  static ArmAlign DecodeImmd19(uint32_t code32, arm_disp_t* disp);
  static bool EncodeImmd19(arm_disp_t disp, uint32_t* code32);
  static bool ReadImmd19(rva_t instr_rva, uint32_t code32, rva_t* target_rva);
  static bool WriteImmd19(rva_t instr_rva, rva_t target_rva, uint32_t* code32);

  static ArmAlign DecodeImmd26(uint32_t code32, arm_disp_t* disp);
  static bool EncodeImmd26(arm_disp_t disp, uint32_t* code32);
  static bool ReadImmd26(rva_t instr_rva, uint32_t code32, rva_t* target_rva);
  static bool WriteImmd26(rva_t instr_rva, rva_t target_rva, uint32_t* code32);

  // Computes |target_rva| from |instr_rva| and |disp|.
  static inline rva_t GetTargetRvaFromDisp(rva_t instr_rva, arm_disp_t disp) {
    return static_cast<rva_t>(instr_rva + disp);
  }

  // Computes |disp| from |instr_rva| and |target_rva|.
  static inline arm_disp_t GetDispFromTargetRva(rva_t instr_rva,
                                                rva_t target_rva) {
    return static_cast<arm_disp_t>(target_rva - instr_rva);
  }
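
  // For illustration (values from Example 3 above):
  //   GetTargetRvaFromDisp(0x00305070, 0x0004080C) == 0x0034587C, and
  //   GetDispFromTargetRva(0x00305070, 0x0034587C) == 0x0004080C.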

  // Strategies to process each rel32 address type.
  using AddrTraits_Immd14 = ArmAddrTraits<AddrType,
                                          ADDR_IMMD14,
                                          uint32_t,
                                          FetchCode32,
                                          StoreCode32,
                                          DecodeImmd14,
                                          EncodeImmd14,
                                          ReadImmd14,
                                          WriteImmd14>;
  using AddrTraits_Immd19 = ArmAddrTraits<AddrType,
                                          ADDR_IMMD19,
                                          uint32_t,
                                          FetchCode32,
                                          StoreCode32,
                                          DecodeImmd19,
                                          EncodeImmd19,
                                          ReadImmd19,
                                          WriteImmd19>;
  using AddrTraits_Immd26 = ArmAddrTraits<AddrType,
                                          ADDR_IMMD26,
                                          uint32_t,
                                          FetchCode32,
                                          StoreCode32,
                                          DecodeImmd26,
                                          EncodeImmd26,
                                          ReadImmd26,
                                          WriteImmd26>;
};

}  // namespace zucchini

#endif  // COMPONENTS_ZUCCHINI_ARM_UTILS_H_