SkVM.cpp

/*
 * Copyright 2019 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/private/SkSpinlock.h"
#include "include/private/SkTFitsIn.h"
#include "include/private/SkThreadID.h"
#include "include/private/SkVx.h"
#include "src/core/SkCpu.h"
#include "src/core/SkVM.h"
#include <string.h>
#if defined(SKVM_JIT)
    #include <sys/mman.h>
#endif

namespace skvm {

    Program Builder::done(const char* debug_name) {
        // Basic liveness analysis:
        // an instruction is live until all live instructions that need its input have retired.
        for (Val id = fProgram.size(); id --> 0; ) {
            Instruction& inst = fProgram[id];

            // All side-effect-only instructions (stores) are live.
            if (inst.op <= Op::store32) {
                inst.death = id;
            }
            // The arguments of a live instruction must live until at least that instruction.
            if (inst.death != 0) {
                // Notice how we're walking backward, storing the latest instruction in death.
                if (inst.x != NA && fProgram[inst.x].death == 0) { fProgram[inst.x].death = id; }
                if (inst.y != NA && fProgram[inst.y].death == 0) { fProgram[inst.y].death = id; }
                if (inst.z != NA && fProgram[inst.z].death == 0) { fProgram[inst.z].death = id; }
            }
        }

        // Mark which values don't depend on the loop and can be hoisted.
        for (Val id = 0; id < (Val)fProgram.size(); id++) {
            Builder::Instruction& inst = fProgram[id];

            // Loads and stores cannot be hoisted out of the loop.
            if (inst.op <= Op::load32) {
                inst.hoist = false;
            }

            // If any of an instruction's inputs can't be hoisted, it can't be hoisted itself.
            if (inst.hoist) {
                if (inst.x != NA) { inst.hoist &= fProgram[inst.x].hoist; }
                if (inst.y != NA) { inst.hoist &= fProgram[inst.y].hoist; }
                if (inst.z != NA) { inst.hoist &= fProgram[inst.z].hoist; }
            }
        }

        return {fProgram, fStrides, debug_name};
    }
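
    // A small worked example of the two analyses above (an illustrative sketch):
    //
    //     skvm::Builder b;
    //     skvm::Arg ptr = b.arg(4);             // arg() records a stride, pushes no instruction
    //     skvm::I32 v = b.load32(ptr);          // id 0: load32  -- touches memory, hoist = false
    //     skvm::I32 k = b.splat(42);            // id 1: splat   -- loop-invariant, stays hoistable
    //     b.store32(ptr, b.add(v, k));          // id 2: add_i32 -- depends on id 0, hoist = false
    //                                           // id 3: store32 -- a store, so death = 3 (live)
    //
    // Walking backward from the store, death propagates to ids 2, 1, and 0, so all
    // four instructions are live, but only the splat can be hoisted out of the loop.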

    static bool operator==(const Builder::Instruction& a, const Builder::Instruction& b) {
        return a.op    == b.op
            && a.x     == b.x
            && a.y     == b.y
            && a.z     == b.z
            && a.imm   == b.imm
            && a.death == b.death
            && a.hoist == b.hoist;
    }

    // Most instructions produce a value and return it by ID,
    // the value-producing instruction's own index in the program vector.
    Val Builder::push(Op op, Val x, Val y, Val z, int imm) {
        Instruction inst{op, x, y, z, imm, /*death=*/0, /*hoist=*/true};

        // Basic common subexpression elimination:
        // if we've already seen this exact Instruction, use it instead of creating a new one.
        if (Val* id = fIndex.find(inst)) {
            return *id;
        }

        Val id = static_cast<Val>(fProgram.size());
        fProgram.push_back(inst);
        fIndex.set(inst, id);
        return id;
    }
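
    // For example (a sketch of the CSE behavior above):
    //
    //     skvm::I32 a = b.add(x, y),   // pushes a fresh add_i32 instruction
    //               c = b.add(x, y);   // structurally identical, so a.id == c.id
    //
    // Deduplication is purely structural: the op, arguments, and immediate must
    // all match exactly for an existing Val to be reused.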

    bool Builder::isZero(Val id) const {
        return fProgram[id].op  == Op::splat
            && fProgram[id].imm == 0;
    }

    Arg Builder::arg(int stride) {
        int ix = (int)fStrides.size();
        fStrides.push_back(stride);
        return {ix};
    }

    void Builder::store8 (Arg ptr, I32 val) { (void)this->push(Op::store8 , val.id,NA,NA, ptr.ix); }
    void Builder::store32(Arg ptr, I32 val) { (void)this->push(Op::store32, val.id,NA,NA, ptr.ix); }

    I32 Builder::load8 (Arg ptr) { return {this->push(Op::load8 , NA,NA,NA, ptr.ix) }; }
    I32 Builder::load32(Arg ptr) { return {this->push(Op::load32, NA,NA,NA, ptr.ix) }; }

    // The two splat() functions are just syntax sugar over splatting a 4-byte bit pattern.
    I32 Builder::splat(int   n) { return {this->push(Op::splat, NA,NA,NA, n) }; }
    F32 Builder::splat(float f) {
        int bits;
        memcpy(&bits, &f, 4);
        return {this->push(Op::splat, NA,NA,NA, bits)};
    }
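
    // E.g. splat(1.0f) stores the IEEE-754 bit pattern 0x3f800000 in imm, so it
    // deduplicates against any other splat of that same pattern; memcpy is the
    // strict-aliasing-safe way to reinterpret the float's bits as an int.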

    F32 Builder::add(F32 x, F32 y       ) { return {this->push(Op::add_f32, x.id, y.id)}; }
    F32 Builder::sub(F32 x, F32 y       ) { return {this->push(Op::sub_f32, x.id, y.id)}; }
    F32 Builder::mul(F32 x, F32 y       ) { return {this->push(Op::mul_f32, x.id, y.id)}; }
    F32 Builder::div(F32 x, F32 y       ) { return {this->push(Op::div_f32, x.id, y.id)}; }
    F32 Builder::mad(F32 x, F32 y, F32 z) {
        if (this->isZero(z.id)) {
            return this->mul(x,y);
        }
        return {this->push(Op::mad_f32, x.id, y.id, z.id)};
    }

    I32 Builder::add(I32 x, I32 y) { return {this->push(Op::add_i32, x.id, y.id)}; }
    I32 Builder::sub(I32 x, I32 y) { return {this->push(Op::sub_i32, x.id, y.id)}; }
    I32 Builder::mul(I32 x, I32 y) { return {this->push(Op::mul_i32, x.id, y.id)}; }

    I32 Builder::sub_16x2(I32 x, I32 y) { return {this->push(Op::sub_i16x2, x.id, y.id)}; }
    I32 Builder::mul_16x2(I32 x, I32 y) { return {this->push(Op::mul_i16x2, x.id, y.id)}; }
    I32 Builder::shr_16x2(I32 x, int bits) { return {this->push(Op::shr_i16x2, x.id,NA,NA, bits)}; }

    I32 Builder::bit_and  (I32 x, I32 y) { return {this->push(Op::bit_and  , x.id, y.id)}; }
    I32 Builder::bit_or   (I32 x, I32 y) { return {this->push(Op::bit_or   , x.id, y.id)}; }
    I32 Builder::bit_xor  (I32 x, I32 y) { return {this->push(Op::bit_xor  , x.id, y.id)}; }
    I32 Builder::bit_clear(I32 x, I32 y) { return {this->push(Op::bit_clear, x.id, y.id)}; }

    I32 Builder::shl(I32 x, int bits) { return {this->push(Op::shl, x.id,NA,NA, bits)}; }
    I32 Builder::shr(I32 x, int bits) { return {this->push(Op::shr, x.id,NA,NA, bits)}; }
    I32 Builder::sra(I32 x, int bits) { return {this->push(Op::sra, x.id,NA,NA, bits)}; }

    I32 Builder::extract(I32 x, int bits, I32 y) {
        return {this->push(Op::extract, x.id,y.id,NA, bits)};
    }

    I32 Builder::pack(I32 x, I32 y, int bits) {
        return {this->push(Op::pack, x.id,y.id,NA, bits)};
    }

    I32 Builder::bytes(I32 x, int control) {
        return {this->push(Op::bytes, x.id,NA,NA, control)};
    }

    F32 Builder::to_f32(I32 x) { return {this->push(Op::to_f32, x.id)}; }
    I32 Builder::to_i32(F32 x) { return {this->push(Op::to_i32, x.id)}; }

    // ~~~~ Program::eval() and co. ~~~~ //

    // Handy references for x86-64 instruction encoding:
    // https://wiki.osdev.org/X86-64_Instruction_Encoding
    // https://www-user.tu-chemnitz.de/~heha/viewchm.php/hs/x86.chm/x64.htm
    // https://www-user.tu-chemnitz.de/~heha/viewchm.php/hs/x86.chm/x86.htm
    // http://ref.x86asm.net/coder64.html

    // Used for ModRM / immediate instruction encoding.
    static uint8_t _233(int a, int b, int c) {
        return (a & 3) << 6
             | (b & 7) << 3
             | (c & 7) << 0;
    }

    // ModRM byte encodes the arguments of an opcode.
    enum class Mod { Indirect, OneByteImm, FourByteImm, Direct };
    static uint8_t mod_rm(Mod mod, int reg, int rm) {
        return _233((int)mod, reg, rm);
    }
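
    // A worked example: mod_rm(Mod::Direct, /*reg=*/1, /*rm=*/2) packs
    // 0b11'001'010 == 0xca, the ModRM byte for a register-direct operation
    // with reg field 1 and r/m field 2.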

#if 0
    // SIB byte encodes a memory address, base + (index * scale).
    enum class Scale { One, Two, Four, Eight };
    static uint8_t sib(Scale scale, int index, int base) {
        return _233((int)scale, index, base);
    }
#endif

    // The REX prefix is used to extend most old 32-bit instructions to 64-bit.
    static uint8_t rex(bool W,   // If set, operation is 64-bit, otherwise default, usually 32-bit.
                       bool R,   // Extra top bit to select ModRM reg, registers 8-15.
                       bool X,   // Extra top bit for SIB index register.
                       bool B) { // Extra top bit for SIB base or ModRM rm register.
        return 0b01000000   // Fixed 0100 for top four bits.
             | (W << 3)
             | (R << 2)
             | (X << 1)
             | (B << 0);
    }
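
    // E.g. rex(1,0,0,0) == 0x48, the familiar REX.W prefix that widens an
    // instruction like mov or add to 64-bit operands.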

    // The VEX prefix extends SSE operations to AVX.  Used generally, even with XMM.
    struct VEX {
        int     len;
        uint8_t bytes[3];
    };

    static VEX vex(bool  WE,   // Like REX W for int operations, or opcode extension for float?
                   bool   R,   // Same as REX R.  Pass high bit of dst register, dst>>3.
                   bool   X,   // Same as REX X.
                   bool   B,   // Same as REX B.  Pass y>>3 for 3-arg ops, x>>3 for 2-arg.
                   int  map,   // SSE opcode map selector: 0x0f, 0x380f, 0x3a0f.
                   int vvvv,   // 4-bit second operand register.  Pass our x for 3-arg ops.
                   bool   L,   // Set for 256-bit ymm operations, off for 128-bit xmm.
                   int   pp) { // SSE mandatory prefix: 0x66, 0xf3, 0xf2, else none.

        // Pack x86 opcode map selector to 5-bit VEX encoding.
        map = [map]{
            switch (map) {
                case   0x0f: return 0b00001;
                case 0x380f: return 0b00010;
                case 0x3a0f: return 0b00011;
                // Several more cases only used by XOP / TBM.
            }
            SkASSERT(false);
            return 0b00000;
        }();

        // Pack mandatory SSE opcode prefix byte to 2-bit VEX encoding.
        pp = [pp]{
            switch (pp) {
                case 0x66: return 0b01;
                case 0xf3: return 0b10;
                case 0xf2: return 0b11;
            }
            return 0b00;
        }();

        VEX vex = {0, {0,0,0}};
        if (X == 0 && B == 0 && WE == 0 && map == 0b00001) {
            // With these conditions met, we can optionally compress VEX to 2-byte.
            vex.len = 2;
            vex.bytes[0] = 0xc5;
            vex.bytes[1] = (pp      &  3) << 0
                         | (L       &  1) << 2
                         | (~vvvv   & 15) << 3
                         | (~(int)R &  1) << 7;
        } else {
            // We could use this 3-byte VEX prefix all the time if we like.
            vex.len = 3;
            vex.bytes[0] = 0xc4;
            vex.bytes[1] = (map     & 31) << 0
                         | (~(int)B &  1) << 5
                         | (~(int)X &  1) << 6
                         | (~(int)R &  1) << 7;
            vex.bytes[2] = (pp      &  3) << 0
                         | (L       &  1) << 2
                         | (~vvvv   & 15) << 3
                         | (WE      &  1) << 7;
        }
        return vex;
    }
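
    // A worked example: vpaddd ymm0, ymm1, ymm2 below calls
    // vex(0, 0,0,0, 0x0f, /*vvvv=*/1, /*L=*/1, 0x66).  All the 2-byte conditions
    // hold, so byte 1 packs pp=0b01, L=1, ~vvvv=0b1110, ~R=1 into 0b1'1110'1'01 == 0xf5,
    // and the full instruction is C5 F5 FE C2 (opcode 0xfe, ModRM 0xc2 for dst=0, y=2).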

    Assembler::Assembler(void* buf) : fCode((uint8_t*)buf), fCurr(fCode), fSize(0) {}

    size_t Assembler::size() const { return fSize; }

    void Assembler::bytes(const void* p, int n) {
        if (fCurr) {
            memcpy(fCurr, p, n);
            fCurr += n;
        }
        // We count bytes even when fCode is null; that dry-run pass is how
        // setupJIT() below learns how big a buffer to allocate.
        fSize += n;
    }

    void Assembler::byte(uint8_t b) { this->bytes(&b, 1); }
    void Assembler::word(uint32_t w) { this->bytes(&w, 4); }

    void Assembler::align(int mod) {
        while (this->size() % mod) {
            this->byte(0x00);
        }
    }

    void Assembler::vzeroupper() {
        this->byte(0xc5);
        this->byte(0xf8);
        this->byte(0x77);
    }
    void Assembler::ret() { this->byte(0xc3); }

    // Common instruction building for 64-bit opcodes with an immediate argument.
    void Assembler::op(int opcode, int opcode_ext, GP64 dst, int imm) {
        opcode |= 0b0000'0001;   // low bit set for 64-bit operands
        opcode |= 0b1000'0000;   // top bit set for instructions with any immediate

        int imm_bytes = 4;
        if (SkTFitsIn<int8_t>(imm)) {
            imm_bytes = 1;
            opcode |= 0b0000'0010;  // second bit set for 8-bit immediate, else 32-bit.
        }

        this->byte(rex(1,0,0,dst>>3));
        this->byte(opcode);
        this->byte(mod_rm(Mod::Direct, opcode_ext, dst&7));
        this->bytes(&imm, imm_bytes);
    }

    void Assembler::add(GP64 dst, int imm) { this->op(0,0b000, dst,imm); }
    void Assembler::sub(GP64 dst, int imm) { this->op(0,0b101, dst,imm); }
    void Assembler::cmp(GP64 reg, int imm) { this->op(0,0b111, reg,imm); }

    void Assembler::op(int prefix, int map, int opcode, Ymm dst, Ymm x, Ymm y, bool W/*=false*/) {
        VEX v = vex(W, dst>>3, 0, y>>3,
                    map, x, 1/*ymm, not xmm*/, prefix);
        this->bytes(v.bytes, v.len);
        this->byte(opcode);
        this->byte(mod_rm(Mod::Direct, dst&7, y&7));
    }

    void Assembler::vpaddd (Ymm dst, Ymm x, Ymm y) { this->op(0x66,  0x0f,0xfe, dst,x,y); }
    void Assembler::vpsubd (Ymm dst, Ymm x, Ymm y) { this->op(0x66,  0x0f,0xfa, dst,x,y); }
    void Assembler::vpmulld(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x380f,0x40, dst,x,y); }

    void Assembler::vpsubw (Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0xf9, dst,x,y); }
    void Assembler::vpmullw(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0xd5, dst,x,y); }

    void Assembler::vpand (Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0xdb, dst,x,y); }
    void Assembler::vpor  (Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0xeb, dst,x,y); }
    void Assembler::vpxor (Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0xef, dst,x,y); }
    void Assembler::vpandn(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x0f,0xdf, dst,x,y); }

    void Assembler::vaddps(Ymm dst, Ymm x, Ymm y) { this->op(0,0x0f,0x58, dst,x,y); }
    void Assembler::vsubps(Ymm dst, Ymm x, Ymm y) { this->op(0,0x0f,0x5c, dst,x,y); }
    void Assembler::vmulps(Ymm dst, Ymm x, Ymm y) { this->op(0,0x0f,0x59, dst,x,y); }
    void Assembler::vdivps(Ymm dst, Ymm x, Ymm y) { this->op(0,0x0f,0x5e, dst,x,y); }

    void Assembler::vfmadd132ps(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x380f,0x98, dst,x,y); }
    void Assembler::vfmadd213ps(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x380f,0xa8, dst,x,y); }
    void Assembler::vfmadd231ps(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x380f,0xb8, dst,x,y); }

    void Assembler::vpackusdw(Ymm dst, Ymm x, Ymm y) { this->op(0x66,0x380f,0x2b, dst,x,y); }
    void Assembler::vpackuswb(Ymm dst, Ymm x, Ymm y) { this->op(0x66,  0x0f,0x67, dst,x,y); }

    // dst = x op /opcode_ext imm
    void Assembler::op(int prefix, int map, int opcode, int opcode_ext, Ymm dst, Ymm x, int imm) {
        // This is a little weird, but if we pass the opcode_ext as if it were the dst register,
        // the dst register as if x, and the x register as if y, all the bits end up where we want.
        this->op(prefix, map, opcode, (Ymm)opcode_ext,dst,x);
        this->byte(imm);
    }

    void Assembler::vpslld(Ymm dst, Ymm x, int imm) { this->op(0x66,0x0f,0x72,6, dst,x,imm); }
    void Assembler::vpsrld(Ymm dst, Ymm x, int imm) { this->op(0x66,0x0f,0x72,2, dst,x,imm); }
    void Assembler::vpsrad(Ymm dst, Ymm x, int imm) { this->op(0x66,0x0f,0x72,4, dst,x,imm); }

    void Assembler::vpsrlw(Ymm dst, Ymm x, int imm) { this->op(0x66,0x0f,0x71,2, dst,x,imm); }

    void Assembler::vpermq(Ymm dst, Ymm x, int imm) {
        // A bit unusual among the instructions we use, this is a 64-bit operation, so we set W.
        bool W = true;
        this->op(0x66,0x3a0f,0x00, dst,x,W);
        this->byte(imm);
    }

    void Assembler::vcvtdq2ps (Ymm dst, Ymm x) { this->op(0,   0x0f,0x5b, dst,x); }
    void Assembler::vcvttps2dq(Ymm dst, Ymm x) { this->op(0xf3,0x0f,0x5b, dst,x); }

    Assembler::Label Assembler::here() {
        return { (int)this->size(), Label::None, {} };
    }

    int Assembler::disp19(Label* l) {
        SkASSERT(l->kind == Label::None ||
                 l->kind == Label::ARMDisp19);
        l->kind = Label::ARMDisp19;
        l->references.push_back(here().offset);
        // ARM 19-bit instruction count, from the beginning of this instruction.
        return (l->offset - here().offset) / 4;
    }

    int Assembler::disp32(Label* l) {
        SkASSERT(l->kind == Label::None ||
                 l->kind == Label::X86Disp32);
        l->kind = Label::X86Disp32;
        l->references.push_back(here().offset);
        // x86 32-bit byte count, from the end of this instruction.
        return l->offset - (here().offset + 4);
    }

    void Assembler::op(int prefix, int map, int opcode, Ymm dst, Ymm x, Label* l) {
        // IP-relative addressing uses Mod::Indirect with the R/M encoded as-if rbp or r13.
        const int rip = rbp;

        VEX v = vex(0, dst>>3, 0, rip>>3,
                    map, x, /*ymm?*/1, prefix);
        this->bytes(v.bytes, v.len);
        this->byte(opcode);
        this->byte(mod_rm(Mod::Indirect, dst&7, rip&7));
        this->word(this->disp32(l));
    }

    void Assembler::vbroadcastss(Ymm dst, Label* l)        { this->op(0x66,0x380f,0x18, dst,l);   }
    void Assembler::vpshufb     (Ymm dst, Ymm x, Label* l) { this->op(0x66,0x380f,0x00, dst,x,l); }

    void Assembler::jump(uint8_t condition, Label* l) {
        // These conditional jumps can be either 2 bytes (short) or 6 bytes (near):
        //    7?     one-byte-disp
        //    0F 8?  four-byte-disp
        // We always use the near displacement to make updating labels simpler (no resizing).
        this->byte(0x0f);
        this->byte(condition);
        this->word(this->disp32(l));
    }
    void Assembler::je (Label* l) { this->jump(0x84, l); }
    void Assembler::jne(Label* l) { this->jump(0x85, l); }
    void Assembler::jl (Label* l) { this->jump(0x8c, l); }

    void Assembler::jmp(Label* l) {
        // Like above in jump(), we could use an 8-bit displacement here, but always use 32-bit.
        this->byte(0xe9);
        this->word(this->disp32(l));
    }

    void Assembler::load_store(int prefix, int map, int opcode, Ymm ymm, GP64 ptr) {
        VEX v = vex(0, ymm>>3, 0, ptr>>3,
                    map, 0, /*ymm?*/1, prefix);
        this->bytes(v.bytes, v.len);
        this->byte(opcode);
        this->byte(mod_rm(Mod::Indirect, ymm&7, ptr&7));
    }

    void Assembler::vmovups  (Ymm dst, GP64 src) { this->load_store(0   ,  0x0f,0x10, dst,src); }
    void Assembler::vpmovzxbd(Ymm dst, GP64 src) { this->load_store(0x66,0x380f,0x31, dst,src); }

    void Assembler::vmovups  (GP64 dst, Ymm src) { this->load_store(0   ,  0x0f,0x11, src,dst); }

    void Assembler::vmovq(GP64 dst, Xmm src) {
        int prefix = 0x66,
            map    = 0x0f,
            opcode = 0xd6;
        VEX v = vex(0, src>>3, 0, dst>>3,
                    map, 0, /*ymm?*/0, prefix);
        this->bytes(v.bytes, v.len);
        this->byte(opcode);
        this->byte(mod_rm(Mod::Indirect, src&7, dst&7));
    }

    void Assembler::vmovd(GP64 dst, Xmm src) {
        int prefix = 0x66,
            map    = 0x0f,
            opcode = 0x7e;
        VEX v = vex(0, src>>3, 0, dst>>3,
                    map, 0, /*ymm?*/0, prefix);
        this->bytes(v.bytes, v.len);
        this->byte(opcode);
        this->byte(mod_rm(Mod::Indirect, src&7, dst&7));
    }

    void Assembler::vmovd_direct(GP64 dst, Xmm src) {
        int prefix = 0x66,
            map    = 0x0f,
            opcode = 0x7e;
        VEX v = vex(0, src>>3, 0, dst>>3,
                    map, 0, /*ymm?*/0, prefix);
        this->bytes(v.bytes, v.len);
        this->byte(opcode);
        this->byte(mod_rm(Mod::Direct, src&7, dst&7));
    }

    void Assembler::vmovd(Xmm dst, GP64 src) {
        int prefix = 0x66,
            map    = 0x0f,
            opcode = 0x6e;
        VEX v = vex(0, dst>>3, 0, src>>3,
                    map, 0, /*ymm?*/0, prefix);
        this->bytes(v.bytes, v.len);
        this->byte(opcode);
        this->byte(mod_rm(Mod::Indirect, dst&7, src&7));
    }

    void Assembler::vmovd_direct(Xmm dst, GP64 src) {
        int prefix = 0x66,
            map    = 0x0f,
            opcode = 0x6e;
        VEX v = vex(0, dst>>3, 0, src>>3,
                    map, 0, /*ymm?*/0, prefix);
        this->bytes(v.bytes, v.len);
        this->byte(opcode);
        this->byte(mod_rm(Mod::Direct, dst&7, src&7));
    }

    void Assembler::movzbl(GP64 dst, GP64 src) {
        if ((dst>>3) || (src>>3)) {
            this->byte(rex(0,dst>>3,0,src>>3));
        }
        this->byte(0x0f);
        this->byte(0xb6);
        this->byte(mod_rm(Mod::Indirect, dst&7, src&7));
    }

    void Assembler::movb(GP64 dst, GP64 src) {
        if ((dst>>3) || (src>>3)) {
            this->byte(rex(0,src>>3,0,dst>>3));
        }
        this->byte(0x88);
        this->byte(mod_rm(Mod::Indirect, src&7, dst&7));
    }

    void Assembler::vpinsrb(Xmm dst, Xmm src, GP64 ptr, int imm) {
        int prefix = 0x66,
            map    = 0x3a0f,
            opcode = 0x20;
        VEX v = vex(0, dst>>3, 0, ptr>>3,
                    map, src, /*ymm?*/0, prefix);
        this->bytes(v.bytes, v.len);
        this->byte(opcode);
        this->byte(mod_rm(Mod::Indirect, dst&7, ptr&7));
        this->byte(imm);
    }

    void Assembler::vpextrb(GP64 ptr, Xmm src, int imm) {
        int prefix = 0x66,
            map    = 0x3a0f,
            opcode = 0x14;
        VEX v = vex(0, src>>3, 0, ptr>>3,
                    map, 0, /*ymm?*/0, prefix);
        this->bytes(v.bytes, v.len);
        this->byte(opcode);
        this->byte(mod_rm(Mod::Indirect, src&7, ptr&7));
        this->byte(imm);
    }

    // https://static.docs.arm.com/ddi0596/a/DDI_0596_ARM_a64_instruction_set_architecture.pdf
    static int operator"" _mask(unsigned long long bits) { return (1<<(int)bits)-1; }

    void Assembler::op(uint32_t hi, V m, uint32_t lo, V n, V d) {
        this->word( (hi & 11_mask) << 21
                  | (m  &  5_mask) << 16
                  | (lo &  6_mask) << 10
                  | (n  &  5_mask) <<  5
                  | (d  &  5_mask) <<  0);
    }
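
    // A worked example: add4s(v0, v1, v2) below packs hi=0b01001110101, m=2,
    // lo=0b100001, n=1, d=0 into the word 0x4ea28420, the A64 encoding of
    // `add v0.4s, v1.4s, v2.4s`.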

    void Assembler::and16b(V d, V n, V m) { this->op(0b0'1'0'01110'00'1, m, 0b00011'1, n, d); }
    void Assembler::orr16b(V d, V n, V m) { this->op(0b0'1'0'01110'10'1, m, 0b00011'1, n, d); }
    void Assembler::eor16b(V d, V n, V m) { this->op(0b0'1'1'01110'00'1, m, 0b00011'1, n, d); }
    void Assembler::bic16b(V d, V n, V m) { this->op(0b0'1'0'01110'01'1, m, 0b00011'1, n, d); }

    void Assembler::add4s(V d, V n, V m) { this->op(0b0'1'0'01110'10'1, m, 0b10000'1, n, d); }
    void Assembler::sub4s(V d, V n, V m) { this->op(0b0'1'1'01110'10'1, m, 0b10000'1, n, d); }
    void Assembler::mul4s(V d, V n, V m) { this->op(0b0'1'0'01110'10'1, m, 0b10011'1, n, d); }

    void Assembler::sub8h(V d, V n, V m) { this->op(0b0'1'1'01110'01'1, m, 0b10000'1, n, d); }
    void Assembler::mul8h(V d, V n, V m) { this->op(0b0'1'0'01110'01'1, m, 0b10011'1, n, d); }

    void Assembler::fadd4s(V d, V n, V m) { this->op(0b0'1'0'01110'0'0'1, m, 0b11010'1, n, d); }
    void Assembler::fsub4s(V d, V n, V m) { this->op(0b0'1'0'01110'1'0'1, m, 0b11010'1, n, d); }
    void Assembler::fmul4s(V d, V n, V m) { this->op(0b0'1'1'01110'0'0'1, m, 0b11011'1, n, d); }
    void Assembler::fdiv4s(V d, V n, V m) { this->op(0b0'1'1'01110'0'0'1, m, 0b11111'1, n, d); }

    void Assembler::fmla4s(V d, V n, V m) { this->op(0b0'1'0'01110'0'0'1, m, 0b11001'1, n, d); }

    void Assembler::tbl(V d, V n, V m) { this->op(0b0'1'001110'00'0, m, 0b0'00'0'00, n, d); }

    void Assembler::op(uint32_t op22, int imm, V n, V d) {
        this->word( (op22 & 22_mask) << 10
                  | imm << 16   // imm is embedded inside op, bit size depends on op
                  | (n & 5_mask) <<  5
                  | (d & 5_mask) <<  0);
    }

    void Assembler::sli4s(V d, V n, int imm) {
        this->op(0b0'1'1'011110'0100'000'01010'1,    ( imm&31), n, d);
    }
    void Assembler::shl4s(V d, V n, int imm) {
        this->op(0b0'1'0'011110'0100'000'01010'1,    ( imm&31), n, d);
    }
    void Assembler::sshr4s(V d, V n, int imm) {
        this->op(0b0'1'0'011110'0100'000'00'0'0'0'1, (-imm&31), n, d);
    }
    void Assembler::ushr4s(V d, V n, int imm) {
        this->op(0b0'1'1'011110'0100'000'00'0'0'0'1, (-imm&31), n, d);
    }
    void Assembler::ushr8h(V d, V n, int imm) {
        this->op(0b0'1'1'011110'0010'000'00'0'0'0'1, (-imm&15), n, d);
    }

    void Assembler::scvtf4s (V d, V n) { this->op(0b0'1'0'01110'0'0'10000'11101'10,  n,d); }
    void Assembler::fcvtzs4s(V d, V n) { this->op(0b0'1'0'01110'1'0'10000'1101'1'10, n,d); }

    void Assembler::xtns2h(V d, V n) { this->op(0b0'0'0'01110'01'10000'10010'10, n,d); }
    void Assembler::xtnh2b(V d, V n) { this->op(0b0'0'0'01110'00'10000'10010'10, n,d); }

    void Assembler::uxtlb2h(V d, V n) { this->op(0b0'0'1'011110'0001'000'10100'1, n,d); }
    void Assembler::uxtlh2s(V d, V n) { this->op(0b0'0'1'011110'0010'000'10100'1, n,d); }

    void Assembler::ret(X n) {
        this->word(0b1101011'0'0'10'11111'0000'0'0 << 10
                  | (n & 5_mask) << 5);
    }
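
    // E.g. ret(x30) assembles to 0xd65f03c0, the canonical A64 `ret` returning
    // through the link register.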

    void Assembler::add(X d, X n, int imm12) {
        this->word(0b1'0'0'10001'00   << 22
                  | (imm12 & 12_mask) << 10
                  | (n     &  5_mask) <<  5
                  | (d     &  5_mask) <<  0);
    }
    void Assembler::sub(X d, X n, int imm12) {
        this->word( 0b1'1'0'10001'00  << 22
                  | (imm12 & 12_mask) << 10
                  | (n     &  5_mask) <<  5
                  | (d     &  5_mask) <<  0);
    }
    void Assembler::subs(X d, X n, int imm12) {
        this->word( 0b1'1'1'10001'00  << 22
                  | (imm12 & 12_mask) << 10
                  | (n     &  5_mask) <<  5
                  | (d     &  5_mask) <<  0);
    }

    void Assembler::b(Condition cond, Label* l) {
        const int imm19 = this->disp19(l);
        this->word( 0b0101010'0           << 24
                  | (imm19     & 19_mask) <<  5
                  | ((int)cond &  4_mask) <<  0);
    }
    void Assembler::cbz(X t, Label* l) {
        const int imm19 = this->disp19(l);
        this->word( 0b1'011010'0      << 24
                  | (imm19 & 19_mask) <<  5
                  | (t     &  5_mask) <<  0);
    }
    void Assembler::cbnz(X t, Label* l) {
        const int imm19 = this->disp19(l);
        this->word( 0b1'011010'1      << 24
                  | (imm19 & 19_mask) <<  5
                  | (t     &  5_mask) <<  0);
    }

    void Assembler::ldrq(V dst, X src) { this->op(0b00'111'1'01'11'000000000000, src, dst); }
    void Assembler::ldrs(V dst, X src) { this->op(0b10'111'1'01'01'000000000000, src, dst); }
    void Assembler::ldrb(V dst, X src) { this->op(0b00'111'1'01'01'000000000000, src, dst); }

    void Assembler::strq(V src, X dst) { this->op(0b00'111'1'01'10'000000000000, dst, src); }
    void Assembler::strs(V src, X dst) { this->op(0b10'111'1'01'00'000000000000, dst, src); }
    void Assembler::strb(V src, X dst) { this->op(0b00'111'1'01'00'000000000000, dst, src); }

    void Assembler::ldrq(V dst, Label* l) {
        const int imm19 = this->disp19(l);
        this->word( 0b10'011'1'00     << 24
                  | (imm19 & 19_mask) <<  5
                  | (dst   &  5_mask) <<  0);
    }

    void Assembler::label(Label* l) {
        if (fCode) {
            // The instructions all currently point to l->offset.
            // We'll want to add a delta to point them to here().
            int delta = here().offset - l->offset;
            l->offset = here().offset;

            if (l->kind == Label::ARMDisp19) {
                for (int ref : l->references) {
                    // ref points to a 32-bit instruction with 19-bit displacement in instructions.
                    uint32_t inst;
                    memcpy(&inst, fCode + ref, 4);

                    // [ 8 bits to preserve ] [ 19 bit signed displacement ] [ 5 bits to preserve ]
                    int disp = (int)(inst << 8) >> 13;

                    disp += delta/4;  // delta is in bytes, we want instructions.

                    // Put it all back together, preserving the high 8 bits and low 5.
                    inst = ((disp << 5) &  (19_mask << 5))
                         | ((inst     ) & ~(19_mask << 5));

                    memcpy(fCode + ref, &inst, 4);
                }
            }

            if (l->kind == Label::X86Disp32) {
                for (int ref : l->references) {
                    // ref points to a 32-bit displacement in bytes.
                    int disp;
                    memcpy(&disp, fCode + ref, 4);

                    disp += delta;

                    memcpy(fCode + ref, &disp, 4);
                }
            }
        }
    }
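
    // A concrete x86 fixup example (a sketch): a jl() starting at offset 10 calls
    // disp32(l) while l->offset is still 0, recording reference 12 and writing
    // 0 - (12+4) = -16.  When label(l) later runs at offset 40, delta = 40, so the
    // stored displacement becomes -16 + 40 = 24: exactly the label minus the end
    // of the 6-byte jump.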

    void Program::eval(int n, void* args[]) const {
        const int nargs = (int)fStrides.size();

        if (fJITBuf) {
            switch (nargs) {
                case 0: return ((void(*)(int              ))fJITBuf)(n                  );
                case 1: return ((void(*)(int, void*       ))fJITBuf)(n, args[0]         );
                case 2: return ((void(*)(int, void*, void*))fJITBuf)(n, args[0], args[1]);
                default: SkUNREACHABLE;  // TODO
            }
        }

        // We'll operate in SIMT style, knocking off K-size chunks from n while possible.
        constexpr int K = 16;
        using I32 = skvx::Vec<K, int>;
        using F32 = skvx::Vec<K, float>;
        using U32 = skvx::Vec<K, uint32_t>;
        using U8  = skvx::Vec<K, uint8_t>;

        using I16x2 = skvx::Vec<2*K, int16_t>;
        using U16x2 = skvx::Vec<2*K, uint16_t>;

        union Slot {
            I32 i32;
            U32 u32;
            F32 f32;
        };

        Slot                    few_regs[16];
        std::unique_ptr<char[]> many_regs;

        Slot* regs = few_regs;

        if (fRegs > (int)SK_ARRAY_COUNT(few_regs)) {
            // Annoyingly we can't trust that malloc() or new will work with Slot because
            // the skvx::Vec types may have alignment greater than what they provide.
            // We'll overallocate one extra register so we can align manually.
            many_regs.reset(new char[ sizeof(Slot) * (fRegs + 1) ]);

            uintptr_t addr = (uintptr_t)many_regs.get();
            addr += alignof(Slot) -
                     (addr & (alignof(Slot) - 1));
            SkASSERT((addr & (alignof(Slot) - 1)) == 0);
            regs = (Slot*)addr;
        }

        auto r = [&](Reg id) -> Slot& {
            SkASSERT(0 <= id && id < fRegs);
            return regs[id];
        };
        auto arg = [&](int ix) {
            SkASSERT(0 <= ix && ix < nargs);
            return args[ix];
        };

        // Step each argument pointer ahead by its stride a number of times.
        auto step_args = [&](int times) {
            // Looping by marching pointers until *arg == nullptr helps the
            // compiler to keep this loop scalar.  Otherwise it'd create a
            // rather large and useless autovectorized version.
            void**     arg    = args;
            const int* stride = fStrides.data();
            for (; *arg; arg++, stride++) {
                *arg = (void*)( (char*)*arg + times * *stride );
            }
            SkASSERT(arg == args + nargs);
        };

        int start = 0,
            stride;
        for ( ; n > 0; start = fLoop, n -= stride, step_args(stride)) {
            stride = n >= K ? K : 1;

            for (int i = start; i < (int)fInstructions.size(); i++) {
                Instruction inst = fInstructions[i];

                // d = op(x,y,z/imm)
                Reg   d = inst.d,
                      x = inst.x,
                      y = inst.y,
                      z = inst.z;
                int imm = inst.imm;

                // Ops that interact with memory need to know whether we're stride=1 or K,
                // but all non-memory ops can run the same code no matter the stride.
                switch (2*(int)inst.op + (stride == K ? 1 : 0)) {
                #define STRIDE_1(op) case 2*(int)op
                #define STRIDE_K(op) case 2*(int)op + 1
                    STRIDE_1(Op::store8 ): memcpy(arg(imm), &r(x).i32, 1); break;
                    STRIDE_1(Op::store32): memcpy(arg(imm), &r(x).i32, 4); break;

                    STRIDE_K(Op::store8 ): skvx::cast<uint8_t>(r(x).i32).store(arg(imm)); break;
                    STRIDE_K(Op::store32):                    (r(x).i32).store(arg(imm)); break;

                    STRIDE_1(Op::load8 ): r(d).i32 = 0; memcpy(&r(d).i32, arg(imm), 1); break;
                    STRIDE_1(Op::load32): r(d).i32 = 0; memcpy(&r(d).i32, arg(imm), 4); break;

                    STRIDE_K(Op::load8 ): r(d).i32 = skvx::cast<int>(U8 ::Load(arg(imm))); break;
                    STRIDE_K(Op::load32): r(d).i32 =                 I32::Load(arg(imm));  break;
                #undef STRIDE_1
                #undef STRIDE_K

                    // Ops that don't interact with memory should never care about the stride.
                #define CASE(op) case 2*(int)op: /*fallthrough*/ case 2*(int)op+1
                    CASE(Op::splat): r(d).i32 = imm; break;

                    CASE(Op::add_f32): r(d).f32 = r(x).f32 + r(y).f32; break;
                    CASE(Op::sub_f32): r(d).f32 = r(x).f32 - r(y).f32; break;
                    CASE(Op::mul_f32): r(d).f32 = r(x).f32 * r(y).f32; break;
                    CASE(Op::div_f32): r(d).f32 = r(x).f32 / r(y).f32; break;
                    CASE(Op::mad_f32): r(d).f32 = r(x).f32 * r(y).f32 + r(z).f32; break;

                    CASE(Op::add_i32): r(d).i32 = r(x).i32 + r(y).i32; break;
                    CASE(Op::sub_i32): r(d).i32 = r(x).i32 - r(y).i32; break;
                    CASE(Op::mul_i32): r(d).i32 = r(x).i32 * r(y).i32; break;

                    CASE(Op::sub_i16x2):
                        r(d).i32 = skvx::bit_pun<I32>(skvx::bit_pun<I16x2>(r(x).i32) -
                                                      skvx::bit_pun<I16x2>(r(y).i32) ); break;
                    CASE(Op::mul_i16x2):
                        r(d).i32 = skvx::bit_pun<I32>(skvx::bit_pun<I16x2>(r(x).i32) *
                                                      skvx::bit_pun<I16x2>(r(y).i32) ); break;
                    CASE(Op::shr_i16x2):
                        r(d).i32 = skvx::bit_pun<I32>(skvx::bit_pun<U16x2>(r(x).i32) >> imm);
                        break;

                    CASE(Op::bit_and):   r(d).i32 = r(x).i32 &  r(y).i32; break;
                    CASE(Op::bit_or ):   r(d).i32 = r(x).i32 |  r(y).i32; break;
                    CASE(Op::bit_xor):   r(d).i32 = r(x).i32 ^  r(y).i32; break;
                    CASE(Op::bit_clear): r(d).i32 = r(x).i32 & ~r(y).i32; break;

                    CASE(Op::shl): r(d).i32 = r(x).i32 << imm; break;
                    CASE(Op::sra): r(d).i32 = r(x).i32 >> imm; break;
                    CASE(Op::shr): r(d).u32 = r(x).u32 >> imm; break;

                    CASE(Op::extract): r(d).u32 = (r(x).u32 >> imm) & r(y).u32; break;
                    CASE(Op::pack):    r(d).u32 = r(x).u32 | (r(y).u32 << imm); break;

                    CASE(Op::bytes): {
                        const U32 table[] = {
                            0,
                            (r(x).u32      ) & 0xff,
                            (r(x).u32 >>  8) & 0xff,
                            (r(x).u32 >> 16) & 0xff,
                            (r(x).u32 >> 24) & 0xff,
                        };
                        r(d).u32 = table[(imm >>  0) & 0xf] <<  0
                                 | table[(imm >>  4) & 0xf] <<  8
                                 | table[(imm >>  8) & 0xf] << 16
                                 | table[(imm >> 12) & 0xf] << 24;
                    } break;

                    CASE(Op::to_f32): r(d).f32 = skvx::cast<float>(r(x).i32); break;
                    CASE(Op::to_i32): r(d).i32 = skvx::cast<int>  (r(x).f32); break;
                #undef CASE
                }
            }
        }
    }
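
    // Typical use of the interpreter path (an illustrative sketch, not from this file):
    //
    //     skvm::Builder b;
    //     {
    //         skvm::Arg ptr = b.arg(4);   // stride 4: one int per lane
    //         b.store32(ptr, b.add(b.load32(ptr), b.splat(1)));
    //     }
    //     skvm::Program program = b.done("add_one");
    //
    //     int buf[64] = {0};
    //     void* args[] = { buf, nullptr };   // step_args() above walks args until nullptr.
    //     program.eval(64, args);            // buf[i] += 1, in chunks of K and then singly.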

    void Program::dropJIT() {
    #if defined(SKVM_JIT)
        if (fJITBuf) {
            munmap(fJITBuf, fJITSize);
        }
    #else
        SkASSERT(fJITBuf == nullptr);
    #endif

        fJITBuf  = nullptr;
        fJITSize = 0;
    }

    Program::~Program() { this->dropJIT(); }

    Program::Program(Program&& other) {
        fInstructions = std::move(other.fInstructions);
        fRegs         = other.fRegs;
        fLoop         = other.fLoop;
        fStrides      = std::move(other.fStrides);

        std::swap(fJITBuf , other.fJITBuf);
        std::swap(fJITSize, other.fJITSize);
    }

    Program& Program::operator=(Program&& other) {
        fInstructions = std::move(other.fInstructions);
        fRegs         = other.fRegs;
        fLoop         = other.fLoop;
        fStrides      = std::move(other.fStrides);

        std::swap(fJITBuf , other.fJITBuf);
        std::swap(fJITSize, other.fJITSize);
        return *this;
    }

    Program::Program(const std::vector<Builder::Instruction>& instructions,
                     const std::vector<int>& strides,
                     const char* debug_name) : fStrides(strides) {
        this->setupInterpreter(instructions);
    #if defined(SKVM_JIT)
        this->setupJIT(instructions, debug_name);
    #endif
    }

    // Translate Builder::Instructions to Program::Instructions used by the interpreter.
    void Program::setupInterpreter(const std::vector<Builder::Instruction>& instructions) {
        // Register each instruction is assigned to.
        std::vector<Reg> reg(instructions.size());

        // This next bit is a bit more complicated than strictly necessary;
        // we could just assign every live instruction to its own register.
        //
        // But recycling registers in the loop is fairly cheap, and good practice
        // for the JITs where minimizing register pressure really is important.
        // (Also helps minimize unit test diffs.)

        // Assign a register to each live hoisted instruction.  We'll never recycle these.
        fRegs = 0;
        int live_instructions = 0;
        for (Val id = 0; id < (Val)instructions.size(); id++) {
            const Builder::Instruction& inst = instructions[id];
            if (inst.death != 0 && inst.hoist) {
                live_instructions++;
                reg[id] = fRegs++;
            }
        }

        // Assign registers to each live loop instruction, recycling them when we can.
        std::vector<Reg> avail;
        for (Val id = 0; id < (Val)instructions.size(); id++) {
            const Builder::Instruction& inst = instructions[id];
            if (inst.death != 0 && !inst.hoist) {
                live_instructions++;

                // If an instruction's input is no longer live, we can recycle its register.
                auto maybe_recycle_register = [&](Val input) {
                    // If this is a real input and its lifetime ends at this instruction,
                    // we can recycle the register it's occupying.
                    if (input != NA
                            && !instructions[input].hoist
                            && instructions[input].death == id) {
                        avail.push_back(reg[input]);
                    }
                };

                // Take care not to recycle the same register twice.
                if (true                                ) { maybe_recycle_register(inst.x); }
                if (inst.y != inst.x                    ) { maybe_recycle_register(inst.y); }
                if (inst.z != inst.x && inst.z != inst.y) { maybe_recycle_register(inst.z); }

                // Allocate a register if we have to, preferring to reuse anything available.
                if (avail.empty()) {
                    reg[id] = fRegs++;
                } else {
                    reg[id] = avail.back();
                    avail.pop_back();
                }
            }
        }

        // Translate Builder::Instructions to Program::Instructions by mapping values to
        // registers.  This will be two passes, first hoisted instructions, then inside the loop.

        // The loop begins at the fLoop'th Instruction.
        fLoop = 0;
        fInstructions.reserve(live_instructions);

        // Add a dummy mapping for the N/A sentinel Val to any arbitrary register
        // so lookups don't have to know which arguments are used by which Ops.
        auto lookup_register = [&](Val id) {
            return id == NA ? (Reg)0
                            : reg[id];
        };

        auto push_instruction = [&](Val id, const Builder::Instruction& inst) {
            Program::Instruction pinst{
                inst.op,
                lookup_register(id),
                lookup_register(inst.x),
                lookup_register(inst.y),
               {lookup_register(inst.z)},
            };
            if (inst.z == NA) { pinst.imm = inst.imm; }
            fInstructions.push_back(pinst);
        };

        for (Val id = 0; id < (Val)instructions.size(); id++) {
            const Builder::Instruction& inst = instructions[id];
            if (inst.death != 0 && inst.hoist) {
                push_instruction(id, inst);
                fLoop++;
            }
        }
        for (Val id = 0; id < (Val)instructions.size(); id++) {
            const Builder::Instruction& inst = instructions[id];
            if (inst.death != 0 && !inst.hoist) {
                push_instruction(id, inst);
            }
        }
    }
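
    // A quick register-recycling example (a sketch): given a loop body like
    //     v0 = load32 arg0       (dies at v1)
    //     v1 = add_i32 v0, k     (dies at v2)
    //     v2 = store32 arg0, v1
    // v0's register is pushed onto `avail` exactly as v1 is being assigned,
    // so v1 (and then v2) reuse it rather than growing fRegs.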

#if defined(SKVM_JIT)

    // Just so happens that we can translate the immediate control for our bytes() op
    // to a single 128-bit mask that can be consumed by both AVX2 vpshufb and NEON tbl!
    static void bytes_control(int imm, int mask[4]) {
        auto nibble_to_vpshufb = [](uint8_t n) -> uint8_t {
            // 0 -> 0xff,    Fill with zero.
            // 1 -> 0x00,    Select byte 0.
            // 2 -> 0x01,         "      1.
            // 3 -> 0x02,         "      2.
            // 4 -> 0x03,         "      3.
            return n - 1;
        };
        uint8_t control[] = {
            nibble_to_vpshufb( (imm >>  0) & 0xf ),
            nibble_to_vpshufb( (imm >>  4) & 0xf ),
            nibble_to_vpshufb( (imm >>  8) & 0xf ),
            nibble_to_vpshufb( (imm >> 12) & 0xf ),
        };
        for (int i = 0; i < 4; i++) {
            mask[i] = (int)control[0] <<  0
                    | (int)control[1] <<  8
                    | (int)control[2] << 16
                    | (int)control[3] << 24;

            // Update each byte that refers to a byte index by 4 to
            // point into the next 32-bit lane, but leave any 0xff
            // that fills with zero alone.
            control[0] += control[0] == 0xff ? 0 : 4;
            control[1] += control[1] == 0xff ? 0 : 4;
            control[2] += control[2] == 0xff ? 0 : 4;
            control[3] += control[3] == 0xff ? 0 : 4;
        }
    }
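
    // E.g. the identity control 0x4321 (each output byte selects source bytes
    // 1,2,3,4, i.e. indices 0..3) expands to
    //     mask[] = { 0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c },
    // each 32-bit lane shuffling its own four bytes in place.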

    bool Program::jit(const std::vector<Builder::Instruction>& instructions,
                      const bool hoist,
                      Assembler* a) const {
        using A = Assembler;
    #if defined(__x86_64__)
        if (!SkCpu::Supports(SkCpu::HSW)) {
            return false;
        }
        A::GP64 N = A::rdi,
            arg[]  = { A::rsi, A::rdx, A::rcx, A::r8, A::r9 };

        // All 16 ymm registers are available to use.
        using Reg = A::Ymm;
        uint32_t avail = 0xffff;
    #elif defined(__aarch64__)
        A::X N = A::x0,
            arg[] = { A::x1, A::x2, A::x3, A::x4, A::x5, A::x6, A::x7 };

        // We can use v0-v7 and v16-v31 freely; we'd need to preserve v8-v15.
        using Reg = A::V;
        uint32_t avail = 0xffff00ff;
    #endif

        if (SK_ARRAY_COUNT(arg) < fStrides.size()) {
            return false;
        }

        auto hoisted = [&](Val id) { return hoist && instructions[id].hoist; };

        std::vector<Reg> r(instructions.size());

        struct LabelAndReg {
            A::Label label;
            Reg      reg;
        };
        SkTHashMap<int, LabelAndReg> splats,
                                     bytes_masks;

        auto warmup = [&](Val id) {
            const Builder::Instruction& inst = instructions[id];
            if (inst.death == 0) {
                return true;
            }

            Op  op  = inst.op;
            int imm = inst.imm;

            switch (op) {
                default: break;

                case Op::splat: if (!splats.find(imm)) { splats.set(imm, {}); }
                                break;

                case Op::bytes: if (!bytes_masks.find(imm)) {
                                    bytes_masks.set(imm, {});
                                    if (hoist) {
                                        // vpshufb can always work with the mask from memory,
                                        // but it helps to hoist the mask to a register for tbl.
                                    #if defined(__aarch64__)
                                        LabelAndReg* entry = bytes_masks.find(imm);
                                        if (int found = __builtin_ffs(avail)) {
                                            entry->reg = (Reg)(found-1);
                                            avail ^= 1 << entry->reg;
                                            a->ldrq(entry->reg, &entry->label);
                                        } else {
                                            return false;
                                        }
                                    #endif
                                    }
                                }
                                break;
            }
            return true;
        };

        auto emit = [&](Val id, bool scalar) {
            const Builder::Instruction& inst = instructions[id];

            // No need to emit dead code instructions that produce values that are never used.
            if (inst.death == 0) {
                return true;
            }

            Op  op  = inst.op;
            Val x   = inst.x,
                y   = inst.y,
                z   = inst.z;
            int imm = inst.imm;

            // Most (but not all) ops create an output value and need a register to hold it, dst.
            // We track each instruction's dst in r[] so we can thread it through as an input
            // to any future instructions needing that value.
            //
            // And some ops may need a temporary scratch register, tmp.  Some need both tmp and dst.
            //
            // tmp and dst are very similar and can and will often be assigned the same register,
            // but tmp may never alias any of the instruction's inputs, while dst may when this
            // instruction consumes that input, i.e. if the input reaches its end of life here.
            //
            // We'll assign both registers lazily to keep register pressure as low as possible.
            bool tmp_is_set = false,
                 dst_is_set = false;
            Reg tmp_reg = (Reg)0;  // This initial value won't matter... anything legal is fine.

            bool ok = true;  // Set to false if we need to assign a register and none's available.

            // First lock in how to choose tmp if we need to based on the registers
            // available before this instruction, not including any of its input registers.
            auto tmp = [&,avail/*important, closing over avail's current value*/]{
                if (!tmp_is_set) {
                    tmp_is_set = true;
                    if (int found = __builtin_ffs(avail)) {
                        // This is a scratch register just for this op,
                        // so we leave it marked available for future ops.
                        tmp_reg = (Reg)(found - 1);
                    } else {
                        // We needed a tmp register but couldn't find one available. :'(
                        // This will cause emit() to return false, in turn causing jit() to fail.
                        ok = false;
                    }
                }
                return tmp_reg;
            };

            // Now make available any registers that are consumed by this instruction.
            // (The register pool we can pick dst from is >= the pool for tmp, adding any of these.)
            if (x != NA && instructions[x].death == id && !hoisted(x)) { avail |= 1 << r[x]; }
            if (y != NA && instructions[y].death == id && !hoisted(y)) { avail |= 1 << r[y]; }
            if (z != NA && instructions[z].death == id && !hoisted(z)) { avail |= 1 << r[z]; }

            // set_dst() and dst() will work read/write with this perhaps-just-updated avail.
            // Some ops may decide dst on their own to best fit the instruction (see Op::mad_f32).
            auto set_dst = [&](Reg reg){
                SkASSERT(dst_is_set == false);
                dst_is_set = true;

                SkASSERT(avail & (1<<reg));
                avail ^= 1<<reg;

                r[id] = reg;
            };

            // Thanks to AVX and NEON's 3-argument instruction sets,
            // most ops can use any register as dst.
            auto dst = [&]{
                if (!dst_is_set) {
                    if (int found = __builtin_ffs(avail)) {
                        set_dst((Reg)(found-1));
                    } else {
                        // Same deal as with tmp... all the registers are occupied.  Time to fail!
                        ok = false;
                    }
                }
                return r[id];
            };

            // Because we use the same logic to pick an arbitrary dst and to pick tmp,
            // and we know that tmp will never overlap any of the inputs, `dst() == tmp()`
            // is a simple idiom to check that the destination does not overlap any of the inputs.
            // Sometimes we can use this knowledge to do better instruction selection.

            // Ok!  Keep in mind that we haven't assigned tmp or dst yet,
            // just laid out hooks for how to do so if we need them, depending on the instruction.
            //
            // Now let's actually assemble the instruction!
            switch (op) {
            #if defined(__x86_64__)
                case Op::store8: if (scalar) { a->vpextrb  (arg[imm], (A::Xmm)r[x], 0); }
                                 else        { a->vpackusdw(tmp(), r[x], r[x]);
                                               a->vpermq   (tmp(), tmp(), 0xd8);
                                               a->vpackuswb(tmp(), tmp(), tmp());
                                               a->vmovq    (arg[imm], (A::Xmm)tmp()); }
                                 break;
                                 // TODO: the else case is a situation where we could use r[x]
                                 // as tmp if it's available... we don't need it after the
                                 // first instruction.

                case Op::store32: if (scalar) { a->vmovd  (arg[imm], (A::Xmm)r[x]); }
                                  else        { a->vmovups(arg[imm],         r[x]); }
                                  break;

                case Op::load8: if (scalar) {
                                    a->vpxor  (dst(), dst(), dst());
                                    a->vpinsrb((A::Xmm)dst(), (A::Xmm)dst(), arg[imm], 0);
                                } else {
                                    a->vpmovzxbd(dst(), arg[imm]);
                                } break;

                case Op::load32: if (scalar) { a->vmovd  ((A::Xmm)dst(), arg[imm]); }
                                 else        { a->vmovups(        dst(), arg[imm]); }
                                 break;

                case Op::splat: a->vbroadcastss(dst(), &splats.find(imm)->label);
                                break;
                                // TODO: many of these instructions have variants that
                                // can read one of their arguments from 32-byte memory
                                // instead of a register.  Find a way to avoid needing
                                // to splat most* constants out at all?
                                // (*Might work for x - 255 but not 255 - x, so will
                                // always need to be able to splat to a register.)

                case Op::add_f32: a->vaddps(dst(), r[x], r[y]); break;
                case Op::sub_f32: a->vsubps(dst(), r[x], r[y]); break;
                case Op::mul_f32: a->vmulps(dst(), r[x], r[y]); break;
                case Op::div_f32: a->vdivps(dst(), r[x], r[y]); break;
                case Op::mad_f32:
                    if      (avail & (1<<r[x])) { set_dst(r[x]); a->vfmadd132ps(r[x], r[z], r[y]); }
                    else if (avail & (1<<r[y])) { set_dst(r[y]); a->vfmadd213ps(r[y], r[x], r[z]); }
                    else if (avail & (1<<r[z])) { set_dst(r[z]); a->vfmadd231ps(r[z], r[x], r[y]); }
                    else                        { SkASSERT(dst() == tmp());
                                                  // TODO: vpor -> vmovdqa here?
                                                  a->vpor       (dst(), r[x], r[x]);
                                                  a->vfmadd132ps(dst(), r[z], r[y]); }
                    break;

                case Op::add_i32: a->vpaddd (dst(), r[x], r[y]); break;
                case Op::sub_i32: a->vpsubd (dst(), r[x], r[y]); break;
                case Op::mul_i32: a->vpmulld(dst(), r[x], r[y]); break;

                case Op::sub_i16x2: a->vpsubw (dst(), r[x], r[y]); break;
                case Op::mul_i16x2: a->vpmullw(dst(), r[x], r[y]); break;
                case Op::shr_i16x2: a->vpsrlw (dst(), r[x],  imm); break;

                case Op::bit_and:   a->vpand (dst(), r[x], r[y]); break;
                case Op::bit_or :   a->vpor  (dst(), r[x], r[y]); break;
                case Op::bit_xor:   a->vpxor (dst(), r[x], r[y]); break;
                case Op::bit_clear: a->vpandn(dst(), r[y], r[x]); break;  // N.B. Y then X.

                case Op::shl: a->vpslld(dst(), r[x], imm); break;
                case Op::shr: a->vpsrld(dst(), r[x], imm); break;
                case Op::sra: a->vpsrad(dst(), r[x], imm); break;

                case Op::extract: if (imm == 0) { a->vpand (dst(),  r[x], r[y]); }
                                  else          { a->vpsrld(tmp(),  r[x], imm);
                                                  a->vpand (dst(), tmp(), r[y]); }
                                  break;

                case Op::pack: a->vpslld(tmp(),  r[y], imm);
                               a->vpor  (dst(), tmp(), r[x]);
                               break;

                case Op::to_f32: a->vcvtdq2ps (dst(), r[x]); break;
                case Op::to_i32: a->vcvttps2dq(dst(), r[x]); break;

                case Op::bytes: a->vpshufb(dst(), r[x], &bytes_masks.find(imm)->label);
                                break;
  1078. #elif defined(__aarch64__)
  1079. case Op::store8: a->xtns2h(tmp(), r[x]);
  1080. a->xtnh2b(tmp(), tmp());
  1081. if (scalar) { a->strb (tmp(), arg[imm]); }
  1082. else { a->strs (tmp(), arg[imm]); }
  1083. break;
  1084. // TODO: another case where it'd be okay to alias r[x] and tmp if r[x] dies here.
  1085. case Op::store32: if (scalar) { a->strs(r[x], arg[imm]); }
  1086. else { a->strq(r[x], arg[imm]); }
  1087. break;
  1088. case Op::load8: if (scalar) { a->ldrb(tmp(), arg[imm]); }
  1089. else { a->ldrs(tmp(), arg[imm]); }
  1090. a->uxtlb2h(tmp(), tmp());
  1091. a->uxtlh2s(dst(), tmp());
  1092. break;
  1093. case Op::load32: if (scalar) { a->ldrs(dst(), arg[imm]); }
  1094. else { a->ldrq(dst(), arg[imm]); }
  1095. break;
  1096. case Op::splat: a->ldrq(dst(), &splats.find(imm)->label);
  1097. break;
  1098. // TODO: If we hoist these, pack 4 values in each register
  1099. // and use vector/lane operations, cutting the register
  1100. // pressure cost of hoisting by 4?
  1101. case Op::add_f32: a->fadd4s(dst(), r[x], r[y]); break;
  1102. case Op::sub_f32: a->fsub4s(dst(), r[x], r[y]); break;
  1103. case Op::mul_f32: a->fmul4s(dst(), r[x], r[y]); break;
  1104. case Op::div_f32: a->fdiv4s(dst(), r[x], r[y]); break;
  1105. case Op::mad_f32:
  1106. if (avail & (1<<r[z])) { set_dst(r[z]); a->fmla4s( r[z], r[x], r[y]); }
  1107. else { a->orr16b(tmp(), r[z], r[z]);
  1108. a->fmla4s(tmp(), r[x], r[y]);
  1109. if(dst() != tmp()) { a->orr16b(dst(), tmp(), tmp()); } }
  1110. break;
  1111. case Op::add_i32: a->add4s(dst(), r[x], r[y]); break;
  1112. case Op::sub_i32: a->sub4s(dst(), r[x], r[y]); break;
  1113. case Op::mul_i32: a->mul4s(dst(), r[x], r[y]); break;
  1114. case Op::sub_i16x2: a->sub8h (dst(), r[x], r[y]); break;
  1115. case Op::mul_i16x2: a->mul8h (dst(), r[x], r[y]); break;
  1116. case Op::shr_i16x2: a->ushr8h(dst(), r[x], imm); break;
  1117. case Op::bit_and: a->and16b(dst(), r[x], r[y]); break;
  1118. case Op::bit_or : a->orr16b(dst(), r[x], r[y]); break;
  1119. case Op::bit_xor: a->eor16b(dst(), r[x], r[y]); break;
  1120. case Op::bit_clear: a->bic16b(dst(), r[x], r[y]); break;
  1121. case Op::shl: a-> shl4s(dst(), r[x], imm); break;
  1122. case Op::shr: a->ushr4s(dst(), r[x], imm); break;
  1123. case Op::sra: a->sshr4s(dst(), r[x], imm); break;
  1124. case Op::extract: if (imm) { a->ushr4s(tmp(), r[x], imm);
  1125. a->and16b(dst(), tmp(), r[y]); }
  1126. else { a->and16b(dst(), r[x], r[y]); }
  1127. break;
        case Op::pack:
            if (avail & (1<<r[x])) { set_dst(r[x]); a->sli4s ( r[x], r[y], imm); }
            else                   { a->shl4s (tmp(), r[y], imm);
                                     a->orr16b(dst(), tmp(), r[x]); }
            break;

        case Op::to_f32: a->scvtf4s (dst(), r[x]); break;
        case Op::to_i32: a->fcvtzs4s(dst(), r[x]); break;

        case Op::bytes: if (hoist) { a->tbl (dst(), r[x], bytes_masks.find(imm)->reg); }
                        else       { a->ldrq(tmp(), &bytes_masks.find(imm)->label);
                                     a->tbl (dst(), r[x], tmp()); }
                        break;
    #endif
        }

        // Calls to tmp() or dst() may have flipped ok from its default true to false.
        return ok;
    };
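
    // Per-ISA plumbing for the strip-mined loop below: K is the number of lanes
    // each vector iteration handles (8 floats in a 256-bit ymm register on
    // x86-64, 4 in a 128-bit NEON register), and the lambdas paper over ISA
    // differences in branching, pointer bumps, and function exit.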
    #if defined(__x86_64__)
        const int K = 8;
        auto jump_if_less = [&](A::Label* l) { a->jl (l); };
        auto jump         = [&](A::Label* l) { a->jmp(l); };
        auto add          = [&](A::GP64 gp, int imm) { a->add(gp, imm); };
        auto sub          = [&](A::GP64 gp, int imm) { a->sub(gp, imm); };
        auto exit         = [&]{ a->vzeroupper(); a->ret(); };
    #elif defined(__aarch64__)
        const int K = 4;
        auto jump_if_less = [&](A::Label* l) { a->blt(l); };
        auto jump         = [&](A::Label* l) { a->b  (l); };
        auto add          = [&](A::X gp, int imm) { a->add(gp, gp, imm); };
        auto sub          = [&](A::X gp, int imm) { a->sub(gp, gp, imm); };
        auto exit         = [&]{ a->ret(A::x30); };
    #endif
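
    // The generated program is, in spirit (a sketch, not literal output):
    //
    //     while (N >= K) { body(K lanes); arg[i] += K*fStrides[i]; N -= K; }   // body
    //     while (N >= 1) { body(1 lane ); arg[i] += 1*fStrides[i]; N -= 1; }   // tail
    //     return;                                                              // done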
    A::Label body,
             tail,
             done;

    for (Val id = 0; id < (Val)instructions.size(); id++) {
        if (!warmup(id)) {
            return false;
        }
        if (hoisted(id) && !emit(id, /*scalar=*/false)) {
            return false;
        }
    }

    a->label(&body);
    {
        a->cmp(N, K);
        jump_if_less(&tail);
        for (Val id = 0; id < (Val)instructions.size(); id++) {
            if (!hoisted(id) && !emit(id, /*scalar=*/false)) {
                return false;
            }
        }
        for (int i = 0; i < (int)fStrides.size(); i++) {
            add(arg[i], K*fStrides[i]);
        }
        sub(N, K);
        jump(&body);
    }

    a->label(&tail);
    {
        a->cmp(N, 1);
        jump_if_less(&done);
        for (Val id = 0; id < (Val)instructions.size(); id++) {
            if (!hoisted(id) && !emit(id, /*scalar=*/true)) {
                return false;
            }
        }
        for (int i = 0; i < (int)fStrides.size(); i++) {
            add(arg[i], 1*fStrides[i]);
        }
        sub(N, 1);
        jump(&tail);
    }

    a->label(&done);
    {
        exit();
    }
    bytes_masks.foreach([&](int imm, LabelAndReg* entry) {
        // One 16-byte pattern for ARM tbl, that same pattern twice for x86-64 vpshufb.
    #if defined(__x86_64__)
        a->align(32);
    #elif defined(__aarch64__)
        a->align(4);
    #endif
        a->label(&entry->label);
        int mask[4];
        bytes_control(imm, mask);
        a->bytes(mask, sizeof(mask));
    #if defined(__x86_64__)
        a->bytes(mask, sizeof(mask));
    #endif
    });
    splats.foreach([&](int imm, LabelAndReg* entry) {
        // vbroadcastss reads 4 bytes on x86-64; on aarch64 we simply load all 16 bytes.
        a->align(4);
        a->label(&entry->label);
        a->word(imm);
    #if defined(__aarch64__)
        a->word(imm);
        a->word(imm);
        a->word(imm);
    #endif
    });
    return true;
}

void Program::setupJIT(const std::vector<Builder::Instruction>& instructions,
                       const char* debug_name) {
    // Assemble with no buffer to determine a.size(), the number of bytes we'll assemble.
    Assembler a{nullptr};

    // First try allowing code hoisting (faster code),
    // then again without it if that fails (lower register pressure).
    bool hoist = true;
    if (!this->jit(instructions, hoist, &a)) {
        hoist = false;
        if (!this->jit(instructions, hoist, &a)) {
            return;
        }
    }

    // Allocate space that we can remap as executable.
    const size_t page = sysconf(_SC_PAGESIZE);
    fJITSize = ((a.size() + page - 1) / page) * page;  // mprotect works at page granularity.
    fJITBuf = mmap(nullptr, fJITSize, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);

    // Assemble the program for real.
    a = Assembler{fJITBuf};
    SkAssertResult(this->jit(instructions, hoist, &a));
    SkASSERT(a.size() <= fJITSize);

    // Remap as executable, and flush caches on platforms that need that.
    mprotect(fJITBuf, fJITSize, PROT_READ|PROT_EXEC);
    __builtin___clear_cache((char*)fJITBuf,
                            (char*)fJITBuf + fJITSize);

    #if defined(SKVM_PERF_DUMPS)
        this->dumpJIT(debug_name, a.size());
    #endif
}
#endif

#if defined(SKVM_PERF_DUMPS)
void Program::dumpJIT(const char* debug_name, size_t size) const {
#if 0 && defined(__aarch64__)
    if (debug_name) {
        SkDebugf("\n%s:", debug_name);
    }
    // cat | llvm-mc -arch aarch64 -disassemble
    auto cur = (const uint8_t*)fJITBuf;
    for (int i = 0; i < (int)size; i++) {
        if (i % 4 == 0) {
            SkDebugf("\n");
        }
        SkDebugf("0x%02x ", *cur++);
    }
    SkDebugf("\n");
#endif

    // We're doing some really stateful things below so one thread at a time please...
    static SkSpinlock dump_lock;
    SkAutoSpinlock lock(dump_lock);
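
    // FNV-1a over the code bytes gives each JIT'd program a stable name.
    // (2166136261 and 16777619 are the 32-bit FNV offset basis and FNV prime.)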
    auto fnv1a = [](const void* vbuf, size_t n) {
        uint32_t hash = 2166136261;
        for (auto buf = (const uint8_t*)vbuf; n --> 0; buf++) {
            hash ^= *buf;
            hash *= 16777619;
        }
        return hash;
    };
    char name[64];
    uint32_t hash = fnv1a(fJITBuf, size);
    if (debug_name) {
        sprintf(name, "skvm-jit-%s", debug_name);
    } else {
        sprintf(name, "skvm-jit-%u", hash);
    }
    // Create a jit-<pid>.dump file that we can `perf inject -j` into a
    // perf.data captured with `perf record -k 1`, letting us see each
    // JIT'd Program as if it were a function named skvm-jit-<hash>. E.g.
    //
    //     ninja -C out nanobench
    //     perf record -k 1 out/nanobench -m SkVM_4096_I32\$
    //     perf inject -j -i perf.data -o perf.data.jit
    //     perf report -i perf.data.jit
    //
    // Running `perf inject -j` will also dump an .so for each JIT'd
    // program, named jitted-<pid>-<hash>.so.
    //
    //     https://lwn.net/Articles/638566/
    //     https://v8.dev/docs/linux-perf
    //     https://cs.chromium.org/chromium/src/v8/src/diagnostics/perf-jit.cc
    //     https://lore.kernel.org/patchwork/patch/622240/
    auto timestamp_ns = []() -> uint64_t {
        // It's important to use CLOCK_MONOTONIC here so that perf can
        // correlate our timestamps with those captured by `perf record -k 1`.
        // That's what `-k 1` means: it tells perf record to use CLOCK_MONOTONIC.
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * (uint64_t)1e9 + ts.tv_nsec;
    };
    // We'll open the jit-<pid>.dump file and write a small header once,
    // and just leave it open forever because we're lazy.
    static FILE* jitdump = [&]{
        // Must be opened "w+" for the mmap() call below to work.
        char path[64];
        sprintf(path, "jit-%d.dump", getpid());
        FILE* f = fopen(path, "w+");

        // Calling mmap() on the file adds a "hey, they mmap()'d this" record to
        // the perf.data file that will point `perf inject -j` at this log file.
        // Kind of a strange way to tell `perf inject` where the file is...
        void* marker = mmap(nullptr, sysconf(_SC_PAGESIZE),
                            PROT_READ|PROT_EXEC, MAP_PRIVATE,
                            fileno(f), /*offset=*/0);
        SkASSERT_RELEASE(marker != MAP_FAILED);
        // Like never calling fclose(f), we'll also just always leave marker mmap()'d.
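
        // elf_mach is the ELF e_machine value perf expects for this
        // architecture: 62 is EM_X86_64 and 183 is EM_AARCH64.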
  1336. #if defined(__x86_64__)
  1337. const uint32_t elf_mach = 62;
  1338. #elif defined(__aarch64__)
  1339. const uint32_t elf_mach = 183;
  1340. #endif
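
        // The jitdump file header. Its magic 0x4A695444 is ASCII "JiTD"; note
        // the header timestamp is in microseconds while each record's is in
        // nanoseconds.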
        struct Header {
            uint32_t magic, version, header_size, elf_mach, reserved, pid;
            uint64_t timestamp_us, flags;
        } header = {
            0x4A695444, 1, sizeof(Header), elf_mach, 0, (uint32_t)getpid(),
            timestamp_ns() / 1000, 0,
        };
        fwrite(&header, sizeof(header), 1, f);
        return f;
    }();
    struct CodeLoad {
        uint32_t event_type, event_size;
        uint64_t timestamp_ns;

        uint32_t pid, tid;
        uint64_t vma/*???*/, code_addr, code_size, id;
    } load = {
        0/*code load*/, (uint32_t)(sizeof(CodeLoad) + strlen(name) + 1 + size),
        timestamp_ns(),

        (uint32_t)getpid(), (uint32_t)SkGetThreadID(),
        (uint64_t)fJITBuf, (uint64_t)fJITBuf, size, hash,
    };
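
    // event_type 0 marks a JIT_CODE_LOAD record; event_size counts this struct
    // plus the NUL-terminated name and the code bytes written below.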
    // Write the header, the JIT'd function name, and the JIT'd code itself.
    fwrite(&load, sizeof(load), 1, jitdump);
    fwrite(name, 1, strlen(name), jitdump);
    fwrite("\0", 1, 1, jitdump);
    fwrite(fJITBuf, 1, size, jitdump);
}
#endif

}  // namespace skvm