// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pagemap.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>

#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
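
/*
 * The magic (shared) page sits at the very top of the effective address
 * space, so the address of any field in struct kvm_vcpu_arch_shared fits
 * in the sign-extended 16-bit displacement of a load or store with RA=0.
 * The patchers below rely on this: they keep only the low 16 bits of
 * magic_var(x) as the D field of the patched instruction.
 */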

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))
#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)
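
/*
 * mfspr/mtspr encode the SPR number with its two 5-bit halves swapped:
 * counting from the least significant bit, sprn[0:4] lands in instruction
 * bits 16-20 and sprn[5:9] in bits 11-15, which is what the two shifts
 * above reproduce. Worked example: SPRN_SRR0 is 26 (0x1a), so
 * KVM_INST_MFSPR(SPRN_SRR0) is 0x7c0002a6 | (0x1a << 16) = 0x7c1a02a6;
 * OR-ing in an RT field of r3 (3 << 21) gives 0x7c7a02a6, i.e. the
 * encoding of "mfspr r3, srr0".
 */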

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124
#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146
#define KVM_INST_MTSRIN		0x7c0001e4

static bool kvm_patching_worked = true;
extern char kvm_tmp[];
extern char kvm_tmp_end[];
static int kvm_tmp_index;

static void __init kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}
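
/*
 * The shared-page fields accessed via kvm_patch_ins_ld() are 64 bits wide.
 * A 32-bit kernel (big-endian here) only wants the low word, which sits at
 * offset +4 within the doubleword, hence the "addr + 4" above.
 * kvm_patch_ins_ll(), by contrast, loads a native long at its natural
 * offset.
 */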

static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void __init kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

static void __init kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupt handlers and our code
	   can be in different regions, so we don't patch them */
	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}
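
/*
 * An unconditional "b" carries a 26-bit signed, word-aligned displacement
 * relative to the branch itself (AA = LK = 0), so a trampoline must lie
 * within +/-32 MB of the patched site. KVM_INST_B_MAX is the largest
 * forward distance the patchers accept before giving up.
 */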

static u32 * __init kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > (kvm_tmp_end - kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}
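
/*
 * kvm_tmp is a scratch area reserved in the kernel image (bounded by
 * kvm_tmp[] and kvm_tmp_end[]) from which the patchers carve trampolines.
 * kvm_alloc() is a simple bump allocator: nothing is ever freed, and once
 * the area is exhausted patching is abandoned rather than rolled back.
 */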

extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];
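
/*
 * The kvm_emulate_* templates live in kvm_emul.S, together with *_offs
 * words naming the instruction slots that need per-call-site fixup: the
 * branch back to the instruction after the patched one, the register
 * handling, and a copy of the original instruction. The patchers below
 * all follow the same pattern: allocate a trampoline from kvm_tmp, copy
 * the template in, fix up those slots, then rewrite the call site into a
 * "b" to the trampoline. r30 and r31 get special treatment because the
 * templates use them as scratch (stashing them in the magic page's
 * scratch2/scratch1 fields), so their live values are reloaded from there
 * rather than used directly.
 */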

static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void __init kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif
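
/*
 * kvm_map_magic_page() runs on every CPU. Both hypercall arguments carry
 * the -4096 magic address (the host interprets them as the guest-physical
 * and effective address of the mapping); MAGIC_PAGE_FLAG_NOT_MAPPED_NX
 * advertises that the guest tolerates the page being initially unmapped
 * and mapped no-execute. out[0] returns the KVM_MAGIC_FEAT_* bits that
 * kvm_check_ins() consults below.
 */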

static void __init kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}
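
/*
 * kvm_check_ins() matches each candidate word three ways: first with the
 * RT field masked off (the mfspr/mtspr/mtmsr forms, where RT varies),
 * then with both RT and RB masked off (mtsrin), and finally as an exact
 * opcode (wrteei, whose immediate is part of the encoding). Anything that
 * matches is patched into a direct magic-page access, a nop, or a branch
 * to an emulation trampoline.
 */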

static void __init kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

static void __init kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();
	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}
	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}

static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		return 0;

	if (!epapr_paravirt_enabled)
		return 0;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

	return 0;
}
postcore_initcall(kvm_guest_init);