book3s_32_mmu_host.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "book3s.h"

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif
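
/*
 * Location of the host hash table (HTAB): htab is its virtual base
 * address, htabmask the mask applied to the PTEG hash.  Both are
 * derived from SDR1 in kvmppc_mmu_init_pr() below.
 */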
static ulong htab;
static u32 htabmask;

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        volatile u32 *pteg;

        /* Remove from host HTAB */
        pteg = (u32*)pte->slot;
        pteg[0] = 0;

        /* And make sure it's gone from the TLB too */
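        /*
         * The first sync orders the PTE clear before the tlbie; the
         * trailing sync/tlbsync pair waits for the invalidation to
         * complete before we return.
         */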
        asm volatile ("sync");
        asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
        asm volatile ("sync");
        asm volatile ("tlbsync");
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
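
/*
 * Look up the shadow (host) VSID for a guest VSID.  An entry may sit
 * in its primary hash slot or in the mirrored slot at
 * SID_MAP_MASK - hash; see create_sid_map() below.
 */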
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
        return NULL;
}
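
/*
 * Compute the address of the host PTEG for (vsid, eaddr), mirroring
 * the hashed page table's hardware hash: the primary hash is
 * vsid XOR page index, the secondary hash its complement, both
 * masked by the HTAB size mask.
 */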
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
                                bool primary)
{
        u32 page, hash;
        ulong pteg = htab;

        page = (eaddr & ~ESID_MASK) >> 12;

        hash = ((vsid ^ page) << 6);
        if (!primary)
                hash = ~hash;
        hash &= htabmask;

        pteg |= hash;

        dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
                    htab, hash, htabmask, pteg);

        return (u32*)pteg;
}

extern char etext[];
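
/*
 * Map a guest page into the host HTAB: resolve the guest real address
 * to a host pfn, pick a slot in the primary or secondary PTEG
 * (evicting round-robin once both are full) and install the shadow PTE.
 */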
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
                        bool iswrite)
{
        kvm_pfn_t hpaddr;
        u64 vpn;
        u64 vsid;
        struct kvmppc_sid_map *map;
        volatile u32 *pteg;
        u32 eaddr = orig_pte->eaddr;
        u32 pteg0, pteg1;
        register int rr = 0;
        bool primary = false;
        bool evict = false;
        struct hpte_cache *pte;
        int r = 0;
        bool writable;

        /* Get host physical address for gpa */
        hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
        if (is_error_noslot_pfn(hpaddr)) {
                printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
                       orig_pte->raddr);
                r = -EINVAL;
                goto out;
        }
        hpaddr <<= PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                kvmppc_mmu_map_segment(vcpu, eaddr);
                map = find_sid_vsid(vcpu, vsid);
        }
        BUG_ON(!map);

        vsid = map->host_vsid;
        vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
                ((eaddr & ~ESID_MASK) >> VPN_SHIFT);
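
        /*
         * rr walks the eight PTEs of a PTEG in steps of two u32 words.
         * Search the primary PTEG first, then the secondary; once both
         * have been scanned without finding a free slot, start evicting.
         */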
next_pteg:
        if (rr == 16) {
                primary = !primary;
                evict = true;
                rr = 0;
        }
        pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

        /* not evicting yet */
        if (!evict && (pteg[rr] & PTE_V)) {
                rr += 2;
                goto next_pteg;
        }

        dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
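
        /*
         * Build the two words of the hashed PTE: word 0 carries the
         * valid bit, VSID and abbreviated page index, word 1 the real
         * page number plus memory-coherence and protection bits.
         */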
        pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
                (primary ? 0 : PTE_SEC);
        pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

        if (orig_pte->may_write && writable) {
                pteg1 |= PP_RWRW;
                mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        } else {
                pteg1 |= PP_RWRX;
        }

        if (orig_pte->may_execute)
                kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
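
        /*
         * Install the PTE with interrupts off.  Clear any old entry
         * first, and write word 1 before word 0 so the valid bit only
         * becomes visible once the PTE is complete.
         */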
        local_irq_disable();

        if (pteg[rr]) {
                pteg[rr] = 0;
                asm volatile ("sync");
        }
        pteg[rr + 1] = pteg1;
        pteg[rr] = pteg0;
        asm volatile ("sync");

        local_irq_enable();

        dprintk_mmu("KVM: new PTEG: %p\n", pteg);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

        /* Now tell our Shadow PTE code about the new page */
        pte = kvmppc_mmu_hpte_cache_next(vcpu);
        if (!pte) {
                kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
                r = -EAGAIN;
                goto out;
        }

        dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
                    orig_pte->may_write ? 'w' : '-',
                    orig_pte->may_execute ? 'x' : '-',
                    orig_pte->eaddr, (ulong)pteg, vpn,
                    orig_pte->vpage, hpaddr);

        pte->slot = (ulong)&pteg[rr];
        pte->host_vpn = vpn;
        pte->pte = *orig_pte;
        pte->pfn = hpaddr >> PAGE_SHIFT;

        kvmppc_mmu_hpte_cache_map(vcpu, pte);

        kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);

out:
        return r;
}
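
/*
 * Drop all shadow translations for the given guest virtual page.
 */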
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
}
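
/*
 * Allocate a shadow VSID for a guest VSID.  Alternate between the
 * primary hash slot and the mirrored one so colliding guest VSIDs
 * don't keep evicting each other; when the VSID pool is exhausted,
 * flush all shadow state and start over.
 */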
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        /* We might get collisions that trap in preceding order, so let's
           map them differently */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
                vcpu_book3s->vsid_next = 0;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }
        map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
        vcpu_book3s->vsid_next++;

        map->guest_vsid = gvsid;
        map->valid = true;

        return map;
}
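
/*
 * Load the shadow segment register for eaddr: translate the guest
 * ESID to a guest VSID, find or create its shadow VSID, and record
 * the resulting SR value in the shadow vcpu.
 */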
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        u32 esid = eaddr >> SID_SHIFT;
        u64 gvsid;
        u32 sr;
        struct kvmppc_sid_map *map;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        int r = 0;

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->sr[esid] = SR_INVALID;
                r = -ENOENT;
                goto out;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;
        sr = map->host_vsid | SR_KP;
        svcpu->sr[esid] = sr;

        dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
        svcpu_put(svcpu);
        return r;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

        dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
        for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
                svcpu->sr[i] = SR_INVALID;

        svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
        int i;

        kvmppc_mmu_hpte_destroy(vcpu);
        preempt_disable();
        for (i = 0; i < SID_CONTEXTS; i++)
                __destroy_context(to_book3s(vcpu)->context_id[i]);
        preempt_enable();
}

/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(c, id)      ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
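
/*
 * Each host MMU context provides 16 VSIDs (one per segment register);
 * kvmppc_mmu_init_pr() allocates SID_CONTEXTS contexts and fills the
 * vsid_pool with the VSIDs computed by CTX_TO_VSID().
 */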
int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;
        ulong sdr1;
        int i;
        int j;

        for (i = 0; i < SID_CONTEXTS; i++) {
                err = __init_new_context();
                if (err < 0)
                        goto init_fail;
                vcpu3s->context_id[i] = err;

                /* Remember context id for this combination */
                for (j = 0; j < 16; j++)
                        vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
        }

        vcpu3s->vsid_next = 0;

        /* Remember where the HTAB is */
        asm ( "mfsdr1 %0" : "=r"(sdr1) );
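        /*
         * SDR1's upper bits give the physical base of the host HTAB,
         * its low 9 bits the HTABMASK; derive the PTEG hash mask used
         * by kvmppc_mmu_get_pteg() and the HTAB's virtual base address.
         */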
        htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
        htab = (ulong)__va(sdr1 & 0xffff0000);

        kvmppc_mmu_hpte_init(vcpu);

        return 0;

init_fail:
        for (j = 0; j < i; j++) {
                if (!vcpu3s->context_id[j])
                        continue;

                __destroy_context(to_book3s(vcpu)->context_id[j]);
        }

        return -1;
}