book3s_64_vio_hv.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

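/*
 * WARN_ON_ONCE_RM(): an open-coded, once-per-call-site warning used by the
 * real-mode handlers below. It only relies on pr_err() and dump_stack()
 * instead of the generic WARN_ON_ONCE() machinery, which these real-mode
 * paths avoid.
 */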
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(".data.unlikely") __warned;	\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

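/*
 * Converts a guest TCE (a guest physical address plus permission bits) into
 * a host userspace address. Uses the raw memslot array so the lookup can be
 * done from real mode.
 */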
static long kvmppc_rm_tce_to_ua(struct kvm *kvm,
		unsigned long tce, unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE is a guest RAM address or
 * whether the page was actually allocated).
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))); this is a pure
 * arithmetic operation and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently
 * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_rm_tce_validate must be called before it.
 */
static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	/*
	 * kvmppc_rm_ioba_validate() allows pages not to be allocated if a TCE
	 * is being cleared, otherwise it returns H_TOO_HARD and we skip this.
	 */
	if (!page) {
		WARN_ON_ONCE_RM(tce != 0);
		return;
	}
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

/*
 * TCE pages are allocated lazily by the virtual-mode handler;
 * kvmppc_rm_tce_put() won't be able to do so in real mode.
 * Check that kvmppc_rm_tce_put() can succeed in real mode, i.e. a TCE page is
 * already allocated or not required (when clearing a TCE entry).
 */
static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages, bool clearing)
{
	unsigned long i, idx, sttpage, sttpages;
	unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);

	if (ret)
		return ret;
	/*
	 * clearing==true says kvmppc_rm_tce_put won't be allocating pages
	 * for empty tces.
	 */
	if (clearing)
		return H_SUCCESS;

	idx = (ioba >> stt->page_shift) - stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	sttpages = ALIGN(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
			TCES_PER_PAGE;
	for (i = sttpage; i < sttpage + sttpages; ++i)
		if (!stt->pages[i])
			return H_TOO_HARD;

	return H_SUCCESS;
}

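/*
 * Real-mode wrapper around the IOMMU table's xchg_no_kill() callback:
 * exchanges the hardware TCE at @entry with *hpa/*direction and, if the old
 * entry allowed the device to write to memory, marks the corresponding
 * preregistered page dirty via its cached userspace address.
 */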
static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
				(*direction == DMA_BIDIRECTIONAL))) {
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
		/*
		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
		 * calling this, so the UA read here is still valid.
		 */
		if (pua && *pua)
			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
	}

	return ret;
}

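/*
 * Invalidates ("kills") the IOMMU TCE cache for @pages entries starting at
 * @entry, if the platform provides a tce_kill() callback.
 */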
static void iommu_tce_kill_rm(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages, true);
}

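/*
 * Clears all hardware TCEs (one per IOMMU subpage) backing a single entry of
 * the guest's TCE table. Used to undo partial updates on error paths.
 */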
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
		struct iommu_table *tbl, unsigned long entry)
{
	unsigned long i;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

	for (i = 0; i < subpages; ++i) {
		unsigned long hpa = 0;
		enum dma_data_direction dir = DMA_NONE;

		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
	}
}

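/*
 * Looks up the preregistered memory chunk backing the TCE at @entry (via the
 * cached userspace address), drops its "mapped" reference and clears the
 * cached UA.
 */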
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

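/*
 * Unmaps one hardware TCE: clears the entry (exchanges it with DMA_NONE) and,
 * if it was previously mapped, drops the reference on the backing
 * preregistered memory.
 */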
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

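/*
 * Unmaps all hardware TCEs (subpages) covering one guest TCE table entry,
 * then invalidates the IOMMU TCE cache for the affected range.
 */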
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill_rm(tbl, io_entry, subpages);

	return ret;
}

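/*
 * Maps one hardware TCE: translates the userspace address to a host physical
 * address via the preregistered memory list, takes a "mapped" reference,
 * programs the entry and caches the UA for later unmapping/dirtying.
 */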
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

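/*
 * Maps all hardware TCEs (subpages) covering one guest TCE table entry to the
 * pages backing @ua, then invalidates the IOMMU TCE cache for the affected
 * range.
 */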
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill_rm(tbl, io_entry, subpages);

	return ret;
}

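/*
 * Real-mode handler for the H_PUT_TCE hcall: validates the request, updates
 * every attached hardware IOMMU table and finally the guest-visible copy of
 * the TCE table. Returns H_TOO_HARD to fall back to the virtual-mode handler
 * whenever the work cannot be completed safely in real mode.
 */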
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_rm_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
			return ret;
		}
	}

	kvmppc_rm_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

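/*
 * Translates a host userspace address into a host physical address by walking
 * the host page table in real mode. Returns -EAGAIN for anything that is
 * better handled in virtual mode (huge pages, pages not marked accessed).
 */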
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to primary thread will wait for the secondary
	 * to exit which will again result in the below page table walk
	 * to finish.
	 */
	/*
	 * An rmap lock won't make it safe, because that only ensures hash
	 * page table entries are removed with the rmap lock held. After that
	 * the mmu notifier returns and we go ahead and remove ptes from the
	 * QEMU page table.
	 */
	ptep = find_kvm_host_pte(vcpu->kvm, mmu_seq, ua, &shift);
	if (!ptep)
		return -ENXIO;

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte))
		return -ENXIO;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

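/*
 * Real-mode handler for the H_PUT_TCE_INDIRECT hcall: reads a list of up to
 * 512 TCEs from guest memory at @tce_list, validates them all first, then
 * applies them to the attached hardware IOMMU tables and the guest-visible
 * table. Returns H_TOO_HARD to fall back to virtual mode whenever the TCE
 * list cannot be accessed safely from real mode.
 */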
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long mmu_seq;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	/*
	 * used to check for invalidations in progress
	 */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole table addressed resides in a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and the gpa->hpa translation
		 * does not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so take the MMU lock and walk the host page table instead.
		 */
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
			return H_TOO_HARD;

		arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
		if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_rm_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ua = 0;
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
						entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_rm_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (!prereg)
		arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);

	return ret;
}

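/*
 * Real-mode handler for the H_STUFF_TCE hcall: writes the same @tce_value
 * into @npages consecutive entries starting at @ioba, unmapping the
 * corresponding hardware TCEs first.
 */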
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return ret;
}

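/*
 * H_GET_TCE handler: reads one TCE from the emulated (guest-visible) table
 * and returns it to the guest in GPR4.
 */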
/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		vcpu->arch.regs.gpr[4] = 0;
		return H_SUCCESS;
	}
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */