book3s_64_vio.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>
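/*
 * kvmppc_tce_pages() returns how many guest pages are needed to store
 * @iommu_pages 64-bit TCE entries; kvmppc_stt_pages() adds the pages
 * taken by the table descriptor and its page pointer array, which is
 * what gets charged against the locked memory limit.
 */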
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}
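/*
 * RCU-deferred release of a kvmppc_spapr_tce_iommu_table: drop the
 * reference on the hardware iommu_table and free the tracking structure.
 * kvm_spapr_tce_liobn_put() is the kref release callback that queues it.
 */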
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}
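/*
 * Called when an IOMMU group is detached from the VM: drop the references
 * that every registered TCE table holds on hardware tables of this group.
 */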
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}
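/*
 * Associates a guest TCE table (identified by @tablefd) with a compatible
 * hardware IOMMU table of @grp, so that H_PUT_TCE and friends update real
 * DMA mappings and not just the emulated table.
 */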
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			rcu_read_unlock();
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}
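/* RCU callback: free the backing pages and the table descriptor itself. */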
static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
}
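/*
 * Returns the page backing TCE table page @sttpage, allocating it on
 * first use; pages are allocated lazily under stt->alloc_lock.
 */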
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
		unsigned long sttpage)
{
	struct page *page = stt->pages[sttpage];

	if (page)
		return page;

	mutex_lock(&stt->alloc_lock);
	page = stt->pages[sttpage];
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		WARN_ON_ONCE(!page);
		if (page)
			stt->pages[sttpage] = page;
	}
	mutex_unlock(&stt->alloc_lock);

	return page;
}
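/*
 * Fault handler for the mmap()ed TCE table fd: returns the lazily
 * allocated backing page for the faulting offset so userspace can
 * access the emulated table.
 */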
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
	if (!page)
		return VM_FAULT_OOM;

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	account_locked_vm(kvm->mm,
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

	kvm_put_kvm(stt->kvm);

	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap		= kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};
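/*
 * KVM_CREATE_SPAPR_TCE_64 handler: allocates a TCE table descriptor for
 * the given LIOBN, charges locked memory and returns an anonymous fd
 * which userspace can mmap() to access the table.
 */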
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	struct mm_struct *mm = kvm->mm;
	unsigned long npages, size = args->size;
	int ret;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(size);
	ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	mutex_init(&stt->alloc_lock);
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	kvm_get_kvm(kvm);
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0)
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	else
		kvm_put_kvm_no_destroy(kvm);

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

	kfree(stt);
fail_acct:
	account_locked_vm(mm, kvmppc_stt_pages(npages), false);
	return ret;
}
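/*
 * Translates the guest physical address carried in a TCE into the
 * corresponding userspace address, keeping the offset within the page
 * and dropping the permission bits.
 */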
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}
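/*
 * Checks that a guest-supplied TCE is safe to install: the GPA must be
 * aligned for the table's page size, it must resolve to a userspace
 * address, and that address must be preregistered with every hardware
 * IOMMU table attached to this LIOBN.
 */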
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
			rcu_read_unlock();
			return H_TOO_HARD;
		}
	}
	rcu_read_unlock();

	return H_SUCCESS;
}
/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Cannot fail so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;
	unsigned long sttpage;

	idx -= stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	page = stt->pages[sttpage];

	if (!page) {
		/* We allow any TCE, not just with read|write permissions */
		if (!tce)
			return;

		page = kvm_spapr_get_tce_page(stt, sttpage);
		if (!page)
			return;
	}
	tbl = page_to_virt(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
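/*
 * Clears every hardware TCE covered by one guest TCE entry (a guest page
 * may span several IOMMU pages).
 */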
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
		struct iommu_table *tbl, unsigned long entry)
{
	unsigned long i;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

	for (i = 0; i < subpages; ++i) {
		unsigned long hpa = 0;
		enum dma_data_direction dir = DMA_NONE;

		iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
	}
}
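/*
 * Looks up the preregistered memory chunk recorded for this hardware
 * entry, drops its "mapped" counter and clears the cached userspace
 * address.
 */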
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
					&dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}
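/*
 * Installs one hardware TCE: looks up the preregistered memory region
 * for @ua, takes a "mapped" reference on it and exchanges the HPA into
 * the hardware table entry.
 */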
static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}
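/*
 * H_PUT_TCE hypercall: installs a single TCE at @ioba in the table
 * identified by @liobn, updating both the emulated table and any
 * attached hardware tables.
 */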
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		goto unlock_exit;

	dir = iommu_tce_direction(tce);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
			goto unlock_exit;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
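/*
 * H_PUT_TCE_INDIRECT hypercall: installs up to 512 TCEs read from a
 * guest-provided list at @tce_list, starting at @ioba.
 */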
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * SPAPR spec says that the maximum size of the list is 512 TCEs
	 * so the whole table fits in 4K page
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * another thread.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code. If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
						 entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
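/*
 * H_STUFF_TCE hypercall: writes the same @tce_value into @npages
 * consecutive entries starting at @ioba.
 */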
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace poison TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);