vfio_iommu_type1.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * VFIO: IOMMU DMA mapping support for Type1 IOMMU
  4. *
  5. * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
  6. * Author: Alex Williamson <alex.williamson@redhat.com>
  7. *
  8. * Derived from original vfio:
  9. * Copyright 2010 Cisco Systems, Inc. All rights reserved.
  10. * Author: Tom Lyon, pugs@cisco.com
  11. *
  12. * We arbitrarily define a Type1 IOMMU as one matching the below code.
  13. * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
  14. * VT-d, but that makes it harder to re-use as theoretically anyone
  15. * implementing a similar IOMMU could make use of this. We expect the
  16. * IOMMU to support the IOMMU API and have few to no restrictions around
  17. * the IOVA range that can be mapped. The Type1 IOMMU is currently
  18. * optimized for relatively static mappings of a userspace process with
  19. * userspace pages pinned into memory. We also assume devices and IOMMU
  20. * domains are PCI based as the IOMMU API is still centered around a
  21. * device/bus interface rather than a group interface.
  22. */
  23. #include <linux/compat.h>
  24. #include <linux/device.h>
  25. #include <linux/fs.h>
  26. #include <linux/highmem.h>
  27. #include <linux/iommu.h>
  28. #include <linux/module.h>
  29. #include <linux/mm.h>
  30. #include <linux/kthread.h>
  31. #include <linux/rbtree.h>
  32. #include <linux/sched/signal.h>
  33. #include <linux/sched/mm.h>
  34. #include <linux/slab.h>
  35. #include <linux/uaccess.h>
  36. #include <linux/vfio.h>
  37. #include <linux/workqueue.h>
  38. #include <linux/mdev.h>
  39. #include <linux/notifier.h>
  40. #include <linux/dma-iommu.h>
  41. #include <linux/irqdomain.h>
  42. #define DRIVER_VERSION "0.2"
  43. #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
  44. #define DRIVER_DESC "Type1 IOMMU driver for VFIO"
  45. static bool allow_unsafe_interrupts;
  46. module_param_named(allow_unsafe_interrupts,
  47. allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
  48. MODULE_PARM_DESC(allow_unsafe_interrupts,
  49. "Enable VFIO IOMMU support for on platforms without interrupt remapping support.");
  50. static bool disable_hugepages;
  51. module_param_named(disable_hugepages,
  52. disable_hugepages, bool, S_IRUGO | S_IWUSR);
  53. MODULE_PARM_DESC(disable_hugepages,
  54. "Disable VFIO IOMMU support for IOMMU hugepages.");
  55. static unsigned int dma_entry_limit __read_mostly = U16_MAX;
  56. module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
  57. MODULE_PARM_DESC(dma_entry_limit,
  58. "Maximum number of user DMA mappings per container (65535).");
  59. struct vfio_iommu {
  60. struct list_head domain_list;
  61. struct list_head iova_list;
  62. struct vfio_domain *external_domain; /* domain for external user */
  63. struct mutex lock;
  64. struct rb_root dma_list;
  65. struct blocking_notifier_head notifier;
  66. unsigned int dma_avail;
  67. uint64_t pgsize_bitmap;
  68. bool v2;
  69. bool nesting;
  70. bool dirty_page_tracking;
  71. bool pinned_page_dirty_scope;
  72. };
  73. struct vfio_domain {
  74. struct iommu_domain *domain;
  75. struct list_head next;
  76. struct list_head group_list;
  77. int prot; /* IOMMU_CACHE */
  78. bool fgsp; /* Fine-grained super pages */
  79. };
  80. struct vfio_dma {
  81. struct rb_node node;
  82. dma_addr_t iova; /* Device address */
  83. unsigned long vaddr; /* Process virtual addr */
  84. size_t size; /* Map size (bytes) */
  85. int prot; /* IOMMU_READ/WRITE */
  86. bool iommu_mapped;
  87. bool lock_cap; /* capable(CAP_IPC_LOCK) */
  88. struct task_struct *task;
  89. struct rb_root pfn_list; /* Externally pinned pfn list */
  90. unsigned long *bitmap;
  91. };
  92. struct vfio_group {
  93. struct iommu_group *iommu_group;
  94. struct list_head next;
  95. bool mdev_group; /* An mdev group */
  96. bool pinned_page_dirty_scope;
  97. };
  98. struct vfio_iova {
  99. struct list_head list;
  100. dma_addr_t start;
  101. dma_addr_t end;
  102. };
  103. /*
  104. * Guest RAM pinning working set or DMA target
  105. */
  106. struct vfio_pfn {
  107. struct rb_node node;
  108. dma_addr_t iova; /* Device address */
  109. unsigned long pfn; /* Host pfn */
  110. unsigned int ref_count;
  111. };
  112. struct vfio_regions {
  113. struct list_head list;
  114. dma_addr_t iova;
  115. phys_addr_t phys;
  116. size_t len;
  117. };
  118. #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \
  119. (!list_empty(&iommu->domain_list))
  120. #define DIRTY_BITMAP_BYTES(n) (ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE)
  121. /*
  122. * The number-of-bits argument to bitmap_set() is an unsigned int, which the
  123. * unaligned multi-bit helper __bitmap_set() further casts to a signed int.
  124. * The maximum supported bitmap size is therefore 2^31 bits, i.e.
  125. * 2^31 bits divided by 2^3 bits/byte = 2^28 bytes (256 MB),
  126. * which covers 2^31 * 2^12 = 2^43 bytes (8 TB) of address space on a 4K page
  127. * system.
  128. */
  129. #define DIRTY_BITMAP_PAGES_MAX ((u64)INT_MAX)
  130. #define DIRTY_BITMAP_SIZE_MAX DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
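/*
 * Worked example of the limits above: with 4K pages, a 1 GiB vfio_dma covers
 * 1 GiB / 4 KiB = 262144 pages, so DIRTY_BITMAP_BYTES(262144) =
 * ALIGN(262144, 64) / 8 = 32768 bytes (32 KiB).  At the DIRTY_BITMAP_PAGES_MAX
 * limit of INT_MAX (~2^31) pages the bitmap reaches DIRTY_BITMAP_SIZE_MAX =
 * 2^31 / 8 = 256 MB, matching the comment above.
 */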
  131. static int put_pfn(unsigned long pfn, int prot);
  132. static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
  133. struct iommu_group *iommu_group);
  134. static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
  135. /*
  136. * This code handles mapping and unmapping of user data buffers
  137. * into DMA'ble space using the IOMMU
  138. */
  139. static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
  140. dma_addr_t start, size_t size)
  141. {
  142. struct rb_node *node = iommu->dma_list.rb_node;
  143. while (node) {
  144. struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
  145. if (start + size <= dma->iova)
  146. node = node->rb_left;
  147. else if (start >= dma->iova + dma->size)
  148. node = node->rb_right;
  149. else
  150. return dma;
  151. }
  152. return NULL;
  153. }
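/*
 * Usage sketch (hypothetical caller, assumes iommu->lock is held):
 * vfio_find_dma() answers "does [start, start + size) overlap any tracked
 * mapping?".  A size of 1 degenerates to a point lookup, which is how the
 * unmap and dirty-bitmap paths below probe range boundaries.
 *
 *        // Reject a new mapping that overlaps an existing one (cf. the
 *        // -EEXIST check in vfio_dma_do_map()):
 *        if (vfio_find_dma(iommu, iova, size))
 *                return -EEXIST;
 *
 *        // Point lookup of a mapping covering 'iova':
 *        struct vfio_dma *dma = vfio_find_dma(iommu, iova, 1);
 *        if (dma && dma->iova != iova)
 *                return -EINVAL;        // overlaps 'iova' but does not begin there
 */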
  154. static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
  155. {
  156. struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
  157. struct vfio_dma *dma;
  158. while (*link) {
  159. parent = *link;
  160. dma = rb_entry(parent, struct vfio_dma, node);
  161. if (new->iova + new->size <= dma->iova)
  162. link = &(*link)->rb_left;
  163. else
  164. link = &(*link)->rb_right;
  165. }
  166. rb_link_node(&new->node, parent, link);
  167. rb_insert_color(&new->node, &iommu->dma_list);
  168. }
  169. static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
  170. {
  171. rb_erase(&old->node, &iommu->dma_list);
  172. }
  173. static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)
  174. {
  175. uint64_t npages = dma->size / pgsize;
  176. if (npages > DIRTY_BITMAP_PAGES_MAX)
  177. return -EINVAL;
  178. /*
  179. * Allocate an extra 64 bits of headroom so that bitmap_shift_left() has
  180. * room to shift the bitmap and combine an unaligned number of pages with
  181. * the adjacent vfio_dma range.
  182. */
  183. dma->bitmap = kvzalloc(DIRTY_BITMAP_BYTES(npages) + sizeof(u64),
  184. GFP_KERNEL);
  185. if (!dma->bitmap)
  186. return -ENOMEM;
  187. return 0;
  188. }
  189. static void vfio_dma_bitmap_free(struct vfio_dma *dma)
  190. {
  191. kfree(dma->bitmap);
  192. dma->bitmap = NULL;
  193. }
  194. static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
  195. {
  196. struct rb_node *p;
  197. unsigned long pgshift = __ffs(pgsize);
  198. for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) {
  199. struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, node);
  200. bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1);
  201. }
  202. }
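/*
 * Worked example: with pgsize = 4096, pgshift = __ffs(4096) = 12.  A pinned
 * pfn whose iova is dma->iova + 0x3000 therefore sets bit (0x3000 >> 12) = 3
 * of dma->bitmap, i.e. dirty state is tracked at the smallest IOMMU page
 * granularity, relative to the start of the vfio_dma.
 */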
  203. static void vfio_iommu_populate_bitmap_full(struct vfio_iommu *iommu)
  204. {
  205. struct rb_node *n;
  206. unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
  207. for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
  208. struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
  209. bitmap_set(dma->bitmap, 0, dma->size >> pgshift);
  210. }
  211. }
  212. static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
  213. {
  214. struct rb_node *n;
  215. for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
  216. struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
  217. int ret;
  218. ret = vfio_dma_bitmap_alloc(dma, pgsize);
  219. if (ret) {
  220. struct rb_node *p;
  221. for (p = rb_prev(n); p; p = rb_prev(p)) {
  222. struct vfio_dma *dma = rb_entry(p,
  223. struct vfio_dma, node);
  224. vfio_dma_bitmap_free(dma);
  225. }
  226. return ret;
  227. }
  228. vfio_dma_populate_bitmap(dma, pgsize);
  229. }
  230. return 0;
  231. }
  232. static void vfio_dma_bitmap_free_all(struct vfio_iommu *iommu)
  233. {
  234. struct rb_node *n;
  235. for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
  236. struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
  237. vfio_dma_bitmap_free(dma);
  238. }
  239. }
  240. /*
  241. * Helper Functions for host iova-pfn list
  242. */
  243. static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
  244. {
  245. struct vfio_pfn *vpfn;
  246. struct rb_node *node = dma->pfn_list.rb_node;
  247. while (node) {
  248. vpfn = rb_entry(node, struct vfio_pfn, node);
  249. if (iova < vpfn->iova)
  250. node = node->rb_left;
  251. else if (iova > vpfn->iova)
  252. node = node->rb_right;
  253. else
  254. return vpfn;
  255. }
  256. return NULL;
  257. }
  258. static void vfio_link_pfn(struct vfio_dma *dma,
  259. struct vfio_pfn *new)
  260. {
  261. struct rb_node **link, *parent = NULL;
  262. struct vfio_pfn *vpfn;
  263. link = &dma->pfn_list.rb_node;
  264. while (*link) {
  265. parent = *link;
  266. vpfn = rb_entry(parent, struct vfio_pfn, node);
  267. if (new->iova < vpfn->iova)
  268. link = &(*link)->rb_left;
  269. else
  270. link = &(*link)->rb_right;
  271. }
  272. rb_link_node(&new->node, parent, link);
  273. rb_insert_color(&new->node, &dma->pfn_list);
  274. }
  275. static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old)
  276. {
  277. rb_erase(&old->node, &dma->pfn_list);
  278. }
  279. static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
  280. unsigned long pfn)
  281. {
  282. struct vfio_pfn *vpfn;
  283. vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL);
  284. if (!vpfn)
  285. return -ENOMEM;
  286. vpfn->iova = iova;
  287. vpfn->pfn = pfn;
  288. vpfn->ref_count = 1;
  289. vfio_link_pfn(dma, vpfn);
  290. return 0;
  291. }
  292. static void vfio_remove_from_pfn_list(struct vfio_dma *dma,
  293. struct vfio_pfn *vpfn)
  294. {
  295. vfio_unlink_pfn(dma, vpfn);
  296. kfree(vpfn);
  297. }
  298. static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
  299. unsigned long iova)
  300. {
  301. struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
  302. if (vpfn)
  303. vpfn->ref_count++;
  304. return vpfn;
  305. }
  306. static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
  307. {
  308. int ret = 0;
  309. vpfn->ref_count--;
  310. if (!vpfn->ref_count) {
  311. ret = put_pfn(vpfn->pfn, dma->prot);
  312. vfio_remove_from_pfn_list(dma, vpfn);
  313. }
  314. return ret;
  315. }
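/*
 * Pairing sketch (hypothetical, iommu->lock held): entries are created by the
 * external pin path via vfio_add_to_pfn_list(); afterwards every successful
 * vfio_iova_get_vfio_pfn() must be balanced by a vfio_iova_put_vfio_pfn(),
 * and the final put drops the page pin via put_pfn() and removes the node
 * from dma->pfn_list.
 *
 *        struct vfio_pfn *vpfn = vfio_iova_get_vfio_pfn(dma, iova);
 *        if (!vpfn)
 *                return 0;        // iova was never externally pinned
 *        ...
 *        vfio_iova_put_vfio_pfn(dma, vpfn);        // may unpin the backing page
 */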
  316. static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
  317. {
  318. struct mm_struct *mm;
  319. int ret;
  320. if (!npage)
  321. return 0;
  322. mm = async ? get_task_mm(dma->task) : dma->task->mm;
  323. if (!mm)
  324. return -ESRCH; /* process exited */
  325. ret = mmap_write_lock_killable(mm);
  326. if (!ret) {
  327. ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
  328. dma->lock_cap);
  329. mmap_write_unlock(mm);
  330. }
  331. if (async)
  332. mmput(mm);
  333. return ret;
  334. }
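/*
 * Semantics sketch: npage is a signed page-count delta against the mapping
 * task's locked_vm; pin paths pass a positive count and unpin paths a
 * negative one.  async selects get_task_mm(), taking an mm reference so the
 * update is also safe from contexts other than the mapping task (e.g. the
 * external pin/unpin paths below).
 *
 *        vfio_lock_acct(dma, 1, true);        // charge one newly pinned page
 *        ...
 *        vfio_lock_acct(dma, -1, true);        // undo the charge when unpinning
 */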
  335. /*
  336. * Some mappings aren't backed by a struct page, for example an mmap'd
  337. * MMIO range for our own or another device. These use a different
  338. * pfn conversion and shouldn't be tracked as locked pages.
  339. * For compound pages, any driver that sets the reserved bit in head
  340. * page needs to set the reserved bit in all subpages to be safe.
  341. */
  342. static bool is_invalid_reserved_pfn(unsigned long pfn)
  343. {
  344. if (pfn_valid(pfn))
  345. return PageReserved(pfn_to_page(pfn));
  346. return true;
  347. }
  348. static int put_pfn(unsigned long pfn, int prot)
  349. {
  350. if (!is_invalid_reserved_pfn(pfn)) {
  351. struct page *page = pfn_to_page(pfn);
  352. unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE);
  353. return 1;
  354. }
  355. return 0;
  356. }
  357. static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
  358. unsigned long vaddr, unsigned long *pfn,
  359. bool write_fault)
  360. {
  361. pte_t *ptep;
  362. spinlock_t *ptl;
  363. int ret;
  364. ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
  365. if (ret) {
  366. bool unlocked = false;
  367. ret = fixup_user_fault(mm, vaddr,
  368. FAULT_FLAG_REMOTE |
  369. (write_fault ? FAULT_FLAG_WRITE : 0),
  370. &unlocked);
  371. if (unlocked)
  372. return -EAGAIN;
  373. if (ret)
  374. return ret;
  375. ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
  376. if (ret)
  377. return ret;
  378. }
  379. if (write_fault && !pte_write(*ptep))
  380. ret = -EFAULT;
  381. else
  382. *pfn = pte_pfn(*ptep);
  383. pte_unmap_unlock(ptep, ptl);
  384. return ret;
  385. }
  386. static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
  387. int prot, unsigned long *pfn)
  388. {
  389. struct page *page[1];
  390. struct vm_area_struct *vma;
  391. unsigned int flags = 0;
  392. int ret;
  393. if (prot & IOMMU_WRITE)
  394. flags |= FOLL_WRITE;
  395. mmap_read_lock(mm);
  396. ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM,
  397. page, NULL, NULL);
  398. if (ret == 1) {
  399. *pfn = page_to_pfn(page[0]);
  400. ret = 0;
  401. goto done;
  402. }
  403. vaddr = untagged_addr(vaddr);
  404. retry:
  405. vma = find_vma_intersection(mm, vaddr, vaddr + 1);
  406. if (vma && vma->vm_flags & VM_PFNMAP) {
  407. ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
  408. if (ret == -EAGAIN)
  409. goto retry;
  410. if (!ret && !is_invalid_reserved_pfn(*pfn))
  411. ret = -EFAULT;
  412. }
  413. done:
  414. mmap_read_unlock(mm);
  415. return ret;
  416. }
  417. /*
  418. * Attempt to pin pages. We really don't want to track all the pfns and
  419. * the iommu can only map chunks of consecutive pfns anyway, so get the
  420. * first page and all consecutive pages with the same locking.
  421. */
  422. static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
  423. long npage, unsigned long *pfn_base,
  424. unsigned long limit)
  425. {
  426. unsigned long pfn = 0;
  427. long ret, pinned = 0, lock_acct = 0;
  428. bool rsvd;
  429. dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
  430. /* This code path is only user initiated */
  431. if (!current->mm)
  432. return -ENODEV;
  433. ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
  434. if (ret)
  435. return ret;
  436. pinned++;
  437. rsvd = is_invalid_reserved_pfn(*pfn_base);
  438. /*
  439. * Reserved pages aren't counted against the user, externally pinned
  440. * pages are already counted against the user.
  441. */
  442. if (!rsvd && !vfio_find_vpfn(dma, iova)) {
  443. if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) {
  444. put_pfn(*pfn_base, dma->prot);
  445. pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
  446. limit << PAGE_SHIFT);
  447. return -ENOMEM;
  448. }
  449. lock_acct++;
  450. }
  451. if (unlikely(disable_hugepages))
  452. goto out;
  453. /* Lock all the consecutive pages from pfn_base */
  454. for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
  455. pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
  456. ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
  457. if (ret)
  458. break;
  459. if (pfn != *pfn_base + pinned ||
  460. rsvd != is_invalid_reserved_pfn(pfn)) {
  461. put_pfn(pfn, dma->prot);
  462. break;
  463. }
  464. if (!rsvd && !vfio_find_vpfn(dma, iova)) {
  465. if (!dma->lock_cap &&
  466. current->mm->locked_vm + lock_acct + 1 > limit) {
  467. put_pfn(pfn, dma->prot);
  468. pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
  469. __func__, limit << PAGE_SHIFT);
  470. ret = -ENOMEM;
  471. goto unpin_out;
  472. }
  473. lock_acct++;
  474. }
  475. }
  476. out:
  477. ret = vfio_lock_acct(dma, lock_acct, false);
  478. unpin_out:
  479. if (ret) {
  480. if (!rsvd) {
  481. for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
  482. put_pfn(pfn, dma->prot);
  483. }
  484. return ret;
  485. }
  486. return pinned;
  487. }
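/*
 * Contract sketch, mirroring vfio_pin_map_dma() below: a positive return is
 * the number of pages pinned, all physically contiguous starting at
 * *pfn_base, so the caller can hand the whole run to the IOMMU in one call.
 *
 *        npage = vfio_pin_pages_remote(dma, vaddr, size >> PAGE_SHIFT,
 *                                      &pfn, limit);
 *        if (npage > 0)
 *                ret = vfio_iommu_map(iommu, iova, pfn, npage, dma->prot);
 */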
  488. static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
  489. unsigned long pfn, long npage,
  490. bool do_accounting)
  491. {
  492. long unlocked = 0, locked = 0;
  493. long i;
  494. for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
  495. if (put_pfn(pfn++, dma->prot)) {
  496. unlocked++;
  497. if (vfio_find_vpfn(dma, iova))
  498. locked++;
  499. }
  500. }
  501. if (do_accounting)
  502. vfio_lock_acct(dma, locked - unlocked, true);
  503. return unlocked;
  504. }
  505. static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
  506. unsigned long *pfn_base, bool do_accounting)
  507. {
  508. struct mm_struct *mm;
  509. int ret;
  510. mm = get_task_mm(dma->task);
  511. if (!mm)
  512. return -ENODEV;
  513. ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
  514. if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
  515. ret = vfio_lock_acct(dma, 1, true);
  516. if (ret) {
  517. put_pfn(*pfn_base, dma->prot);
  518. if (ret == -ENOMEM)
  519. pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK "
  520. "(%ld) exceeded\n", __func__,
  521. dma->task->comm, task_pid_nr(dma->task),
  522. task_rlimit(dma->task, RLIMIT_MEMLOCK));
  523. }
  524. }
  525. mmput(mm);
  526. return ret;
  527. }
  528. static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
  529. bool do_accounting)
  530. {
  531. int unlocked;
  532. struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
  533. if (!vpfn)
  534. return 0;
  535. unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
  536. if (do_accounting)
  537. vfio_lock_acct(dma, -unlocked, true);
  538. return unlocked;
  539. }
  540. static int vfio_iommu_type1_pin_pages(void *iommu_data,
  541. struct iommu_group *iommu_group,
  542. unsigned long *user_pfn,
  543. int npage, int prot,
  544. unsigned long *phys_pfn)
  545. {
  546. struct vfio_iommu *iommu = iommu_data;
  547. struct vfio_group *group;
  548. int i, j, ret;
  549. unsigned long remote_vaddr;
  550. struct vfio_dma *dma;
  551. bool do_accounting;
  552. if (!iommu || !user_pfn || !phys_pfn)
  553. return -EINVAL;
  554. /* Supported for v2 version only */
  555. if (!iommu->v2)
  556. return -EACCES;
  557. mutex_lock(&iommu->lock);
  558. /* Fail if notifier list is empty */
  559. if (!iommu->notifier.head) {
  560. ret = -EINVAL;
  561. goto pin_done;
  562. }
  563. /*
  564. * If an iommu-capable domain exists in the container then all pages are
  565. * already pinned and accounted. Accounting should only be done if there is
  566. * no iommu-capable domain in the container.
  567. */
  568. do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
  569. for (i = 0; i < npage; i++) {
  570. dma_addr_t iova;
  571. struct vfio_pfn *vpfn;
  572. iova = user_pfn[i] << PAGE_SHIFT;
  573. dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
  574. if (!dma) {
  575. ret = -EINVAL;
  576. goto pin_unwind;
  577. }
  578. if ((dma->prot & prot) != prot) {
  579. ret = -EPERM;
  580. goto pin_unwind;
  581. }
  582. vpfn = vfio_iova_get_vfio_pfn(dma, iova);
  583. if (vpfn) {
  584. phys_pfn[i] = vpfn->pfn;
  585. continue;
  586. }
  587. remote_vaddr = dma->vaddr + (iova - dma->iova);
  588. ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
  589. do_accounting);
  590. if (ret)
  591. goto pin_unwind;
  592. ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
  593. if (ret) {
  594. if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
  595. vfio_lock_acct(dma, -1, true);
  596. goto pin_unwind;
  597. }
  598. if (iommu->dirty_page_tracking) {
  599. unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
  600. /*
  601. * Bitmap populated with the smallest supported page
  602. * size
  603. */
  604. bitmap_set(dma->bitmap,
  605. (iova - dma->iova) >> pgshift, 1);
  606. }
  607. }
  608. ret = i;
  609. group = vfio_iommu_find_iommu_group(iommu, iommu_group);
  610. if (!group->pinned_page_dirty_scope) {
  611. group->pinned_page_dirty_scope = true;
  612. update_pinned_page_dirty_scope(iommu);
  613. }
  614. goto pin_done;
  615. pin_unwind:
  616. phys_pfn[i] = 0;
  617. for (j = 0; j < i; j++) {
  618. dma_addr_t iova;
  619. iova = user_pfn[j] << PAGE_SHIFT;
  620. dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
  621. vfio_unpin_page_external(dma, iova, do_accounting);
  622. phys_pfn[j] = 0;
  623. }
  624. pin_done:
  625. mutex_unlock(&iommu->lock);
  626. return ret;
  627. }
  628. static int vfio_iommu_type1_unpin_pages(void *iommu_data,
  629. unsigned long *user_pfn,
  630. int npage)
  631. {
  632. struct vfio_iommu *iommu = iommu_data;
  633. bool do_accounting;
  634. int i;
  635. if (!iommu || !user_pfn)
  636. return -EINVAL;
  637. /* Supported for v2 version only */
  638. if (!iommu->v2)
  639. return -EACCES;
  640. mutex_lock(&iommu->lock);
  641. do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
  642. for (i = 0; i < npage; i++) {
  643. struct vfio_dma *dma;
  644. dma_addr_t iova;
  645. iova = user_pfn[i] << PAGE_SHIFT;
  646. dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
  647. if (!dma)
  648. goto unpin_exit;
  649. vfio_unpin_page_external(dma, iova, do_accounting);
  650. }
  651. unpin_exit:
  652. mutex_unlock(&iommu->lock);
  653. return i > npage ? npage : (i > 0 ? i : -EINVAL);
  654. }
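/*
 * Caller sketch: these two callbacks back the vfio_pin_pages() /
 * vfio_unpin_pages() helpers used by mdev vendor drivers (prototypes as of
 * this VFIO core; check include/linux/vfio.h in your tree).  A vendor driver
 * pins guest pages by IOVA page index and must unpin them again, at the
 * latest when notified of a DMA_UNMAP covering the range.
 *
 *        unsigned long user_pfn = gpa >> PAGE_SHIFT;   // hypothetical guest address
 *        unsigned long phys_pfn;
 *        int ret;
 *
 *        ret = vfio_pin_pages(mdev_dev(mdev), &user_pfn, 1,
 *                             IOMMU_READ | IOMMU_WRITE, &phys_pfn);
 *        if (ret != 1)
 *                return ret < 0 ? ret : -EFAULT;
 *        ...
 *        vfio_unpin_pages(mdev_dev(mdev), &user_pfn, 1);
 */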
  655. static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
  656. struct list_head *regions,
  657. struct iommu_iotlb_gather *iotlb_gather)
  658. {
  659. long unlocked = 0;
  660. struct vfio_regions *entry, *next;
  661. iommu_iotlb_sync(domain->domain, iotlb_gather);
  662. list_for_each_entry_safe(entry, next, regions, list) {
  663. unlocked += vfio_unpin_pages_remote(dma,
  664. entry->iova,
  665. entry->phys >> PAGE_SHIFT,
  666. entry->len >> PAGE_SHIFT,
  667. false);
  668. list_del(&entry->list);
  669. kfree(entry);
  670. }
  671. cond_resched();
  672. return unlocked;
  673. }
  674. /*
  675. * Generally, VFIO needs to unpin remote pages after each IOTLB flush.
  676. * Therefore, when using the IOTLB flush sync interface, VFIO needs to keep track
  677. * of these regions (currently using a list).
  678. *
  679. * This value specifies the maximum number of regions for each IOTLB flush sync.
  680. */
  681. #define VFIO_IOMMU_TLB_SYNC_MAX 512
  682. static size_t unmap_unpin_fast(struct vfio_domain *domain,
  683. struct vfio_dma *dma, dma_addr_t *iova,
  684. size_t len, phys_addr_t phys, long *unlocked,
  685. struct list_head *unmapped_list,
  686. int *unmapped_cnt,
  687. struct iommu_iotlb_gather *iotlb_gather)
  688. {
  689. size_t unmapped = 0;
  690. struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  691. if (entry) {
  692. unmapped = iommu_unmap_fast(domain->domain, *iova, len,
  693. iotlb_gather);
  694. if (!unmapped) {
  695. kfree(entry);
  696. } else {
  697. entry->iova = *iova;
  698. entry->phys = phys;
  699. entry->len = unmapped;
  700. list_add_tail(&entry->list, unmapped_list);
  701. *iova += unmapped;
  702. (*unmapped_cnt)++;
  703. }
  704. }
  705. /*
  706. * Sync if the number of fast-unmap regions hits the limit
  707. * or in case of errors.
  708. */
  709. if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
  710. *unlocked += vfio_sync_unpin(dma, domain, unmapped_list,
  711. iotlb_gather);
  712. *unmapped_cnt = 0;
  713. }
  714. return unmapped;
  715. }
  716. static size_t unmap_unpin_slow(struct vfio_domain *domain,
  717. struct vfio_dma *dma, dma_addr_t *iova,
  718. size_t len, phys_addr_t phys,
  719. long *unlocked)
  720. {
  721. size_t unmapped = iommu_unmap(domain->domain, *iova, len);
  722. if (unmapped) {
  723. *unlocked += vfio_unpin_pages_remote(dma, *iova,
  724. phys >> PAGE_SHIFT,
  725. unmapped >> PAGE_SHIFT,
  726. false);
  727. *iova += unmapped;
  728. cond_resched();
  729. }
  730. return unmapped;
  731. }
  732. static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
  733. bool do_accounting)
  734. {
  735. dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
  736. struct vfio_domain *domain, *d;
  737. LIST_HEAD(unmapped_region_list);
  738. struct iommu_iotlb_gather iotlb_gather;
  739. int unmapped_region_cnt = 0;
  740. long unlocked = 0;
  741. if (!dma->size)
  742. return 0;
  743. if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
  744. return 0;
  745. /*
  746. * We use the IOMMU to track the physical addresses, otherwise we'd
  747. * need a much more complicated tracking system. Unfortunately that
  748. * means we need to use one of the iommu domains to figure out the
  749. * pfns to unpin. The rest need to be unmapped in advance so we have
  750. * no iommu translations remaining when the pages are unpinned.
  751. */
  752. domain = d = list_first_entry(&iommu->domain_list,
  753. struct vfio_domain, next);
  754. list_for_each_entry_continue(d, &iommu->domain_list, next) {
  755. iommu_unmap(d->domain, dma->iova, dma->size);
  756. cond_resched();
  757. }
  758. iommu_iotlb_gather_init(&iotlb_gather);
  759. while (iova < end) {
  760. size_t unmapped, len;
  761. phys_addr_t phys, next;
  762. phys = iommu_iova_to_phys(domain->domain, iova);
  763. if (WARN_ON(!phys)) {
  764. iova += PAGE_SIZE;
  765. continue;
  766. }
  767. /*
  768. * To optimize for fewer iommu_unmap() calls, each of which
  769. * may require hardware cache flushing, try to find the
  770. * largest contiguous physical memory chunk to unmap.
  771. */
  772. for (len = PAGE_SIZE;
  773. !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
  774. next = iommu_iova_to_phys(domain->domain, iova + len);
  775. if (next != phys + len)
  776. break;
  777. }
  778. /*
  779. * First, try to use fast unmap/unpin. In case of failure,
  780. * switch to slow unmap/unpin path.
  781. */
  782. unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
  783. &unlocked, &unmapped_region_list,
  784. &unmapped_region_cnt,
  785. &iotlb_gather);
  786. if (!unmapped) {
  787. unmapped = unmap_unpin_slow(domain, dma, &iova, len,
  788. phys, &unlocked);
  789. if (WARN_ON(!unmapped))
  790. break;
  791. }
  792. }
  793. dma->iommu_mapped = false;
  794. if (unmapped_region_cnt) {
  795. unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list,
  796. &iotlb_gather);
  797. }
  798. if (do_accounting) {
  799. vfio_lock_acct(dma, -unlocked, true);
  800. return 0;
  801. }
  802. return unlocked;
  803. }
  804. static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
  805. {
  806. WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list));
  807. vfio_unmap_unpin(iommu, dma, true);
  808. vfio_unlink_dma(iommu, dma);
  809. put_task_struct(dma->task);
  810. vfio_dma_bitmap_free(dma);
  811. kfree(dma);
  812. iommu->dma_avail++;
  813. }
  814. static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
  815. {
  816. struct vfio_domain *domain;
  817. iommu->pgsize_bitmap = ULONG_MAX;
  818. list_for_each_entry(domain, &iommu->domain_list, next)
  819. iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap;
  820. /*
  821. * In case the IOMMU supports page sizes smaller than PAGE_SIZE
  822. * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
  823. * That way the user will be able to map/unmap buffers whose size/
  824. * start address is aligned with PAGE_SIZE. Pinning code uses that
  825. * granularity while the iommu driver can use the sub-PAGE_SIZE size
  826. * to map the buffer.
  827. */
  828. if (iommu->pgsize_bitmap & ~PAGE_MASK) {
  829. iommu->pgsize_bitmap &= PAGE_MASK;
  830. iommu->pgsize_bitmap |= PAGE_SIZE;
  831. }
  832. }
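/*
 * Worked example: pgsize_bitmap is the intersection of every domain's
 * supported page sizes, so its lowest set bit is the one granularity all
 * domains can map.  With a typical x86 bitmap of 4K | 2M | 1G this yields
 * pgshift = __ffs(bitmap) = 12 and pgsize = 4096, which is the alignment the
 * map/unmap ioctls below enforce.
 *
 *        unsigned long pgshift = __ffs(iommu->pgsize_bitmap);  // 12 for 4K
 *        size_t pgsize = (size_t)1 << pgshift;                 // 4096
 */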
  833. static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
  834. struct vfio_dma *dma, dma_addr_t base_iova,
  835. size_t pgsize)
  836. {
  837. unsigned long pgshift = __ffs(pgsize);
  838. unsigned long nbits = dma->size >> pgshift;
  839. unsigned long bit_offset = (dma->iova - base_iova) >> pgshift;
  840. unsigned long copy_offset = bit_offset / BITS_PER_LONG;
  841. unsigned long shift = bit_offset % BITS_PER_LONG;
  842. unsigned long leftover;
  843. /*
  844. * mark all pages dirty if any IOMMU capable device is not able
  845. * to report dirty pages and all pages are pinned and mapped.
  846. */
  847. if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
  848. bitmap_set(dma->bitmap, 0, nbits);
  849. if (shift) {
  850. bitmap_shift_left(dma->bitmap, dma->bitmap, shift,
  851. nbits + shift);
  852. if (copy_from_user(&leftover,
  853. (void __user *)(bitmap + copy_offset),
  854. sizeof(leftover)))
  855. return -EFAULT;
  856. bitmap_or(dma->bitmap, dma->bitmap, &leftover, shift);
  857. }
  858. if (copy_to_user((void __user *)(bitmap + copy_offset), dma->bitmap,
  859. DIRTY_BITMAP_BYTES(nbits + shift)))
  860. return -EFAULT;
  861. return 0;
  862. }
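/*
 * Worked example: with 4K pages (pgshift = 12), a vfio_dma starting at
 * base_iova + 0x5000 has bit_offset = 5, so copy_offset = 5 / 64 = 0 and
 * shift = 5.  The local bitmap is shifted left by 5 bits, the user's first
 * u64 is read back and its low 5 bits ("leftover") are OR-ed in so that a
 * neighbouring vfio_dma sharing that u64 is not clobbered, and the result is
 * copied out to bitmap + copy_offset.
 */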
  863. static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
  864. dma_addr_t iova, size_t size, size_t pgsize)
  865. {
  866. struct vfio_dma *dma;
  867. struct rb_node *n;
  868. unsigned long pgshift = __ffs(pgsize);
  869. int ret;
  870. /*
  871. * GET_BITMAP request must fully cover vfio_dma mappings. Multiple
  872. * vfio_dma mappings may be combined by specifying large ranges, but
  873. * there must not be any previous mappings bisected by the range.
  874. * An error will be returned if these conditions are not met.
  875. */
  876. dma = vfio_find_dma(iommu, iova, 1);
  877. if (dma && dma->iova != iova)
  878. return -EINVAL;
  879. dma = vfio_find_dma(iommu, iova + size - 1, 0);
  880. if (dma && dma->iova + dma->size != iova + size)
  881. return -EINVAL;
  882. for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
  883. struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
  884. if (dma->iova < iova)
  885. continue;
  886. if (dma->iova > iova + size - 1)
  887. break;
  888. ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize);
  889. if (ret)
  890. return ret;
  891. /*
  892. * Re-populate the bitmap with the currently pinned pages, which remain
  893. * dirty; this drops pages that have since been unpinned as well as
  894. * one-off dirty marks set by vfio_dma_rw().
  895. */
  896. bitmap_clear(dma->bitmap, 0, dma->size >> pgshift);
  897. vfio_dma_populate_bitmap(dma, pgsize);
  898. }
  899. return 0;
  900. }
  901. static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size)
  902. {
  903. if (!npages || !bitmap_size || (bitmap_size > DIRTY_BITMAP_SIZE_MAX) ||
  904. (bitmap_size < DIRTY_BITMAP_BYTES(npages)))
  905. return -EINVAL;
  906. return 0;
  907. }
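/*
 * Worked example: for a 1 GiB range at 4K granularity, npages = 262144 and
 * DIRTY_BITMAP_BYTES(npages) = 32768, so any bitmap_size in
 * [32768, DIRTY_BITMAP_SIZE_MAX] is accepted; zero pages, a zero size, or a
 * buffer smaller than one bit per page is rejected with -EINVAL.
 */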
  908. static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
  909. struct vfio_iommu_type1_dma_unmap *unmap,
  910. struct vfio_bitmap *bitmap)
  911. {
  912. struct vfio_dma *dma, *dma_last = NULL;
  913. size_t unmapped = 0, pgsize;
  914. int ret = 0, retries = 0;
  915. unsigned long pgshift;
  916. mutex_lock(&iommu->lock);
  917. pgshift = __ffs(iommu->pgsize_bitmap);
  918. pgsize = (size_t)1 << pgshift;
  919. if (unmap->iova & (pgsize - 1)) {
  920. ret = -EINVAL;
  921. goto unlock;
  922. }
  923. if (!unmap->size || unmap->size & (pgsize - 1)) {
  924. ret = -EINVAL;
  925. goto unlock;
  926. }
  927. if (unmap->iova + unmap->size - 1 < unmap->iova ||
  928. unmap->size > SIZE_MAX) {
  929. ret = -EINVAL;
  930. goto unlock;
  931. }
  932. /* When dirty tracking is enabled, allow only min supported pgsize */
  933. if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
  934. (!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) {
  935. ret = -EINVAL;
  936. goto unlock;
  937. }
  938. WARN_ON((pgsize - 1) & PAGE_MASK);
  939. again:
  940. /*
  941. * vfio-iommu-type1 (v1) - User mappings were coalesced together to
  942. * avoid tracking individual mappings. This means that the granularity
  943. * of the original mapping was lost and the user was allowed to attempt
  944. * to unmap any range. Depending on the contiguousness of physical
  945. * memory and page sizes supported by the IOMMU, arbitrary unmaps may
  946. * or may not have worked. We only guaranteed unmap granularity
  947. * matching the original mapping; even though it was untracked here,
  948. * the original mappings are reflected in IOMMU mappings. This
  949. * resulted in a couple unusual behaviors. First, if a range is not
  950. * able to be unmapped, ex. a set of 4k pages that was mapped as a
  951. * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
  952. * a zero sized unmap. Also, if an unmap request overlaps the first
  953. * address of a hugepage, the IOMMU will unmap the entire hugepage.
  954. * This also returns success and the returned unmap size reflects the
  955. * actual size unmapped.
  956. *
  957. * We attempt to maintain compatibility with this "v1" interface, but
  958. * we take control out of the hands of the IOMMU. Therefore, an unmap
  959. * request offset from the beginning of the original mapping will
  960. * return success with zero sized unmap. And an unmap request covering
  961. * the first iova of mapping will unmap the entire range.
  962. *
  963. * The v2 version of this interface intends to be more deterministic.
  964. * Unmap requests must fully cover previous mappings. Multiple
  965. * mappings may still be unmapped by specifying large ranges, but there
  966. * must not be any previous mappings bisected by the range. An error
  967. * will be returned if these conditions are not met. The v2 interface
  968. * will only return success and a size of zero if there were no
  969. * mappings within the range.
  970. */
  971. if (iommu->v2) {
  972. dma = vfio_find_dma(iommu, unmap->iova, 1);
  973. if (dma && dma->iova != unmap->iova) {
  974. ret = -EINVAL;
  975. goto unlock;
  976. }
  977. dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
  978. if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
  979. ret = -EINVAL;
  980. goto unlock;
  981. }
  982. }
  983. while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
  984. if (!iommu->v2 && unmap->iova > dma->iova)
  985. break;
  986. /*
  987. * Only a task with the same address space as the one that mapped this
  988. * iova range is allowed to unmap it.
  989. */
  990. if (dma->task->mm != current->mm)
  991. break;
  992. if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
  993. struct vfio_iommu_type1_dma_unmap nb_unmap;
  994. if (dma_last == dma) {
  995. BUG_ON(++retries > 10);
  996. } else {
  997. dma_last = dma;
  998. retries = 0;
  999. }
  1000. nb_unmap.iova = dma->iova;
  1001. nb_unmap.size = dma->size;
  1002. /*
  1003. * Notify anyone (mdev vendor drivers) to invalidate and
  1004. * unmap iovas within the range we're about to unmap.
  1005. * Vendor drivers MUST unpin pages in response to an
  1006. * invalidation.
  1007. */
  1008. mutex_unlock(&iommu->lock);
  1009. blocking_notifier_call_chain(&iommu->notifier,
  1010. VFIO_IOMMU_NOTIFY_DMA_UNMAP,
  1011. &nb_unmap);
  1012. mutex_lock(&iommu->lock);
  1013. goto again;
  1014. }
  1015. if (unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
  1016. ret = update_user_bitmap(bitmap->data, iommu, dma,
  1017. unmap->iova, pgsize);
  1018. if (ret)
  1019. break;
  1020. }
  1021. unmapped += dma->size;
  1022. vfio_remove_dma(iommu, dma);
  1023. }
  1024. unlock:
  1025. mutex_unlock(&iommu->lock);
  1026. /* Report how much was unmapped */
  1027. unmap->size = unmapped;
  1028. return ret;
  1029. }
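/*
 * Userspace sketch (hypothetical file descriptor and example values) of the
 * ioctl behind this handler.  Under the v2 rules above, iova/size must
 * exactly cover whole previous mappings; the kernel reports back how many
 * bytes were actually unmapped.
 *
 *        struct vfio_iommu_type1_dma_unmap unmap = {
 *                .argsz = sizeof(unmap),
 *                .flags = 0,
 *                .iova  = 0x100000000ULL,        // must match a mapped range
 *                .size  = 0x40000000ULL,         // 1 GiB
 *        };
 *
 *        if (ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap))
 *                perror("VFIO_IOMMU_UNMAP_DMA");
 *        // unmap.size now holds the number of bytes unmapped
 */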
  1030. static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
  1031. unsigned long pfn, long npage, int prot)
  1032. {
  1033. struct vfio_domain *d;
  1034. int ret;
  1035. list_for_each_entry(d, &iommu->domain_list, next) {
  1036. ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
  1037. npage << PAGE_SHIFT, prot | d->prot);
  1038. if (ret)
  1039. goto unwind;
  1040. cond_resched();
  1041. }
  1042. return 0;
  1043. unwind:
  1044. list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) {
  1045. iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
  1046. cond_resched();
  1047. }
  1048. return ret;
  1049. }
  1050. static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
  1051. size_t map_size)
  1052. {
  1053. dma_addr_t iova = dma->iova;
  1054. unsigned long vaddr = dma->vaddr;
  1055. size_t size = map_size;
  1056. long npage;
  1057. unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
  1058. int ret = 0;
  1059. while (size) {
  1060. /* Pin a contiguous chunk of memory */
  1061. npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
  1062. size >> PAGE_SHIFT, &pfn, limit);
  1063. if (npage <= 0) {
  1064. WARN_ON(!npage);
  1065. ret = (int)npage;
  1066. break;
  1067. }
  1068. /* Map it! */
  1069. ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
  1070. dma->prot);
  1071. if (ret) {
  1072. vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
  1073. npage, true);
  1074. break;
  1075. }
  1076. size -= npage << PAGE_SHIFT;
  1077. dma->size += npage << PAGE_SHIFT;
  1078. }
  1079. dma->iommu_mapped = true;
  1080. if (ret)
  1081. vfio_remove_dma(iommu, dma);
  1082. return ret;
  1083. }
  1084. /*
  1086. * Check that a DMA map request falls within a valid iova range
  1086. */
  1087. static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu,
  1088. dma_addr_t start, dma_addr_t end)
  1089. {
  1090. struct list_head *iova = &iommu->iova_list;
  1091. struct vfio_iova *node;
  1092. list_for_each_entry(node, iova, list) {
  1093. if (start >= node->start && end <= node->end)
  1094. return true;
  1095. }
  1096. /*
  1097. * Check for list_empty() as well since a container with
  1098. * a single mdev device will have an empty list.
  1099. */
  1100. return list_empty(iova);
  1101. }
  1102. static int vfio_dma_do_map(struct vfio_iommu *iommu,
  1103. struct vfio_iommu_type1_dma_map *map)
  1104. {
  1105. dma_addr_t iova = map->iova;
  1106. unsigned long vaddr = map->vaddr;
  1107. size_t size = map->size;
  1108. int ret = 0, prot = 0;
  1109. size_t pgsize;
  1110. struct vfio_dma *dma;
  1111. /* Verify that none of our __u64 fields overflow */
  1112. if (map->size != size || map->vaddr != vaddr || map->iova != iova)
  1113. return -EINVAL;
  1114. /* READ/WRITE from device perspective */
  1115. if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
  1116. prot |= IOMMU_WRITE;
  1117. if (map->flags & VFIO_DMA_MAP_FLAG_READ)
  1118. prot |= IOMMU_READ;
  1119. mutex_lock(&iommu->lock);
  1120. pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
  1121. WARN_ON((pgsize - 1) & PAGE_MASK);
  1122. if (!prot || !size || (size | iova | vaddr) & (pgsize - 1)) {
  1123. ret = -EINVAL;
  1124. goto out_unlock;
  1125. }
  1126. /* Don't allow IOVA or virtual address wrap */
  1127. if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
  1128. ret = -EINVAL;
  1129. goto out_unlock;
  1130. }
  1131. if (vfio_find_dma(iommu, iova, size)) {
  1132. ret = -EEXIST;
  1133. goto out_unlock;
  1134. }
  1135. if (!iommu->dma_avail) {
  1136. ret = -ENOSPC;
  1137. goto out_unlock;
  1138. }
  1139. if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) {
  1140. ret = -EINVAL;
  1141. goto out_unlock;
  1142. }
  1143. dma = kzalloc(sizeof(*dma), GFP_KERNEL);
  1144. if (!dma) {
  1145. ret = -ENOMEM;
  1146. goto out_unlock;
  1147. }
  1148. iommu->dma_avail--;
  1149. dma->iova = iova;
  1150. dma->vaddr = vaddr;
  1151. dma->prot = prot;
  1152. /*
  1153. * We need to be able to both add to a task's locked memory and test
  1154. * against the locked memory limit and we need to be able to do both
  1155. * outside of this call path as pinning can be asynchronous via the
  1156. * external interfaces for mdev devices. RLIMIT_MEMLOCK requires a
  1157. * task_struct and VM locked pages requires an mm_struct, however
  1158. * holding an indefinite mm reference is not recommended, therefore we
  1159. * only hold a reference to a task. We could hold a reference to
  1160. * current, however QEMU uses this call path through vCPU threads,
  1161. * which can be killed resulting in a NULL mm and failure in the unmap
  1162. * path when called via a different thread. Avoid this problem by
  1163. * using the group_leader as threads within the same group require
  1164. * both CLONE_THREAD and CLONE_VM and will therefore use the same
  1165. * mm_struct.
  1166. *
  1167. * Previously we also used the task for testing CAP_IPC_LOCK at the
  1168. * time of pinning and accounting, however has_capability() makes use
  1169. * of real_cred, a copy-on-write field, so we can't guarantee that it
  1170. * matches group_leader, or in fact that it might not change by the
  1171. * time it's evaluated. If a process were to call MAP_DMA with
  1172. * CAP_IPC_LOCK but later drop it, it doesn't make sense that they
  1173. * possibly see different results for an iommu_mapped vfio_dma vs
  1174. * externally mapped. Therefore track CAP_IPC_LOCK in vfio_dma at the
  1175. * time of calling MAP_DMA.
  1176. */
  1177. get_task_struct(current->group_leader);
  1178. dma->task = current->group_leader;
  1179. dma->lock_cap = capable(CAP_IPC_LOCK);
  1180. dma->pfn_list = RB_ROOT;
  1181. /* Insert zero-sized and grow as we map chunks of it */
  1182. vfio_link_dma(iommu, dma);
  1183. /* Don't pin and map if container doesn't contain IOMMU capable domain */
  1184. if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
  1185. dma->size = size;
  1186. else
  1187. ret = vfio_pin_map_dma(iommu, dma, size);
  1188. if (!ret && iommu->dirty_page_tracking) {
  1189. ret = vfio_dma_bitmap_alloc(dma, pgsize);
  1190. if (ret)
  1191. vfio_remove_dma(iommu, dma);
  1192. }
  1193. out_unlock:
  1194. mutex_unlock(&iommu->lock);
  1195. return ret;
  1196. }
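/*
 * Userspace sketch (hypothetical file descriptor and example values) of the
 * ioctl behind this handler.  vaddr, iova and size must all be aligned to
 * the minimum IOMMU page size, and the iova range must fall inside the
 * container's valid iova list.
 *
 *        void *buf = mmap(NULL, 0x40000000, PROT_READ | PROT_WRITE,
 *                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *        struct vfio_iommu_type1_dma_map map = {
 *                .argsz = sizeof(map),
 *                .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *                .vaddr = (__u64)(uintptr_t)buf,
 *                .iova  = 0x100000000ULL,        // example device address
 *                .size  = 0x40000000ULL,         // 1 GiB
 *        };
 *
 *        if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map))
 *                perror("VFIO_IOMMU_MAP_DMA");
 */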
  1197. static int vfio_bus_type(struct device *dev, void *data)
  1198. {
  1199. struct bus_type **bus = data;
  1200. if (*bus && *bus != dev->bus)
  1201. return -EINVAL;
  1202. *bus = dev->bus;
  1203. return 0;
  1204. }
static int vfio_iommu_replay(struct vfio_iommu *iommu,
			     struct vfio_domain *domain)
{
	struct vfio_domain *d = NULL;
	struct rb_node *n;
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int ret;

	/* Arbitrarily pick the first domain in the list for lookups */
	if (!list_empty(&iommu->domain_list))
		d = list_first_entry(&iommu->domain_list,
				     struct vfio_domain, next);

	n = rb_first(&iommu->dma_list);

	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		dma = rb_entry(n, struct vfio_dma, node);
		iova = dma->iova;

		while (iova < dma->iova + dma->size) {
			phys_addr_t phys;
			size_t size;

			if (dma->iommu_mapped) {
				phys_addr_t p;
				dma_addr_t i;

				if (WARN_ON(!d)) { /* mapped w/o a domain?! */
					ret = -EINVAL;
					goto unwind;
				}

				phys = iommu_iova_to_phys(d->domain, iova);

				if (WARN_ON(!phys)) {
					iova += PAGE_SIZE;
					continue;
				}

				size = PAGE_SIZE;
				p = phys + size;
				i = iova + size;
				while (i < dma->iova + dma->size &&
				       p == iommu_iova_to_phys(d->domain, i)) {
					size += PAGE_SIZE;
					p += PAGE_SIZE;
					i += PAGE_SIZE;
				}
			} else {
				unsigned long pfn;
				unsigned long vaddr = dma->vaddr +
						      (iova - dma->iova);
				size_t n = dma->iova + dma->size - iova;
				long npage;

				npage = vfio_pin_pages_remote(dma, vaddr,
							      n >> PAGE_SHIFT,
							      &pfn, limit);
				if (npage <= 0) {
					WARN_ON(!npage);
					ret = (int)npage;
					goto unwind;
				}

				phys = pfn << PAGE_SHIFT;
				size = npage << PAGE_SHIFT;
			}

			ret = iommu_map(domain->domain, iova, phys,
					size, dma->prot | domain->prot);
			if (ret) {
				if (!dma->iommu_mapped)
					vfio_unpin_pages_remote(dma, iova,
							phys >> PAGE_SHIFT,
							size >> PAGE_SHIFT,
							true);
				goto unwind;
			}

			iova += size;
		}
	}

	/* All dmas are now mapped, defer to second tree walk for unwind */
	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);

		dma->iommu_mapped = true;
	}

	return 0;

unwind:
	for (; n; n = rb_prev(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
		dma_addr_t iova;

		if (dma->iommu_mapped) {
			iommu_unmap(domain->domain, dma->iova, dma->size);
			continue;
		}

		iova = dma->iova;
		while (iova < dma->iova + dma->size) {
			phys_addr_t phys, p;
			size_t size;
			dma_addr_t i;

			phys = iommu_iova_to_phys(domain->domain, iova);
			if (!phys) {
				iova += PAGE_SIZE;
				continue;
			}

			size = PAGE_SIZE;
			p = phys + size;
			i = iova + size;
			while (i < dma->iova + dma->size &&
			       p == iommu_iova_to_phys(domain->domain, i)) {
				size += PAGE_SIZE;
				p += PAGE_SIZE;
				i += PAGE_SIZE;
			}

			iommu_unmap(domain->domain, iova, size);
			vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
						size >> PAGE_SHIFT, true);
		}
	}

	return ret;
}
/*
 * We change our unmap behavior slightly depending on whether the IOMMU
 * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
 * for practically any contiguous power-of-two mapping we give it.  This means
 * we don't need to look for contiguous chunks ourselves to make unmapping
 * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
 * hugetlbfs is in use.
 */
static void vfio_test_domain_fgsp(struct vfio_domain *domain)
{
	struct page *pages;
	int ret, order = get_order(PAGE_SIZE * 2);

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages)
		return;

	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
			IOMMU_READ | IOMMU_WRITE | domain->prot);
	if (!ret) {
		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);

		if (unmapped == PAGE_SIZE)
			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
		else
			domain->fgsp = true;
	}

	__free_pages(pages, order);
}
static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
					   struct iommu_group *iommu_group)
{
	struct vfio_group *g;

	list_for_each_entry(g, &domain->group_list, next) {
		if (g->iommu_group == iommu_group)
			return g;
	}

	return NULL;
}

static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
						      struct iommu_group *iommu_group)
{
	struct vfio_domain *domain;
	struct vfio_group *group = NULL;

	list_for_each_entry(domain, &iommu->domain_list, next) {
		group = find_iommu_group(domain, iommu_group);
		if (group)
			return group;
	}

	if (iommu->external_domain)
		group = find_iommu_group(iommu->external_domain, iommu_group);

	return group;
}
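
/*
 * The container only has pinned-page dirty scope when every group in every
 * domain (including the external mdev domain) has declared that scope; a
 * single group without it demotes the whole container.
 */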
static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	struct vfio_group *group;

	list_for_each_entry(domain, &iommu->domain_list, next) {
		list_for_each_entry(group, &domain->group_list, next) {
			if (!group->pinned_page_dirty_scope) {
				iommu->pinned_page_dirty_scope = false;
				return;
			}
		}
	}

	if (iommu->external_domain) {
		domain = iommu->external_domain;
		list_for_each_entry(group, &domain->group_list, next) {
			if (!group->pinned_page_dirty_scope) {
				iommu->pinned_page_dirty_scope = false;
				return;
			}
		}
	}

	iommu->pinned_page_dirty_scope = true;
}
static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
				  phys_addr_t *base)
{
	struct iommu_resv_region *region;
	bool ret = false;

	list_for_each_entry(region, group_resv_regions, list) {
		/*
		 * The presence of any 'real' MSI regions should take
		 * precedence over the software-managed one if the
		 * IOMMU driver happens to advertise both types.
		 */
		if (region->type == IOMMU_RESV_MSI) {
			ret = false;
			break;
		}

		if (region->type == IOMMU_RESV_SW_MSI) {
			*base = region->start;
			ret = true;
		}
	}

	return ret;
}
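
/*
 * Look up the physical iommu_device backing an mdev via the (optional) mdev
 * module symbol; returns NULL when the symbol or backing device is absent.
 */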
static struct device *vfio_mdev_get_iommu_device(struct device *dev)
{
	struct device *(*fn)(struct device *dev);
	struct device *iommu_device;

	fn = symbol_get(mdev_get_iommu_device);
	if (fn) {
		iommu_device = fn(dev);
		symbol_put(mdev_get_iommu_device);

		return iommu_device;
	}

	return NULL;
}

static int vfio_mdev_attach_domain(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;
	struct device *iommu_device;

	iommu_device = vfio_mdev_get_iommu_device(dev);
	if (iommu_device) {
		if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
			return iommu_aux_attach_device(domain, iommu_device);
		else
			return iommu_attach_device(domain, iommu_device);
	}

	return -EINVAL;
}

static int vfio_mdev_detach_domain(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;
	struct device *iommu_device;

	iommu_device = vfio_mdev_get_iommu_device(dev);
	if (iommu_device) {
		if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
			iommu_aux_detach_device(domain, iommu_device);
		else
			iommu_detach_device(domain, iommu_device);
	}

	return 0;
}
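
/*
 * mdev groups are attached/detached per device against the backing
 * iommu_device (using aux domains when available); other groups are
 * attached/detached as a whole iommu_group.
 */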
static int vfio_iommu_attach_group(struct vfio_domain *domain,
				   struct vfio_group *group)
{
	if (group->mdev_group)
		return iommu_group_for_each_dev(group->iommu_group,
						domain->domain,
						vfio_mdev_attach_domain);
	else
		return iommu_attach_group(domain->domain, group->iommu_group);
}

static void vfio_iommu_detach_group(struct vfio_domain *domain,
				    struct vfio_group *group)
{
	if (group->mdev_group)
		iommu_group_for_each_dev(group->iommu_group, domain->domain,
					 vfio_mdev_detach_domain);
	else
		iommu_detach_group(domain->domain, group->iommu_group);
}

static bool vfio_bus_is_mdev(struct bus_type *bus)
{
	struct bus_type *mdev_bus;
	bool ret = false;

	mdev_bus = symbol_get(mdev_bus_type);
	if (mdev_bus) {
		ret = (bus == mdev_bus);
		symbol_put(mdev_bus_type);
	}

	return ret;
}

static int vfio_mdev_iommu_device(struct device *dev, void *data)
{
	struct device **old = data, *new;

	new = vfio_mdev_get_iommu_device(dev);
	if (!new || (*old && *old != new))
		return -EINVAL;

	*old = new;

	return 0;
}
/*
 * This is a helper function to insert an address range into the iova list.
 * The list is initially created with a single entry corresponding to
 * the IOMMU domain geometry to which the device group is attached.
 * The list aperture gets modified when a new domain is added to the
 * container if the new aperture doesn't conflict with the current one
 * or with any existing dma mappings. The list is also modified to
 * exclude any reserved regions associated with the device group.
 */
static int vfio_iommu_iova_insert(struct list_head *head,
				  dma_addr_t start, dma_addr_t end)
{
	struct vfio_iova *region;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->end = end;

	list_add_tail(&region->list, head);

	return 0;
}
/*
 * Check whether the new iommu aperture conflicts with the existing aperture
 * or with any existing dma mappings.
 */
static bool vfio_iommu_aper_conflict(struct vfio_iommu *iommu,
				     dma_addr_t start, dma_addr_t end)
{
	struct vfio_iova *first, *last;
	struct list_head *iova = &iommu->iova_list;

	if (list_empty(iova))
		return false;

	/* Disjoint sets, return conflict */
	first = list_first_entry(iova, struct vfio_iova, list);
	last = list_last_entry(iova, struct vfio_iova, list);
	if (start > last->end || end < first->start)
		return true;

	/* Check for any existing dma mappings below the new start */
	if (start > first->start) {
		if (vfio_find_dma(iommu, first->start, start - first->start))
			return true;
	}

	/* Check for any existing dma mappings beyond the new end */
	if (end < last->end) {
		if (vfio_find_dma(iommu, end + 1, last->end - end))
			return true;
	}

	return false;
}
/*
 * Resize iommu iova aperture window. This is called only if the new
 * aperture has no conflict with existing aperture and dma mappings.
 */
static int vfio_iommu_aper_resize(struct list_head *iova,
				  dma_addr_t start, dma_addr_t end)
{
	struct vfio_iova *node, *next;

	if (list_empty(iova))
		return vfio_iommu_iova_insert(iova, start, end);

	/* Adjust iova list start */
	list_for_each_entry_safe(node, next, iova, list) {
		if (start < node->start)
			break;
		if (start >= node->start && start < node->end) {
			node->start = start;
			break;
		}
		/* Delete nodes before new start */
		list_del(&node->list);
		kfree(node);
	}

	/* Adjust iova list end */
	list_for_each_entry_safe(node, next, iova, list) {
		if (end > node->end)
			continue;
		if (end > node->start && end <= node->end) {
			node->end = end;
			continue;
		}
		/* Delete nodes after new end */
		list_del(&node->list);
		kfree(node);
	}

	return 0;
}
/*
 * Check reserved region conflicts with existing dma mappings
 */
static bool vfio_iommu_resv_conflict(struct vfio_iommu *iommu,
				     struct list_head *resv_regions)
{
	struct iommu_resv_region *region;

	/* Check for conflict with existing dma mappings */
	list_for_each_entry(region, resv_regions, list) {
		if (region->type == IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		if (vfio_find_dma(iommu, region->start, region->length))
			return true;
	}

	return false;
}
/*
 * Check iova region overlap with reserved regions and
 * exclude them from the iommu iova range
 */
static int vfio_iommu_resv_exclude(struct list_head *iova,
				   struct list_head *resv_regions)
{
	struct iommu_resv_region *resv;
	struct vfio_iova *n, *next;

	list_for_each_entry(resv, resv_regions, list) {
		phys_addr_t start, end;

		if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		start = resv->start;
		end = resv->start + resv->length - 1;

		list_for_each_entry_safe(n, next, iova, list) {
			int ret = 0;

			/* No overlap */
			if (start > n->end || end < n->start)
				continue;
			/*
			 * Insert new nodes if the current node overlaps with
			 * the reserved region, to exclude that range from the
			 * valid iova ranges.  Note that the new nodes are
			 * inserted before the current node and the current
			 * node is then deleted, keeping the list updated and
			 * sorted.
			 */
			if (start > n->start)
				ret = vfio_iommu_iova_insert(&n->list, n->start,
							     start - 1);
			if (!ret && end < n->end)
				ret = vfio_iommu_iova_insert(&n->list, end + 1,
							     n->end);
			if (ret)
				return ret;

			list_del(&n->list);
			kfree(n);
		}
	}

	if (list_empty(iova))
		return -EINVAL;

	return 0;
}
static void vfio_iommu_resv_free(struct list_head *resv_regions)
{
	struct iommu_resv_region *n, *next;

	list_for_each_entry_safe(n, next, resv_regions, list) {
		list_del(&n->list);
		kfree(n);
	}
}

static void vfio_iommu_iova_free(struct list_head *iova)
{
	struct vfio_iova *n, *next;

	list_for_each_entry_safe(n, next, iova, list) {
		list_del(&n->list);
		kfree(n);
	}
}

static int vfio_iommu_iova_get_copy(struct vfio_iommu *iommu,
				    struct list_head *iova_copy)
{
	struct list_head *iova = &iommu->iova_list;
	struct vfio_iova *n;
	int ret;

	list_for_each_entry(n, iova, list) {
		ret = vfio_iommu_iova_insert(iova_copy, n->start, n->end);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	vfio_iommu_iova_free(iova_copy);
	return ret;
}

static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
					struct list_head *iova_copy)
{
	struct list_head *iova = &iommu->iova_list;

	vfio_iommu_iova_free(iova);

	list_splice_tail(iova_copy, iova);
}
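
/*
 * Attach an iommu_group to the container.  mdev groups without a backing
 * iommu_device join (or create) the external domain; all other groups get an
 * IOMMU domain, which is either merged into a compatible existing domain or
 * added to domain_list after replaying the current mappings into it.
 */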
static int vfio_iommu_type1_attach_group(void *iommu_data,
					 struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group;
	struct vfio_domain *domain, *d;
	struct bus_type *bus = NULL;
	int ret;
	bool resv_msi, msi_remap;
	phys_addr_t resv_msi_base = 0;
	struct iommu_domain_geometry geo;
	LIST_HEAD(iova_copy);
	LIST_HEAD(group_resv_regions);

	mutex_lock(&iommu->lock);

	/* Check for duplicates */
	if (vfio_iommu_find_iommu_group(iommu, iommu_group)) {
		mutex_unlock(&iommu->lock);
		return -EINVAL;
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!group || !domain) {
		ret = -ENOMEM;
		goto out_free;
	}

	group->iommu_group = iommu_group;

	/* Determine bus_type in order to allocate a domain */
	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
	if (ret)
		goto out_free;

	if (vfio_bus_is_mdev(bus)) {
		struct device *iommu_device = NULL;

		group->mdev_group = true;

		/* Determine the isolation type */
		ret = iommu_group_for_each_dev(iommu_group, &iommu_device,
					       vfio_mdev_iommu_device);
		if (ret || !iommu_device) {
			if (!iommu->external_domain) {
				INIT_LIST_HEAD(&domain->group_list);
				iommu->external_domain = domain;
				vfio_update_pgsize_bitmap(iommu);
			} else {
				kfree(domain);
			}

			list_add(&group->next,
				 &iommu->external_domain->group_list);
			/*
			 * A non-iommu-backed group cannot dirty memory
			 * directly; it can only use interfaces that provide
			 * dirty tracking.
			 * The iommu scope can only be promoted with the
			 * addition of a dirty tracking group.
			 */
			group->pinned_page_dirty_scope = true;
			if (!iommu->pinned_page_dirty_scope)
				update_pinned_page_dirty_scope(iommu);
			mutex_unlock(&iommu->lock);

			return 0;
		}

		bus = iommu_device->bus;
	}

	domain->domain = iommu_domain_alloc(bus);
	if (!domain->domain) {
		ret = -EIO;
		goto out_free;
	}

	if (iommu->nesting) {
		int attr = 1;

		ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
					    &attr);
		if (ret)
			goto out_domain;
	}

	ret = vfio_iommu_attach_group(domain, group);
	if (ret)
		goto out_domain;

	/* Get aperture info */
	iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, &geo);

	if (vfio_iommu_aper_conflict(iommu, geo.aperture_start,
				     geo.aperture_end)) {
		ret = -EINVAL;
		goto out_detach;
	}

	ret = iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
	if (ret)
		goto out_detach;

	if (vfio_iommu_resv_conflict(iommu, &group_resv_regions)) {
		ret = -EINVAL;
		goto out_detach;
	}

	/*
	 * We don't want to work on the original iova list as the list
	 * gets modified and in case of failure we have to retain the
	 * original list. Get a copy here.
	 */
	ret = vfio_iommu_iova_get_copy(iommu, &iova_copy);
	if (ret)
		goto out_detach;

	ret = vfio_iommu_aper_resize(&iova_copy, geo.aperture_start,
				     geo.aperture_end);
	if (ret)
		goto out_detach;

	ret = vfio_iommu_resv_exclude(&iova_copy, &group_resv_regions);
	if (ret)
		goto out_detach;

	resv_msi = vfio_iommu_has_sw_msi(&group_resv_regions, &resv_msi_base);

	INIT_LIST_HEAD(&domain->group_list);
	list_add(&group->next, &domain->group_list);

	msi_remap = irq_domain_check_msi_remap() ||
		    iommu_capable(bus, IOMMU_CAP_INTR_REMAP);

	if (!allow_unsafe_interrupts && !msi_remap) {
		pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
			__func__);
		ret = -EPERM;
		goto out_detach;
	}

	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		domain->prot |= IOMMU_CACHE;

	/*
	 * Try to match an existing compatible domain.  We don't want to
	 * preclude an IOMMU driver supporting multiple bus_types and being
	 * able to include different bus_types in the same IOMMU domain, so
	 * we test whether the domains use the same iommu_ops rather than
	 * testing if they're on the same bus_type.
	 */
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (d->domain->ops == domain->domain->ops &&
		    d->prot == domain->prot) {
			vfio_iommu_detach_group(domain, group);
			if (!vfio_iommu_attach_group(d, group)) {
				list_add(&group->next, &d->group_list);
				iommu_domain_free(domain->domain);
				kfree(domain);
				goto done;
			}

			ret = vfio_iommu_attach_group(domain, group);
			if (ret)
				goto out_domain;
		}
	}

	vfio_test_domain_fgsp(domain);

	/* replay mappings on new domains */
	ret = vfio_iommu_replay(iommu, domain);
	if (ret)
		goto out_detach;

	if (resv_msi) {
		ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
		if (ret && ret != -ENODEV)
			goto out_detach;
	}

	list_add(&domain->next, &iommu->domain_list);
	vfio_update_pgsize_bitmap(iommu);
done:
	/* Delete the old one and insert new iova list */
	vfio_iommu_iova_insert_copy(iommu, &iova_copy);

	/*
	 * An iommu-backed group can dirty memory directly and therefore
	 * demotes the iommu scope until it declares itself dirty tracking
	 * capable via the page pinning interface.
	 */
	iommu->pinned_page_dirty_scope = false;
	mutex_unlock(&iommu->lock);
	vfio_iommu_resv_free(&group_resv_regions);

	return 0;

out_detach:
	vfio_iommu_detach_group(domain, group);
out_domain:
	iommu_domain_free(domain->domain);
	vfio_iommu_iova_free(&iova_copy);
	vfio_iommu_resv_free(&group_resv_regions);
out_free:
	kfree(domain);
	kfree(group);
	mutex_unlock(&iommu->lock);
	return ret;
}
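
/* Tear down every mapping in the container, unpinning the backing pages. */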
static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
{
	struct rb_node *node;

	while ((node = rb_first(&iommu->dma_list)))
		vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}

static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
{
	struct rb_node *n, *p;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		long locked = 0, unlocked = 0;

		dma = rb_entry(n, struct vfio_dma, node);
		unlocked += vfio_unmap_unpin(iommu, dma, false);
		p = rb_first(&dma->pfn_list);
		for (; p; p = rb_next(p)) {
			struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
							 node);

			if (!is_invalid_reserved_pfn(vpfn->pfn))
				locked++;
		}
		vfio_lock_acct(dma, locked - unlocked, true);
	}
}
/*
 * Called when a domain is removed in detach. It is possible that
 * the removed domain decided the iova aperture window. Modify the
 * iova aperture to the smallest window among the remaining domains.
 */
static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,
				   struct list_head *iova_copy)
{
	struct vfio_domain *domain;
	struct iommu_domain_geometry geo;
	struct vfio_iova *node;
	dma_addr_t start = 0;
	dma_addr_t end = (dma_addr_t)~0;

	if (list_empty(iova_copy))
		return;

	list_for_each_entry(domain, &iommu->domain_list, next) {
		iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY,
				      &geo);
		if (geo.aperture_start > start)
			start = geo.aperture_start;
		if (geo.aperture_end < end)
			end = geo.aperture_end;
	}

	/* Modify aperture limits. The new aperture is either the same or bigger */
	node = list_first_entry(iova_copy, struct vfio_iova, list);
	node->start = start;
	node = list_last_entry(iova_copy, struct vfio_iova, list);
	node->end = end;
}
/*
 * Called when a group is detached. The reserved regions for that
 * group can be part of valid iova now. But since reserved regions
 * may be duplicated among groups, populate the iova valid regions
 * list again.
 */
static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
				   struct list_head *iova_copy)
{
	struct vfio_domain *d;
	struct vfio_group *g;
	struct vfio_iova *node;
	dma_addr_t start, end;
	LIST_HEAD(resv_regions);
	int ret;

	if (list_empty(iova_copy))
		return -EINVAL;

	list_for_each_entry(d, &iommu->domain_list, next) {
		list_for_each_entry(g, &d->group_list, next) {
			ret = iommu_get_group_resv_regions(g->iommu_group,
							   &resv_regions);
			if (ret)
				goto done;
		}
	}

	node = list_first_entry(iova_copy, struct vfio_iova, list);
	start = node->start;
	node = list_last_entry(iova_copy, struct vfio_iova, list);
	end = node->end;

	/* purge the iova list and create new one */
	vfio_iommu_iova_free(iova_copy);

	ret = vfio_iommu_aper_resize(iova_copy, start, end);
	if (ret)
		goto done;

	/* Exclude current reserved regions from iova ranges */
	ret = vfio_iommu_resv_exclude(iova_copy, &resv_regions);
done:
	vfio_iommu_resv_free(&resv_regions);
	return ret;
}
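
/*
 * Detach a group from the container.  If the group's domain becomes empty it
 * is freed, the iova list and page-size bitmap are recomputed, and the dirty
 * tracking scope is re-evaluated.
 */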
static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain;
	struct vfio_group *group;
	bool update_dirty_scope = false;
	LIST_HEAD(iova_copy);

	mutex_lock(&iommu->lock);

	if (iommu->external_domain) {
		group = find_iommu_group(iommu->external_domain, iommu_group);
		if (group) {
			update_dirty_scope = !group->pinned_page_dirty_scope;
			list_del(&group->next);
			kfree(group);

			if (list_empty(&iommu->external_domain->group_list)) {
				if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
					WARN_ON(iommu->notifier.head);
					vfio_iommu_unmap_unpin_all(iommu);
				}

				kfree(iommu->external_domain);
				iommu->external_domain = NULL;
			}
			goto detach_group_done;
		}
	}

	/*
	 * Get a copy of iova list. This will be used to update
	 * and to replace the current one later. Please note that
	 * we will leave the original list as it is if update fails.
	 */
	vfio_iommu_iova_get_copy(iommu, &iova_copy);

	list_for_each_entry(domain, &iommu->domain_list, next) {
		group = find_iommu_group(domain, iommu_group);
		if (!group)
			continue;

		vfio_iommu_detach_group(domain, group);
		update_dirty_scope = !group->pinned_page_dirty_scope;
		list_del(&group->next);
		kfree(group);
		/*
		 * Group ownership provides privilege, if the group list is
		 * empty, the domain goes away. If it's the last domain with
		 * iommu and the external domain doesn't exist, then all the
		 * mappings go away too. If it's the last domain with iommu
		 * and the external domain exists, update the accounting.
		 */
		if (list_empty(&domain->group_list)) {
			if (list_is_singular(&iommu->domain_list)) {
				if (!iommu->external_domain) {
					WARN_ON(iommu->notifier.head);
					vfio_iommu_unmap_unpin_all(iommu);
				} else {
					vfio_iommu_unmap_unpin_reaccount(iommu);
				}
			}
			iommu_domain_free(domain->domain);
			list_del(&domain->next);
			kfree(domain);
			vfio_iommu_aper_expand(iommu, &iova_copy);
			vfio_update_pgsize_bitmap(iommu);
		}
		break;
	}

	if (!vfio_iommu_resv_refresh(iommu, &iova_copy))
		vfio_iommu_iova_insert_copy(iommu, &iova_copy);
	else
		vfio_iommu_iova_free(&iova_copy);

detach_group_done:
	/*
	 * Removal of a group without dirty tracking may allow the iommu scope
	 * to be promoted.
	 */
	if (update_dirty_scope) {
		update_pinned_page_dirty_scope(iommu);
		if (iommu->dirty_page_tracking)
			vfio_iommu_populate_bitmap_full(iommu);
	}
	mutex_unlock(&iommu->lock);
}
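
/* Allocate and initialize a container for the requested type1 IOMMU flavor. */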
static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	switch (arg) {
	case VFIO_TYPE1_IOMMU:
		break;
	case VFIO_TYPE1_NESTING_IOMMU:
		iommu->nesting = true;
		fallthrough;
	case VFIO_TYPE1v2_IOMMU:
		iommu->v2 = true;
		break;
	default:
		kfree(iommu);
		return ERR_PTR(-EINVAL);
	}

	INIT_LIST_HEAD(&iommu->domain_list);
	INIT_LIST_HEAD(&iommu->iova_list);
	iommu->dma_list = RB_ROOT;
	iommu->dma_avail = dma_entry_limit;
	mutex_init(&iommu->lock);
	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);

	return iommu;
}
static void vfio_release_domain(struct vfio_domain *domain, bool external)
{
	struct vfio_group *group, *group_tmp;

	list_for_each_entry_safe(group, group_tmp,
				 &domain->group_list, next) {
		if (!external)
			vfio_iommu_detach_group(domain, group);
		list_del(&group->next);
		kfree(group);
	}

	if (!external)
		iommu_domain_free(domain->domain);
}

static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain, *domain_tmp;

	if (iommu->external_domain) {
		vfio_release_domain(iommu->external_domain, true);
		kfree(iommu->external_domain);
	}

	vfio_iommu_unmap_unpin_all(iommu);

	list_for_each_entry_safe(domain, domain_tmp,
				 &iommu->domain_list, next) {
		vfio_release_domain(domain, false);
		list_del(&domain->next);
		kfree(domain);
	}

	vfio_iommu_iova_free(&iommu->iova_list);

	kfree(iommu);
}
static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	int ret = 1;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next) {
		if (!(domain->prot & IOMMU_CACHE)) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&iommu->lock);

	return ret;
}

static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
					    unsigned long arg)
{
	switch (arg) {
	case VFIO_TYPE1_IOMMU:
	case VFIO_TYPE1v2_IOMMU:
	case VFIO_TYPE1_NESTING_IOMMU:
		return 1;
	case VFIO_DMA_CC_IOMMU:
		if (!iommu)
			return 0;
		return vfio_domains_have_iommu_cache(iommu);
	default:
		return 0;
	}
}
static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps,
				   struct vfio_iommu_type1_info_cap_iova_range *cap_iovas,
				   size_t size)
{
	struct vfio_info_cap_header *header;
	struct vfio_iommu_type1_info_cap_iova_range *iova_cap;

	header = vfio_info_cap_add(caps, size,
				   VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE, 1);
	if (IS_ERR(header))
		return PTR_ERR(header);

	iova_cap = container_of(header,
				struct vfio_iommu_type1_info_cap_iova_range,
				header);
	iova_cap->nr_iovas = cap_iovas->nr_iovas;
	memcpy(iova_cap->iova_ranges, cap_iovas->iova_ranges,
	       cap_iovas->nr_iovas * sizeof(*cap_iovas->iova_ranges));
	return 0;
}

static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
				      struct vfio_info_cap *caps)
{
	struct vfio_iommu_type1_info_cap_iova_range *cap_iovas;
	struct vfio_iova *iova;
	size_t size;
	int iovas = 0, i = 0, ret;

	list_for_each_entry(iova, &iommu->iova_list, list)
		iovas++;

	if (!iovas) {
		/*
		 * Return 0 as a container with a single mdev device
		 * will have an empty list
		 */
		return 0;
	}

	size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));

	cap_iovas = kzalloc(size, GFP_KERNEL);
	if (!cap_iovas)
		return -ENOMEM;

	cap_iovas->nr_iovas = iovas;

	list_for_each_entry(iova, &iommu->iova_list, list) {
		cap_iovas->iova_ranges[i].start = iova->start;
		cap_iovas->iova_ranges[i].end = iova->end;
		i++;
	}

	ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size);

	kfree(cap_iovas);

	return ret;
}
static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
					   struct vfio_info_cap *caps)
{
	struct vfio_iommu_type1_info_cap_migration cap_mig;

	cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION;
	cap_mig.header.version = 1;

	cap_mig.flags = 0;
	/* support minimum pgsize */
	cap_mig.pgsize_bitmap = (size_t)1 << __ffs(iommu->pgsize_bitmap);
	cap_mig.max_dirty_bitmap_size = DIRTY_BITMAP_SIZE_MAX;

	return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
}

static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu,
					   struct vfio_info_cap *caps)
{
	struct vfio_iommu_type1_info_dma_avail cap_dma_avail;

	cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL;
	cap_dma_avail.header.version = 1;

	cap_dma_avail.avail = iommu->dma_avail;

	return vfio_info_add_capability(caps, &cap_dma_avail.header,
					sizeof(cap_dma_avail));
}
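
/*
 * VFIO_IOMMU_GET_INFO: report supported page sizes and, when the caller
 * provides room, the migration, dma-avail and iova-range capability chain.
 */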
static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
				     unsigned long arg)
{
	struct vfio_iommu_type1_info info;
	unsigned long minsz;
	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
	unsigned long capsz;
	int ret;

	minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

	/* For backward compatibility, cannot require this */
	capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.argsz >= capsz) {
		minsz = capsz;
		info.cap_offset = 0; /* output, no-recopy necessary */
	}

	mutex_lock(&iommu->lock);
	info.flags = VFIO_IOMMU_INFO_PGSIZES;

	info.iova_pgsizes = iommu->pgsize_bitmap;

	ret = vfio_iommu_migration_build_caps(iommu, &caps);

	if (!ret)
		ret = vfio_iommu_dma_avail_build_caps(iommu, &caps);

	if (!ret)
		ret = vfio_iommu_iova_build_caps(iommu, &caps);

	mutex_unlock(&iommu->lock);

	if (ret)
		return ret;

	if (caps.size) {
		info.flags |= VFIO_IOMMU_INFO_CAPS;

		if (info.argsz < sizeof(info) + caps.size) {
			info.argsz = sizeof(info) + caps.size;
		} else {
			vfio_info_cap_shift(&caps, sizeof(info));
			if (copy_to_user((void __user *)arg +
					 sizeof(info), caps.buf,
					 caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info.cap_offset = sizeof(info);
		}

		kfree(caps.buf);
	}

	return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
}
static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu,
				    unsigned long arg)
{
	struct vfio_iommu_type1_dma_map map;
	unsigned long minsz;
	uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;

	minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

	if (copy_from_user(&map, (void __user *)arg, minsz))
		return -EFAULT;

	if (map.argsz < minsz || map.flags & ~mask)
		return -EINVAL;

	return vfio_dma_do_map(iommu, &map);
}
static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
				      unsigned long arg)
{
	struct vfio_iommu_type1_dma_unmap unmap;
	struct vfio_bitmap bitmap = { 0 };
	unsigned long minsz;
	int ret;

	minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

	if (copy_from_user(&unmap, (void __user *)arg, minsz))
		return -EFAULT;

	if (unmap.argsz < minsz ||
	    unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
		return -EINVAL;

	if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
		unsigned long pgshift;

		if (unmap.argsz < (minsz + sizeof(bitmap)))
			return -EINVAL;

		if (copy_from_user(&bitmap,
				   (void __user *)(arg + minsz),
				   sizeof(bitmap)))
			return -EFAULT;

		if (!access_ok((void __user *)bitmap.data, bitmap.size))
			return -EINVAL;

		pgshift = __ffs(bitmap.pgsize);
		ret = verify_bitmap_size(unmap.size >> pgshift,
					 bitmap.size);
		if (ret)
			return ret;
	}

	ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
	if (ret)
		return ret;

	return copy_to_user((void __user *)arg, &unmap, minsz) ?
			-EFAULT : 0;
}
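
/*
 * VFIO_IOMMU_DIRTY_PAGES: start or stop dirty page tracking, or report the
 * dirty bitmap for a mapped range.  Exactly one flag may be set per call.
 */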
static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
					unsigned long arg)
{
	struct vfio_iommu_type1_dirty_bitmap dirty;
	uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
			VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
			VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
	unsigned long minsz;
	int ret = 0;

	if (!iommu->v2)
		return -EACCES;

	minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags);

	if (copy_from_user(&dirty, (void __user *)arg, minsz))
		return -EFAULT;

	if (dirty.argsz < minsz || dirty.flags & ~mask)
		return -EINVAL;

	/* only one flag should be set at a time */
	if (__ffs(dirty.flags) != __fls(dirty.flags))
		return -EINVAL;

	if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
		size_t pgsize;

		mutex_lock(&iommu->lock);
		pgsize = 1 << __ffs(iommu->pgsize_bitmap);
		if (!iommu->dirty_page_tracking) {
			ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
			if (!ret)
				iommu->dirty_page_tracking = true;
		}
		mutex_unlock(&iommu->lock);
		return ret;
	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
		mutex_lock(&iommu->lock);
		if (iommu->dirty_page_tracking) {
			iommu->dirty_page_tracking = false;
			vfio_dma_bitmap_free_all(iommu);
		}
		mutex_unlock(&iommu->lock);
		return 0;
	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
		struct vfio_iommu_type1_dirty_bitmap_get range;
		unsigned long pgshift;
		size_t data_size = dirty.argsz - minsz;
		size_t iommu_pgsize;

		if (!data_size || data_size < sizeof(range))
			return -EINVAL;

		if (copy_from_user(&range, (void __user *)(arg + minsz),
				   sizeof(range)))
			return -EFAULT;

		if (range.iova + range.size < range.iova)
			return -EINVAL;
		if (!access_ok((void __user *)range.bitmap.data,
			       range.bitmap.size))
			return -EINVAL;

		pgshift = __ffs(range.bitmap.pgsize);
		ret = verify_bitmap_size(range.size >> pgshift,
					 range.bitmap.size);
		if (ret)
			return ret;

		mutex_lock(&iommu->lock);

		iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);

		/* allow only smallest supported pgsize */
		if (range.bitmap.pgsize != iommu_pgsize) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (range.iova & (iommu_pgsize - 1)) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!range.size || range.size & (iommu_pgsize - 1)) {
			ret = -EINVAL;
			goto out_unlock;
		}

		if (iommu->dirty_page_tracking)
			ret = vfio_iova_dirty_bitmap(range.bitmap.data,
						     iommu, range.iova,
						     range.size,
						     range.bitmap.pgsize);
		else
			ret = -EINVAL;
out_unlock:
		mutex_unlock(&iommu->lock);

		return ret;
	}

	return -EINVAL;
}
static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		return vfio_iommu_type1_check_extension(iommu, arg);
	case VFIO_IOMMU_GET_INFO:
		return vfio_iommu_type1_get_info(iommu, arg);
	case VFIO_IOMMU_MAP_DMA:
		return vfio_iommu_type1_map_dma(iommu, arg);
	case VFIO_IOMMU_UNMAP_DMA:
		return vfio_iommu_type1_unmap_dma(iommu, arg);
	case VFIO_IOMMU_DIRTY_PAGES:
		return vfio_iommu_type1_dirty_pages(iommu, arg);
	default:
		return -ENOTTY;
	}
}
static int vfio_iommu_type1_register_notifier(void *iommu_data,
					      unsigned long *events,
					      struct notifier_block *nb)
{
	struct vfio_iommu *iommu = iommu_data;

	/* clear known events */
	*events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	/* refuse to register if still events remaining */
	if (*events)
		return -EINVAL;

	return blocking_notifier_chain_register(&iommu->notifier, nb);
}

static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
						struct notifier_block *nb)
{
	struct vfio_iommu *iommu = iommu_data;

	return blocking_notifier_chain_unregister(&iommu->notifier, nb);
}
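
/*
 * Copy to or from a single vfio_dma using the mapping task's vaddr; when
 * writing with dirty tracking enabled, mark the copied range dirty.
 */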
static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
					 dma_addr_t user_iova, void *data,
					 size_t count, bool write,
					 size_t *copied)
{
	struct mm_struct *mm;
	unsigned long vaddr;
	struct vfio_dma *dma;
	bool kthread = current->mm == NULL;
	size_t offset;

	*copied = 0;

	dma = vfio_find_dma(iommu, user_iova, 1);
	if (!dma)
		return -EINVAL;

	if ((write && !(dma->prot & IOMMU_WRITE)) ||
	    !(dma->prot & IOMMU_READ))
		return -EPERM;

	mm = get_task_mm(dma->task);

	if (!mm)
		return -EPERM;

	if (kthread)
		kthread_use_mm(mm);
	else if (current->mm != mm)
		goto out;

	offset = user_iova - dma->iova;

	if (count > dma->size - offset)
		count = dma->size - offset;

	vaddr = dma->vaddr + offset;

	if (write) {
		*copied = copy_to_user((void __user *)vaddr, data,
				       count) ? 0 : count;
		if (*copied && iommu->dirty_page_tracking) {
			unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
			/*
			 * Bitmap populated with the smallest supported page
			 * size
			 */
			bitmap_set(dma->bitmap, offset >> pgshift,
				   ((offset + *copied - 1) >> pgshift) -
				   (offset >> pgshift) + 1);
		}
	} else
		*copied = copy_from_user(data, (void __user *)vaddr,
					 count) ? 0 : count;
	if (kthread)
		kthread_unuse_mm(mm);
out:
	mmput(mm);
	return *copied ? 0 : -EFAULT;
}
static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
				   void *data, size_t count, bool write)
{
	struct vfio_iommu *iommu = iommu_data;
	int ret = 0;
	size_t done;

	mutex_lock(&iommu->lock);
	while (count > 0) {
		ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data,
						    count, write, &done);
		if (ret)
			break;

		count -= done;
		data += done;
		user_iova += done;
	}

	mutex_unlock(&iommu->lock);
	return ret;
}
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name			= "vfio-iommu-type1",
	.owner			= THIS_MODULE,
	.open			= vfio_iommu_type1_open,
	.release		= vfio_iommu_type1_release,
	.ioctl			= vfio_iommu_type1_ioctl,
	.attach_group		= vfio_iommu_type1_attach_group,
	.detach_group		= vfio_iommu_type1_detach_group,
	.pin_pages		= vfio_iommu_type1_pin_pages,
	.unpin_pages		= vfio_iommu_type1_unpin_pages,
	.register_notifier	= vfio_iommu_type1_register_notifier,
	.unregister_notifier	= vfio_iommu_type1_unregister_notifier,
	.dma_rw			= vfio_iommu_type1_dma_rw,
};

static int __init vfio_iommu_type1_init(void)
{
	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}

static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}

module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);