/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/sync_bitops.h>
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;
/* This is a structure of function pointers for grant table. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter is used to store the grant table address when the grant
	 * table is being set up, and nr_gframes is the number of frames to
	 * map. Returning GNTST_okay means success and a negative value means
	 * failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release a list of frames which are mapped in map_frames for grant
	 * entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame of
	 * this grant entry to a domain for accessing or transferring. The ref
	 * parameter is the reference of the introduced grant entry, domid is
	 * the id of the granted domain, frame is the page frame to be
	 * granted, and flags is the status of the grant entry to be updated.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain for accessing. The ref
	 * parameter is the reference of a grant entry whose grant access will
	 * be stopped; readonly is not used in this function. If the grant
	 * entry is currently mapped for reading or writing, just return
	 * failure (== 0) directly and don't tear down the grant access.
	 * Otherwise, stop grant access for this entry and return success
	 * (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to a domain for transfer. The ref
	 * parameter is the reference of a grant entry whose grant transfer
	 * will be stopped. If the transfer has not started, just reclaim the
	 * grant entry and return failure (== 0). Otherwise, wait for the
	 * transfer to complete and then return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Read the frame number related to a given grant reference.
	 */
	unsigned long (*read_frame)(grant_ref_t ref);
};
struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}

/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].frame;
}

static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
{
	return gnttab_shared.v2[ref].full_page.frame;
}

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			pr_debug("freeing g.e. %#x (pfn %#lx)\n",
				 entry->ref, page_to_pfn(entry->page));
			put_page(entry->page);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry;
	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
	const char *what = KERN_WARNING "leaking";

	entry = kmalloc(sizeof(*entry), gfp);
	if (!page) {
		unsigned long gfn = gnttab_interface->read_frame(ref);

		page = pfn_to_page(gfn_to_pfn(gfn));
		get_page(page);
	}

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

int gnttab_try_end_foreign_access(grant_ref_t ref)
{
	int ret = _gnttab_end_foreign_access_ref(ref, 0);

	if (ret)
		put_free_entry(ref);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_try_end_foreign_access(ref)) {
		if (page != 0)
			put_page(virt_to_page(page));
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
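
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * sharing one page with another domain and ending the grant again. The
 * helper name, the otherend domid and the error handling are assumptions
 * made for this sketch.
 *
 *	static int example_share_page(domid_t otherend, struct page *page)
 *	{
 *		int ref;
 *
 *		// Grant 'otherend' read/write access to the page's frame.
 *		ref = gnttab_grant_foreign_access(otherend,
 *						  xen_page_to_gfn(page), 0);
 *		if (ref < 0)
 *			return ref;	// -ENOSPC: no free grant entries
 *
 *		// ... hand 'ref' to the other end, e.g. via xenstore ...
 *
 *		// Tear down. Passing the page address relinquishes the page
 *		// to the core, which defers freeing it if the other side
 *		// still has the grant mapped.
 *		gnttab_end_foreign_access(ref, 0,
 *					  (unsigned long)page_address(page));
 *		return 0;
 *	}
 */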
int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}

static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	BUG_ON(frame == 0);

	return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);

	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
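
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * accepting a page transfer from another domain. The helper name and the
 * pfn of the pseudo-phys slot to be filled are assumptions for the sketch.
 *
 *	static long example_accept_transfer(domid_t otherend,
 *					    unsigned long slot_pfn)
 *	{
 *		int ref = gnttab_grant_foreign_transfer(otherend, slot_pfn);
 *
 *		if (ref < 0)
 *			return ref;
 *
 *		// ... the other end performs GNTTABOP_transfer on 'ref' ...
 *
 *		// Returns 0 if no transfer was ever committed, else the
 *		// transferred frame; the grant entry is recycled either way.
 *		return gnttab_end_foreign_transfer(ref);
 *	}
 */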
void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;

	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
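
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * pre-allocating a private pool of grant references and drawing from it,
 * as ring-based frontends typically do. The names and the pool size of 16
 * are assumptions for the sketch.
 *
 *	static int example_use_ref_pool(domid_t otherend, struct page *page)
 *	{
 *		grant_ref_t pool;
 *		int ref, err;
 *
 *		err = gnttab_alloc_grant_references(16, &pool);
 *		if (err < 0)
 *			return err;
 *
 *		ref = gnttab_claim_grant_reference(&pool);
 *		if (ref >= 0) {
 *			gnttab_grant_foreign_access_ref(ref, otherend,
 *							xen_page_to_gfn(page),
 *							0);
 *			// ... I/O against the shared page happens here ...
 *			if (gnttab_end_foreign_access_ref(ref, 0))
 *				gnttab_release_grant_reference(&pool, ref);
 *		}
 *
 *		// Return the remaining pool to the global free list.
 *		gnttab_free_grant_references(pool);
 *		return 0;
 *	}
 */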
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
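
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * getting notified once enough grant references are free again. The
 * callback struct would normally be embedded in a driver's private state;
 * the names and the threshold of 16 are assumptions for the sketch.
 *
 *	static struct gnttab_free_callback example_cb;
 *
 *	static void example_refs_available(void *arg)
 *	{
 *		// Runs under gnttab_list_lock with interrupts off: just kick
 *		// the driver (e.g. schedule a workqueue item held in 'arg')
 *		// rather than allocating references right here.
 *	}
 *
 *	static void example_wait_for_refs(void *driver_state)
 *	{
 *		// Invoke example_refs_available() once at least 16 grant
 *		// references are free.
 *		gnttab_request_free_callback(&example_cb,
 *					     example_refs_available,
 *					     driver_state, 16);
 *	}
 */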
static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	for (i = grefs_per_frame * nr_grant_frames;
	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
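
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a backend allocating pages to map a frontend's grants into, then
 * releasing them. The helper name and the fixed count are assumptions.
 *
 *	static int example_backend_pages(void)
 *	{
 *		struct page *pages[4];
 *		int err;
 *
 *		err = gnttab_alloc_pages(ARRAY_SIZE(pages), pages);
 *		if (err)
 *			return err;
 *
 *		// ... map foreign grants into these pages with
 *		// gnttab_map_refs(), use them, unmap them again ...
 *
 *		gnttab_free_pages(ARRAY_SIZE(pages), pages);
 *		return 0;
 *	}
 */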
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
static inline void cache_init(struct gnttab_page_cache *cache)
{
	cache->pages = NULL;
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return !cache->pages;
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = cache->pages;
	cache->pages = page->zone_device_data;

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	page->zone_device_data = cache->pages;
	cache->pages = page;
}
#else
static inline void cache_init(struct gnttab_page_cache *cache)
{
	INIT_LIST_HEAD(&cache->pages);
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return list_empty(&cache->pages);
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = list_first_entry(&cache->pages, struct page, lru);
	list_del(&page->lru);

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	list_add(&page->lru, &cache->pages);
}
#endif

void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
	spin_lock_init(&cache->lock);
	cache_init(cache);
	cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);

int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	if (cache_empty(cache)) {
		spin_unlock_irqrestore(&cache->lock, flags);
		return gnttab_alloc_pages(1, page);
	}

	page[0] = cache_deq(cache);
	cache->num_pages--;

	spin_unlock_irqrestore(&cache->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);

void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&cache->lock, flags);

	for (i = 0; i < num; i++)
		cache_enq(cache, page[i]);
	cache->num_pages += num;

	spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);

void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
	struct page *page[10];
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	while (cache->num_pages > num) {
		page[i] = cache_deq(cache);
		cache->num_pages--;
		if (++i == ARRAY_SIZE(page)) {
			spin_unlock_irqrestore(&cache->lock, flags);
			gnttab_free_pages(i, page);
			i = 0;
			spin_lock_irqsave(&cache->lock, flags);
		}
	}

	spin_unlock_irqrestore(&cache->lock, flags);

	if (i != 0)
		gnttab_free_pages(i, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
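
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * using the page cache so a busy backend recycles grant-mappable pages
 * instead of hitting the allocator on every request. The names and the
 * retained-page limit of 8 are assumptions.
 *
 *	static struct gnttab_page_cache example_cache;
 *
 *	static int example_cache_usage(void)
 *	{
 *		struct page *page;
 *		int err;
 *
 *		gnttab_page_cache_init(&example_cache);
 *
 *		err = gnttab_page_cache_get(&example_cache, &page);
 *		if (err)
 *			return err;
 *
 *		// ... map a grant into 'page', use it, unmap it ...
 *
 *		gnttab_page_cache_put(&example_cache, &page, 1);
 *
 *		// Trim the cache back to at most 8 spare pages.
 *		gnttab_page_cache_shrink(&example_cache, 8);
 *		return 0;
 *	}
 */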
void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
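
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * filling a single gnttab_copy op that copies 'len' bytes out of a
 * foreign grant into a local frame, then submitting it with the
 * retry-on-GNTST_eagain handling done by gnttab_batch_copy(). The field
 * usage follows the Xen grant_table.h interface as the editor understands
 * it; names and values are assumptions.
 *
 *	static void example_copy_from_grant(domid_t otherend, grant_ref_t ref,
 *					    struct page *local, uint16_t len)
 *	{
 *		struct gnttab_copy op = {};
 *
 *		op.source.u.ref = ref;		// copy out of the grant
 *		op.source.domid = otherend;
 *		op.source.offset = 0;
 *		op.dest.u.gmfn = xen_page_to_gfn(local);
 *		op.dest.domid = DOMID_SELF;
 *		op.dest.offset = 0;
 *		op.len = len;
 *		op.flags = GNTCOPY_source_gref;
 *
 *		gnttab_batch_copy(&op, 1);
 *		// op.status is GNTST_okay on success.
 *	}
 */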
void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
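
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a callback invoked once per Xen-page-sized chunk of a buffer, useful
 * with gnttab_foreach_grant_in_range() when PAGE_SIZE > XEN_PAGE_SIZE.
 * The callback signature is xen_grant_fn_t from xen/grant_table.h; the
 * names and the counting payload are assumptions.
 *
 *	static void example_count_chunk(unsigned long gfn, unsigned int offset,
 *					unsigned int len, void *data)
 *	{
 *		unsigned int *nr_chunks = data;
 *
 *		// One grant would cover 'gfn' at [offset, offset + len).
 *		(*nr_chunks)++;
 *	}
 *
 *	// Usage: count the grants needed for 'len' bytes of 'page':
 *	//	unsigned int n = 0;
 *	//	gnttab_foreach_grant_in_range(page, 0, len,
 *	//				      example_count_chunk, &n);
 */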
void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_okay:
		{
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
			break;
		}

		case GNTST_no_device_space:
			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
			break;

		case GNTST_eagain:
			/* Retry eagain maps */
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);
			/* Test status in next loop iteration. */
			i--;
			break;

		default:
			break;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);

			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
				struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
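
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * synchronously unmapping previously mapped grants; the async machinery
 * above keeps rescheduling itself while the pages still hold extra
 * references. The unmap_ops array is assumed to have been prepared with
 * gnttab_set_unmap_op() when the grants were mapped; names are
 * assumptions.
 *
 *	static int example_unmap(struct gnttab_unmap_grant_ref *unmap_ops,
 *				 struct page **pages, unsigned int count)
 *	{
 *		struct gntab_unmap_queue_data unmap_data = {
 *			.unmap_ops = unmap_ops,
 *			.kunmap_ops = NULL,
 *			.pages = pages,
 *			.count = count,
 *		};
 *
 *		// Blocks until every page is unused and really unmapped.
 *		return gnttab_unmap_refs_sync(&unmap_data);
 *	}
 */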
static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in the following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom = DOMID_SELF;
	getframes.nr_frames = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;

		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in the following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}
static const struct gnttab_ops gnttab_v1_ops = {
	.version			= 1,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(struct grant_entry_v1),
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.read_frame			= gnttab_read_frame_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version			= 2,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(union grant_entry_v2),
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
	.read_frame			= gnttab_read_frame_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}
static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames is not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
				    " cur=%u extra=%u limit=%u"
				    " gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}
int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = (max_nr_grant_frames *
			       gnttab_interface->grefs_per_grant_frame / RPP);

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames *
			gnttab_interface->grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head = NR_RESERVED_ENTRIES;

	printk("Grant table initialized\n");
	return 0;

ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);
static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);