// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 * Copyright 2019 Marvell. All rights reserved.
 */
#include <linux/xarray.h>
#include "uverbs.h"
#include "core_priv.h"

/**
 * rdma_umap_priv_init() - Initialize the private data of a vma
 *
 * @priv: The already allocated private data
 * @vma: The vm area struct that needs private data
 * @entry: entry into the mmap_xa that needs to be linked with this vma
 *
 * Each time we map IO memory into user space this keeps track of the
 * mapping. When the device is hot-unplugged we 'zap' the mmaps in user space
 * to point to the zero page and allow the hot unplug to proceed.
 *
 * This is necessary for cases like PCI physical hot unplug as the actual BAR
 * memory may vanish after this and access to it from userspace could MCE.
 *
 * RDMA drivers supporting disassociation must have their user space designed
 * to cope in some way with their IO pages going to the zero page.
 */
void rdma_umap_priv_init(struct rdma_umap_priv *priv,
			 struct vm_area_struct *vma,
			 struct rdma_user_mmap_entry *entry)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;

	priv->vma = vma;
	if (entry) {
		kref_get(&entry->ref);
		priv->entry = entry;
	}
	vma->vm_private_data = priv;
	/* vm_ops is setup in ib_uverbs_mmap() to avoid module dependencies */

	mutex_lock(&ufile->umap_lock);
	list_add(&priv->list, &ufile->umaps);
	mutex_unlock(&ufile->umap_lock);
}
EXPORT_SYMBOL(rdma_umap_priv_init);

/**
 * rdma_user_mmap_io() - Map IO memory into a process
 *
 * @ucontext: associated user context
 * @vma: the vma related to the current mmap call
 * @pfn: pfn to map
 * @size: size to map
 * @prot: pgprot to use in remap call
 * @entry: mmap_entry retrieved from rdma_user_mmap_entry_get(), or NULL
 *         if mmap_entry is not used by the driver
 *
 * This is to be called by drivers as part of their mmap() functions if they
 * wish to send something like PCI-E BAR memory to userspace.
 *
 * Return -EINVAL on wrong flags or size, -EAGAIN on failure to map. 0 on
 * success.
 */
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot,
		      struct rdma_user_mmap_entry *entry)
{
	struct ib_uverbs_file *ufile = ucontext->ufile;
	struct rdma_umap_priv *priv;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_end - vma->vm_start != size)
		return -EINVAL;

	/* Driver is using this wrong, must be called by ib_uverbs_mmap */
	if (WARN_ON(!vma->vm_file ||
		    vma->vm_file->private_data != ufile))
		return -EINVAL;
	lockdep_assert_held(&ufile->device->disassociate_srcu);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	vma->vm_page_prot = prot;
	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma, entry);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_io);

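/*
 * Illustrative sketch, not part of this file: a driver that maps a single
 * doorbell page and does not use mmap entries can pass a NULL entry. The
 * names "foo_mmap_doorbell" and "db_phys" are hypothetical.
 *
 *	static int foo_mmap_doorbell(struct ib_ucontext *ucontext,
 *				     struct vm_area_struct *vma,
 *				     phys_addr_t db_phys)
 *	{
 *		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 *			return -EINVAL;
 *
 *		return rdma_user_mmap_io(ucontext, vma, PHYS_PFN(db_phys),
 *					 PAGE_SIZE,
 *					 pgprot_noncached(vma->vm_page_prot),
 *					 NULL);
 *	}
 */
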
/**
 * rdma_user_mmap_entry_get_pgoff() - Get an entry from the mmap_xa
 *
 * @ucontext: associated user context
 * @pgoff: The mmap offset >> PAGE_SHIFT
 *
 * This function is called when a user tries to mmap with an offset (returned
 * by rdma_user_mmap_get_offset()) it initially received from the driver. The
 * rdma_user_mmap_entry was created by the function
 * rdma_user_mmap_entry_insert(). This function increases the refcnt of the
 * entry so that it won't be deleted from the xarray in the meantime.
 *
 * Return a reference to the entry if it exists or NULL if there is no
 * match. rdma_user_mmap_entry_put() must be called to put the reference.
 */
struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
			       unsigned long pgoff)
{
	struct rdma_user_mmap_entry *entry;

	if (pgoff > U32_MAX)
		return NULL;

	xa_lock(&ucontext->mmap_xa);

	entry = xa_load(&ucontext->mmap_xa, pgoff);

	/*
	 * If the refcount is zero, the entry is already being deleted;
	 * driver_removed indicates that no further mmaps are possible and
	 * we are only waiting for the active VMAs to be closed.
	 */
	if (!entry || entry->start_pgoff != pgoff || entry->driver_removed ||
	    !kref_get_unless_zero(&entry->ref))
		goto err;

	xa_unlock(&ucontext->mmap_xa);

	ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] returned\n",
		  pgoff, entry->npages);

	return entry;

err:
	xa_unlock(&ucontext->mmap_xa);
	return NULL;
}
EXPORT_SYMBOL(rdma_user_mmap_entry_get_pgoff);

/**
 * rdma_user_mmap_entry_get() - Get an entry from the mmap_xa
 *
 * @ucontext: associated user context
 * @vma: the vma being mmap'd into
 *
 * This function is like rdma_user_mmap_entry_get_pgoff() except that it also
 * checks that the VMA is correct.
 */
struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
			 struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *entry;

	if (!(vma->vm_flags & VM_SHARED))
		return NULL;
	entry = rdma_user_mmap_entry_get_pgoff(ucontext, vma->vm_pgoff);
	if (!entry)
		return NULL;
	if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {
		rdma_user_mmap_entry_put(entry);
		return NULL;
	}
	return entry;
}
EXPORT_SYMBOL(rdma_user_mmap_entry_get);

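/*
 * Illustrative sketch, not part of this file: a typical driver ->mmap()
 * handler built on rdma_user_mmap_entry_get() and rdma_user_mmap_io().
 * "struct foo_mmap_entry", its "io_addr" field and "foo_mmap" are
 * hypothetical names; real drivers embed struct rdma_user_mmap_entry in
 * their own per-mapping structure in a similar way.
 *
 *	struct foo_mmap_entry {
 *		struct rdma_user_mmap_entry rdma_entry;
 *		phys_addr_t io_addr;
 *	};
 *
 *	static int foo_mmap(struct ib_ucontext *ucontext,
 *			    struct vm_area_struct *vma)
 *	{
 *		struct rdma_user_mmap_entry *rdma_entry;
 *		struct foo_mmap_entry *foo;
 *		int ret;
 *
 *		rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
 *		if (!rdma_entry)
 *			return -EINVAL;
 *		foo = container_of(rdma_entry, struct foo_mmap_entry,
 *				   rdma_entry);
 *
 *		ret = rdma_user_mmap_io(ucontext, vma,
 *					PHYS_PFN(foo->io_addr),
 *					vma->vm_end - vma->vm_start,
 *					pgprot_noncached(vma->vm_page_prot),
 *					rdma_entry);
 *
 *		rdma_user_mmap_entry_put(rdma_entry);
 *		return ret;
 *	}
 */
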
static void rdma_user_mmap_entry_free(struct kref *kref)
{
	struct rdma_user_mmap_entry *entry =
		container_of(kref, struct rdma_user_mmap_entry, ref);
	struct ib_ucontext *ucontext = entry->ucontext;
	unsigned long i;

	/*
	 * Erase all xarray slots occupied by this single entry; this is
	 * deferred until all VMAs are closed so that the mmap offsets remain
	 * unique.
	 */
	xa_lock(&ucontext->mmap_xa);
	for (i = 0; i < entry->npages; i++)
		__xa_erase(&ucontext->mmap_xa, entry->start_pgoff + i);
	xa_unlock(&ucontext->mmap_xa);

	ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] removed\n",
		  entry->start_pgoff, entry->npages);

	if (ucontext->device->ops.mmap_free)
		ucontext->device->ops.mmap_free(entry);
}

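/*
 * Illustrative sketch, not part of this file: the driver-side ops.mmap_free()
 * callback invoked above usually just frees the container of the entry.
 * "foo_mmap_entry" and "foo_mmap_free" are the same hypothetical names used
 * in the earlier sketch.
 *
 *	static void foo_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
 *	{
 *		struct foo_mmap_entry *foo =
 *			container_of(rdma_entry, struct foo_mmap_entry,
 *				     rdma_entry);
 *
 *		kfree(foo);
 *	}
 */
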
/**
 * rdma_user_mmap_entry_put() - Drop reference to the mmap entry
 *
 * @entry: an entry in the mmap_xa
 *
 * This function is called when the mapping is closed if it was
 * an io mapping or when the driver is done with the entry for
 * some other reason.
 * Should be called after rdma_user_mmap_entry_get() was called
 * and the entry is no longer needed. This function will erase the
 * entry and free it if its refcnt reaches zero.
 */
void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry)
{
	kref_put(&entry->ref, rdma_user_mmap_entry_free);
}
EXPORT_SYMBOL(rdma_user_mmap_entry_put);

/**
 * rdma_user_mmap_entry_remove() - Drop reference to entry and
 *				   mark it as unmmapable
 *
 * @entry: the entry to remove from the mmap_xa
 *
 * Drivers can call this to prevent userspace from creating more mappings for
 * entry, however existing mmaps continue to exist and ops->mmap_free() will
 * not be called until all user mmaps are destroyed.
 */
void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
{
	if (!entry)
		return;

	xa_lock(&entry->ucontext->mmap_xa);
	entry->driver_removed = true;
	xa_unlock(&entry->ucontext->mmap_xa);
	kref_put(&entry->ref, rdma_user_mmap_entry_free);
}
EXPORT_SYMBOL(rdma_user_mmap_entry_remove);

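/*
 * Illustrative sketch, not part of this file: a hypothetical driver teardown
 * path. "foo_qp" and its "db_mmap_entry" field are made-up names; the point
 * is that the entry is removed before the backing resource goes away, while
 * ops.mmap_free() only runs once every user VMA covering it has been closed.
 *
 *	static void foo_remove_db_mapping(struct foo_qp *qp)
 *	{
 *		rdma_user_mmap_entry_remove(qp->db_mmap_entry);
 *		qp->db_mmap_entry = NULL;
 *	}
 */
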
/**
 * rdma_user_mmap_entry_insert_range() - Insert an entry to the mmap_xa
 *					 in a given range.
 *
 * @ucontext: associated user context.
 * @entry: the entry to insert into the mmap_xa
 * @length: length of the address that will be mmapped
 * @min_pgoff: minimum pgoff to be returned
 * @max_pgoff: maximum pgoff to be returned
 *
 * This function should be called by drivers that use the rdma_user_mmap
 * interface for implementing their mmap syscall. A database of mmap offsets
 * is handled in the core and helper functions are provided to insert entries
 * into the database and extract entries when the user calls mmap with the
 * given offset. The function allocates a unique page offset in the given
 * range that should be provided to the user; the user will use the offset to
 * retrieve information such as the address to be mapped and how.
 *
 * Return: 0 on success and -ENOMEM on failure
 */
int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
				      struct rdma_user_mmap_entry *entry,
				      size_t length, u32 min_pgoff,
				      u32 max_pgoff)
{
	struct ib_uverbs_file *ufile = ucontext->ufile;
	XA_STATE(xas, &ucontext->mmap_xa, min_pgoff);
	u32 xa_first, xa_last, npages;
	int err;
	u32 i;

	if (!entry)
		return -EINVAL;

	kref_init(&entry->ref);
	entry->ucontext = ucontext;

	/*
	 * We want the whole allocation to be done without interruption from a
	 * different thread. The allocation requires finding a free range and
	 * storing the entry. During the xa_insert the lock could be released,
	 * possibly allowing another thread to choose the same range.
	 */
	mutex_lock(&ufile->umap_lock);
	xa_lock(&ucontext->mmap_xa);

	/* We want to find an empty range */
	npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);
	entry->npages = npages;
	while (true) {
		/* First find an empty index */
		xas_find_marked(&xas, max_pgoff, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			goto err_unlock;

		xa_first = xas.xa_index;

		/* Is there enough room to have the range? */
		if (check_add_overflow(xa_first, npages, &xa_last))
			goto err_unlock;

		/*
		 * Now look for the next present entry. If an entry doesn't
		 * exist, we found an empty range and can proceed.
		 */
		xas_next_entry(&xas, xa_last - 1);
		if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)
			break;
	}

	for (i = xa_first; i < xa_last; i++) {
		err = __xa_insert(&ucontext->mmap_xa, i, entry, GFP_KERNEL);
		if (err)
			goto err_undo;
	}

	/*
	 * Internally the kernel uses a page offset, in libc this is a byte
	 * offset. Drivers should not return pgoff to userspace.
	 */
	entry->start_pgoff = xa_first;
	xa_unlock(&ucontext->mmap_xa);
	mutex_unlock(&ufile->umap_lock);

	ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#x] inserted\n",
		  entry->start_pgoff, npages);

	return 0;

err_undo:
	for (; i > xa_first; i--)
		__xa_erase(&ucontext->mmap_xa, i - 1);

err_unlock:
	xa_unlock(&ucontext->mmap_xa);
	mutex_unlock(&ufile->umap_lock);
	return -ENOMEM;
}
EXPORT_SYMBOL(rdma_user_mmap_entry_insert_range);

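/*
 * Illustrative sketch, not part of this file: a driver that keeps some fixed,
 * legacy mmap offsets (for example page offset 0) can use the range variant
 * so that core-allocated offsets never collide with them. "foo" is the same
 * hypothetical container used in the sketches above.
 *
 *	err = rdma_user_mmap_entry_insert_range(ucontext, &foo->rdma_entry,
 *						PAGE_SIZE, 1, U32_MAX);
 */
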
/**
 * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa.
 *
 * @ucontext: associated user context.
 * @entry: the entry to insert into the mmap_xa
 * @length: length of the address that will be mmapped
 *
 * This function should be called by drivers that use the rdma_user_mmap
 * interface for handling user mmapped addresses. The database is handled in
 * the core and helper functions are provided to insert entries into the
 * database and extract entries when the user calls mmap with the given offset.
 * The function allocates a unique page offset that should be provided to the
 * user; the user will use the offset to retrieve information such as the
 * address to be mapped and how.
 *
 * Return: 0 on success and -ENOMEM on failure
 */
int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
				struct rdma_user_mmap_entry *entry,
				size_t length)
{
	return rdma_user_mmap_entry_insert_range(ucontext, entry, length, 0,
						 U32_MAX);
}
EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
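
/*
 * Illustrative sketch, not part of this file: allocating an entry for a
 * doorbell page and handing the resulting byte offset back to userspace.
 * "foo_mmap_entry", "foo_create_db_mapping" and the way the offset reaches
 * userspace are hypothetical; rdma_user_mmap_get_offset() converts the
 * allocated start_pgoff into the byte offset userspace passes to mmap().
 *
 *	static int foo_create_db_mapping(struct ib_ucontext *ucontext,
 *					 phys_addr_t db_phys, u64 *mmap_offset)
 *	{
 *		struct foo_mmap_entry *foo;
 *		int err;
 *
 *		foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *		foo->io_addr = db_phys;
 *
 *		err = rdma_user_mmap_entry_insert(ucontext, &foo->rdma_entry,
 *						  PAGE_SIZE);
 *		if (err) {
 *			kfree(foo);
 *			return err;
 *		}
 *
 *		*mmap_offset = rdma_user_mmap_get_offset(&foo->rdma_entry);
 *		return 0;
 *	}
 */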