umem_odp.c

/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
#include <linux/hmm.h>
#include <linux/pagemap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
                                   const struct mmu_interval_notifier_ops *ops)
{
        int ret;

        umem_odp->umem.is_odp = 1;
        mutex_init(&umem_odp->umem_mutex);

        if (!umem_odp->is_implicit_odp) {
                size_t page_size = 1UL << umem_odp->page_shift;
                unsigned long start;
                unsigned long end;
                size_t ndmas, npfns;

                start = ALIGN_DOWN(umem_odp->umem.address, page_size);
                if (check_add_overflow(umem_odp->umem.address,
                                       (unsigned long)umem_odp->umem.length,
                                       &end))
                        return -EOVERFLOW;
                end = ALIGN(end, page_size);
                if (unlikely(end < page_size))
                        return -EOVERFLOW;

                ndmas = (end - start) >> umem_odp->page_shift;
                if (!ndmas)
                        return -EINVAL;

                npfns = (end - start) >> PAGE_SHIFT;
                umem_odp->pfn_list = kvcalloc(
                        npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
                if (!umem_odp->pfn_list)
                        return -ENOMEM;

                umem_odp->dma_list = kvcalloc(
                        ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
                if (!umem_odp->dma_list) {
                        ret = -ENOMEM;
                        goto out_pfn_list;
                }

                ret = mmu_interval_notifier_insert(&umem_odp->notifier,
                                                   umem_odp->umem.owning_mm,
                                                   start, end - start, ops);
                if (ret)
                        goto out_dma_list;
        }

        return 0;

out_dma_list:
        kvfree(umem_odp->dma_list);
out_pfn_list:
        kvfree(umem_odp->pfn_list);
        return ret;
}
/**
 * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem
 *
 * Implicit ODP umems do not have a VA range and do not have any page lists.
 * They exist only to hold the per_mm reference to help the driver create
 * children umems.
 *
 * @device: IB device to create UMEM
 * @access: ib_reg_mr access flags
 */
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
                                               int access)
{
        struct ib_umem *umem;
        struct ib_umem_odp *umem_odp;
        int ret;

        if (access & IB_ACCESS_HUGETLB)
                return ERR_PTR(-EINVAL);

        umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
        if (!umem_odp)
                return ERR_PTR(-ENOMEM);
        umem = &umem_odp->umem;
        umem->ibdev = device;
        umem->writable = ib_access_writable(access);
        umem->owning_mm = current->mm;
        umem_odp->is_implicit_odp = 1;
        umem_odp->page_shift = PAGE_SHIFT;

        umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
        ret = ib_init_umem_odp(umem_odp, NULL);
        if (ret) {
                put_pid(umem_odp->tgid);
                kfree(umem_odp);
                return ERR_PTR(ret);
        }
        return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
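/*
 * Illustrative sketch (not part of this file): a driver registering an
 * implicit ODP MR might allocate the parent umem roughly as below. The
 * surrounding driver context ("my_dev", "my_mr") is hypothetical.
 *
 *      struct ib_umem_odp *parent;
 *
 *      parent = ib_umem_odp_alloc_implicit(&my_dev->ib_dev, access_flags);
 *      if (IS_ERR(parent))
 *              return PTR_ERR(parent);
 *      my_mr->parent_odp = parent;
 */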
/**
 * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit
 *                           parent ODP umem
 *
 * @root: The parent umem enclosing the child. This must be allocated using
 *        ib_umem_odp_alloc_implicit()
 * @addr: The starting userspace VA
 * @size: The length of the userspace VA
 * @ops: MMU interval ops, currently only @invalidate
 */
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
                        size_t size,
                        const struct mmu_interval_notifier_ops *ops)
{
        /*
         * Caller must ensure that root cannot be freed during the call to
         * ib_umem_odp_alloc_child().
         */
        struct ib_umem_odp *odp_data;
        struct ib_umem *umem;
        int ret;

        if (WARN_ON(!root->is_implicit_odp))
                return ERR_PTR(-EINVAL);

        odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
        if (!odp_data)
                return ERR_PTR(-ENOMEM);
        umem = &odp_data->umem;
        umem->ibdev = root->umem.ibdev;
        umem->length = size;
        umem->address = addr;
        umem->writable = root->umem.writable;
        umem->owning_mm = root->umem.owning_mm;
        odp_data->page_shift = PAGE_SHIFT;
        odp_data->notifier.ops = ops;

        /*
         * An mmget must be held when registering a notifier; the owning_mm
         * only has an mmgrab at this point.
         */
        if (!mmget_not_zero(umem->owning_mm)) {
                ret = -EFAULT;
                goto out_free;
        }

        odp_data->tgid = get_pid(root->tgid);
        ret = ib_init_umem_odp(odp_data, ops);
        if (ret)
                goto out_tgid;
        mmput(umem->owning_mm);
        return odp_data;

out_tgid:
        put_pid(odp_data->tgid);
        mmput(umem->owning_mm);
out_free:
        kfree(odp_data);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_alloc_child);
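/*
 * Illustrative sketch (not part of this file): when a page fault lands in an
 * implicit MR, a driver typically carves out a child umem covering the
 * faulting range. "my_mmu_ops" and the fault-derived "addr"/"length" are
 * hypothetical; "parent" must be the umem returned by
 * ib_umem_odp_alloc_implicit().
 *
 *      struct ib_umem_odp *child;
 *
 *      child = ib_umem_odp_alloc_child(parent, addr, length, &my_mmu_ops);
 *      if (IS_ERR(child))
 *              return PTR_ERR(child);
 */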
/**
 * ib_umem_odp_get - Create a umem_odp for a userspace va
 *
 * @device: IB device struct to get UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @ops: MMU interval ops, currently only @invalidate
 *
 * The driver should use this when the access flags indicate ODP memory. It
 * avoids pinning; instead it stores the mm for future page fault handling in
 * conjunction with MMU notifiers.
 */
struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
                                    unsigned long addr, size_t size, int access,
                                    const struct mmu_interval_notifier_ops *ops)
{
        struct ib_umem_odp *umem_odp;
        struct mm_struct *mm;
        int ret;

        if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
                return ERR_PTR(-EINVAL);

        umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
        if (!umem_odp)
                return ERR_PTR(-ENOMEM);

        umem_odp->umem.ibdev = device;
        umem_odp->umem.length = size;
        umem_odp->umem.address = addr;
        umem_odp->umem.writable = ib_access_writable(access);
        umem_odp->umem.owning_mm = mm = current->mm;
        umem_odp->notifier.ops = ops;

        umem_odp->page_shift = PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
        if (access & IB_ACCESS_HUGETLB)
                umem_odp->page_shift = HPAGE_SHIFT;
#endif

        umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
        ret = ib_init_umem_odp(umem_odp, ops);
        if (ret)
                goto err_put_pid;
        return umem_odp;

err_put_pid:
        put_pid(umem_odp->tgid);
        kfree(umem_odp);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_get);
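/*
 * Illustrative sketch (not part of this file): an explicit ODP MR in a
 * driver's reg_user_mr path. The ops must at least provide an .invalidate
 * callback that unmaps the affected range (see the sketch after
 * ib_umem_odp_unmap_dma_pages() below); "my_mmu_ops" and "my_invalidate_cb"
 * are hypothetical names.
 *
 *      static const struct mmu_interval_notifier_ops my_mmu_ops = {
 *              .invalidate = my_invalidate_cb,
 *      };
 *
 *      umem_odp = ib_umem_odp_get(ibdev, start, length,
 *                                 access | IB_ACCESS_ON_DEMAND, &my_mmu_ops);
 *      if (IS_ERR(umem_odp))
 *              return ERR_CAST(umem_odp);
 */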
void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
        /*
         * Ensure that no more pages are mapped in the umem.
         *
         * It is the driver's responsibility to ensure, before calling us,
         * that the hardware will not attempt to access the MR any more.
         */
        if (!umem_odp->is_implicit_odp) {
                mutex_lock(&umem_odp->umem_mutex);
                ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
                                            ib_umem_end(umem_odp));
                mutex_unlock(&umem_odp->umem_mutex);
                mmu_interval_notifier_remove(&umem_odp->notifier);
                kvfree(umem_odp->dma_list);
                kvfree(umem_odp->pfn_list);
        }
        put_pid(umem_odp->tgid);
        kfree(umem_odp);
}
EXPORT_SYMBOL(ib_umem_odp_release);
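/*
 * Illustrative sketch (not part of this file): teardown order in a driver's
 * dereg_mr path. The device-side destruction ("my_destroy_device_mkey()") is
 * hypothetical; it must guarantee that the HW no longer accesses the MR
 * before the umem is released.
 *
 *      my_destroy_device_mkey(mr);
 *      ib_umem_odp_release(mr->umem_odp);
 */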
/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem_odp: the umem to insert the page to.
 * @dma_index: index in the umem to add the dma to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 *
 * The function returns -EFAULT if the DMA mapping operation fails.
 */
static int ib_umem_odp_map_dma_single_page(
                struct ib_umem_odp *umem_odp,
                unsigned int dma_index,
                struct page *page,
                u64 access_mask)
{
        struct ib_device *dev = umem_odp->umem.ibdev;
        dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];

        if (*dma_addr) {
                /*
                 * If the page is already dma mapped it means it went through
                 * a non-invalidating transition, like read-only to writable.
                 * Resync the flags.
                 */
                *dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
                return 0;
        }

        *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
                                    DMA_BIDIRECTIONAL);
        if (ib_dma_mapping_error(dev, *dma_addr)) {
                *dma_addr = 0;
                return -EFAULT;
        }
        umem_odp->npages++;
        *dma_addr |= access_mask;
        return 0;
}
/**
 * ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
 *
 * Maps the range passed in the argument to DMA addresses. The DMA addresses
 * of the mapped pages are updated in umem_odp->dma_list. Upon success the
 * ODP MR will be locked to let the caller complete its device page table
 * update.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 *
 * @umem_odp: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @fault: is faulting required for the given range
 */
int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
                                 u64 bcnt, u64 access_mask, bool fault)
                        __acquires(&umem_odp->umem_mutex)
{
        struct task_struct *owning_process = NULL;
        struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
        int pfn_index, dma_index, ret = 0, start_idx;
        unsigned int page_shift, hmm_order, pfn_start_idx;
        unsigned long num_pfns, current_seq;
        struct hmm_range range = {};
        unsigned long timeout;

        if (access_mask == 0)
                return -EINVAL;

        if (user_virt < ib_umem_start(umem_odp) ||
            user_virt + bcnt > ib_umem_end(umem_odp))
                return -EFAULT;

        page_shift = umem_odp->page_shift;

        /*
         * owning_process is allowed to be NULL; this means the mm somehow
         * exists beyond the lifetime of the originating process. Presumably
         * mmget_not_zero will fail in this case.
         */
        owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
        if (!owning_process || !mmget_not_zero(owning_mm)) {
                ret = -EINVAL;
                goto out_put_task;
        }

        range.notifier = &umem_odp->notifier;
        range.start = ALIGN_DOWN(user_virt, 1UL << page_shift);
        range.end = ALIGN(user_virt + bcnt, 1UL << page_shift);
        pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
        num_pfns = (range.end - range.start) >> PAGE_SHIFT;
        if (fault) {
                range.default_flags = HMM_PFN_REQ_FAULT;

                if (access_mask & ODP_WRITE_ALLOWED_BIT)
                        range.default_flags |= HMM_PFN_REQ_WRITE;
        }

        range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
        timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);

retry:
        current_seq = range.notifier_seq =
                mmu_interval_read_begin(&umem_odp->notifier);

        mmap_read_lock(owning_mm);
        ret = hmm_range_fault(&range);
        mmap_read_unlock(owning_mm);
        if (unlikely(ret)) {
                if (ret == -EBUSY && !time_after(jiffies, timeout))
                        goto retry;
                goto out_put_mm;
        }

        start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift;
        dma_index = start_idx;

        mutex_lock(&umem_odp->umem_mutex);
        if (mmu_interval_read_retry(&umem_odp->notifier, current_seq)) {
                mutex_unlock(&umem_odp->umem_mutex);
                goto retry;
        }

        for (pfn_index = 0; pfn_index < num_pfns;
             pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {

                if (fault) {
                        /*
                         * Since we asked for hmm_range_fault() to populate
                         * pages it shouldn't return an error entry on success.
                         */
                        WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
                        WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
                } else {
                        if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
                                WARN_ON(umem_odp->dma_list[dma_index]);
                                continue;
                        }
                        access_mask = ODP_READ_ALLOWED_BIT;
                        if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
                                access_mask |= ODP_WRITE_ALLOWED_BIT;
                }

                hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
                /*
                 * If a hugepage was detected and ODP wasn't set for it, the
                 * umem page_shift will be used; the opposite case is an error.
                 */
                if (hmm_order + PAGE_SHIFT < page_shift) {
                        ret = -EINVAL;
                        ibdev_dbg(umem_odp->umem.ibdev,
                                  "%s: unexpected hmm_order %d, page_shift %d\n",
                                  __func__, hmm_order, page_shift);
                        break;
                }

                ret = ib_umem_odp_map_dma_single_page(
                                umem_odp, dma_index,
                                hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
                                access_mask);
                if (ret < 0) {
                        ibdev_dbg(umem_odp->umem.ibdev,
                                  "ib_umem_odp_map_dma_single_page failed with error %d\n",
                                  ret);
                        break;
                }
        }
        /* upon success the lock stays held for the caller */
        if (!ret)
                ret = dma_index - start_idx;
        else
                mutex_unlock(&umem_odp->umem_mutex);

out_put_mm:
        mmput(owning_mm);
out_put_task:
        if (owning_process)
                put_task_struct(owning_process);
        return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
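/*
 * Illustrative sketch (not part of this file): typical use from a driver's
 * ODP page-fault handler. On success the umem_mutex is returned held so the
 * device page table can be updated against a stable dma_list; the caller
 * must drop it afterwards. "update_device_pagetable()" is a hypothetical
 * driver-side step.
 *
 *      npages = ib_umem_odp_map_dma_and_lock(umem_odp, user_va, bcnt,
 *                                            ODP_READ_ALLOWED_BIT |
 *                                            ODP_WRITE_ALLOWED_BIT, true);
 *      if (npages < 0)
 *              return npages;
 *      update_device_pagetable(mr, umem_odp->dma_list);
 *      mutex_unlock(&umem_odp->umem_mutex);
 */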
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
                                 u64 bound)
{
        dma_addr_t dma_addr;
        dma_addr_t dma;
        int idx;
        u64 addr;
        struct ib_device *dev = umem_odp->umem.ibdev;

        lockdep_assert_held(&umem_odp->umem_mutex);

        virt = max_t(u64, virt, ib_umem_start(umem_odp));
        bound = min_t(u64, bound, ib_umem_end(umem_odp));
        for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
                idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
                dma = umem_odp->dma_list[idx];

                /* A non-zero dma_list entry means the page is DMA mapped */
                if (dma) {
                        unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
                        struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);

                        dma_addr = dma & ODP_DMA_ADDR_MASK;
                        ib_dma_unmap_page(dev, dma_addr,
                                          BIT(umem_odp->page_shift),
                                          DMA_BIDIRECTIONAL);
                        if (dma & ODP_WRITE_ALLOWED_BIT) {
                                struct page *head_page = compound_head(page);
                                /*
                                 * set_page_dirty prefers being called with
                                 * the page lock. However, MMU notifiers are
                                 * called sometimes with and sometimes without
                                 * the lock. We rely on the umem_mutex instead
                                 * to prevent other mmu notifiers from
                                 * continuing and allowing the page mapping to
                                 * be removed.
                                 */
                                set_page_dirty(head_page);
                        }
                        umem_odp->dma_list[idx] = 0;
                        umem_odp->npages--;
                }
        }
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
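/*
 * Illustrative sketch (not part of this file): a minimal .invalidate
 * callback of the kind a driver might wire into the
 * mmu_interval_notifier_ops passed to ib_umem_odp_get(). Invalidation of the
 * device page table itself ("my_invalidate_device_pt()") is hypothetical and
 * driver specific; the DMA unmap below must run under umem_mutex.
 *
 *      static bool my_invalidate_cb(struct mmu_interval_notifier *mni,
 *                                   const struct mmu_notifier_range *range,
 *                                   unsigned long cur_seq)
 *      {
 *              struct ib_umem_odp *umem_odp =
 *                      container_of(mni, struct ib_umem_odp, notifier);
 *
 *              if (!mmu_notifier_range_blockable(range))
 *                      return false;
 *
 *              mutex_lock(&umem_odp->umem_mutex);
 *              mmu_interval_set_seq(mni, cur_seq);
 *              my_invalidate_device_pt(umem_odp, range->start, range->end);
 *              ib_umem_odp_unmap_dma_pages(umem_odp, range->start,
 *                                          range->end);
 *              mutex_unlock(&umem_odp->umem_mutex);
 *              return true;
 *      }
 */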