ib_umem.h

/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;

struct ib_umem {
	struct ib_device *ibdev;
	struct mm_struct *owning_mm;
	u64 iova;
	size_t length;
	unsigned long address;
	u32 writable : 1;
	u32 is_odp : 1;
	struct work_struct work;
	struct sg_table sg_head;
	int nmap;
	unsigned int sg_nents;
};

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
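/*
 * Worked example of the block count above (values are illustrative): a umem
 * with iova = 0x11000 and length = 0x3000 ends at 0x14000.  With
 * pgsz = 0x10000 (64K), ALIGN(0x14000, 0x10000) - ALIGN_DOWN(0x11000, 0x10000)
 * = 0x20000 - 0x10000 = 0x10000, so a single 64K DMA block covers the range.
 * With pgsz = 0x1000 (4K), the same formula gives
 * (0x14000 - 0x11000) / 0x1000 = 3 blocks.
 */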
static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)
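
/*
 * Usage sketch (hypothetical: mydrv_fill_page_list and the pgsz_bitmap values
 * are illustrative, not part of this header): pick the largest block size the
 * device supports with ib_umem_find_best_pgsz() and walk the umem in aligned
 * DMA blocks, e.g. to build a page list for the HCA.  SZ_* constants come
 * from <linux/sizes.h>.
 */
static inline int mydrv_fill_page_list(struct ib_umem *umem, u64 iova,
					dma_addr_t *pages, size_t npages)
{
	struct ib_block_iter biter;
	unsigned long pgsz;
	size_t i = 0;

	/* pgsz_bitmap enumerates the block sizes the hardware can handle */
	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_64K | SZ_2M, iova);
	if (!pgsz)
		return -EINVAL;

	rdma_umem_for_each_dma_block(umem, &biter, pgsz) {
		if (i >= npages)
			return -ENOSPC;
		/* rdma_block_iter_dma_address() is provided by <rdma/ib_verbs.h> */
		pages[i++] = rdma_block_iter_dma_address(&biter);
	}
	return 0;
}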
#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EINVAL);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem,
				    size_t offset, size_t length)
{
	return -EINVAL;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */
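
/*
 * Lifecycle sketch (illustrative; mydrv_pin_user_range is hypothetical and
 * would normally live in a driver's memory-registration path, not in this
 * header): ib_umem_get() pins and DMA-maps the user range, and every
 * successful call must eventually be paired with ib_umem_release().
 */
static inline struct ib_umem *mydrv_pin_user_range(struct ib_device *ibdev,
						   unsigned long start,
						   size_t length)
{
	struct ib_umem *umem;

	umem = ib_umem_get(ibdev, start, length, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return umem;	/* e.g. ERR_PTR(-EINVAL) when user memory is disabled */

	/*
	 * ... program the device translation tables here, typically with
	 * rdma_umem_for_each_dma_block() as sketched above ...
	 */
	return umem;		/* caller pairs this with ib_umem_release(umem) */
}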