genalloc.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory, etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses the
 * allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 */
#ifndef __GENALLOC_H__
#define __GENALLOC_H__

#include <linux/types.h>
#include <linux/spinlock_types.h>
#include <linux/atomic.h>

struct device;
struct device_node;
struct gen_pool;
/**
 * typedef genpool_algo_t: Allocation callback function type definition
 * @map: Pointer to bitmap
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: optional additional data used by the callback
 * @pool: the pool being allocated from
 * @start_addr: start address of the memory chunk being searched
 */
typedef unsigned long (*genpool_algo_t)(unsigned long *map,
			unsigned long size,
			unsigned long start,
			unsigned int nr,
			void *data, struct gen_pool *pool,
			unsigned long start_addr);
/*
 * General purpose special memory pool descriptor.
 */
struct gen_pool {
	spinlock_t lock;
	struct list_head chunks;	/* list of chunks in this pool */
	int min_alloc_order;		/* minimum allocation order */

	genpool_algo_t algo;		/* allocation function */
	void *data;

	const char *name;
};

/*
 * General purpose special memory pool chunk descriptor.
 */
struct gen_pool_chunk {
	struct list_head next_chunk;	/* next chunk in pool */
	atomic_long_t avail;
	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
	void *owner;			/* private data to retrieve at alloc time */
	unsigned long start_addr;	/* start address of memory chunk */
	unsigned long end_addr;		/* end address of memory chunk (inclusive) */
	unsigned long bits[];		/* bitmap for allocating memory chunk */
};

/*
 * gen_pool data descriptor for gen_pool_first_fit_align.
 */
struct genpool_data_align {
	int align;		/* alignment by bytes for starting address */
};

/*
 * gen_pool data descriptor for gen_pool_fixed_alloc.
 */
struct genpool_data_fixed {
	unsigned long offset;		/* The offset of the specific region */
};
extern struct gen_pool *gen_pool_create(int, int);
extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
extern int gen_pool_add_owner(struct gen_pool *, unsigned long, phys_addr_t,
			      size_t, int, void *);

static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
		phys_addr_t phys, size_t size, int nid)
{
	return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
}

/**
 * gen_pool_add - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
			       size_t size, int nid)
{
	return gen_pool_add_virt(pool, addr, -1, size, nid);
}

extern void gen_pool_destroy(struct gen_pool *);
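
/*
 * Illustrative usage sketch, not part of the original header: create a pool
 * with a minimum allocation granularity of 2^4 = 16 bytes and seed it with
 * one chunk of already-mapped special memory.  The function name and the
 * idea of an "SRAM" region are hypothetical examples.
 */
static inline struct gen_pool *example_sram_pool_init(unsigned long sram_virt,
						      size_t sram_size)
{
	struct gen_pool *pool;

	pool = gen_pool_create(4, -1);	/* order 4, no NUMA node preference */
	if (!pool)
		return NULL;

	/* gen_pool_add() returns 0 on success or a -ve errno on failure. */
	if (gen_pool_add(pool, sram_virt, sram_size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}
	return pool;
}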
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner);

static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool,
		size_t size, void **owner)
{
	return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,
			owner);
}

static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool,
		size_t size, genpool_algo_t algo, void *data)
{
	return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL);
}
/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
}
extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
		dma_addr_t *dma);
extern void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data);
extern void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align);
extern void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size,
		dma_addr_t *dma);
extern void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data);
extern void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align);
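
/*
 * Illustrative usage sketch, not part of the original header: carve a zeroed,
 * DMA-addressable buffer out of a pool whose chunks were registered with a
 * valid physical address (e.g. via gen_pool_add_virt()).  The wrapper name is
 * a hypothetical example.
 */
static inline void *example_pool_dma_buffer(struct gen_pool *pool, size_t size,
					    dma_addr_t *dma)
{
	/* Returns the virtual address; *dma receives the DMA/physical address. */
	return gen_pool_dma_zalloc(pool, size, dma);
}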
extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr,
		size_t size, void **owner);

static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr,
		size_t size)
{
	gen_pool_free_owner(pool, addr, size, NULL);
}
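
/*
 * Illustrative usage sketch, not part of the original header: allocate and
 * release a buffer from a previously populated pool.  The 256-byte size is an
 * arbitrary example value.
 */
static inline void example_pool_alloc_free(struct gen_pool *pool)
{
	unsigned long addr;

	addr = gen_pool_alloc(pool, 256);	/* 0 means the pool is exhausted */
	if (!addr)
		return;

	/* ... use the memory at 'addr' ... */

	gen_pool_free(pool, addr, 256);
}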
extern void gen_pool_for_each_chunk(struct gen_pool *,
	void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
extern size_t gen_pool_avail(struct gen_pool *);
extern size_t gen_pool_size(struct gen_pool *);

extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
		void *data);

extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_first_fit_align(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr);
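
/*
 * Illustrative usage sketch, not part of the original header: select a
 * different allocation algorithm, either for every later allocation via
 * gen_pool_set_algo() or for a single allocation via gen_pool_alloc_algo().
 * The alignment and offset values are arbitrary examples.
 */
static inline unsigned long example_pool_algo_usage(struct gen_pool *pool)
{
	/* Must outlive the pool's use of it, since the pool keeps a pointer. */
	static struct genpool_data_align align_data = { .align = 64 };
	struct genpool_data_fixed fixed_data = { .offset = 0x100 };

	/* Make subsequent gen_pool_alloc() calls return 64-byte aligned memory. */
	gen_pool_set_algo(pool, gen_pool_first_fit_align, &align_data);

	/* One-off allocation at a fixed offset from the start of a chunk. */
	return gen_pool_alloc_algo(pool, 128, gen_pool_fixed_alloc, &fixed_data);
}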
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
		int min_alloc_order, int nid, const char *name);
extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);

extern bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
			size_t size);
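
/*
 * Illustrative usage sketch, not part of the original header: a device-managed
 * pool is destroyed automatically on driver detach and can later be looked up
 * by name from the owning device.  The pool name "sram" is a hypothetical
 * example; IS_ERR() comes from <linux/err.h>.
 */
static inline struct gen_pool *example_devm_pool_lookup(struct device *dev)
{
	struct gen_pool *pool;

	pool = devm_gen_pool_create(dev, 4, -1, "sram");
	if (IS_ERR(pool))	/* devm_gen_pool_create() returns an ERR_PTR() on failure */
		return NULL;

	/* Elsewhere in the driver, find the same pool by device and name. */
	return gen_pool_get(dev, "sram");
}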
#ifdef CONFIG_OF
extern struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index);
#else
static inline struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	return NULL;
}
#endif
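
/*
 * Illustrative usage sketch, not part of the original header: resolve a pool
 * referenced by a phandle property in the device tree.  The property name
 * "sram" is a hypothetical example.
 */
static inline struct gen_pool *example_of_pool_lookup(struct device_node *np)
{
	/* Returns NULL if the property is absent or no matching pool exists. */
	return of_gen_pool_get(np, "sram", 0);
}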
#endif /* __GENALLOC_H__ */