xrp_alloc.c

/*
 * Copyright (c) 2016 - 2017 Cadence Design Systems Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Alternatively you can use and distribute this file under the terms of
 * the GNU General Public License version 2 or later.
 */
#ifdef __KERNEL__
#include <asm/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#else
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include "xrp_debug.h"

#define PAGE_SIZE 4096
#define GFP_KERNEL 0
#define ALIGN(v, a) (((v) + (a) - 1) & -(a))
#define GET_PAGE_NUM(size, offset) \
	((((size) + ((offset) & ~PAGE_MASK)) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static void *kmalloc(size_t sz, int flags)
{
	(void)flags;
	return malloc(sz);
}

static void *kzalloc(size_t sz, int flags)
{
	(void)flags;
	return calloc(1, sz);
}

static void kfree(void *p)
{
	free(p);
}
#endif

#include "xrp_private_alloc.h"

#ifndef __KERNEL__
static void mutex_init(struct mutex *mutex)
{
	xrp_mutex_init(&mutex->o);
}

static void mutex_lock(struct mutex *mutex)
{
	xrp_mutex_lock(&mutex->o);
}

static void mutex_unlock(struct mutex *mutex)
{
	xrp_mutex_unlock(&mutex->o);
}

static void atomic_set(atomic_t *p, uint32_t v)
{
	*((volatile atomic_t *)p) = v;
}

#define container_of(ptr, type, member) ({ \
	void *__mptr = (void *)(ptr); \
	((type *)(__mptr - offsetof(type, member))); })
#endif
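
/*
 * A private pool hands out allocations from one physically contiguous
 * region [start, start + size).  Free space is tracked as a singly linked,
 * address-ordered list of xrp_allocation records protected by
 * free_list_lock.
 */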
struct xrp_private_pool {
	struct xrp_allocation_pool pool;
	struct mutex free_list_lock;
	phys_addr_t start;
	u32 size;
	struct xrp_allocation *free_list;
};

static inline void xrp_pool_lock(struct xrp_private_pool *pool)
{
	mutex_lock(&pool->free_list_lock);
}

static inline void xrp_pool_unlock(struct xrp_private_pool *pool)
{
	mutex_unlock(&pool->free_list_lock);
}
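
/*
 * Return an allocation to its pool's free list.  The list is kept sorted by
 * physical address, and the freed block is merged with the preceding and/or
 * following free block whenever they are adjacent, so the pool does not
 * fragment over time.
 */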
static void xrp_private_free(struct xrp_allocation *xrp_allocation)
{
	struct xrp_private_pool *pool = container_of(xrp_allocation->pool,
						     struct xrp_private_pool,
						     pool);
	struct xrp_allocation **pcur;

	pr_debug("%s: %pap x %d\n", __func__,
		 &xrp_allocation->start, xrp_allocation->size);

	xrp_pool_lock(pool);

	for (pcur = &pool->free_list; ; pcur = &(*pcur)->next) {
		struct xrp_allocation *cur = *pcur;

		if (cur && cur->start + cur->size == xrp_allocation->start) {
			struct xrp_allocation *next = cur->next;

			pr_debug("merging block tail: %pap x 0x%x ->\n",
				 &cur->start, cur->size);
			cur->size += xrp_allocation->size;
			pr_debug("... -> %pap x 0x%x\n",
				 &cur->start, cur->size);
			kfree(xrp_allocation);

			if (next && cur->start + cur->size == next->start) {
				pr_debug("merging with next block: %pap x 0x%x ->\n",
					 &cur->start, cur->size);
				cur->size += next->size;
				cur->next = next->next;
				pr_debug("... -> %pap x 0x%x\n",
					 &cur->start, cur->size);
				kfree(next);
			}
			break;
		}
		if (!cur || xrp_allocation->start < cur->start) {
			if (cur && xrp_allocation->start + xrp_allocation->size ==
			    cur->start) {
				pr_debug("merging block head: %pap x 0x%x ->\n",
					 &cur->start, cur->size);
				cur->size += xrp_allocation->size;
				cur->start = xrp_allocation->start;
				pr_debug("... -> %pap x 0x%x\n",
					 &cur->start, cur->size);
				kfree(xrp_allocation);
			} else {
				pr_debug("inserting new free block\n");
				xrp_allocation->next = cur;
				*pcur = xrp_allocation;
			}
			break;
		}
	}

	xrp_pool_unlock(pool);
}

static long xrp_alloc_gfp(u32 size, u32 align, struct xrp_allocation **alloc);
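
/*
 * First-fit allocator over the pool's free list.  Size and alignment are
 * rounded up to whole pages; a fitting free block is either consumed
 * entirely, trimmed at its head or tail, or split in two around the new
 * allocation.  If the private pool cannot satisfy the request, allocation
 * falls back to xrp_alloc_gfp() below.
 */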
static long xrp_private_alloc(struct xrp_allocation_pool *pool,
			      u32 size, u32 align,
			      struct xrp_allocation **alloc)
{
	struct xrp_private_pool *ppool = container_of(pool,
						      struct xrp_private_pool,
						      pool);
	struct xrp_allocation **pcur;
	struct xrp_allocation *cur = NULL;
	struct xrp_allocation *new;
	phys_addr_t aligned_start = 0;
	bool found = false;

	if (!size || (align & (align - 1)))
		return -EINVAL;
	if (!align)
		align = 1;

	new = kzalloc(sizeof(struct xrp_allocation), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	align = ALIGN(align, PAGE_SIZE);
	size = ALIGN(size, PAGE_SIZE);

	xrp_pool_lock(ppool);

	/* on exit free list is fixed */
	for (pcur = &ppool->free_list; *pcur; pcur = &(*pcur)->next) {
		cur = *pcur;
		aligned_start = ALIGN(cur->start, align);

		if (aligned_start >= cur->start &&
		    aligned_start - cur->start + size <= cur->size) {
			if (aligned_start == cur->start) {
				if (aligned_start + size == cur->start + cur->size) {
					pr_debug("reusing complete block: %pap x %x\n",
						 &cur->start, cur->size);
					*pcur = cur->next;
				} else {
					pr_debug("cutting block head: %pap x %x ->\n",
						 &cur->start, cur->size);
					cur->size -= aligned_start + size - cur->start;
					cur->start = aligned_start + size;
					pr_debug("... -> %pap x %x\n",
						 &cur->start, cur->size);
					cur = NULL;
				}
			} else {
				if (aligned_start + size == cur->start + cur->size) {
					pr_debug("cutting block tail: %pap x %x ->\n",
						 &cur->start, cur->size);
					cur->size = aligned_start - cur->start;
					pr_debug("... -> %pap x %x\n",
						 &cur->start, cur->size);
					cur = NULL;
				} else {
					pr_debug("splitting block into two: %pap x %x ->\n",
						 &cur->start, cur->size);
					new->start = aligned_start + size;
					new->size = cur->start +
						cur->size - new->start;
					cur->size = aligned_start - cur->start;
					new->next = cur->next;
					cur->next = new;
					pr_debug("... -> %pap x %x + %pap x %x\n",
						 &cur->start, cur->size,
						 &new->start, new->size);
					cur = NULL;
					new = NULL;
				}
			}
			found = true;
			break;
		} else {
			cur = NULL;
		}
	}

	xrp_pool_unlock(ppool);

	if (!found) {
		kfree(cur);
		kfree(new);
		/* The private pool is exhausted: fall back to the GFP allocator. */
		if (!xrp_alloc_gfp(size, align, alloc))
			return 0;
		return -ENOMEM;
	}

	if (!cur) {
		cur = new;
		new = NULL;
	}
	if (!cur) {
		cur = kzalloc(sizeof(struct xrp_allocation), GFP_KERNEL);
		if (!cur)
			return -ENOMEM;
	}
	if (new)
		kfree(new);

	pr_debug("returning: %pap x %x\n", &aligned_start, size);
	cur->start = aligned_start;
	cur->size = size;
	cur->pool = pool;
	atomic_set(&cur->ref, 0);
	xrp_allocation_get(cur);
	*alloc = cur;
	return 0;
}
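
/*
 * Destroy a private pool.  By this point all allocations are expected to
 * have been freed and merged back into a single free block, so only the
 * free list head and the pool itself need to be released.
 */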
static void xrp_private_free_pool(struct xrp_allocation_pool *pool)
{
	struct xrp_private_pool *ppool = container_of(pool,
						      struct xrp_private_pool,
						      pool);

	kfree(ppool->free_list);
	kfree(ppool);
}
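
/*
 * Note: this variant reports the absolute physical start address of the
 * allocation; the pool-relative form ("- ppool->start") is left commented
 * out below.
 */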
static phys_addr_t xrp_private_offset(const struct xrp_allocation *allocation)
{
	struct xrp_private_pool *ppool = container_of(allocation->pool,
						      struct xrp_private_pool,
						      pool);

	(void)ppool;
	return allocation->start; /* - ppool->start; */
}

static const struct xrp_allocation_ops xrp_private_pool_ops = {
	.alloc = xrp_private_alloc,
	.free = xrp_private_free,
	.free_pool = xrp_private_free_pool,
	.offset = xrp_private_offset,
};
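
/*
 * Create a private pool covering the physically contiguous region
 * [start, start + size).  The whole region starts out as one free block.
 *
 * Typical usage (a sketch; phys_base and region_size are placeholders for
 * whatever reserved region the caller has discovered):
 *
 *	struct xrp_allocation_pool *pool;
 *	struct xrp_allocation *buf;
 *	long ret = xrp_init_private_pool(&pool, phys_base, region_size);
 *
 *	if (!ret)
 *		ret = pool->ops->alloc(pool, 8192, 4096, &buf);
 */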
long xrp_init_private_pool(struct xrp_allocation_pool **ppool,
			   phys_addr_t start, u32 size)
{
	struct xrp_private_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	struct xrp_allocation *allocation = kmalloc(sizeof(*allocation),
						    GFP_KERNEL);

	if (!pool || !allocation) {
		kfree(pool);
		kfree(allocation);
		return -ENOMEM;
	}
	*allocation = (struct xrp_allocation){
		.pool = &pool->pool,
		.start = start,
		.size = size,
	};
	*pool = (struct xrp_private_pool){
		.pool = {
			.ops = &xrp_private_pool_ops,
		},
		.start = start,
		.size = size,
		.free_list = allocation,
	};
	mutex_init(&pool->free_list_lock);
	*ppool = &pool->pool;
	return 0;
}
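
/*
 * Free path for allocations made by xrp_alloc_gfp(): clear the reserved
 * flag on every page, return the whole higher-order block to the page
 * allocator, and release the per-allocation bookkeeping.
 */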
static void xrp_free_gfp(struct xrp_allocation *alloc)
{
	size_t numPages;
	size_t i;
	struct page *page;
	phys_addr_t phys;

	if (!alloc)
		return;

	if ((alloc->size & (PAGE_SIZE - 1)) ||
	    (alloc->start & (PAGE_SIZE - 1))) {
		pr_debug("alloc is not page aligned: addr %pap, size: %d\n",
			 &alloc->start, alloc->size);
		return;
	}

	phys = alloc->start;
	numPages = alloc->size >> PAGE_SHIFT;
	/* Clear the reserved flag set at allocation time on every page. */
	for (i = 0; i < numPages; i++) {
		page = pfn_to_page(__phys_to_pfn(phys));
		ClearPageReserved(page);
		phys += PAGE_SIZE;
	}
	pr_debug("free gfp alloc at phys addr: %pap, size: %d\n",
		 &alloc->start, alloc->size);
	__free_pages(pfn_to_page(__phys_to_pfn(alloc->start)),
		     get_order(alloc->size));
	kfree(alloc->pool);
	kfree(alloc);
}
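
/*
 * GFP-backed allocations carry their own single-use pool, whose ops only
 * need to free the allocation and report its offset.
 */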
static const struct xrp_allocation_ops xrp_gfp_pool_ops = {
	.free = xrp_free_gfp,
	.offset = xrp_private_offset,
};
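
/*
 * Fallback allocator: grab a physically contiguous, page-aligned block of
 * 2^order pages straight from the page allocator (GFP_KERNEL | GFP_DMA),
 * mark each page reserved, and wrap it in a standalone xrp_allocation with
 * its own minimal pool.
 */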
static long xrp_alloc_gfp(u32 size, u32 align,
			  struct xrp_allocation **alloc)
{
	struct xrp_allocation *new;
	size_t numPages;
	struct page *contiguousPages;
	int order;
	size_t i;
	unsigned int gfp = GFP_KERNEL | GFP_DMA | __GFP_NOWARN;

	if (!size || (align & (align - 1)))
		return -EINVAL;
	if (!align)
		align = 1;

	new = kzalloc(sizeof(struct xrp_allocation), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->pool = kzalloc(sizeof(struct xrp_allocation_pool), GFP_KERNEL);
	if (!new->pool)
		goto OnError;
	new->pool->ops = &xrp_gfp_pool_ops;

	align = ALIGN(align, PAGE_SIZE);
	size = ALIGN(size, PAGE_SIZE);
	numPages = size >> PAGE_SHIFT;
	order = get_order(size);
	if (order >= MAX_ORDER) {
		pr_debug("Too big buffer size requested. (order %d >= max %d)\n",
			 order, MAX_ORDER);
		goto TwoError;
	}

	contiguousPages = alloc_pages(gfp, order);
	if (!contiguousPages) {
		pr_debug("alloc_pages failed for order %d\n", order);
		goto TwoError;
	}
	/* Mark each page reserved for the lifetime of the allocation. */
	for (i = 0; i < numPages; i++) {
		struct page *page;

		page = nth_page(contiguousPages, i);
		SetPageReserved(page);
	}

	new->start = page_to_phys(nth_page(contiguousPages, 0));
	new->size = size;
	atomic_set(&new->ref, 0);
	xrp_allocation_get(new);
	*alloc = new;
	pr_debug("alloc by gfp at phys addr: %pap, size: %d\n",
		 &new->start, new->size);
	return 0;

TwoError:
	kfree(new->pool);
OnError:
	kfree(new);
	return -ENOMEM;
}