// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Simon Goldschmidt
 */

#include <common.h>
#include <lmb.h>
#include <dm/test.h>
#include <test/ut.h>
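
/*
 * These tests exercise the lmb (logical memory block) allocator through
 * the calls used below: lmb_init() and lmb_add() describe the available
 * RAM, lmb_reserve() marks a range as used, lmb_alloc(), lmb_alloc_base()
 * and lmb_alloc_addr() allocate from the remaining space (returning 0 on
 * failure), lmb_get_free_size() reports free space at an address, and
 * lmb_free() releases a range again.
 */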

static int check_lmb(struct unit_test_state *uts, struct lmb *lmb,
		     phys_addr_t ram_base, phys_size_t ram_size,
		     unsigned long num_reserved,
		     phys_addr_t base1, phys_size_t size1,
		     phys_addr_t base2, phys_size_t size2,
		     phys_addr_t base3, phys_size_t size3)
{
	if (ram_size) {
		ut_asserteq(lmb->memory.cnt, 1);
		ut_asserteq(lmb->memory.region[0].base, ram_base);
		ut_asserteq(lmb->memory.region[0].size, ram_size);
	}

	ut_asserteq(lmb->reserved.cnt, num_reserved);
	if (num_reserved > 0) {
		ut_asserteq(lmb->reserved.region[0].base, base1);
		ut_asserteq(lmb->reserved.region[0].size, size1);
	}
	if (num_reserved > 1) {
		ut_asserteq(lmb->reserved.region[1].base, base2);
		ut_asserteq(lmb->reserved.region[1].size, size2);
	}
	if (num_reserved > 2) {
		ut_asserteq(lmb->reserved.region[2].base, base3);
		ut_asserteq(lmb->reserved.region[2].size, size3);
	}
	return 0;
}

#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \
		   base2, size2, base3, size3) \
	ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \
			     num_reserved, base1, size1, base2, size2, base3, \
			     size3))
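
/*
 * ASSERT_LMB checks the whole lmb state in one call: the memory region
 * plus up to three reserved regions. The ut_asserteq() calls inside
 * check_lmb() make it return non-zero on the first mismatch, which the
 * outer ut_assert() then reports as a failure in the calling test.
 */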

/*
 * Test helper function that reserves 64 KiB somewhere in the simulated RAM
 * and then does some alloc + free tests.
 */
static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
			    const phys_size_t ram_size, const phys_addr_t ram0,
			    const phys_size_t ram0_size,
			    const phys_addr_t alloc_64k_addr)
{
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;

	struct lmb lmb;
	long ret;
	phys_addr_t a, a2, b, b2, c, d;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);
	ut_assert(alloc_64k_end > alloc_64k_addr);
	/* check input addresses + size */
	ut_assert(alloc_64k_addr >= ram + 8);
	ut_assert(alloc_64k_end <= ram_end - 8);

	lmb_init(&lmb);

	if (ram0_size) {
		ret = lmb_add(&lmb, ram0, ram0_size);
		ut_asserteq(ret, 0);
	}

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	/* reserve 64 KiB somewhere */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate somewhere, should be at the end of RAM */
	a = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, ram_end - 4);
	ASSERT_LMB(&lmb, 0, 0, 2, alloc_64k_addr, 0x10000,
		   ram_end - 4, 4, 0, 0);
	/* allocate below the reserved region's end -> lands directly below it */
	b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, alloc_64k_addr - 4);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);
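
	/*
	 * Note: the new 4-byte block is directly adjacent to the 64 KiB
	 * reservation, so the two coalesce into one region of 0x10000 + 4
	 * bytes and the reserved count stays at 2.
	 */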
	/* 2nd time */
	c = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(c, ram_end - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
	d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(d, alloc_64k_addr - 8);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);

	ret = lmb_free(&lmb, a, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	/* allocate again to ensure we get the same address */
	a2 = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(a, a2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
	ret = lmb_free(&lmb, a2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
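
	/*
	 * b sits in the middle of the merged region below the 64 KiB block,
	 * so freeing it splits that region in two: the reserved list grows
	 * from two entries to three.
	 */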
	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);
	/* allocate again to ensure we get the same address */
	b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
	ut_asserteq(b, b2);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
	ret = lmb_free(&lmb, b2, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 3,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
		   ram_end - 8, 4);

	ret = lmb_free(&lmb, c, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 2,
		   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	if (ram0_size) {
		ut_asserteq(lmb.memory.cnt, 2);
		ut_asserteq(lmb.memory.region[0].base, ram0);
		ut_asserteq(lmb.memory.region[0].size, ram0_size);
		ut_asserteq(lmb.memory.region[1].base, ram);
		ut_asserteq(lmb.memory.region[1].size, ram_size);
	} else {
		ut_asserteq(lmb.memory.cnt, 1);
		ut_asserteq(lmb.memory.region[0].base, ram);
		ut_asserteq(lmb.memory.region[0].size, ram_size);
	}

	return 0;
}

static int test_multi_alloc_512mb(struct unit_test_state *uts,
				  const phys_addr_t ram)
{
	return test_multi_alloc(uts, ram, 0x20000000, 0, 0, ram + 0x10000000);
}

static int test_multi_alloc_512mb_x2(struct unit_test_state *uts,
				     const phys_addr_t ram,
				     const phys_addr_t ram0)
{
	return test_multi_alloc(uts, ram, 0x20000000, ram0, 0x20000000,
				ram + 0x10000000);
}

/* Create a memory region with one reserved region and allocate */
static int lib_test_lmb_simple(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_multi_alloc_512mb(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5 GiB (ending at 4 GiB) */
	return test_multi_alloc_512mb(uts, 0xE0000000);
}
DM_TEST(lib_test_lmb_simple, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

/* Create two memory regions with one reserved region and allocate */
static int lib_test_lmb_simple_x2(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 2 GiB and 1 GiB */
	ret = test_multi_alloc_512mb_x2(uts, 0x80000000, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5 GiB and 1 GiB */
	return test_multi_alloc_512mb_x2(uts, 0xE0000000, 0x40000000);
}
DM_TEST(lib_test_lmb_simple_x2, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, allocate some blocks that fit/don't fit */
static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_size_t big_block_size = 0x10000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_64k_addr = ram + 0x10000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 64 KiB in the middle of RAM */
	ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate a big block, should be below reserved */
	a = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);
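
	/*
	 * The big block ends exactly where the 64 KiB reservation starts,
	 * so the two merge into a single reserved region of
	 * big_block_size + 0x10000 bytes based at a.
	 */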

	/* allocate a 2nd big block */
	/* This should fail, printing an error */
	b = lmb_alloc(&lmb, big_block_size, 1);
	ut_asserteq(b, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a,
		   big_block_size + 0x10000, 0, 0, 0, 0);

	ret = lmb_free(&lmb, a, big_block_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);

	/* allocate a block that is too big to fit */
	/* This should fail, printing an error */
	a = lmb_alloc(&lmb, ram_size, 1);
	ut_asserteq(a, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
		   0, 0, 0, 0);
	return 0;
}

static int lib_test_lmb_big(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_bigblock(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5 GiB (ending at 4 GiB) */
	return test_bigblock(uts, 0xE0000000);
}
DM_TEST(lib_test_lmb_big, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, allocate a block without previous reservation */
static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
			   const phys_addr_t alloc_size, const ulong align)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;
	const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
		~(align - 1);
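
	/*
	 * alloc_size_aligned rounds alloc_size up to the next multiple of
	 * align (which must be a power of two for this bit trick to work),
	 * e.g. the unaligned-size test below requests 5 bytes with align 8,
	 * giving (5 + 7) & ~7 = 8.
	 */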

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block */
	a = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(a != 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* allocate another block */
	b = lmb_alloc(&lmb, alloc_size, align);
	ut_assert(b != 0);
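	/*
	 * If alloc_size is already aligned, the second block lands directly
	 * below the first and the two merge into a single reserved region;
	 * otherwise the alignment padding leaves a gap and two separate
	 * regions remain.
	 */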
	if (alloc_size == alloc_size_aligned) {
		ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
			   0);
	} else {
		ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
			   (alloc_size_aligned * 2), alloc_size, ram + ram_size
			   - alloc_size_aligned, alloc_size, 0, 0);
	}
	/* and free them */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	ret = lmb_free(&lmb, a, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	/* allocate a block with base */
	b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
	ut_assert(a == b);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
		   alloc_size, 0, 0, 0, 0);
	/* and free it */
	ret = lmb_free(&lmb, b, alloc_size);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

	return 0;
}

static int lib_test_lmb_noreserved(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_noreserved(uts, 0x40000000, 4, 1);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5 GiB (ending at 4 GiB) */
	return test_noreserved(uts, 0xE0000000, 4, 1);
}
DM_TEST(lib_test_lmb_noreserved, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

static int lib_test_lmb_unaligned_size(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_noreserved(uts, 0x40000000, 5, 8);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5 GiB (ending at 4 GiB) */
	return test_noreserved(uts, 0xE0000000, 5, 8);
}
DM_TEST(lib_test_lmb_unaligned_size, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

/*
 * Simulate a RAM that starts at 0 and allocate down to address 0, which must
 * fail as '0' means failure for the lmb_alloc functions.
 */
static int lib_test_lmb_at_0(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* allocate nearly everything */
	a = lmb_alloc(&lmb, ram_size - 4, 1);
	ut_asserteq(a, ram + 4);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* allocate the rest */
	/* This should fail as the allocated address would be 0 */
	b = lmb_alloc(&lmb, 4, 1);
	ut_asserteq(b, 0);
	/* check that this was an error by checking the lmb state */
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);
	/* check that this was an error by freeing b */
	ret = lmb_free(&lmb, b, 4);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, a, ram_size - 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
	return 0;
}
DM_TEST(lib_test_lmb_at_0, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

/* Check that calling lmb_reserve with overlapping regions fails. */
static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	struct lmb lmb;
	long ret;

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* reserving an overlapping region should fail */
	ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
	ut_asserteq(ret, -1);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
		   0, 0, 0, 0);
	/* reserve a 3rd region */
	ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
		   0x40030000, 0x10000, 0, 0);
	/*
	 * reserve the 2nd region in between; it is adjacent to both
	 * neighbours, so all three coalesce into one region
	 */
	ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
	ut_assert(ret >= 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
		   0, 0, 0, 0);
	return 0;
}
DM_TEST(lib_test_lmb_overlapping_reserve,
	DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

/*
 * Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between.
 * Expect addresses outside the memory range to fail.
 */
static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_addr_a = ram + 0x8000000;
	const phys_addr_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_addr_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_addr_t a, b, c, d, e;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
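
	/*
	 * lmb_alloc_addr() requests a block at a fixed address: on success
	 * it returns exactly the requested address, on failure 0. Each gap
	 * allocated below is adjacent to a reserved block, so the regions
	 * merge step by step until all of RAM is one reserved region.
	 */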
	/* allocate blocks */
	a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram);
	ut_asserteq(a, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
	b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000,
			   alloc_addr_b - alloc_addr_a - 0x10000);
	ut_asserteq(b, alloc_addr_a + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000,
		   alloc_addr_c, 0x10000, 0, 0);
	c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000,
			   alloc_addr_c - alloc_addr_b - 0x10000);
	ut_asserteq(c, alloc_addr_b + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000,
			   ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);
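
	/* at this point a single reserved region covers all of RAM */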
	/* allocating anything else should fail */
	e = lmb_alloc(&lmb, 1, 1);
	ut_asserteq(e, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
		   0, 0, 0, 0);

	ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000);
	ut_asserteq(ret, 0);

	/* allocate at 3 points in free range */

	d = lmb_alloc_addr(&lmb, ram_end - 4, 4);
	ut_asserteq(d, ram_end - 4);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(&lmb, ram_end - 128, 4);
	ut_asserteq(d, ram_end - 128);
	ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
		   d, 4, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4);
	ut_asserteq(d, alloc_addr_c + 0x10000);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004,
		   0, 0, 0, 0);
	ret = lmb_free(&lmb, d, 4);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
		   0, 0, 0, 0);

	/* allocate at the bottom */
	ret = lmb_free(&lmb, a, alloc_addr_a - ram);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000,
		   0, 0, 0, 0);
	d = lmb_alloc_addr(&lmb, ram, 4);
	ut_asserteq(d, ram);
	ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4,
		   ram + 0x8000000, 0x10010000, 0, 0);

	/* check that allocating outside memory fails */
	if (ram_end != 0) {
		ret = lmb_alloc_addr(&lmb, ram_end, 1);
		ut_asserteq(ret, 0);
	}
	if (ram != 0) {
		ret = lmb_alloc_addr(&lmb, ram - 1, 1);
		ut_asserteq(ret, 0);
	}

	return 0;
}

static int lib_test_lmb_alloc_addr(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_alloc_addr(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5 GiB (ending at 4 GiB) */
	return test_alloc_addr(uts, 0xE0000000);
}
DM_TEST(lib_test_lmb_alloc_addr, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between */
static int test_get_unreserved_size(struct unit_test_state *uts,
				    const phys_addr_t ram)
{
	const phys_size_t ram_size = 0x20000000;
	const phys_addr_t ram_end = ram + ram_size;
	const phys_addr_t alloc_addr_a = ram + 0x8000000;
	const phys_addr_t alloc_addr_b = ram + 0x8000000 * 2;
	const phys_addr_t alloc_addr_c = ram + 0x8000000 * 3;
	struct lmb lmb;
	long ret;
	phys_size_t s;

	/* check for overflow */
	ut_assert(ram_end == 0 || ram_end > ram);

	lmb_init(&lmb);

	ret = lmb_add(&lmb, ram, ram_size);
	ut_asserteq(ret, 0);

	/* reserve 3 blocks */
	ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
	ut_asserteq(ret, 0);
	ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
	ut_asserteq(ret, 0);
	ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
		   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
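
	/*
	 * lmb_get_free_size(addr) is expected to return the number of
	 * contiguous free bytes starting at addr, i.e. up to the next
	 * reserved region or the end of RAM, as the checks below verify.
	 */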
	/* check addresses in between blocks */
	s = lmb_get_free_size(&lmb, ram);
	ut_asserteq(s, alloc_addr_a - ram);
	s = lmb_get_free_size(&lmb, ram + 0x10000);
	ut_asserteq(s, alloc_addr_a - ram - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a - 4);
	ut_asserteq(s, 4);

	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x10000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_a + 0x20000);
	ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
	s = lmb_get_free_size(&lmb, alloc_addr_b - 4);
	ut_asserteq(s, 4);

	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x10000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
	s = lmb_get_free_size(&lmb, alloc_addr_c + 0x20000);
	ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
	s = lmb_get_free_size(&lmb, ram_end - 4);
	ut_asserteq(s, 4);

	return 0;
}

static int lib_test_lmb_get_free_size(struct unit_test_state *uts)
{
	int ret;

	/* simulate 512 MiB RAM beginning at 1 GiB */
	ret = test_get_unreserved_size(uts, 0x40000000);
	if (ret)
		return ret;

	/* simulate 512 MiB RAM beginning at 3.5 GiB (ending at 4 GiB) */
	return test_get_unreserved_size(uts, 0xE0000000);
}
DM_TEST(lib_test_lmb_get_free_size,
	DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);