of_reserved_mem.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>

#define MAX_RESERVED_REGIONS	64
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
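
/*
 * Find a free range with memblock and claim it.  For 'no-map' regions the
 * range is removed from the memblock tables entirely, so the kernel never
 * creates a linear mapping for it; otherwise the range is merely reserved.
 */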
static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_find_in_range(start, end, size, align);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap)
		return memblock_remove(base, size);

	return memblock_reserve(base, size);
}

/**
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
				       phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	reserved_mem_count++;
	return;
}

/**
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 */
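/*
 * For illustration, a dynamically placed region as it might appear in a
 * device tree source (node name and property values are made up):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		multimedia_pool: multimedia {
 *			compatible = "shared-dma-pool";
 *			size = <0x4000000>;
 *			alignment = <0x100000>;
 *			alloc-ranges = <0x40000000 0x20000000>;
 *			reusable;
 *		};
 *	};
 *
 * Only 'size' is mandatory here; 'alignment' and 'alloc-ranges' constrain
 * where the region may be placed.
 */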
static int __init __reserved_mem_alloc_size(unsigned long node,
	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
				uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap) {
		unsigned long order =
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

		align = max(align, (phys_addr_t)PAGE_SIZE << order);
	}
	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {
		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = early_init_dt_alloc_reserved_memory_arch(size,
					align, start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					uname, &base,
					(unsigned long)(size / SZ_1M));
				break;
			}
			len -= t_len;
		}

	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_info("failed to allocate memory for node '%s'\n", uname);
		return -ENOMEM;
	}

	*res_base = base;
	*res_size = size;

	return 0;
}

static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");

/**
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}
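
/*
 * Entries in __reservedmem_of_table are registered with the
 * RESERVEDMEM_OF_DECLARE() macro from <linux/of_reserved_mem.h>.  A minimal
 * sketch (the compatible string and function names are hypothetical):
 *
 *	static int __init my_pool_init(struct reserved_mem *rmem)
 *	{
 *		pr_info("my-pool at %pa, size %pa\n",
 *			&rmem->base, &rmem->size);
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", my_pool_init);
 */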
static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);

	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;

			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
 */
void __init fdt_init_reserved_mem(void)
{
	int i;

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];
		unsigned long node = rmem->fdt_node;
		int len;
		const __be32 *prop;
		int err = 0;
		bool nomap;

		nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
		prop = of_get_flat_dt_prop(node, "phandle", &len);
		if (!prop)
			prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
		if (prop)
			rmem->phandle = of_read_number(prop, len/4);

		if (rmem->size == 0)
			err = __reserved_mem_alloc_size(node, rmem->name,
						&rmem->base, &rmem->size);
		if (err == 0) {
			err = __reserved_mem_init_node(rmem);
			if (err != 0 && err != -ENOENT) {
				pr_info("node %s compatible matching failed\n",
					rmem->name);
				memblock_free(rmem->base, rmem->size);
				if (nomap)
					memblock_add(rmem->base, rmem->size);
			}
		}
	}
}

static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
	unsigned int i;

	if (!node->phandle)
		return NULL;

	for (i = 0; i < reserved_mem_count; i++)
		if (reserved_mem[i].phandle == node->phandle)
			return &reserved_mem[i];
	return NULL;
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *					  given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'memory-region' property
 * @idx:	Index of selected region
 *
 * This function assigns respective DMA-mapping operations based on the
 * reserved memory region specified by the 'memory-region' property in the
 * @np node to the @dev device. When a driver needs to use more than one
 * reserved memory region, it should allocate child devices and initialize
 * a region for each of them.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = __find_rmem(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
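
/*
 * A minimal usage sketch from a hypothetical platform driver's probe, which
 * attaches the first 'memory-region' phandle of its node to the device:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = of_reserved_mem_device_init_by_idx(&pdev->dev,
 *							 pdev->dev.of_node, 0);
 *		if (ret && ret != -ENODEV)
 *			return ret;
 *		...
 *	}
 */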

/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *					   to given device
 * @dev:	pointer to the device to configure
 * @np:		pointer to the device node with 'memory-region' property
 * @name:	name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
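
/*
 * For illustration, the matching device tree properties (node name, labels
 * and region names are made up):
 *
 *	my-device {
 *		memory-region = <&fb_reserved>, <&dma_pool>;
 *		memory-region-names = "framebuffer", "dma";
 *	};
 *
 * of_reserved_mem_device_init_by_name(dev, np, "dma") then resolves to
 * index 1 and behaves like of_reserved_mem_device_init_by_idx(dev, np, 1).
 */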

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
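
/*
 * A driver that called one of the of_reserved_mem_device_init_*() helpers in
 * its probe path would typically call of_reserved_mem_device_release() from
 * its remove (or unbind) path to undo the assignment.
 */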

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
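
/*
 * A minimal lookup sketch for a driver that wants the region's address and
 * size directly (the property index is hypothetical):
 *
 *	struct device_node *mem_np;
 *	struct reserved_mem *rmem;
 *
 *	mem_np = of_parse_phandle(dev->of_node, "memory-region", 0);
 *	if (!mem_np)
 *		return -ENODEV;
 *	rmem = of_reserved_mem_lookup(mem_np);
 *	of_node_put(mem_np);
 *	if (!rmem)
 *		return -ENODEV;
 *	dev_info(dev, "region at %pa, size %pa\n", &rmem->base, &rmem->size);
 */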