early_ioremap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
#include <asm/early_ioremap.h>

#ifdef CONFIG_MMU
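
/*
 * Set via the "early_ioremap_debug" boot parameter; when non-zero, every
 * early mapping and unmapping is reported through WARN() so leaks can be
 * traced back to their caller.
 */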
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
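
/*
 * Becomes non-zero once early_ioremap_reset() has run, i.e. once the
 * architecture's normal paging/fixmap code has taken over; from then on
 * the __late_set_fixmap()/__late_clear_fixmap() hooks are used instead of
 * the early fixmap helpers.
 */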
static int after_paging_init __initdata;
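
/*
 * Weak default: architectures may override this to adjust the protection
 * bits used for early memremap() mappings (e.g. to account for memory
 * encryption). The generic version leaves the requested protections as-is.
 */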
pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr,
						    unsigned long size,
						    pgprot_t prot)
{
	return prot;
}

void __init __weak early_ioremap_shutdown(void)
{
}

void __init early_ioremap_reset(void)
{
	early_ioremap_shutdown();
	after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif
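
/*
 * Per-slot bookkeeping: prev_map[] holds the pointer handed out for each
 * active slot (NULL means the slot is free), prev_size[] the size that was
 * requested, and slot_virt[] the fixmap virtual base address of each slot.
 */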
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
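
/*
 * Called by the architecture early in boot, once the fixmap region exists,
 * to record the virtual address of each temporary mapping slot. Warns if a
 * slot is already in use at that point.
 */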
void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (WARN_ON(prev_map[i]))
			break;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}
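
/*
 * Late initcall sanity check: any slot still mapped at this point was
 * leaked by an early boot user that never called early_iounmap().
 */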
static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "please boot with early_ioremap_debug and report the dmesg.\n",
		 count))
		return 1;
	return 0;
}
late_initcall(check_early_ioremap_leak);
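
/*
 * Core mapping routine: claim a free slot, round the request out to whole
 * pages, and map the range page by page into the slot's fixmap entries
 * (via the late hooks once paging is fully up). Returns the slot's virtual
 * address plus the sub-page offset of phys_addr, or NULL on failure.
 */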
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state >= SYSTEM_RUNNING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%pa, %08lx) not found slot\n",
		 __func__, &phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = offset_in_page(phys_addr);
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	WARN(early_ioremap_debug, "%s(%pa, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, &phys_addr, size, slot, offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}
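
/*
 * Tear down a mapping created by __early_ioremap(): the address must match
 * one returned earlier and the size must match the original request, so
 * mismatched unmaps are caught with a warning.
 */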
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "early_iounmap(%p, %08lx) not found slot\n",
		 addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
		 addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
	     addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = offset_in_page(virt_addr);
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_NORMAL);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
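
/* Read-only variant, available only if the architecture defines FIXMAP_PAGE_RO. */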
#ifdef FIXMAP_PAGE_RO
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_RO);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif
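
/* Remap memory with page protection bits supplied explicitly by the caller. */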
#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
		    unsigned long prot_val)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       __pgprot(prot_val));
}
#endif
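
/*
 * copy_from_early_mem() copies from a physical address into dest by mapping
 * the source in windows of at most MAX_MAP_CHUNK bytes at a time, so regions
 * larger than the temporary mapping area can be copied during early boot.
 */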
#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)

void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
	unsigned long slop, clen;
	char *p;

	while (size) {
		slop = offset_in_page(src);
		clen = size;
		if (clen > MAX_MAP_CHUNK - slop)
			clen = MAX_MAP_CHUNK - slop;
		p = early_memremap(src & PAGE_MASK, clen + slop);
		memcpy(dest, p + slop, clen);
		early_memunmap(p, clen + slop);
		dest += clen;
		src += clen;
		size -= clen;
	}
}

#else /* CONFIG_MMU */
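
/*
 * Without an MMU, physical addresses can be used directly, so the early
 * remap helpers are simple identity stubs.
 */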
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */
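
/*
 * Common to both configurations: early_memremap*() users release their
 * mapping through this wrapper, which casts back to an __iomem pointer.
 */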
void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}