ioremap.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
                                      pgprot_t prot, void *caller)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        int err;
        unsigned long addr;
        struct vm_struct *area;

        /*
         * Page align the mapping address and size, taking account of any
         * offset.
         */
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(size + offset);
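
        /*
         * Worked example (illustrative values, not from the original
         * file): with 4 KiB pages, a request for phys_addr 0x3f200034
         * and size 0x10 gives offset 0x34; phys_addr is rounded down to
         * 0x3f200000 and size rounded up to PAGE_ALIGN(0x44) = 0x1000,
         * so the whole containing page is mapped and the caller gets
         * back base + 0x34.
         */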

        /*
         * Don't allow wraparound, zero size or outside PHYS_MASK.
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
                return NULL;
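
        /*
         * Illustrative example (not from the original file): with a
         * 48-bit PHYS_MASK, a 0x2000-byte request at 0x0000ffffffffe000
         * passes (last_addr is exactly PHYS_MASK), but the same request
         * at 0x0000fffffffff000 yields last_addr 0x0001000000000fff,
         * which has bits above PHYS_MASK set and is rejected.
         */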

        /*
         * Don't allow RAM to be mapped: it already has a cacheable alias
         * in the linear map, and the architecture does not guarantee
         * coherent behaviour for aliases with mismatched attributes.
         */
        if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
                return NULL;

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
        area->phys_addr = phys_addr;

        err = ioremap_page_range(addr, addr + size, phys_addr, prot);
        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        return (void __iomem *)(offset + addr);
}

void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
        return __ioremap_caller(phys_addr, size, prot,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(__ioremap);

void iounmap(volatile void __iomem *io_addr)
{
        unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

        /*
         * We could get an address outside vmalloc range in case
         * of ioremap_cache() reusing a RAM mapping.
         */
        if (is_vmalloc_addr((void *)addr))
                vunmap((void *)addr);
}
EXPORT_SYMBOL(iounmap);
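
/*
 * A minimal usage sketch (not part of the original file): a hypothetical
 * driver maps a device's register window with __ioremap() and releases it
 * with iounmap(). EXAMPLE_PHYS_BASE and the register offset at +0x1c are
 * made-up values; real drivers normally go through the ioremap() or
 * devm_ioremap() wrappers rather than calling __ioremap() directly.
 */
#if 0
#define EXAMPLE_PHYS_BASE	0x3f200000UL	/* hypothetical MMIO base */

static int example_probe(void)
{
        void __iomem *regs;

        /* Device-nGnRE is the usual attribute for MMIO on arm64. */
        regs = __ioremap(EXAMPLE_PHYS_BASE, SZ_4K,
                         __pgprot(PROT_DEVICE_nGnRE));
        if (!regs)
                return -ENOMEM;

        writel(0x1, regs + 0x1c);	/* hypothetical register at +0x1c */
        iounmap(regs);
        return 0;
}
#endif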

void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
        /* For normal memory we already have a cacheable mapping. */
        if (pfn_valid(__phys_to_pfn(phys_addr)))
                return (void __iomem *)__phys_to_virt(phys_addr);

        return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
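
/*
 * Note (added commentary): ioremap_cache() has two return paths. For a
 * pfn_valid() physical address it hands back the existing linear-map
 * alias, which is why iounmap() above checks is_vmalloc_addr() before
 * calling vunmap(); for anything else it builds a fresh PROT_NORMAL
 * mapping in vmalloc space that vunmap() can tear down.
 */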

/*
 * Must be called after early_fixmap_init(), since the early ioremap
 * slots configured here live in the fixmap region.
 */
void __init early_ioremap_init(void)
{
        early_ioremap_setup();
}