dma.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 */

#include <linux/dma-map-ops.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>
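
/*
 * Page-walk callback: mark one kernel page as cache-inhibited. Invoked
 * by walk_page_range() for each PTE in the range that
 * arch_dma_set_uncached() below hands it. Besides setting _PAGE_CI, the
 * stale TLB entry and any dirty dcache lines for the page are flushed
 * so the next access observes the new, uncached mapping.
 */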
static int
page_set_nocache(pte_t *pte, unsigned long addr,
                 unsigned long next, struct mm_walk *walk)
{
        unsigned long cl;
        struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

        pte_val(*pte) |= _PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

        /* Flush page out of dcache */
        for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
                mtspr(SPR_DCBFR, cl);

        return 0;
}

static const struct mm_walk_ops set_nocache_walk_ops = {
        .pte_entry = page_set_nocache,
};
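
/*
 * Page-walk callback: the inverse of page_set_nocache(). Clears
 * _PAGE_CI and flushes the stale TLB entry; no dcache maintenance is
 * needed here since the page was not cacheable while the bit was set.
 */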
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
                   unsigned long next, struct mm_walk *walk)
{
        pte_val(*pte) &= ~_PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

        return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
        .pte_entry = page_clear_nocache,
};
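
/*
 * Hook called by the generic DMA-direct allocator (the architecture
 * selects ARCH_HAS_DMA_SET_UNCACHED) to make a freshly allocated
 * coherent buffer uncacheable. OpenRISC has no uncached alias window to
 * return, so the existing kernel mapping is remapped in place by
 * walking its PTEs.
 */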
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
        unsigned long va = (unsigned long)cpu_addr;
        int error;

        /*
         * We need to iterate through the pages, clearing the dcache for
         * them and setting the cache-inhibit bit.
         */
        mmap_read_lock(&init_mm);
        error = walk_page_range(&init_mm, va, va + size,
                                &set_nocache_walk_ops, NULL);
        mmap_read_unlock(&init_mm);

        if (error)
                return ERR_PTR(error);
        return cpu_addr;
}
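
/*
 * Illustrative call path (a sketch, not part of this file): a driver's
 * coherent allocation reaches the hook above via dma-direct, roughly:
 *
 *        void *cpu;
 *        dma_addr_t handle;
 *
 *        cpu = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 * where dma_direct_alloc() allocates and zeroes the pages, then calls
 * arch_dma_set_uncached() on the result before returning it, so "cpu"
 * refers to cache-inhibited memory from the driver's point of view.
 */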
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
        unsigned long va = (unsigned long)cpu_addr;

        mmap_read_lock(&init_mm);
        /* walk_page_range shouldn't be able to fail here */
        WARN_ON(walk_page_range(&init_mm, va, va + size,
                                &clear_nocache_walk_ops, NULL));
        mmap_read_unlock(&init_mm);
}
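
/*
 * Streaming-DMA cache maintenance, called by the generic map/unmap
 * helpers before the device touches the buffer: write back (flush)
 * dirty lines for DMA_TO_DEVICE so the device sees the CPU's data, and
 * invalidate for DMA_FROM_DEVICE so the CPU rereads what the device
 * writes rather than stale cached lines.
 */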
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
                              enum dma_data_direction dir)
{
        unsigned long cl;
        struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

        switch (dir) {
        case DMA_TO_DEVICE:
                /* Flush the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo->dcache_block_size)
                        mtspr(SPR_DCBFR, cl);
                break;
        case DMA_FROM_DEVICE:
                /* Invalidate the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo->dcache_block_size)
                        mtspr(SPR_DCBIR, cl);
                break;
        default:
                /*
                 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
                 * flush nor invalidate the cache here as the area will need
                 * to be manually synced anyway.
                 */
                break;
        }
}
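
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * driver receiving data into "buf" goes through the standard streaming
 * API, which ends up in arch_sync_dma_for_device() with DMA_FROM_DEVICE
 * and invalidates the buffer's cache lines before the transfer:
 *
 *        dma_addr_t handle;
 *
 *        handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *        if (dma_mapping_error(dev, handle))
 *                return -ENOMEM;
 *
 *        ... device writes into buf by DMA ...
 *
 *        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *
 * after which buf holds the device-written data.
 */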