tlbflush.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_TLBFLUSH_H
#define _ASM_RISCV_TLBFLUSH_H

#include <linux/mm_types.h>
#include <linux/sizes.h>	/* for SZ_1M in flush_tlb_kernel_range() */
#include <asm/smp.h>
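
/*
 * Some cores (e.g. T-Head's C9xx series) lack a usable sfence.vma
 * instruction; CONFIG_NO_SFENCE_VMA selects a vendor MMU-command CSR
 * instead. Writing bit 26 of CSR_SMCIR appears to invalidate the
 * entire local TLB: both the "all" and the per-page helpers below
 * issue the same write, so per-page granularity is lost on such parts.
 */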

#ifdef CONFIG_MMU
/* Flush the entire local TLB */
static inline void local_flush_tlb_all(void)
{
#ifdef CONFIG_NO_SFENCE_VMA
	csr_write(CSR_SMCIR, 1 << 26);
#else
	__asm__ __volatile__ ("sfence.vma" : : : "memory");
#endif
}

/* Flush one page from the local TLB */
static inline void local_flush_tlb_page(unsigned long addr)
{
#ifdef CONFIG_NO_SFENCE_VMA
	/* No per-address invalidate available: flush the whole TLB */
	csr_write(CSR_SMCIR, 1 << 26);
#else
	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
#endif
}
#else /* CONFIG_MMU */
#define local_flush_tlb_all()			do { } while (0)
#define local_flush_tlb_page(addr)		do { } while (0)
#endif /* CONFIG_MMU */
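
/*
 * With SMP, the flush_tlb_*() helpers are out of line (in mainline
 * kernels they live in arch/riscv/mm/tlbflush.c and reach remote harts
 * through SBI remote-fence calls); without SMP they collapse to the
 * local primitives above.
 */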

#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end);
#else /* CONFIG_SMP && CONFIG_MMU */

#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)

static inline void flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	/* No ranged flush primitive on UP: flush everything locally */
	local_flush_tlb_all();
}

#define flush_tlb_mm(mm) flush_tlb_all()
#endif /* !CONFIG_SMP || !CONFIG_MMU */
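
/*
 * Note on scope: flush_tlb_kernel_range() below fences the local hart
 * only, except that its large-range fallback calls flush_tlb_all(),
 * which is global when SMP is enabled.
 */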

/* Flush a range of kernel pages */
static inline void flush_tlb_kernel_range(unsigned long start,
	unsigned long end)
{
#ifdef CONFIG_NO_SFENCE_VMA
	csr_write(CSR_SMCIR, 1 << 26);
#else
	start &= PAGE_MASK;
	end += PAGE_SIZE - 1;
	end &= PAGE_MASK;

	/*
	 * Per-page fences get expensive for large ranges; beyond 1 MiB,
	 * one full flush is cheaper than the loop below.
	 */
	if ((end - start) > SZ_1M) {
		flush_tlb_all();
		return;
	}

	while (start < end) {
		__asm__ __volatile__ ("sfence.vma %0" : : "r" (start) : "memory");
		start += PAGE_SIZE;
	}
#endif
}
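
/*
 * Minimal usage sketch (hypothetical caller): after modifying kernel
 * page tables, e.g. for a vmalloc-style region, stale translations
 * must be flushed before the range is used:
 *
 *	set_pte(ptep, new_pte);
 *	flush_tlb_kernel_range(start, start + size);
 */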
#endif /* _ASM_RISCV_TLBFLUSH_H */