irq_64.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/sched/task_stack.h>

#include <asm/cpu_entry_area.h>
#include <asm/irq_stack.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
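
/*
 * Per-CPU backing store for the hard interrupt stack. It is page aligned
 * so that, with CONFIG_VMAP_STACK, its individual pages can be remapped
 * into a guarded vmalloc area by map_irq_stack() below.
 */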
DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
DECLARE_INIT_PER_CPU(irq_stack_backing_store);

#ifdef CONFIG_VMAP_STACK
/*
 * VMAP the backing store with guard pages
 */
static int map_irq_stack(unsigned int cpu)
{
	char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu);
	struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];
	void *va;
	int i;
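
	/*
	 * Collect the struct page for each physical page that backs this
	 * CPU's slice of the per-CPU area.
	 */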
	for (i = 0; i < IRQ_STACK_SIZE / PAGE_SIZE; i++) {
		phys_addr_t pa = per_cpu_ptr_to_phys(stack + (i << PAGE_SHIFT));

		pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
	}
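
	/*
	 * Remap those pages contiguously in vmalloc space. The vmalloc area
	 * carries an unmapped guard page after it, so an overflow of the irq
	 * stack faults instead of silently corrupting adjacent memory.
	 */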
	va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);
	if (!va)
		return -ENOMEM;
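
	/* The stack grows down, so point the pointer at the top of the mapping. */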
	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
	return 0;
}
#else
/*
 * If VMAP stacks are disabled due to KASAN, just use the per cpu
 * backing store without guard pages.
 */
static int map_irq_stack(unsigned int cpu)
{
	void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);

	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
	return 0;
}
#endif
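
/*
 * Set up the hard interrupt stack for @cpu. Idempotent: if the per-CPU
 * stack pointer is already populated (e.g. on CPU re-plug), nothing is
 * mapped again.
 */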
int irq_init_percpu_irqstack(unsigned int cpu)
{
	if (per_cpu(hardirq_stack_ptr, cpu))
		return 0;
	return map_irq_stack(cpu);
}
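
/*
 * Execute pending softirqs on the hard interrupt stack;
 * run_on_irqstack_cond() switches to that stack unless we are already
 * running on it.
 */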
void do_softirq_own_stack(void)
{
	run_on_irqstack_cond(__do_softirq, NULL);
}
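
/*
 * For reference (a best-effort note, not part of the original file):
 * irq_init_percpu_irqstack() is expected to be called once per CPU during
 * bringup, e.g. from init_IRQ() for the boot CPU, before the first
 * interrupt can land on the irq stack.
 */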