@@ -0,0 +1,4269 @@
+diff -Nur linux-5.4.36/arch/riscv/include/asm/asid.h kernel/arch/riscv/include/asm/asid.h
+--- linux-5.4.36/arch/riscv/include/asm/asid.h 1970-01-01 00:00:00.000000000 +0000
++++ kernel/arch/riscv/include/asm/asid.h 2020-09-03 06:01:13.901989796 +0000
+@@ -0,0 +1,78 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __ASM_ASM_ASID_H
++#define __ASM_ASM_ASID_H
++
++#include <linux/atomic.h>
++#include <linux/compiler.h>
++#include <linux/cpumask.h>
++#include <linux/percpu.h>
++#include <linux/spinlock.h>
++
++struct asid_info
++{
++ atomic64_t generation;
++ unsigned long *map;
++ atomic64_t __percpu *active;
++ u64 __percpu *reserved;
++ u32 bits;
++ /* Lock protecting the structure */
++ raw_spinlock_t lock;
++ /* Which CPU requires context flush on next call */
++ cpumask_t flush_pending;
++ /* Number of ASID allocated by context (shift value) */
++ unsigned int ctxt_shift;
++ /* Callback to locally flush the context. */
++ void (*flush_cpu_ctxt_cb)(void);
++};
++
++#define NUM_ASIDS(info) (1UL << ((info)->bits))
++#define NUM_CTXT_ASIDS(info) (NUM_ASIDS(info) >> (info)->ctxt_shift)
++
++#define active_asid(info, cpu) *per_cpu_ptr((info)->active, cpu)
++
++void asid_new_context(struct asid_info *info, atomic64_t *pasid,
++ unsigned int cpu, struct mm_struct *mm);
++
++/*
++ * Check the ASID is still valid for the context. If not generate a new ASID.
++ *
++ * @pasid: Pointer to the current ASID batch
++ * @cpu: current CPU ID. Must have been acquired through get_cpu()
++ */
++static inline void asid_check_context(struct asid_info *info,
++ atomic64_t *pasid, unsigned int cpu,
++ struct mm_struct *mm)
++{
++ u64 asid, old_active_asid;
++
++ asid = atomic64_read(pasid);
++
++ /*
++ * The memory ordering here is subtle.
++ * If our active_asid is non-zero and the ASID matches the current
++ * generation, then we update the active_asid entry with a relaxed
++ * cmpxchg. Racing with a concurrent rollover means that either:
++ *
++ * - We get a zero back from the cmpxchg and end up waiting on the
++ * lock. Taking the lock synchronises with the rollover and so
++ * we are forced to see the updated generation.
++ *
++ * - We get a valid ASID back from the cmpxchg, which means the
++ * relaxed xchg in flush_context will treat us as reserved
++ * because atomic RmWs are totally ordered for a given location.
++ */
++ old_active_asid = atomic64_read(&active_asid(info, cpu));
++ if (old_active_asid &&
++ !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
++ atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
++ old_active_asid, asid))
++ return;
++
++ asid_new_context(info, pasid, cpu, mm);
++}
++
++int asid_allocator_init(struct asid_info *info,
++ u32 bits, unsigned int asid_per_ctxt,
++ void (*flush_cpu_ctxt_cb)(void));
++
++#endif
+diff -Nur linux-5.4.36/arch/riscv/include/asm/cacheflush.h kernel/arch/riscv/include/asm/cacheflush.h
+--- linux-5.4.36/arch/riscv/include/asm/cacheflush.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/cacheflush.h 2020-09-03 06:01:13.902989796 +0000
+@@ -99,6 +99,9 @@
+
+ #endif /* CONFIG_SMP */
+
++void dma_wbinv_range(unsigned long start, unsigned long end);
++void dma_wb_range(unsigned long start, unsigned long end);
++
+ /*
+ * Bits in sys_riscv_flush_icache()'s flags argument.
+ */
+diff -Nur linux-5.4.36/arch/riscv/include/asm/csr.h kernel/arch/riscv/include/asm/csr.h
+--- linux-5.4.36/arch/riscv/include/asm/csr.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/csr.h 2020-09-03 06:01:13.902989796 +0000
+@@ -21,6 +21,12 @@
+ #define SR_FS_CLEAN _AC(0x00004000, UL)
+ #define SR_FS_DIRTY _AC(0x00006000, UL)
+
++#define SR_VS _AC(0x01800000, UL) /* Vector Status */
++#define SR_VS_OFF _AC(0x00000000, UL)
++#define SR_VS_INITIAL _AC(0x00800000, UL)
++#define SR_VS_CLEAN _AC(0x01000000, UL)
++#define SR_VS_DIRTY _AC(0x01800000, UL)
++
+ #define SR_XS _AC(0x00018000, UL) /* Extension Status */
+ #define SR_XS_OFF _AC(0x00000000, UL)
+ #define SR_XS_INITIAL _AC(0x00008000, UL)
+@@ -42,6 +48,9 @@
+ #define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
+ #define SATP_MODE_39 _AC(0x8000000000000000, UL)
+ #define SATP_MODE SATP_MODE_39
++#define SATP_ASID_BITS 16
++#define SATP_ASID_SHIFT 44
++#define SATP_ASID_MASK _AC(0xFFFF, UL)
+ #endif
+
+ /* SCAUSE */
+@@ -56,6 +65,7 @@
+ #define IRQ_U_EXT 8
+ #define IRQ_S_EXT 9
+ #define IRQ_M_EXT 11
++#define IRQ_S_PMU 17
+
+ #define EXC_INST_MISALIGNED 0
+ #define EXC_INST_ACCESS 1
+@@ -67,10 +77,18 @@
+ #define EXC_LOAD_PAGE_FAULT 13
+ #define EXC_STORE_PAGE_FAULT 15
+
++#define CSR_VSTART 0x8
++#define CSR_VXSAT 0x9
++#define CSR_VXRM 0xa
++#define CSR_VL 0xc20
++#define CSR_VTYPE 0xc21
++#define CSR_VLENB 0xc22
++
+ /* SIE (Interrupt Enable) and SIP (Interrupt Pending) flags */
+ #define SIE_SSIE (_AC(0x1, UL) << IRQ_S_SOFT)
+ #define SIE_STIE (_AC(0x1, UL) << IRQ_S_TIMER)
+ #define SIE_SEIE (_AC(0x1, UL) << IRQ_S_EXT)
++#define SIE_SMIE (_AC(0x1, UL) << IRQ_S_PMU)
+
+ #define CSR_CYCLE 0xc00
+ #define CSR_TIME 0xc01
+diff -Nur linux-5.4.36/arch/riscv/include/asm/elf.h kernel/arch/riscv/include/asm/elf.h
+--- linux-5.4.36/arch/riscv/include/asm/elf.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/elf.h 2020-09-03 06:01:13.902989796 +0000
+@@ -49,6 +49,9 @@
+ #define ELF_HWCAP (elf_hwcap)
+ extern unsigned long elf_hwcap;
+
++#define ELF_CORE_COPY_REGS(dest, regs) \
++ *(struct user_regs_struct *)&(dest) = (regs)->user_regs;
++
+ /*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+diff -Nur linux-5.4.36/arch/riscv/include/asm/fence.h kernel/arch/riscv/include/asm/fence.h
+--- linux-5.4.36/arch/riscv/include/asm/fence.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/fence.h 2020-09-03 06:01:13.902989796 +0000
+@@ -9,4 +9,8 @@
+ #define RISCV_RELEASE_BARRIER
+ #endif
+
++extern int c910_mmu_v1_flag;
++#define sync_mmu_v1() \
++ if (c910_mmu_v1_flag) asm volatile (".long 0x01b0000b");
++
+ #endif /* _ASM_RISCV_FENCE_H */
+diff -Nur linux-5.4.36/arch/riscv/include/asm/io.h kernel/arch/riscv/include/asm/io.h
+--- linux-5.4.36/arch/riscv/include/asm/io.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/io.h 2020-09-03 06:01:13.903989796 +0000
+@@ -12,19 +12,17 @@
+ #define _ASM_RISCV_IO_H
+
+ #include <linux/types.h>
++#include <asm/fence.h>
+ #include <asm/mmiowb.h>
+ #include <asm/pgtable.h>
+
+-extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
++extern void __iomem *ioremap_cache(phys_addr_t addr, size_t size);
++extern void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot);
+
+-/*
+- * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
+- * change the properties of memory regions. This should be fixed by the
+- * upcoming platform spec.
+- */
+-#define ioremap_nocache(addr, size) ioremap((addr), (size))
+-#define ioremap_wc(addr, size) ioremap((addr), (size))
+-#define ioremap_wt(addr, size) ioremap((addr), (size))
++#define ioremap(addr, size) __ioremap((addr), (size), pgprot_noncached(PAGE_KERNEL))
++#define ioremap_wc(addr, size) __ioremap((addr), (size), pgprot_writecombine(PAGE_KERNEL))
++#define ioremap_nocache(addr, size) ioremap((addr), (size))
++#define ioremap_cache ioremap_cache
+
+ extern void iounmap(volatile void __iomem *addr);
+
+@@ -32,26 +30,34 @@
+ #define __raw_writeb __raw_writeb
+ static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+ {
++ sync_mmu_v1();
+ asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
++ sync_mmu_v1();
+ }
+
+ #define __raw_writew __raw_writew
+ static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+ {
++ sync_mmu_v1();
+ asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
++ sync_mmu_v1();
+ }
+
+ #define __raw_writel __raw_writel
+ static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+ {
++ sync_mmu_v1();
+ asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
++ sync_mmu_v1();
+ }
+
+ #ifdef CONFIG_64BIT
+ #define __raw_writeq __raw_writeq
+ static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+ {
++ sync_mmu_v1();
+ asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
++ sync_mmu_v1();
+ }
+ #endif
+
+@@ -60,7 +66,9 @@
+ {
+ u8 val;
+
++ sync_mmu_v1();
+ asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
++ sync_mmu_v1();
+ return val;
+ }
+
+@@ -69,7 +77,9 @@
+ {
+ u16 val;
+
++ sync_mmu_v1();
+ asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
++ sync_mmu_v1();
+ return val;
+ }
+
+@@ -78,7 +88,9 @@
+ {
+ u32 val;
+
++ sync_mmu_v1();
+ asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
++ sync_mmu_v1();
+ return val;
+ }
+
+@@ -88,7 +100,9 @@
+ {
+ u64 val;
+
++ sync_mmu_v1();
+ asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
++ sync_mmu_v1();
+ return val;
+ }
+ #endif
+diff -Nur linux-5.4.36/arch/riscv/include/asm/kexec.h kernel/arch/riscv/include/asm/kexec.h
+--- linux-5.4.36/arch/riscv/include/asm/kexec.h 1970-01-01 00:00:00.000000000 +0000
++++ kernel/arch/riscv/include/asm/kexec.h 2020-09-03 06:01:13.903989796 +0000
+@@ -0,0 +1,99 @@
++/*
++ * kexec for riscv
++ *
++ * Copyright (C) 2020-2025 Alibaba Group Holding Limited
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef _RISCV_KEXEC_H
++#define _RISCV_KEXEC_H
++
++/* Maximum physical address we can use pages from */
++
++#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
++
++/* Maximum address we can reach in physical address mode */
++
++#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
++
++/* Maximum address we can use for the control code buffer */
++
++#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
++
++#define KEXEC_CONTROL_PAGE_SIZE 4096
++
++#define KEXEC_ARCH KEXEC_ARCH_RISCV
++
++#ifndef __ASSEMBLY__
++
++/**
++ * crash_setup_regs() - save registers for the panic kernel
++ *
++ * @newregs: registers are saved here
++ * @oldregs: registers to be saved (may be %NULL)
++ */
++static inline void crash_setup_regs(struct pt_regs *newregs,
++ struct pt_regs *oldregs)
++{
++ if (oldregs) {
++ memcpy(newregs, oldregs, sizeof(*newregs));
++ } else {
++ u64 tmp1, tmp2;
++
++ __asm__ __volatile__ (
++ "sd ra, 8(%2)\n"
++ "sd gp, 24(%2)\n"
++ "sd t0, 40(%2)\n"
++ "sd t1, 48(%2)\n"
++ "sd t2, 56(%2)\n"
++ "sd s0, 64(%2)\n"
++ "sd s1, 72(%2)\n"
++ "sd a0, 80(%2)\n"
++ "sd a1, 88(%2)\n"
++ "sd a2, 96(%2)\n"
++ "sd a3, 104(%2)\n"
++ "sd a4, 112(%2)\n"
++ "sd a5, 120(%2)\n"
++ "sd a6, 128(%2)\n"
++ "sd a7, 136(%2)\n"
++ "sd s2, 144(%2)\n"
++ "sd s3, 152(%2)\n"
++ "sd s4, 160(%2)\n"
++ "sd s5, 168(%2)\n"
++ "sd s6, 176(%2)\n"
++ "sd s7, 184(%2)\n"
++ "sd s8, 192(%2)\n"
++ "sd s9, 200(%2)\n"
++ "sd s10, 208(%2)\n"
++ "sd s11, 216(%2)\n"
++ "sd t3, 224(%2)\n"
++ "sd t4, 232(%2)\n"
++ "sd t5, 240(%2)\n"
++ "sd t6, 248(%2)\n"
++ "auipc %0, 0\n"
++ "sd %0, 0(%2)\n"
++ "csrr %0, sstatus\n"
++ "sd %0, 256(%2)\n"
++ "csrr %0, stval\n"
++ "sd %0, 264(%2)\n"
++ "csrr %0, scause\n"
++ "sd %0, 272(%2)\n"
++ "sd tp, 32(%2)\n"
++ "sd sp, 16(%2)\n"
++ : "=&r" (tmp1), "=&r" (tmp2)
++ : "r" (newregs)
++ : "memory"
++ );
++ }
++}
++
++static inline bool crash_is_nosave(unsigned long pfn) {return false; }
++static inline void crash_prepare_suspend(void) {}
++static inline void crash_post_resume(void) {}
++
++#endif /* __ASSEMBLY__ */
++
++#endif
+diff -Nur linux-5.4.36/arch/riscv/include/asm/mmu_context.h kernel/arch/riscv/include/asm/mmu_context.h
+--- linux-5.4.36/arch/riscv/include/asm/mmu_context.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/mmu_context.h 2020-09-03 06:01:13.903989796 +0000
+@@ -12,19 +12,20 @@
+
+ #include <linux/mm.h>
+ #include <linux/sched.h>
++#include <asm/tlbflush.h>
++#include <asm/cacheflush.h>
++#include <asm/asid.h>
++
++#define ASID_MASK ((1 << SATP_ASID_BITS) - 1)
++#define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK)
++
++#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; })
+
+ static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *task)
+ {
+ }
+
+-/* Initialize context-related info for a new mm_struct */
+-static inline int init_new_context(struct task_struct *task,
+- struct mm_struct *mm)
+-{
+- return 0;
+-}
+-
+ static inline void destroy_context(struct mm_struct *mm)
+ {
+ }
+@@ -32,6 +33,8 @@
+ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *task);
+
++void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
++
+ static inline void activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+ {
+diff -Nur linux-5.4.36/arch/riscv/include/asm/mmu.h kernel/arch/riscv/include/asm/mmu.h
+--- linux-5.4.36/arch/riscv/include/asm/mmu.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/mmu.h 2020-09-03 06:01:13.903989796 +0000
+@@ -11,6 +11,7 @@
+
+ typedef struct {
+ void *vdso;
++ atomic64_t asid;
+ #ifdef CONFIG_SMP
+ /* A local icache flush is needed before user execution can resume. */
+ cpumask_t icache_stale_mask;
+diff -Nur linux-5.4.36/arch/riscv/include/asm/perf_event.h kernel/arch/riscv/include/asm/perf_event.h
+--- linux-5.4.36/arch/riscv/include/asm/perf_event.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/perf_event.h 2020-09-03 06:01:13.903989796 +0000
+@@ -18,8 +18,8 @@
+ * The RISCV_MAX_COUNTERS parameter should be specified.
+ */
+
+-#ifdef CONFIG_RISCV_BASE_PMU
+-#define RISCV_MAX_COUNTERS 2
++#if defined(CONFIG_RISCV_BASE_PMU) || defined(CONFIG_THEAD_XT_V1_PMU)
++#define RISCV_MAX_COUNTERS 32
+ #endif
+
+ #ifndef RISCV_MAX_COUNTERS
+diff -Nur linux-5.4.36/arch/riscv/include/asm/pgtable-64.h kernel/arch/riscv/include/asm/pgtable-64.h
+--- linux-5.4.36/arch/riscv/include/asm/pgtable-64.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/pgtable-64.h 2020-09-03 06:01:13.903989796 +0000
+@@ -55,7 +55,7 @@
+
+ static inline unsigned long pud_page_vaddr(pud_t pud)
+ {
+- return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
++ return (unsigned long)pfn_to_virt((pud_val(pud) & _PAGE_CHG_MASK) >> _PAGE_PFN_SHIFT);
+ }
+
+ #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+diff -Nur linux-5.4.36/arch/riscv/include/asm/pgtable-bits.h kernel/arch/riscv/include/asm/pgtable-bits.h
+--- linux-5.4.36/arch/riscv/include/asm/pgtable-bits.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/pgtable-bits.h 2020-09-03 06:01:13.903989796 +0000
+@@ -24,6 +24,13 @@
+ #define _PAGE_DIRTY (1 << 7) /* Set by hardware on any write */
+ #define _PAGE_SOFT (1 << 8) /* Reserved for software */
+
++/* C-SKY extend */
++#define _PAGE_SEC (1UL << 59) /* Security */
++#define _PAGE_SHARE (1UL << 60) /* Shareable */
++#define _PAGE_BUF (1UL << 61) /* Bufferable */
++#define _PAGE_CACHE (1UL << 62) /* Cacheable */
++#define _PAGE_SO (1UL << 63) /* Strong Order */
++
+ #define _PAGE_SPECIAL _PAGE_SOFT
+ #define _PAGE_TABLE _PAGE_PRESENT
+
+@@ -38,6 +45,9 @@
+ /* Set of bits to preserve across pte_modify() */
+ #define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | \
+- _PAGE_USER | _PAGE_GLOBAL))
++ _PAGE_USER | _PAGE_GLOBAL | \
++ _PAGE_SEC | _PAGE_SHARE | \
++ _PAGE_BUF | _PAGE_CACHE | \
++ _PAGE_SO ))
+
+ #endif /* _ASM_RISCV_PGTABLE_BITS_H */
+diff -Nur linux-5.4.36/arch/riscv/include/asm/pgtable.h kernel/arch/riscv/include/asm/pgtable.h
+--- linux-5.4.36/arch/riscv/include/asm/pgtable.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/pgtable.h 2020-09-03 06:01:13.903989796 +0000
+@@ -35,9 +35,11 @@
+ #define FIRST_USER_ADDRESS 0
+
+ /* Page protection bits */
+-#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
++#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER | \
++ _PAGE_SHARE | _PAGE_CACHE | _PAGE_BUF)
+
+-#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
++#define PAGE_NONE __pgprot(_PAGE_PROT_NONE | _PAGE_CACHE | \
++ _PAGE_BUF | _PAGE_SHARE | _PAGE_SHARE)
+ #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
+ #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
+ #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
+@@ -54,11 +56,17 @@
+ #define _PAGE_KERNEL (_PAGE_READ \
+ | _PAGE_WRITE \
+ | _PAGE_PRESENT \
++ | _PAGE_GLOBAL \
+ | _PAGE_ACCESSED \
+- | _PAGE_DIRTY)
++ | _PAGE_DIRTY \
++ | _PAGE_CACHE \
++ | _PAGE_SHARE \
++ | _PAGE_BUF)
+
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
++#define PAGE_KERNEL_SO __pgprot((_PAGE_KERNEL | _PAGE_SO) & \
++ ~(_PAGE_CACHE | _PAGE_BUF))
+
+ #define PAGE_TABLE __pgprot(_PAGE_TABLE)
+
+@@ -167,18 +175,18 @@
+
+ static inline struct page *pmd_page(pmd_t pmd)
+ {
+- return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
++ return pfn_to_page((pmd_val(pmd) & _PAGE_CHG_MASK) >> _PAGE_PFN_SHIFT);
+ }
+
+ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+ {
+- return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
++ return (unsigned long)pfn_to_virt((pmd_val(pmd) & _PAGE_CHG_MASK) >> _PAGE_PFN_SHIFT);
+ }
+
+ /* Yields the page frame number (PFN) of a page table entry */
+ static inline unsigned long pte_pfn(pte_t pte)
+ {
+- return (pte_val(pte) >> _PAGE_PFN_SHIFT);
++ return ((pte_val(pte) & _PAGE_CHG_MASK) >> _PAGE_PFN_SHIFT);
+ }
+
+ #define pte_page(x) pfn_to_page(pte_pfn(x))
+@@ -405,6 +413,32 @@
+ return ptep_test_and_clear_young(vma, address, ptep);
+ }
+
++#define __HAVE_PHYS_MEM_ACCESS_PROT
++struct file;
++extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
++ unsigned long size, pgprot_t vma_prot);
++
++#define pgprot_noncached pgprot_noncached
++static inline pgprot_t pgprot_noncached(pgprot_t _prot)
++{
++ unsigned long prot = pgprot_val(_prot);
++
++ prot &= ~(_PAGE_CACHE | _PAGE_BUF);
++ prot |= _PAGE_SO;
++
++ return __pgprot(prot);
++}
++
++#define pgprot_writecombine pgprot_writecombine
++static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
++{
++ unsigned long prot = pgprot_val(_prot);
++
++ prot &= ~(_PAGE_CACHE | _PAGE_BUF);
++
++ return __pgprot(prot);
++}
++
+ /*
+ * Encode and decode a swap entry
+ *
+diff -Nur linux-5.4.36/arch/riscv/include/asm/processor.h kernel/arch/riscv/include/asm/processor.h
+--- linux-5.4.36/arch/riscv/include/asm/processor.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/processor.h 2020-09-03 06:01:13.903989796 +0000
+@@ -32,6 +32,7 @@
+ unsigned long sp; /* Kernel mode stack */
+ unsigned long s[12]; /* s[0]: frame pointer */
+ struct __riscv_d_ext_state fstate;
++ struct __riscv_v_state vstate;
+ };
+
+ #define INIT_THREAD { \
+diff -Nur linux-5.4.36/arch/riscv/include/asm/ptrace.h kernel/arch/riscv/include/asm/ptrace.h
+--- linux-5.4.36/arch/riscv/include/asm/ptrace.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/ptrace.h 2020-09-03 06:01:13.903989796 +0000
+@@ -12,38 +12,43 @@
+ #ifndef __ASSEMBLY__
+
+ struct pt_regs {
+- unsigned long sepc;
+- unsigned long ra;
+- unsigned long sp;
+- unsigned long gp;
+- unsigned long tp;
+- unsigned long t0;
+- unsigned long t1;
+- unsigned long t2;
+- unsigned long s0;
+- unsigned long s1;
+- unsigned long a0;
+- unsigned long a1;
+- unsigned long a2;
+- unsigned long a3;
+- unsigned long a4;
+- unsigned long a5;
+- unsigned long a6;
+- unsigned long a7;
+- unsigned long s2;
+- unsigned long s3;
+- unsigned long s4;
+- unsigned long s5;
+- unsigned long s6;
+- unsigned long s7;
+- unsigned long s8;
+- unsigned long s9;
+- unsigned long s10;
+- unsigned long s11;
+- unsigned long t3;
+- unsigned long t4;
+- unsigned long t5;
+- unsigned long t6;
++ union {
++ struct user_regs_struct user_regs;
++ struct {
++ unsigned long sepc;
++ unsigned long ra;
++ unsigned long sp;
++ unsigned long gp;
++ unsigned long tp;
++ unsigned long t0;
++ unsigned long t1;
++ unsigned long t2;
++ unsigned long s0;
++ unsigned long s1;
++ unsigned long a0;
++ unsigned long a1;
++ unsigned long a2;
++ unsigned long a3;
++ unsigned long a4;
++ unsigned long a5;
++ unsigned long a6;
++ unsigned long a7;
++ unsigned long s2;
++ unsigned long s3;
++ unsigned long s4;
++ unsigned long s5;
++ unsigned long s6;
++ unsigned long s7;
++ unsigned long s8;
++ unsigned long s9;
++ unsigned long s10;
++ unsigned long s11;
++ unsigned long t3;
++ unsigned long t4;
++ unsigned long t5;
++ unsigned long t6;
++ };
++ };
+ /* Supervisor CSRs */
+ unsigned long sstatus;
+ unsigned long sbadaddr;
+diff -Nur linux-5.4.36/arch/riscv/include/asm/sbi.h kernel/arch/riscv/include/asm/sbi.h
+--- linux-5.4.36/arch/riscv/include/asm/sbi.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/sbi.h 2020-09-03 06:01:13.903989796 +0000
+@@ -17,6 +17,7 @@
+ #define SBI_REMOTE_SFENCE_VMA 6
+ #define SBI_REMOTE_SFENCE_VMA_ASID 7
+ #define SBI_SHUTDOWN 8
++#define SBI_PMU 0x09000001
+
+ #define SBI_CALL(which, arg0, arg1, arg2, arg3) ({ \
+ register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); \
+@@ -94,4 +95,9 @@
+ SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
+ }
+
++static inline void sbi_set_pmu(int start)
++{
++ SBI_CALL_1(SBI_PMU, start);
++}
++
+ #endif
+diff -Nur linux-5.4.36/arch/riscv/include/asm/switch_to.h kernel/arch/riscv/include/asm/switch_to.h
+--- linux-5.4.36/arch/riscv/include/asm/switch_to.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/switch_to.h 2020-09-03 06:01:13.904989796 +0000
+@@ -63,6 +63,52 @@
+ #define __switch_to_aux(__prev, __next) do { } while (0)
+ #endif
+
++#ifdef CONFIG_VECTOR
++extern void __vstate_save(struct task_struct *save_to);
++extern void __vstate_restore(struct task_struct *restore_from);
++
++static inline void __vstate_clean(struct pt_regs *regs)
++{
++ regs->sstatus |= (regs->sstatus & ~(SR_VS)) | SR_VS_CLEAN;
++}
++
++static inline void vstate_save(struct task_struct *task,
++ struct pt_regs *regs)
++{
++ if ((regs->sstatus & SR_VS) == SR_VS_DIRTY) {
++ __vstate_save(task);
++ __vstate_clean(regs);
++ }
++}
++
++static inline void vstate_restore(struct task_struct *task,
++ struct pt_regs *regs)
++{
++ if ((regs->sstatus & SR_VS) != SR_VS_OFF) {
++ __vstate_restore(task);
++ __vstate_clean(regs);
++ }
++}
++
++static inline void __switch_to_vector(struct task_struct *prev,
++ struct task_struct *next)
++{
++ struct pt_regs *regs;
++
++ regs = task_pt_regs(prev);
++ if (unlikely(regs->sstatus & SR_SD))
++ vstate_save(prev, regs);
++ vstate_restore(next, task_pt_regs(next));
++}
++
++extern bool has_vector;
++#else
++#define has_vector false
++#define vstate_save(task, regs) do { } while (0)
++#define vstate_restore(task, regs) do { } while (0)
++#define __switch_to_vector(__prev, __next) do { } while (0)
++#endif
++
+ extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+
+@@ -72,6 +118,8 @@
+ struct task_struct *__next = (next); \
+ if (has_fpu) \
+ __switch_to_aux(__prev, __next); \
++ if (has_vector) \
++ __switch_to_vector(__prev, __next); \
+ ((last) = __switch_to(__prev, __next)); \
+ } while (0)
+
+diff -Nur linux-5.4.36/arch/riscv/include/asm/tlbflush.h kernel/arch/riscv/include/asm/tlbflush.h
+--- linux-5.4.36/arch/riscv/include/asm/tlbflush.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/asm/tlbflush.h 2020-09-03 06:01:13.904989796 +0000
+@@ -12,12 +12,18 @@
+
+ static inline void local_flush_tlb_all(void)
+ {
++ sync_mmu_v1();
++ sync_mmu_v1();
++ sync_mmu_v1();
+ __asm__ __volatile__ ("sfence.vma" : : : "memory");
+ }
+
+ /* Flush one page from local TLB */
+ static inline void local_flush_tlb_page(unsigned long addr)
+ {
++ sync_mmu_v1();
++ sync_mmu_v1();
++ sync_mmu_v1();
+ __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
+ }
+
+@@ -44,7 +50,17 @@
+ static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+ {
+- flush_tlb_all();
++ start &= PAGE_MASK;
++ end += PAGE_SIZE - 1;
++ end &= PAGE_MASK;
++
++ while (start < end) {
++ sync_mmu_v1();
++ sync_mmu_v1();
++ sync_mmu_v1();
++ __asm__ __volatile__ ("sfence.vma %0" : : "r" (start) : "memory");
++ start += PAGE_SIZE;
++ }
+ }
+
+ #endif /* _ASM_RISCV_TLBFLUSH_H */
+diff -Nur linux-5.4.36/arch/riscv/include/uapi/asm/elf.h kernel/arch/riscv/include/uapi/asm/elf.h
+--- linux-5.4.36/arch/riscv/include/uapi/asm/elf.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/uapi/asm/elf.h 2020-09-03 06:01:13.904989796 +0000
+@@ -24,6 +24,8 @@
+ typedef union __riscv_fp_state elf_fpregset_t;
+ #define ELF_NFPREG (sizeof(struct __riscv_d_ext_state) / sizeof(elf_fpreg_t))
+
++#define ELF_NVREG (sizeof(struct __riscv_v_state) / sizeof(elf_greg_t))
++
+ #if __riscv_xlen == 64
+ #define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info)
+ #define ELF_RISCV_R_TYPE(r_info) ELF64_R_TYPE(r_info)
+diff -Nur linux-5.4.36/arch/riscv/include/uapi/asm/hwcap.h kernel/arch/riscv/include/uapi/asm/hwcap.h
+--- linux-5.4.36/arch/riscv/include/uapi/asm/hwcap.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/uapi/asm/hwcap.h 2020-09-03 06:01:13.904989796 +0000
+@@ -21,5 +21,6 @@
+ #define COMPAT_HWCAP_ISA_F (1 << ('F' - 'A'))
+ #define COMPAT_HWCAP_ISA_D (1 << ('D' - 'A'))
+ #define COMPAT_HWCAP_ISA_C (1 << ('C' - 'A'))
++#define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A'))
+
+ #endif
+diff -Nur linux-5.4.36/arch/riscv/include/uapi/asm/ptrace.h kernel/arch/riscv/include/uapi/asm/ptrace.h
+--- linux-5.4.36/arch/riscv/include/uapi/asm/ptrace.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/uapi/asm/ptrace.h 2020-09-03 06:01:13.905989796 +0000
+@@ -77,6 +77,15 @@
+ struct __riscv_q_ext_state q;
+ };
+
++struct __riscv_v_state {
++ __uint128_t v[32];
++ unsigned long vstart;
++ unsigned long vxsat;
++ unsigned long vxrm;
++ unsigned long vl;
++ unsigned long vtype;
++};
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* _UAPI_ASM_RISCV_PTRACE_H */
+diff -Nur linux-5.4.36/arch/riscv/include/uapi/asm/sigcontext.h kernel/arch/riscv/include/uapi/asm/sigcontext.h
+--- linux-5.4.36/arch/riscv/include/uapi/asm/sigcontext.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/include/uapi/asm/sigcontext.h 2020-09-03 06:01:13.905989796 +0000
+@@ -17,6 +17,7 @@
+ struct sigcontext {
+ struct user_regs_struct sc_regs;
+ union __riscv_fp_state sc_fpregs;
++ struct __riscv_v_state sc_vregs;
+ };
+
+ #endif /* _UAPI_ASM_RISCV_SIGCONTEXT_H */
+diff -Nur linux-5.4.36/arch/riscv/Kconfig kernel/arch/riscv/Kconfig
+--- linux-5.4.36/arch/riscv/Kconfig 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/Kconfig 2020-09-14 01:45:17.782702016 +0000
+@@ -30,9 +30,11 @@
+ select GENERIC_STRNLEN_USER
+ select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_ATOMIC64 if !64BIT
++ select GENERIC_ALLOCATOR
+ select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ASM_MODVERSIONS
+ select HAVE_MEMBLOCK_NODE_MAP
++ select DMA_DIRECT_REMAP
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_PERF_EVENTS
+@@ -50,10 +52,16 @@
+ select PCI_DOMAINS_GENERIC if PCI
+ select PCI_MSI if PCI
+ select RISCV_TIMER
++ select DW_APB_TIMER_OF
+ select GENERIC_IRQ_MULTI_HANDLER
+ select GENERIC_ARCH_TOPOLOGY if SMP
+ select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_MMIOWB
++ select ARCH_HAS_DMA_PREP_COHERENT
++ select ARCH_HAS_SYNC_DMA_FOR_CPU
++ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
++ select ARCH_HAS_DMA_WRITE_COMBINE
++ select ARCH_HAS_DMA_MMAP_PGPROT
+ select HAVE_EBPF_JIT if 64BIT
+ select EDAC_SUPPORT
+ select ARCH_HAS_GIGANTIC_PAGE
+@@ -62,6 +70,7 @@
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
+ select HAVE_ARCH_MMAP_RND_BITS
+ select HAVE_COPY_THREAD_TLS
++ select RTC_DRV_XGENE
+
+ config ARCH_MMAP_RND_BITS_MIN
+ default 18 if 64BIT
+@@ -268,12 +277,41 @@
+
+ If you don't know what to do here, say Y.
+
++config VECTOR
++ bool "VECTOR support"
++ default n
++ help
++ Say N here if you want to disable all vector related procedure
++ in the kernel.
++
++ If you don't know what to do here, say Y.
++
+ endmenu
+
+ menu "Kernel features"
+
+ source "kernel/Kconfig.hz"
+
++config KEXEC
++ select KEXEC_CORE
++ bool "kexec system call"
++ ---help---
++ kexec is a system call that implements the ability to shutdown your
++ current kernel, and to start another kernel. It is like a reboot
++ but it is independent of the system firmware. And like a reboot
++ you can start any kernel with it, not just Linux.
++
++config CRASH_DUMP
++ bool "Build kdump crash kernel"
++ help
++ Generate crash dump after being started by kexec. This should
++ be normally only set in special crash dump kernels which are
++ loaded in the main kernel with kexec-tools into a specially
++ reserved region and then later executed after a crash by
++ kdump/kexec.
++
++ For more details see Documentation/kdump/kdump.txt
++
+ endmenu
+
+ menu "Boot options"
+diff -Nur linux-5.4.36/arch/riscv/kernel/asm-offsets.c kernel/arch/riscv/kernel/asm-offsets.c
+--- linux-5.4.36/arch/riscv/kernel/asm-offsets.c 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/kernel/asm-offsets.c 2020-09-03 06:01:13.905989796 +0000
+@@ -70,6 +70,45 @@
+ OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
+ OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
+
++ OFFSET(TASK_THREAD_V0, task_struct, thread.vstate.v[0]);
++ OFFSET(TASK_THREAD_V1, task_struct, thread.vstate.v[1]);
++ OFFSET(TASK_THREAD_V2, task_struct, thread.vstate.v[2]);
++ OFFSET(TASK_THREAD_V3, task_struct, thread.vstate.v[3]);
++ OFFSET(TASK_THREAD_V4, task_struct, thread.vstate.v[4]);
++ OFFSET(TASK_THREAD_V5, task_struct, thread.vstate.v[5]);
++ OFFSET(TASK_THREAD_V6, task_struct, thread.vstate.v[6]);
++ OFFSET(TASK_THREAD_V7, task_struct, thread.vstate.v[7]);
++ OFFSET(TASK_THREAD_V8, task_struct, thread.vstate.v[8]);
++ OFFSET(TASK_THREAD_V9, task_struct, thread.vstate.v[9]);
++ OFFSET(TASK_THREAD_V10, task_struct, thread.vstate.v[10]);
++ OFFSET(TASK_THREAD_V11, task_struct, thread.vstate.v[11]);
++ OFFSET(TASK_THREAD_V12, task_struct, thread.vstate.v[12]);
++ OFFSET(TASK_THREAD_V13, task_struct, thread.vstate.v[13]);
++ OFFSET(TASK_THREAD_V14, task_struct, thread.vstate.v[14]);
++ OFFSET(TASK_THREAD_V15, task_struct, thread.vstate.v[15]);
++ OFFSET(TASK_THREAD_V16, task_struct, thread.vstate.v[16]);
++ OFFSET(TASK_THREAD_V17, task_struct, thread.vstate.v[17]);
++ OFFSET(TASK_THREAD_V18, task_struct, thread.vstate.v[18]);
++ OFFSET(TASK_THREAD_V19, task_struct, thread.vstate.v[19]);
++ OFFSET(TASK_THREAD_V20, task_struct, thread.vstate.v[20]);
++ OFFSET(TASK_THREAD_V21, task_struct, thread.vstate.v[21]);
++ OFFSET(TASK_THREAD_V22, task_struct, thread.vstate.v[22]);
++ OFFSET(TASK_THREAD_V23, task_struct, thread.vstate.v[23]);
++ OFFSET(TASK_THREAD_V24, task_struct, thread.vstate.v[24]);
++ OFFSET(TASK_THREAD_V25, task_struct, thread.vstate.v[25]);
++ OFFSET(TASK_THREAD_V26, task_struct, thread.vstate.v[26]);
++ OFFSET(TASK_THREAD_V27, task_struct, thread.vstate.v[27]);
++ OFFSET(TASK_THREAD_V28, task_struct, thread.vstate.v[28]);
++ OFFSET(TASK_THREAD_V29, task_struct, thread.vstate.v[29]);
++ OFFSET(TASK_THREAD_V30, task_struct, thread.vstate.v[30]);
++ OFFSET(TASK_THREAD_V31, task_struct, thread.vstate.v[31]);
++ OFFSET(TASK_THREAD_VSTART, task_struct, thread.vstate.vstart);
++ OFFSET(TASK_THREAD_VXSAT, task_struct, thread.vstate.vxsat);
++ OFFSET(TASK_THREAD_VXRM, task_struct, thread.vstate.vxrm);
++ OFFSET(TASK_THREAD_VL, task_struct, thread.vstate.vl);
++ OFFSET(TASK_THREAD_VTYPE, task_struct, thread.vstate.vtype);
++ DEFINE(RISCV_VECTOR_VLENB, sizeof(__uint128_t));
++
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ OFFSET(PT_SEPC, pt_regs, sepc);
+ OFFSET(PT_RA, pt_regs, ra);
+@@ -171,6 +210,7 @@
+ - offsetof(struct task_struct, thread.ra)
+ );
+
++ /* Floating Point */
+ DEFINE(TASK_THREAD_F0_F0,
+ offsetof(struct task_struct, thread.fstate.f[0])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+@@ -304,6 +344,156 @@
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+
++ /* Vector */
++ DEFINE(TASK_THREAD_V0_V0,
++ offsetof(struct task_struct, thread.vstate.v[0])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V1_V0,
++ offsetof(struct task_struct, thread.vstate.v[1])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V2_V0,
++ offsetof(struct task_struct, thread.vstate.v[2])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V3_V0,
++ offsetof(struct task_struct, thread.vstate.v[3])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V4_V0,
++ offsetof(struct task_struct, thread.vstate.v[4])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V5_V0,
++ offsetof(struct task_struct, thread.vstate.v[5])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V6_V0,
++ offsetof(struct task_struct, thread.vstate.v[6])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V7_V0,
++ offsetof(struct task_struct, thread.vstate.v[7])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V8_V0,
++ offsetof(struct task_struct, thread.vstate.v[8])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V9_V0,
++ offsetof(struct task_struct, thread.vstate.v[9])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V10_V0,
++ offsetof(struct task_struct, thread.vstate.v[10])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V11_V0,
++ offsetof(struct task_struct, thread.vstate.v[11])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V12_V0,
++ offsetof(struct task_struct, thread.vstate.v[12])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V13_V0,
++ offsetof(struct task_struct, thread.vstate.v[13])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V14_V0,
++ offsetof(struct task_struct, thread.vstate.v[14])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V15_V0,
++ offsetof(struct task_struct, thread.vstate.v[15])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V16_V0,
++ offsetof(struct task_struct, thread.vstate.v[16])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V17_V0,
++ offsetof(struct task_struct, thread.vstate.v[17])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V18_V0,
++ offsetof(struct task_struct, thread.vstate.v[18])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V19_V0,
++ offsetof(struct task_struct, thread.vstate.v[19])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V20_V0,
++ offsetof(struct task_struct, thread.vstate.v[20])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V21_V0,
++ offsetof(struct task_struct, thread.vstate.v[21])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V22_V0,
++ offsetof(struct task_struct, thread.vstate.v[22])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V23_V0,
++ offsetof(struct task_struct, thread.vstate.v[23])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V24_V0,
++ offsetof(struct task_struct, thread.vstate.v[24])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V25_V0,
++ offsetof(struct task_struct, thread.vstate.v[25])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V26_V0,
++ offsetof(struct task_struct, thread.vstate.v[26])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V27_V0,
++ offsetof(struct task_struct, thread.vstate.v[27])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V28_V0,
++ offsetof(struct task_struct, thread.vstate.v[28])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V29_V0,
++ offsetof(struct task_struct, thread.vstate.v[29])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V30_V0,
++ offsetof(struct task_struct, thread.vstate.v[30])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_V31_V0,
++ offsetof(struct task_struct, thread.vstate.v[31])
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_VSTART_V0,
++ offsetof(struct task_struct, thread.vstate.vstart)
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_VXSAT_V0,
++ offsetof(struct task_struct, thread.vstate.vxsat)
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_VXRM_V0,
++ offsetof(struct task_struct, thread.vstate.vxrm)
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_VL_V0,
++ offsetof(struct task_struct, thread.vstate.vl)
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++ DEFINE(TASK_THREAD_VTYPE_V0,
++ offsetof(struct task_struct, thread.vstate.vtype)
++ - offsetof(struct task_struct, thread.vstate.v[0])
++ );
++
+ /*
+ * We allocate a pt_regs on the stack when entering the kernel. This
+ * ensures the alignment is sane.
+diff -Nur linux-5.4.36/arch/riscv/kernel/cpu.c kernel/arch/riscv/kernel/cpu.c
+--- linux-5.4.36/arch/riscv/kernel/cpu.c 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/kernel/cpu.c 2020-09-03 06:01:13.905989796 +0000
+@@ -48,7 +48,7 @@
+
+ static void print_isa(struct seq_file *f, const char *orig_isa)
+ {
+- static const char *ext = "mafdcsu";
++ static const char *ext = "mafdcvsu";
+ const char *isa = orig_isa;
+ const char *e;
+
+diff -Nur linux-5.4.36/arch/riscv/kernel/cpufeature.c kernel/arch/riscv/kernel/cpufeature.c
+--- linux-5.4.36/arch/riscv/kernel/cpufeature.c 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/kernel/cpufeature.c 2020-09-03 06:01:13.905989796 +0000
+@@ -17,6 +17,10 @@
+ bool has_fpu __read_mostly;
+ #endif
+
++#ifdef CONFIG_VECTOR
++bool has_vector __read_mostly;
++#endif
++
+ void riscv_fill_hwcap(void)
+ {
+ struct device_node *node;
+@@ -30,6 +34,7 @@
+ isa2hwcap['f'] = isa2hwcap['F'] = COMPAT_HWCAP_ISA_F;
+ isa2hwcap['d'] = isa2hwcap['D'] = COMPAT_HWCAP_ISA_D;
+ isa2hwcap['c'] = isa2hwcap['C'] = COMPAT_HWCAP_ISA_C;
++ isa2hwcap['v'] = isa2hwcap['V'] = COMPAT_HWCAP_ISA_V;
+
+ elf_hwcap = 0;
+
+@@ -44,7 +49,8 @@
+ continue;
+ }
+
+- for (i = 0; i < strlen(isa); ++i)
++ /* skip "rv64" */
++ for (i = 4; i < strlen(isa); ++i)
+ this_hwcap |= isa2hwcap[(unsigned char)(isa[i])];
+
+ /*
+@@ -71,4 +77,9 @@
+ if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D))
+ has_fpu = true;
+ #endif
++
++#ifdef CONFIG_VECTOR
++ if (elf_hwcap & COMPAT_HWCAP_ISA_V)
++ has_vector = true;
++#endif
+ }
+diff -Nur linux-5.4.36/arch/riscv/kernel/cpu-reset.h kernel/arch/riscv/kernel/cpu-reset.h
+--- linux-5.4.36/arch/riscv/kernel/cpu-reset.h 1970-01-01 00:00:00.000000000 +0000
++++ kernel/arch/riscv/kernel/cpu-reset.h 2020-09-03 06:01:13.905989796 +0000
+@@ -0,0 +1,58 @@
++/*
++ * CPU reset routines
++ *
++ * Copyright (C) 2020-2025 Alibaba Group Holding Limited
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef _RISCV_CPU_RESET_H
++#define _RISCV_CPU_RESET_H
++
++extern struct resource *standard_resources;
++void __cpu_soft_restart(unsigned long entry, unsigned long arg0, unsigned long arg1,
++ unsigned long arg2);
++
++__attribute__ ((optimize("-O0"))) static void __noreturn cpu_soft_restart(unsigned long entry,
++ unsigned long arg0,
++ unsigned long arg1,
++ unsigned long arg2)
++{
++ typeof(__cpu_soft_restart) *restart;
++ pgd_t *idmap_pgd;
++ pmd_t *idmap_pmd;
++ long pa_start, pa_end;
++ long i, j, m, n, delta;
++ long idmap_pmd_size;
++
++ pa_start = standard_resources->start;
++ pa_end = standard_resources->end;
++
++ idmap_pmd_size = (pa_end - pa_start + 1) / PMD_SIZE * sizeof(pmd_t);
++
++ idmap_pgd = (pgd_t *)__va((csr_read(CSR_SATP) & ((1UL<<44)-1))<< PAGE_SHIFT);
++ idmap_pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, get_order(idmap_pmd_size));
++
++ m = (pa_start >> PGDIR_SHIFT) % PTRS_PER_PGD;
++ n = (pa_end >> PGDIR_SHIFT) % PTRS_PER_PGD;
++
++ for (i = 0; m <= n; m++,i++)
++ idmap_pgd[m] = pfn_pgd(PFN_DOWN(__pa(idmap_pmd)) + i,
++ __pgprot(_PAGE_TABLE));
++
++ m = pa_start >> PMD_SHIFT;
++ n = (pa_end + 1) >> PMD_SHIFT;
++ delta = n - m;
++
++ for (i = (pa_start + 1) % PMD_SIZE,j=0; i <= delta; i++,j++)
++ idmap_pmd[i] = pfn_pmd(PFN_DOWN(pa_start + j * PMD_SIZE),
++ __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_EXEC));
++
++ restart = (void *)__pa_symbol(__cpu_soft_restart);
++ restart(entry, arg0, arg1, arg2);
++ unreachable();
++}
++
++#endif
+diff -Nur linux-5.4.36/arch/riscv/kernel/cpu-reset.S kernel/arch/riscv/kernel/cpu-reset.S
+--- linux-5.4.36/arch/riscv/kernel/cpu-reset.S 1970-01-01 00:00:00.000000000 +0000
++++ kernel/arch/riscv/kernel/cpu-reset.S 2020-09-03 06:01:13.905989796 +0000
+@@ -0,0 +1,23 @@
++/*
++ * CPU reset routines
++ *
++ * Copyright (C) 2020-2025 Alibaba Group Holding Limited
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++
++ENTRY(__cpu_soft_restart)
++ fence
++ fence.i
++ sfence.vma
++ mv s1, a0 //entry
++ mv a0, a1 //arg0
++ mv a1, a2 //arg1
++ mv a2, a3 //arg2
++ jr s1
++ ebreak
++ENDPROC(__cpu_soft_restart)
+diff -Nur linux-5.4.36/arch/riscv/kernel/crash_dump.c kernel/arch/riscv/kernel/crash_dump.c
+--- linux-5.4.36/arch/riscv/kernel/crash_dump.c 1970-01-01 00:00:00.000000000 +0000
++++ kernel/arch/riscv/kernel/crash_dump.c 2020-09-03 06:01:13.905989796 +0000
+@@ -0,0 +1,69 @@
++/*
++ * Routines for doing kexec-based kdump
++ *
++ * Copyright (C) 2020-2025 Alibaba Group Holding Limited
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/crash_dump.h>
++#include <linux/errno.h>
++#include <linux/io.h>
++#include <linux/memblock.h>
++#include <linux/uaccess.h>
++
++/**
++ * copy_oldmem_page() - copy one page from old kernel memory
++ * @pfn: page frame number to be copied
++ * @buf: buffer where the copied page is placed
++ * @csize: number of bytes to copy
++ * @offset: offset in bytes into the page
++ * @userbuf: if set, @buf is in a user address space
++ *
++ * This function copies one page from old kernel memory into buffer pointed by
++ * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
++ * copied or negative error in case of failure.
++ */
++ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
++ size_t csize, unsigned long offset,
++ int userbuf)
++{
++ void *vaddr;
++
++ if (!csize)
++ return 0;
++
++ vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
++ if (!vaddr)
++ return -ENOMEM;
++
++ if (userbuf) {
++ if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
++ memunmap(vaddr);
++ return -EFAULT;
++ }
++ } else {
++ memcpy(buf, vaddr + offset, csize);
++ }
++
++ memunmap(vaddr);
++
++ return csize;
++}
++
++/**
++ * elfcorehdr_read - read from ELF core header
++ * @buf: buffer where the data is placed
++ * @count: number of bytes to read
++ * @ppos: address in the memory
++ *
++ * This function reads @count bytes from elf core header which exists
++ * on crash dump kernel's memory.
++ */
++ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
++{
++ memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
++ return count;
++}
+diff -Nur linux-5.4.36/arch/riscv/kernel/entry.S kernel/arch/riscv/kernel/entry.S
+--- linux-5.4.36/arch/riscv/kernel/entry.S 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/kernel/entry.S 2020-09-03 06:01:13.905989796 +0000
+@@ -76,7 +76,7 @@
+ * Disable the FPU to detect illegal usage of floating point in kernel
+ * space.
+ */
+- li t0, SR_SUM | SR_FS
++ li t0, SR_SUM | SR_FS | SR_VS
+
+ REG_L s0, TASK_TI_USER_SP(tp)
+ csrrc s1, CSR_SSTATUS, t0
+diff -Nur linux-5.4.36/arch/riscv/kernel/head.S kernel/arch/riscv/kernel/head.S
+--- linux-5.4.36/arch/riscv/kernel/head.S 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/kernel/head.S 2020-09-03 06:01:13.905989796 +0000
+@@ -60,7 +60,7 @@
+ * Disable FPU to detect illegal usage of
+ * floating point in kernel space
+ */
+- li t0, SR_FS
++ li t0, SR_FS | SR_VS
+ csrc CSR_SSTATUS, t0
+
+ #ifdef CONFIG_SMP
+diff -Nur linux-5.4.36/arch/riscv/kernel/irq.c kernel/arch/riscv/kernel/irq.c
+--- linux-5.4.36/arch/riscv/kernel/irq.c 2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/kernel/irq.c 2020-09-03 06:01:13.905989796 +0000
+@@ -17,6 +17,7 @@
+ #define INTERRUPT_CAUSE_SOFTWARE IRQ_S_SOFT
+ #define INTERRUPT_CAUSE_TIMER IRQ_S_TIMER
+ #define INTERRUPT_CAUSE_EXTERNAL IRQ_S_EXT
++#define INTERRUPT_CAUSE_PMU IRQ_S_PMU
+
+ int arch_show_interrupts(struct seq_file *p, int prec)
+ {
+@@ -24,6 +25,7 @@
+ return 0;
+ }
+
++extern int riscv_pmu_handle_irq(void);
+ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
+ {
+ struct pt_regs *old_regs = set_irq_regs(regs);
+@@ -33,6 +35,11 @@
+ case INTERRUPT_CAUSE_TIMER:
+ riscv_timer_interrupt();
+ break;
++#ifdef CONFIG_THEAD_XT_V1_PMU
++ case INTERRUPT_CAUSE_PMU:
++ riscv_pmu_handle_irq();
++ break;
++#endif
+ #ifdef CONFIG_SMP
+ case INTERRUPT_CAUSE_SOFTWARE:
+ /*
+diff -Nur linux-5.4.36/arch/riscv/kernel/machine_kexec.c kernel/arch/riscv/kernel/machine_kexec.c
|
|
|
+--- linux-5.4.36/arch/riscv/kernel/machine_kexec.c 1970-01-01 00:00:00.000000000 +0000
|
|
|
++++ kernel/arch/riscv/kernel/machine_kexec.c 2020-09-03 06:01:13.906989796 +0000
|
|
|
+@@ -0,0 +1,163 @@
|
|
|
++/*
|
|
|
++ * kexec for riscv
|
|
|
++ *
|
|
|
++ * Copyright (C) 2020-2025 Alibaba Group Holding Limited
|
|
|
++ *
|
|
|
++ * This program is free software; you can redistribute it and/or modify
|
|
|
++ * it under the terms of the GNU General Public License version 2 as
|
|
|
++ * published by the Free Software Foundation.
|
|
|
++ */
|
|
|
++
|
|
|
++#include <linux/interrupt.h>
|
|
|
++#include <linux/irq.h>
|
|
|
++#include <linux/kernel.h>
|
|
|
++#include <linux/kexec.h>
|
|
|
++#include <linux/page-flags.h>
|
|
|
++#include <linux/smp.h>
|
|
|
++
|
|
|
++#include <asm/cacheflush.h>
|
|
|
++#include <asm/mmu.h>
|
|
|
++#include <asm/mmu_context.h>
|
|
|
++#include <asm/page.h>
|
|
|
++
|
|
|
++#include "cpu-reset.h"
|
|
|
++
|
|
|
++/* Global variables for the riscv_relocate_new_kernel routine. */
|
|
|
++extern const unsigned char riscv_relocate_new_kernel[];
|
|
|
++extern const unsigned long riscv_relocate_new_kernel_size;
|
|
|
++
|
|
|
++/*
|
|
|
++ * kexec_image_info - For debugging output.
|
|
|
++ */
|
|
|
++#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
|
|
|
++static void _kexec_image_info(const char *func, int line,
|
|
|
++ const struct kimage *kimage)
|
|
|
++{
|
|
|
++ unsigned long i;
|
|
|
++
|
|
|
++ pr_debug("%s:%d:\n", func, line);
|
|
|
++ pr_debug(" kexec kimage info:\n");
|
|
|
++ pr_debug(" type: %d\n", kimage->type);
|
|
|
++ pr_debug(" start: 0x%lx\n", kimage->start);
|
|
|
++ pr_debug(" head: 0x%lx\n", kimage->head);
|
|
|
++ pr_debug(" nr_segments: %lu\n", kimage->nr_segments);
|
|
|
++
|
|
|
++ for (i = 0; i < kimage->nr_segments; i++) {
|
|
|
++ pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
|
|
|
++ i,
|
|
|
++ kimage->segment[i].mem,
|
|
|
++ kimage->segment[i].mem + kimage->segment[i].memsz,
|
|
|
++ kimage->segment[i].memsz,
|
|
|
++ kimage->segment[i].memsz / PAGE_SIZE);
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++void machine_kexec_cleanup(struct kimage *kimage)
|
|
|
++{
|
|
|
++ /* Empty routine needed to avoid build errors. */
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * machine_kexec_prepare - Prepare for a kexec reboot.
|
|
|
++ *
|
|
|
++ * Called from the core kexec code when a kernel image is loaded.
|
|
|
++ * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
|
|
|
++ * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
|
|
|
++ */
|
|
|
++int machine_kexec_prepare(struct kimage *kimage)
|
|
|
++{
|
|
|
++ kexec_image_info(kimage);
|
|
|
++
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * machine_kexec - Do the kexec reboot.
|
|
|
++ *
|
|
|
++ * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
|
|
|
++ */
|
|
|
++void machine_kexec(struct kimage *kimage)
|
|
|
++{
|
|
|
++ phys_addr_t reboot_code_buffer_phys;
|
|
|
++ void *reboot_code_buffer;
|
|
|
++
|
|
|
++ reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
|
|
|
++ reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
|
|
|
++
|
|
|
++ pr_debug("%s:%d: control_code_page: 0x%lx\n", __func__, __LINE__,
|
|
|
++ (long)kimage->control_code_page);
|
|
|
++ pr_debug("%s:%d: reboot_code_buffer_phys: 0x%lx\n", __func__, __LINE__,
|
|
|
++ (long)reboot_code_buffer_phys);
|
|
|
++ pr_debug("%s:%d: reboot_code_buffer: 0x%lx\n", __func__, __LINE__,
|
|
|
++ (long)reboot_code_buffer);
|
|
|
++ pr_debug("%s:%d: relocate_new_kernel: 0x%lx\n", __func__, __LINE__,
|
|
|
++ (long)riscv_relocate_new_kernel);
|
|
|
++ pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
|
|
|
++ __func__, __LINE__, riscv_relocate_new_kernel_size,
|
|
|
++ riscv_relocate_new_kernel_size);
|
|
|
++
|
|
|
++ /*
|
|
|
++ * Copy riscv_relocate_new_kernel to the reboot_code_buffer for use
|
|
|
++ * after the kernel is shut down.
|
|
|
++ */
|
|
|
++ memcpy(reboot_code_buffer, riscv_relocate_new_kernel,
|
|
|
++ riscv_relocate_new_kernel_size);
|
|
|
++
|
|
|
++ pr_info("Bye!\n");
|
|
|
++
|
|
|
++ local_irq_disable();
|
|
|
++
|
|
|
++ cpu_soft_restart(reboot_code_buffer_phys, kimage->head,
|
|
|
++ kimage->segment[0].mem, kimage->segment[2].mem);
|
|
|
++
|
|
|
++ BUG(); /* Should never get here. */
|
|
|
++}
|
|
|
++
|
|
|
++static void machine_kexec_mask_interrupts(void)
|
|
|
++{
|
|
|
++ unsigned int i;
|
|
|
++ struct irq_desc *desc;
|
|
|
++
|
|
|
++ for_each_irq_desc(i, desc) {
|
|
|
++ struct irq_chip *chip;
|
|
|
++ int ret;
|
|
|
++
|
|
|
++ chip = irq_desc_get_chip(desc);
|
|
|
++ if (!chip)
|
|
|
++ continue;
|
|
|
++
|
|
|
++ /*
|
|
|
++ * First try to remove the active state. If this
|
|
|
++ * fails, try to EOI the interrupt.
|
|
|
++ */
|
|
|
++ ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
|
|
|
++
|
|
|
++ if (ret && irqd_irq_inprogress(&desc->irq_data) &&
|
|
|
++ chip->irq_eoi)
|
|
|
++ chip->irq_eoi(&desc->irq_data);
|
|
|
++
|
|
|
++ if (chip->irq_mask)
|
|
|
++ chip->irq_mask(&desc->irq_data);
|
|
|
++
|
|
|
++ if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
|
|
|
++ chip->irq_disable(&desc->irq_data);
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * machine_crash_shutdown - shutdown non-crashing cpus and save registers
|
|
|
++ */
|
|
|
++extern void crash_smp_send_stop(void);
|
|
|
++void machine_crash_shutdown(struct pt_regs *regs)
|
|
|
++{
|
|
|
++ local_irq_disable();
|
|
|
++
|
|
|
++ /* shutdown non-crashing cpus */
|
|
|
++ crash_smp_send_stop();
|
|
|
++
|
|
|
++ /* for crashing cpu */
|
|
|
++ crash_save_cpu(regs, smp_processor_id());
|
|
|
++ machine_kexec_mask_interrupts();
|
|
|
++
|
|
|
++ pr_info("Starting crashdump kernel...\n");
|
|
|
++}
|
|
|
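
machine_kexec_prepare() above only logs the image layout and always accepts it. Purely as a sketch of the kind of validation that could live there (under the assumption that every segment must land in memblock-known System RAM; the helper below is illustrative and not part of this patch):

#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/memblock.h>

/* Illustrative only: reject a kimage whose segments fall outside memory
 * known to memblock. */
static int kexec_segments_in_ram(const struct kimage *kimage)
{
	unsigned long i;

	for (i = 0; i < kimage->nr_segments; i++) {
		phys_addr_t base = kimage->segment[i].mem;
		phys_addr_t size = kimage->segment[i].memsz;

		if (!memblock_is_region_memory(base, size))
			return -EADDRNOTAVAIL;
	}
	return 0;
}
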
+diff -Nur linux-5.4.36/arch/riscv/kernel/Makefile kernel/arch/riscv/kernel/Makefile
|
|
|
+--- linux-5.4.36/arch/riscv/kernel/Makefile 2020-04-29 14:33:25.000000000 +0000
|
|
|
++++ kernel/arch/riscv/kernel/Makefile 2020-09-03 06:01:13.905989796 +0000
|
|
|
+@@ -30,6 +30,7 @@
|
|
|
+ obj-y += vdso/
|
|
|
+
|
|
|
+ obj-$(CONFIG_FPU) += fpu.o
|
|
|
++obj-$(CONFIG_VECTOR) += vector.o
|
|
|
+ obj-$(CONFIG_SMP) += smpboot.o
|
|
|
+ obj-$(CONFIG_SMP) += smp.o
|
|
|
+ obj-$(CONFIG_MODULES) += module.o
|
|
|
+@@ -42,4 +43,8 @@
|
|
|
+ obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
|
|
|
+ obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
|
|
|
+
|
|
|
++obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \
|
|
|
++ cpu-reset.o
|
|
|
++obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
|
|
|
++
|
|
|
+ clean:
|
|
|
+diff -Nur linux-5.4.36/arch/riscv/kernel/module.c kernel/arch/riscv/kernel/module.c
+--- linux-5.4.36/arch/riscv/kernel/module.c	2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/kernel/module.c	2020-09-03 06:01:13.906989796 +0000
+@@ -253,7 +253,7 @@
+ 	pr_err(
+ 	  "%s: The unexpected relocation type 'R_RISCV_ALIGN' from PC = %p\n",
+ 	  me->name, location);
+-	return -EINVAL;
++	return 0; /* Do not return -EINVAL when relocation type is R_RISCV_ALIGN */
+ }
+
+ static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
+diff -Nur linux-5.4.36/arch/riscv/kernel/perf_event.c kernel/arch/riscv/kernel/perf_event.c
+--- linux-5.4.36/arch/riscv/kernel/perf_event.c	2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/kernel/perf_event.c	2020-09-03 06:01:13.906989796 +0000
+@@ -477,9 +477,9 @@
+ 	if (of_id)
+ 		riscv_pmu = of_id->data;
+ 	of_node_put(node);
++	perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW);
+ 	}
+
+-	perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW);
+ 	return 0;
+ }
+ arch_initcall(init_hw_perf_events);
+diff -Nur linux-5.4.36/arch/riscv/kernel/process.c kernel/arch/riscv/kernel/process.c
|
|
|
+--- linux-5.4.36/arch/riscv/kernel/process.c 2020-04-29 14:33:25.000000000 +0000
|
|
|
++++ kernel/arch/riscv/kernel/process.c 2020-09-03 06:01:13.906989796 +0000
|
|
|
+@@ -74,6 +74,12 @@
|
|
|
+ */
|
|
|
+ fstate_restore(current, regs);
|
|
|
+ }
|
|
|
++
|
|
|
++ if (has_vector) {
|
|
|
++ regs->sstatus |= SR_VS_INITIAL;
|
|
|
++ vstate_restore(current, regs);
|
|
|
++ }
|
|
|
++
|
|
|
+ regs->sepc = pc;
|
|
|
+ regs->sp = sp;
|
|
|
+ set_fs(USER_DS);
|
|
|
+diff -Nur linux-5.4.36/arch/riscv/kernel/ptrace.c kernel/arch/riscv/kernel/ptrace.c
|
|
|
+--- linux-5.4.36/arch/riscv/kernel/ptrace.c 2020-04-29 14:33:25.000000000 +0000
|
|
|
++++ kernel/arch/riscv/kernel/ptrace.c 2020-09-03 06:01:13.906989796 +0000
|
|
|
+@@ -26,6 +26,9 @@
|
|
|
+ #ifdef CONFIG_FPU
|
|
|
+ REGSET_F,
|
|
|
+ #endif
|
|
|
++#ifdef CONFIG_VECTOR
|
|
|
++ REGSET_V,
|
|
|
++#endif
|
|
|
+ };
|
|
|
+
|
|
|
+ static int riscv_gpr_get(struct task_struct *target,
|
|
|
+@@ -92,6 +95,34 @@
|
|
|
+ }
|
|
|
+ #endif
|
|
|
+
|
|
|
++#ifdef CONFIG_VECTOR
|
|
|
++static int riscv_vr_get(struct task_struct *target,
|
|
|
++ const struct user_regset *regset,
|
|
|
++ unsigned int pos, unsigned int count,
|
|
|
++ void *kbuf, void __user *ubuf)
|
|
|
++{
|
|
|
++ int ret;
|
|
|
++ struct __riscv_v_state *vstate = &target->thread.vstate;
|
|
|
++
|
|
|
++ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, vstate, 0,
|
|
|
++ offsetof(struct __riscv_v_state, vtype));
|
|
|
++ return ret;
|
|
|
++}
|
|
|
++
|
|
|
++static int riscv_vr_set(struct task_struct *target,
|
|
|
++ const struct user_regset *regset,
|
|
|
++ unsigned int pos, unsigned int count,
|
|
|
++ const void *kbuf, const void __user *ubuf)
|
|
|
++{
|
|
|
++ int ret;
|
|
|
++ struct __riscv_v_state *vstate = &target->thread.vstate;
|
|
|
++
|
|
|
++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate, 0,
|
|
|
++ offsetof(struct __riscv_v_state, vtype));
|
|
|
++ return ret;
|
|
|
++}
|
|
|
++#endif
|
|
|
++
|
|
|
+ static const struct user_regset riscv_user_regset[] = {
|
|
|
+ [REGSET_X] = {
|
|
|
+ .core_note_type = NT_PRSTATUS,
|
|
|
+@@ -111,6 +142,16 @@
|
|
|
+ .set = &riscv_fpr_set,
|
|
|
+ },
|
|
|
+ #endif
|
|
|
++#ifdef CONFIG_VECTOR
|
|
|
++ [REGSET_V] = {
|
|
|
++ .core_note_type = NT_RISCV_VECTOR,
|
|
|
++ .n = ELF_NVREG,
|
|
|
++ .size = sizeof(elf_greg_t),
|
|
|
++ .align = sizeof(elf_greg_t),
|
|
|
++ .get = &riscv_vr_get,
|
|
|
++ .set = &riscv_vr_set,
|
|
|
++ },
|
|
|
++#endif
|
|
|
+ };
|
|
|
+
|
|
|
+ static const struct user_regset_view riscv_user_native_view = {
|
|
|
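
With REGSET_V wired into riscv_user_regset[], a debugger can fetch a stopped tracee's vector state through the regset interface. A userspace sketch follows; NT_RISCV_VECTOR and struct __riscv_v_state are assumed to come from the uapi headers added elsewhere in this patch (they are not in a stock glibc <elf.h>), so treat the snippet as illustrative usage rather than a fixed ABI:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* NT_RISCV_VECTOR: note type exported by the patched include/uapi/linux/elf.h;
 * define it locally if the installed headers do not yet carry it. */

/* Read the tracee's vector regset into buf (at least
 * sizeof(struct __riscv_v_state) bytes). Returns 0 on success, -1 on error. */
static long read_vector_regset(pid_t pid, void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };

	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_RISCV_VECTOR, &iov);
}
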
+diff -Nur linux-5.4.36/arch/riscv/kernel/relocate_kernel.S kernel/arch/riscv/kernel/relocate_kernel.S
|
|
|
+--- linux-5.4.36/arch/riscv/kernel/relocate_kernel.S 1970-01-01 00:00:00.000000000 +0000
|
|
|
++++ kernel/arch/riscv/kernel/relocate_kernel.S 2020-09-03 06:01:13.906989796 +0000
|
|
|
+@@ -0,0 +1,32 @@
|
|
|
++/*
|
|
|
++ * kexec for riscv
|
|
|
++ *
|
|
|
++ * Copyright (C) 2020-2025 Alibaba Group Holding Limited
|
|
|
++ *
|
|
|
++ * This program is free software; you can redistribute it and/or modify
|
|
|
++ * it under the terms of the GNU General Public License version 2 as
|
|
|
++ * published by the Free Software Foundation.
|
|
|
++ */
|
|
|
++
|
|
|
++#include <linux/kexec.h>
|
|
|
++#include <linux/linkage.h>
|
|
|
++
|
|
|
++#include <asm/kexec.h>
|
|
|
++#include <asm/page.h>
|
|
|
++
|
|
|
++ENTRY(riscv_relocate_new_kernel)
|
|
|
++ /* Start new image. */
|
|
|
++ mv s1, a1
|
|
|
++ mv a0, zero
|
|
|
++ mv a1, a2
|
|
|
++ mv a2, zero
|
|
|
++ mv a3, zero
|
|
|
++ jr s1
|
|
|
++ENDPROC(riscv_relocate_new_kernel)
|
|
|
++
|
|
|
++.Lcopy_end:
|
|
|
++.org KEXEC_CONTROL_PAGE_SIZE
|
|
|
++
|
|
|
++.globl riscv_relocate_new_kernel_size
|
|
|
++riscv_relocate_new_kernel_size:
|
|
|
++ .quad .Lcopy_end - riscv_relocate_new_kernel
|
|
|
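
For reference, the register shuffling in riscv_relocate_new_kernel amounts to this hand-off, assuming cpu_soft_restart() (declared in cpu-reset.h, not shown in this excerpt) forwards its trailing arguments in a0–a2: the stub is entered with a0 = kimage->head, a1 = segment[0].mem and a2 = segment[2].mem, saves the entry point from a1, zeroes a0, moves a2 into a1 and jumps, i.e. the new image starts with the usual RISC-V (hartid, dtb) convention if segment[2] holds the device tree. Expressed as a C prototype, purely as an illustration:

/* Assumed entry signature of the relocated image (illustrative only): */
typedef void (*kexec_entry_fn)(unsigned long hartid, unsigned long fdt_phys);
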
+diff -Nur linux-5.4.36/arch/riscv/kernel/reset.c kernel/arch/riscv/kernel/reset.c
+--- linux-5.4.36/arch/riscv/kernel/reset.c	2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/kernel/reset.c	2020-09-03 06:01:13.906989796 +0000
+@@ -31,3 +31,7 @@
+ {
+ 	pm_power_off();
+ }
++
++void machine_shutdown(void)
++{
++}
+diff -Nur linux-5.4.36/arch/riscv/kernel/setup.c kernel/arch/riscv/kernel/setup.c
+--- linux-5.4.36/arch/riscv/kernel/setup.c	2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/kernel/setup.c	2020-09-03 06:01:13.906989796 +0000
+@@ -38,7 +38,7 @@
+ #endif
+
+ /* The lucky hart to first increment this variable will boot the other cores */
+-atomic_t hart_lottery;
++__section(.data) atomic_t hart_lottery;
+ unsigned long boot_cpu_hartid;
+
+ void __init parse_dtb(void)
+diff -Nur linux-5.4.36/arch/riscv/kernel/signal.c kernel/arch/riscv/kernel/signal.c
|
|
|
+--- linux-5.4.36/arch/riscv/kernel/signal.c 2020-04-29 14:33:25.000000000 +0000
|
|
|
++++ kernel/arch/riscv/kernel/signal.c 2020-09-03 06:01:13.906989796 +0000
|
|
|
+@@ -78,6 +78,41 @@
|
|
|
+ #define restore_fp_state(task, regs) (0)
|
|
|
+ #endif
|
|
|
+
|
|
|
++#ifdef CONFIG_VECTOR
|
|
|
++static long restore_v_state(struct pt_regs *regs,
|
|
|
++ struct __riscv_v_state *sc_vregs)
|
|
|
++{
|
|
|
++ long err;
|
|
|
++ struct __riscv_v_state __user *state = sc_vregs;
|
|
|
++
|
|
|
++ err = __copy_from_user(¤t->thread.vstate, state, sizeof(*state));
|
|
|
++ if (unlikely(err))
|
|
|
++ return err;
|
|
|
++
|
|
|
++ vstate_restore(current, regs);
|
|
|
++
|
|
|
++ return err;
|
|
|
++}
|
|
|
++
|
|
|
++static long save_v_state(struct pt_regs *regs,
|
|
|
++ struct __riscv_v_state *sc_vregs)
|
|
|
++{
|
|
|
++ long err;
|
|
|
++ struct __riscv_v_state __user *state = sc_vregs;
|
|
|
++
|
|
|
++ vstate_save(current, regs);
|
|
|
++ err = __copy_to_user(state, ¤t->thread.vstate, sizeof(*state));
|
|
|
++ if (unlikely(err))
|
|
|
++ return err;
|
|
|
++
|
|
|
++ return err;
|
|
|
++}
|
|
|
++#else
|
|
|
++#define save_v_state(task, regs) (0)
|
|
|
++#define restore_v_state(task, regs) (0)
|
|
|
++#endif
|
|
|
++
|
|
|
++
|
|
|
+ static long restore_sigcontext(struct pt_regs *regs,
|
|
|
+ struct sigcontext __user *sc)
|
|
|
+ {
|
|
|
+@@ -87,6 +122,9 @@
|
|
|
+ /* Restore the floating-point state. */
|
|
|
+ if (has_fpu)
|
|
|
+ err |= restore_fp_state(regs, &sc->sc_fpregs);
|
|
|
++ /* Restore the vector state. */
|
|
|
++ if (has_vector)
|
|
|
++ err |= restore_v_state(regs, &sc->sc_vregs);
|
|
|
+ return err;
|
|
|
+ }
|
|
|
+
|
|
|
+@@ -140,6 +178,9 @@
|
|
|
+ /* Save the floating-point state. */
|
|
|
+ if (has_fpu)
|
|
|
+ err |= save_fp_state(regs, &sc->sc_fpregs);
|
|
|
++ /* Save the vector state. */
|
|
|
++ if (has_vector)
|
|
|
++ err |= save_v_state(regs, &sc->sc_vregs);
|
|
|
+ return err;
|
|
|
+ }
|
|
|
+
|
|
|
+diff -Nur linux-5.4.36/arch/riscv/kernel/smpboot.c kernel/arch/riscv/kernel/smpboot.c
|
|
|
+--- linux-5.4.36/arch/riscv/kernel/smpboot.c 2020-04-29 14:33:25.000000000 +0000
|
|
|
++++ kernel/arch/riscv/kernel/smpboot.c 2020-09-03 06:01:13.906989796 +0000
|
|
|
+@@ -33,8 +33,8 @@
|
|
|
+
|
|
|
+ #include "head.h"
|
|
|
+
|
|
|
+-void *__cpu_up_stack_pointer[NR_CPUS];
|
|
|
+-void *__cpu_up_task_pointer[NR_CPUS];
|
|
|
++__section(.data) void *__cpu_up_stack_pointer[NR_CPUS];
|
|
|
++__section(.data) void *__cpu_up_task_pointer[NR_CPUS];
|
|
|
+ static DECLARE_COMPLETION(cpu_running);
|
|
|
+
|
|
|
+ void __init smp_prepare_boot_cpu(void)
|
|
|
+@@ -102,6 +102,7 @@
|
|
|
+ int hartid = cpuid_to_hartid_map(cpu);
|
|
|
+ tidle->thread_info.cpu = cpu;
|
|
|
+
|
|
|
++ SBI_CALL_1(0x09000003, hartid);
|
|
|
+ /*
|
|
|
+ * On RISC-V systems, all harts boot on their own accord. Our _start
|
|
|
+ * selects the first hart to boot the kernel and causes the remainder
|
|
|
+diff -Nur linux-5.4.36/arch/riscv/kernel/vector.S kernel/arch/riscv/kernel/vector.S
|
|
|
+--- linux-5.4.36/arch/riscv/kernel/vector.S 1970-01-01 00:00:00.000000000 +0000
|
|
|
++++ kernel/arch/riscv/kernel/vector.S 2020-09-03 06:01:13.907989796 +0000
|
|
|
+@@ -0,0 +1,84 @@
|
|
|
++/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
++/*
|
|
|
++ * Copyright (C) 2012 Regents of the University of California
|
|
|
++ * Copyright (C) 2017 SiFive
|
|
|
++ * Copyright (C) 2019 T-HEAD
|
|
|
++ *
|
|
|
++ * This program is free software; you can redistribute it and/or
|
|
|
++ * modify it under the terms of the GNU General Public License
|
|
|
++ * as published by the Free Software Foundation, version 2.
|
|
|
++ *
|
|
|
++ * This program is distributed in the hope that it will be useful,
|
|
|
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
++ * GNU General Public License for more details.
|
|
|
++ */
|
|
|
++
|
|
|
++#include <linux/linkage.h>
|
|
|
++
|
|
|
++#include <asm/asm.h>
|
|
|
++#include <asm/csr.h>
|
|
|
++#include <asm/asm-offsets.h>
|
|
|
++
|
|
|
++ENTRY(__vstate_save)
|
|
|
++ li a2, TASK_THREAD_V0
|
|
|
++ add a0, a0, a2
|
|
|
++
|
|
|
++ li t1, (SR_VS | SR_FS)
|
|
|
++ csrs sstatus, t1
|
|
|
++
|
|
|
++ csrr t0, CSR_VSTART
|
|
|
++ sd t0, TASK_THREAD_VSTART_V0(a0)
|
|
|
++ csrr t0, CSR_VXSAT
|
|
|
++ sd t0, TASK_THREAD_VXSAT_V0(a0)
|
|
|
++ csrr t0, CSR_VXRM
|
|
|
++ sd t0, TASK_THREAD_VXRM_V0(a0)
|
|
|
++ csrr t0, CSR_VL
|
|
|
++ sd t0, TASK_THREAD_VL_V0(a0)
|
|
|
++ csrr t0, CSR_VTYPE
|
|
|
++ sd t0, TASK_THREAD_VTYPE_V0(a0)
|
|
|
++
|
|
|
++ vsetvli t0, x0, e8,m8
|
|
|
++ vsb.v v0, (a0)
|
|
|
++ addi a0, a0, RISCV_VECTOR_VLENB*8
|
|
|
++ vsb.v v8, (a0)
|
|
|
++ addi a0, a0, RISCV_VECTOR_VLENB*8
|
|
|
++ vsb.v v16, (a0)
|
|
|
++ addi a0, a0, RISCV_VECTOR_VLENB*8
|
|
|
++ vsb.v v24, (a0)
|
|
|
++
|
|
|
++ csrc sstatus, t1
|
|
|
++ ret
|
|
|
++ENDPROC(__vstate_save)
|
|
|
++
|
|
|
++ENTRY(__vstate_restore)
|
|
|
++ li a2, TASK_THREAD_V0
|
|
|
++ add a0, a0, a2
|
|
|
++ mv t2, a0
|
|
|
++
|
|
|
++ li t1, (SR_VS | SR_FS)
|
|
|
++ csrs sstatus, t1
|
|
|
++
|
|
|
++ vsetvli t0, x0, e8,m8
|
|
|
++ vlb.v v0, (a0)
|
|
|
++ addi a0, a0, RISCV_VECTOR_VLENB*8
|
|
|
++ vlb.v v8, (a0)
|
|
|
++ addi a0, a0, RISCV_VECTOR_VLENB*8
|
|
|
++ vlb.v v16, (a0)
|
|
|
++ addi a0, a0, RISCV_VECTOR_VLENB*8
|
|
|
++ vlb.v v24, (a0)
|
|
|
++
|
|
|
++ mv a0, t2
|
|
|
++ ld t0, TASK_THREAD_VSTART_V0(a0)
|
|
|
++ csrw CSR_VSTART, t0
|
|
|
++ ld t0, TASK_THREAD_VXSAT_V0(a0)
|
|
|
++ csrw CSR_VXSAT, t0
|
|
|
++ ld t0, TASK_THREAD_VXRM_V0(a0)
|
|
|
++ csrw CSR_VXRM, t0
|
|
|
++ ld t0, TASK_THREAD_VL_V0(a0)
|
|
|
++ ld t2, TASK_THREAD_VTYPE_V0(a0)
|
|
|
++ vsetvl t0, t0, t2
|
|
|
++
|
|
|
++ csrc sstatus, t1
|
|
|
++ ret
|
|
|
++ENDPROC(__vstate_restore)
|
|
|
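
A note on the save/restore pattern above: "vsetvli t0, x0, e8,m8" selects 8-bit elements with LMUL=8, so each vsb.v/vlb.v moves a whole group of eight vector registers, i.e. 8 * VLENB bytes. Four such accesses at v0, v8, v16 and v24, with the pointer advanced by RISCV_VECTOR_VLENB*8 between them, therefore cover all 32 registers (32 * VLENB bytes in total). The vl/vtype pair is restored last through a single "vsetvl t0, t0, t2", since vtype is not directly writable as a CSR.
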
+diff -Nur linux-5.4.36/arch/riscv/Makefile kernel/arch/riscv/Makefile
|
|
|
+--- linux-5.4.36/arch/riscv/Makefile 2020-04-29 14:33:25.000000000 +0000
|
|
|
++++ kernel/arch/riscv/Makefile 2020-09-03 06:01:13.901989796 +0000
|
|
|
+@@ -35,12 +35,19 @@
|
|
|
+ endif
|
|
|
+
|
|
|
+ # ISA string setting
|
|
|
+-riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
|
|
|
+-riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
|
|
|
+-riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
|
|
|
+-riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
|
|
|
+-KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
|
|
|
+-KBUILD_AFLAGS += -march=$(riscv-march-y)
|
|
|
++riscv-march-cflags-$(CONFIG_ARCH_RV32I) := rv32ima
|
|
|
++riscv-march-cflags-$(CONFIG_ARCH_RV64I) := rv64ima
|
|
|
++riscv-march-cflags-$(CONFIG_RISCV_ISA_C) := $(riscv-march-cflags-y)c
|
|
|
++
|
|
|
++riscv-march-aflags-$(CONFIG_ARCH_RV32I) := rv32ima
|
|
|
++riscv-march-aflags-$(CONFIG_ARCH_RV64I) := rv64ima
|
|
|
++riscv-march-aflags-$(CONFIG_FPU) := $(riscv-march-aflags-y)fd
|
|
|
++riscv-march-aflags-$(CONFIG_RISCV_ISA_C) := $(riscv-march-aflags-y)c
|
|
|
++riscv-march-aflags-$(CONFIG_VECTOR) := $(riscv-march-aflags-y)v
|
|
|
++riscv-march-aflags-$(CONFIG_RISCV_ISA_THEAD) := $(riscv-march-aflags-y)xthead
|
|
|
++
|
|
|
++KBUILD_CFLAGS += -march=$(riscv-march-cflags-y) -Wa,-march=$(riscv-march-aflags-y)
|
|
|
++KBUILD_AFLAGS += -march=$(riscv-march-aflags-y)
|
|
|
+
|
|
|
+ KBUILD_CFLAGS += -mno-save-restore
|
|
|
+ KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET)
|
|
|
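
The effect of the cflags/aflags split above, as a worked example: with CONFIG_ARCH_RV64I and CONFIG_RISCV_ISA_C set, C files are compiled with -march=rv64imac, while the assembler is handed the richer string rv64imafdcvxthead (plus fd only when CONFIG_FPU is on) both for .S files via KBUILD_AFLAGS and for inline assembly in C files via -Wa,-march=. The compiler is thus never allowed to emit vector or vendor instructions on its own, yet the hand-written vector.S and the .long-encoded T-HEAD cache operations still assemble.
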
+diff -Nur linux-5.4.36/arch/riscv/mm/asid.c kernel/arch/riscv/mm/asid.c
|
|
|
+--- linux-5.4.36/arch/riscv/mm/asid.c 1970-01-01 00:00:00.000000000 +0000
|
|
|
++++ kernel/arch/riscv/mm/asid.c 2020-09-03 06:01:13.908989796 +0000
|
|
|
+@@ -0,0 +1,189 @@
|
|
|
++// SPDX-License-Identifier: GPL-2.0
|
|
|
++/*
|
|
|
++ * Generic ASID allocator.
|
|
|
++ *
|
|
|
++ * Based on arch/arm/mm/context.c
|
|
|
++ *
|
|
|
++ * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
|
|
|
++ * Copyright (C) 2012 ARM Ltd.
|
|
|
++ */
|
|
|
++
|
|
|
++#include <linux/slab.h>
|
|
|
++#include <linux/mm_types.h>
|
|
|
++
|
|
|
++#include <asm/asid.h>
|
|
|
++
|
|
|
++#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)
|
|
|
++
|
|
|
++#define ASID_MASK(info) (~GENMASK((info)->bits - 1, 0))
|
|
|
++#define ASID_FIRST_VERSION(info) (1UL << ((info)->bits))
|
|
|
++
|
|
|
++#define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
|
|
|
++#define idx2asid(info, idx) (((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))
|
|
|
++
|
|
|
++static void flush_context(struct asid_info *info)
|
|
|
++{
|
|
|
++ int i;
|
|
|
++ u64 asid;
|
|
|
++
|
|
|
++ /* Update the list of reserved ASIDs and the ASID bitmap. */
|
|
|
++ bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));
|
|
|
++
|
|
|
++ for_each_possible_cpu(i) {
|
|
|
++ asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
|
|
|
++ /*
|
|
|
++ * If this CPU has already been through a
|
|
|
++ * rollover, but hasn't run another task in
|
|
|
++ * the meantime, we must preserve its reserved
|
|
|
++ * ASID, as this is the only trace we have of
|
|
|
++ * the process it is still running.
|
|
|
++ */
|
|
|
++ if (asid == 0)
|
|
|
++ asid = reserved_asid(info, i);
|
|
|
++ __set_bit(asid2idx(info, asid), info->map);
|
|
|
++ reserved_asid(info, i) = asid;
|
|
|
++ }
|
|
|
++
|
|
|
++ /*
|
|
|
++ * Queue a TLB invalidation for each CPU to perform on next
|
|
|
++ * context-switch
|
|
|
++ */
|
|
|
++ cpumask_setall(&info->flush_pending);
|
|
|
++}
|
|
|
++
|
|
|
++static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
|
|
|
++ u64 newasid)
|
|
|
++{
|
|
|
++ int cpu;
|
|
|
++ bool hit = false;
|
|
|
++
|
|
|
++ /*
|
|
|
++ * Iterate over the set of reserved ASIDs looking for a match.
|
|
|
++ * If we find one, then we can update our mm to use newasid
|
|
|
++ * (i.e. the same ASID in the current generation) but we can't
|
|
|
++ * exit the loop early, since we need to ensure that all copies
|
|
|
++ * of the old ASID are updated to reflect the mm. Failure to do
|
|
|
++ * so could result in us missing the reserved ASID in a future
|
|
|
++ * generation.
|
|
|
++ */
|
|
|
++ for_each_possible_cpu(cpu) {
|
|
|
++ if (reserved_asid(info, cpu) == asid) {
|
|
|
++ hit = true;
|
|
|
++ reserved_asid(info, cpu) = newasid;
|
|
|
++ }
|
|
|
++ }
|
|
|
++
|
|
|
++ return hit;
|
|
|
++}
|
|
|
++
|
|
|
++static u64 new_context(struct asid_info *info, atomic64_t *pasid,
|
|
|
++ struct mm_struct *mm)
|
|
|
++{
|
|
|
++ static u32 cur_idx = 1;
|
|
|
++ u64 asid = atomic64_read(pasid);
|
|
|
++ u64 generation = atomic64_read(&info->generation);
|
|
|
++
|
|
|
++ if (asid != 0) {
|
|
|
++ u64 newasid = generation | (asid & ~ASID_MASK(info));
|
|
|
++
|
|
|
++ /*
|
|
|
++ * If our current ASID was active during a rollover, we
|
|
|
++ * can continue to use it and this was just a false alarm.
|
|
|
++ */
|
|
|
++ if (check_update_reserved_asid(info, asid, newasid))
|
|
|
++ return newasid;
|
|
|
++
|
|
|
++ /*
|
|
|
++ * We had a valid ASID in a previous life, so try to re-use
|
|
|
++ * it if possible.
|
|
|
++ */
|
|
|
++ if (!__test_and_set_bit(asid2idx(info, asid), info->map))
|
|
|
++ return newasid;
|
|
|
++ }
|
|
|
++
|
|
|
++ /*
|
|
|
++ * Allocate a free ASID. If we can't find one, take a note of the
|
|
|
++ * currently active ASIDs and mark the TLBs as requiring flushes. We
|
|
|
++ * always count from ASID #2 (index 1), as we use ASID #0 when setting
|
|
|
++ * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
|
|
|
++ * pairs.
|
|
|
++ */
|
|
|
++ asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
|
|
|
++ if (asid != NUM_CTXT_ASIDS(info))
|
|
|
++ goto set_asid;
|
|
|
++
|
|
|
++ /* We're out of ASIDs, so increment the global generation count */
|
|
|
++ generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
|
|
|
++ &info->generation);
|
|
|
++ flush_context(info);
|
|
|
++
|
|
|
++ /* We have more ASIDs than CPUs, so this will always succeed */
|
|
|
++ asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);
|
|
|
++
|
|
|
++set_asid:
|
|
|
++ __set_bit(asid, info->map);
|
|
|
++ cur_idx = asid;
|
|
|
++ cpumask_clear(mm_cpumask(mm));
|
|
|
++ return idx2asid(info, asid) | generation;
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * Generate a new ASID for the context.
|
|
|
++ *
|
|
|
++ * @pasid: Pointer to the current ASID batch allocated. It will be updated
|
|
|
++ * with the new ASID batch.
|
|
|
++ * @cpu: current CPU ID. Must have been acquired through get_cpu()
|
|
|
++ */
|
|
|
++void asid_new_context(struct asid_info *info, atomic64_t *pasid,
|
|
|
++ unsigned int cpu, struct mm_struct *mm)
|
|
|
++{
|
|
|
++ unsigned long flags;
|
|
|
++ u64 asid;
|
|
|
++
|
|
|
++ raw_spin_lock_irqsave(&info->lock, flags);
|
|
|
++ /* Check that our ASID belongs to the current generation. */
|
|
|
++ asid = atomic64_read(pasid);
|
|
|
++ if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
|
|
|
++ asid = new_context(info, pasid, mm);
|
|
|
++ atomic64_set(pasid, asid);
|
|
|
++ }
|
|
|
++
|
|
|
++ if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
|
|
|
++ info->flush_cpu_ctxt_cb();
|
|
|
++
|
|
|
++ atomic64_set(&active_asid(info, cpu), asid);
|
|
|
++ cpumask_set_cpu(cpu, mm_cpumask(mm));
|
|
|
++ raw_spin_unlock_irqrestore(&info->lock, flags);
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * Initialize the ASID allocator
|
|
|
++ *
|
|
|
++ * @info: Pointer to the asid allocator structure
|
|
|
++ * @bits: Number of ASIDs available
|
|
|
++ * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
|
|
|
++ * allocated contiguously for a given context. This value should be a power of
|
|
|
++ * 2.
|
|
|
++ */
|
|
|
++int asid_allocator_init(struct asid_info *info,
|
|
|
++ u32 bits, unsigned int asid_per_ctxt,
|
|
|
++ void (*flush_cpu_ctxt_cb)(void))
|
|
|
++{
|
|
|
++ info->bits = bits;
|
|
|
++ info->ctxt_shift = ilog2(asid_per_ctxt);
|
|
|
++ info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
|
|
|
++ /*
|
|
|
++ * Expect allocation after rollover to fail if we don't have at least
|
|
|
++ * one more ASID than CPUs. ASID #0 is always reserved.
|
|
|
++ */
|
|
|
++ WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
|
|
|
++ atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
|
|
|
++ info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
|
|
|
++ sizeof(*info->map), GFP_KERNEL);
|
|
|
++ if (!info->map)
|
|
|
++ return -ENOMEM;
|
|
|
++
|
|
|
++ raw_spin_lock_init(&info->lock);
|
|
|
++
|
|
|
++ return 0;
|
|
|
++}
|
|
|
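
A worked example of the encoding used by the allocator, derived from the macros above: with bits = 16 and asid_per_ctxt = 1, NUM_ASIDS and ASID_FIRST_VERSION are both 0x10000 and ctxt_shift is 0. A per-mm value of 0x30041 therefore decodes as generation 0x30000 and hardware ASID 0x41. A rollover bumps the generation by 0x10000, rebuilds the bitmap from the currently active and reserved ASIDs, and forces every CPU through flush_cpu_ctxt_cb() on its next context switch.
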
+diff -Nur linux-5.4.36/arch/riscv/mm/cacheflush.c kernel/arch/riscv/mm/cacheflush.c
|
|
|
+--- linux-5.4.36/arch/riscv/mm/cacheflush.c 2020-04-29 14:33:25.000000000 +0000
|
|
|
++++ kernel/arch/riscv/mm/cacheflush.c 2020-09-03 06:01:13.908989796 +0000
|
|
|
+@@ -74,3 +74,24 @@
|
|
|
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags))
|
|
|
+ flush_icache_all();
|
|
|
+ }
|
|
|
++
|
|
|
++#define sync_is() asm volatile (".long 0x01b0000b")
|
|
|
++void dma_wbinv_range(unsigned long start, unsigned long end)
|
|
|
++{
|
|
|
++ register unsigned long i asm("a0") = start & ~(L1_CACHE_BYTES - 1);
|
|
|
++
|
|
|
++ for (; i < end; i += L1_CACHE_BYTES)
|
|
|
++ asm volatile (".long 0x02b5000b"); /* dcache.cipa a0 */
|
|
|
++
|
|
|
++ sync_is();
|
|
|
++}
|
|
|
++
|
|
|
++void dma_wb_range(unsigned long start, unsigned long end)
|
|
|
++{
|
|
|
++ register unsigned long i asm("a0") = start & ~(L1_CACHE_BYTES - 1);
|
|
|
++
|
|
|
++ for (; i < end; i += L1_CACHE_BYTES)
|
|
|
++ asm volatile (".long 0x0295000b"); /* dcache.cpa a0 */
|
|
|
++
|
|
|
++ sync_is();
|
|
|
++}
|
|
|
+diff -Nur linux-5.4.36/arch/riscv/mm/context.c kernel/arch/riscv/mm/context.c
|
|
|
+--- linux-5.4.36/arch/riscv/mm/context.c 2020-04-29 14:33:25.000000000 +0000
|
|
|
++++ kernel/arch/riscv/mm/context.c 2020-09-03 06:01:13.908989796 +0000
|
|
|
+@@ -8,6 +8,7 @@
|
|
|
+ #include <asm/tlbflush.h>
|
|
|
+ #include <asm/cacheflush.h>
|
|
|
+ #include <asm/mmu_context.h>
|
|
|
++#include <asm/fence.h>
|
|
|
+
|
|
|
+ /*
|
|
|
+ * When necessary, performs a deferred icache flush for the given MM context,
|
|
|
+@@ -44,6 +45,8 @@
|
|
|
+ struct task_struct *task)
|
|
|
+ {
|
|
|
+ unsigned int cpu;
|
|
|
++ unsigned long asid;
|
|
|
++ unsigned long x;
|
|
|
+
|
|
|
+ if (unlikely(prev == next))
|
|
|
+ return;
|
|
|
+@@ -58,8 +61,50 @@
|
|
|
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
|
|
|
+ cpumask_set_cpu(cpu, mm_cpumask(next));
|
|
|
+
|
|
|
+- csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
|
|
|
+- local_flush_tlb_all();
|
|
|
++ check_and_switch_context(next, cpu);
|
|
|
++ asid = (next->context.asid.counter & SATP_ASID_MASK)
|
|
|
++ << SATP_ASID_SHIFT;
|
|
|
++
|
|
|
++ x = virt_to_pfn(next->pgd) | SATP_MODE | asid;
|
|
|
++ sync_mmu_v1();
|
|
|
++ sync_mmu_v1();
|
|
|
++ sync_mmu_v1();
|
|
|
++ csr_write(sptbr, x);
|
|
|
+
|
|
|
+ flush_icache_deferred(next);
|
|
|
+ }
|
|
|
++
|
|
|
++static DEFINE_PER_CPU(atomic64_t, active_asids);
|
|
|
++static DEFINE_PER_CPU(u64, reserved_asids);
|
|
|
++
|
|
|
++struct asid_info asid_info;
|
|
|
++
|
|
|
++void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
|
|
|
++{
|
|
|
++ asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
|
|
|
++}
|
|
|
++
|
|
|
++static void asid_flush_cpu_ctxt(void)
|
|
|
++{
|
|
|
++ local_flush_tlb_all();
|
|
|
++}
|
|
|
++
|
|
|
++static int asids_init(void)
|
|
|
++{
|
|
|
++ BUG_ON(((1 << SATP_ASID_BITS) - 1) <= num_possible_cpus());
|
|
|
++
|
|
|
++ if (asid_allocator_init(&asid_info, SATP_ASID_BITS, 1,
|
|
|
++ asid_flush_cpu_ctxt))
|
|
|
++ panic("Unable to initialize ASID allocator for %lu ASIDs\n",
|
|
|
++ NUM_ASIDS(&asid_info));
|
|
|
++
|
|
|
++ asid_info.active = &active_asids;
|
|
|
++ asid_info.reserved = &reserved_asids;
|
|
|
++
|
|
|
++ pr_info("ASID allocator initialised with %lu entries\n",
|
|
|
++ NUM_CTXT_ASIDS(&asid_info));
|
|
|
++
|
|
|
++ local_flush_tlb_all();
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++early_initcall(asids_init);
|
|
|
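
context.c above, and tlbflush.c later in this patch, rely on a cpu_asid() helper defined in the patched asm/mmu_context.h, which is not part of this excerpt. A plausible minimal form, stated as an assumption rather than the actual implementation:

/* Assumed helper (the real definition is in the patched asm/mmu_context.h):
 * extract the hardware ASID bits from the allocator's 64-bit counter. */
static inline unsigned long cpu_asid(struct mm_struct *mm)
{
	return atomic64_read(&mm->context.asid) & SATP_ASID_MASK;
}
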
+diff -Nur linux-5.4.36/arch/riscv/mm/dma-mapping.c kernel/arch/riscv/mm/dma-mapping.c
|
|
|
+--- linux-5.4.36/arch/riscv/mm/dma-mapping.c 1970-01-01 00:00:00.000000000 +0000
|
|
|
++++ kernel/arch/riscv/mm/dma-mapping.c 2020-09-03 06:01:13.908989796 +0000
|
|
|
+@@ -0,0 +1,70 @@
|
|
|
++// SPDX-License-Identifier: GPL-2.0
|
|
|
++// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
|
|
|
++
|
|
|
++#include <linux/cache.h>
|
|
|
++#include <linux/dma-mapping.h>
|
|
|
++#include <linux/dma-contiguous.h>
|
|
|
++#include <linux/dma-noncoherent.h>
|
|
|
++#include <linux/genalloc.h>
|
|
|
++#include <linux/highmem.h>
|
|
|
++#include <linux/io.h>
|
|
|
++#include <linux/mm.h>
|
|
|
++#include <linux/scatterlist.h>
|
|
|
++#include <linux/types.h>
|
|
|
++#include <linux/version.h>
|
|
|
++#include <asm/cache.h>
|
|
|
++
|
|
|
++void arch_dma_prep_coherent(struct page *page, size_t size)
|
|
|
++{
|
|
|
++ void *ptr = page_address(page);
|
|
|
++
|
|
|
++ memset(ptr, 0, size);
|
|
|
++ dma_wbinv_range(page_to_phys(page), page_to_phys(page) + size);
|
|
|
++}
|
|
|
++
|
|
|
++static inline void cache_op(phys_addr_t paddr, size_t size,
|
|
|
++ void (*fn)(unsigned long start, unsigned long end))
|
|
|
++{
|
|
|
++ unsigned long start = (unsigned long)paddr;
|
|
|
++
|
|
|
++ fn(start, start + size);
|
|
|
++}
|
|
|
++
|
|
|
++void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
|
|
|
++ size_t size, enum dma_data_direction dir)
|
|
|
++{
|
|
|
++ switch (dir) {
|
|
|
++ case DMA_TO_DEVICE:
|
|
|
++ cache_op(paddr, size, dma_wb_range);
|
|
|
++ break;
|
|
|
++ case DMA_FROM_DEVICE:
|
|
|
++ case DMA_BIDIRECTIONAL:
|
|
|
++ cache_op(paddr, size, dma_wbinv_range);
|
|
|
++ break;
|
|
|
++ default:
|
|
|
++ BUG();
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
|
|
|
++ size_t size, enum dma_data_direction dir)
|
|
|
++{
|
|
|
++ switch (dir) {
|
|
|
++ case DMA_TO_DEVICE:
|
|
|
++ return;
|
|
|
++ case DMA_FROM_DEVICE:
|
|
|
++ case DMA_BIDIRECTIONAL:
|
|
|
++ cache_op(paddr, size, dma_wbinv_range);
|
|
|
++ break;
|
|
|
++ default:
|
|
|
++ BUG();
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
|
|
|
++ unsigned long attrs)
|
|
|
++{
|
|
|
++ if (attrs & DMA_ATTR_WRITE_COMBINE)
|
|
|
++ return pgprot_writecombine(prot);
|
|
|
++ return pgprot_noncached(prot);
|
|
|
++}
|
|
|
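
Driver-side view of the hooks above (illustrative only, not part of the patch): on this non-coherent SoC a streaming mapping reaches arch_sync_dma_for_device(), which means dma_wb_range() (clean) for a to-device transfer and dma_wbinv_range() (clean + invalidate) for from-device or bidirectional ones.

#include <linux/dma-mapping.h>

/* Sketch: map a CPU buffer for a device-bound DMA transfer. The map call
 * ends up in arch_sync_dma_for_device() -> cache_op(..., dma_wb_range). */
static int example_map_for_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the device with "dma" and wait for completion ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
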
+diff -Nur linux-5.4.36/arch/riscv/mm/fault.c kernel/arch/riscv/mm/fault.c
+--- linux-5.4.36/arch/riscv/mm/fault.c	2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/mm/fault.c	2020-09-03 06:01:13.908989796 +0000
+@@ -232,7 +232,7 @@
+ 	 * of a task switch.
+ 	 */
+ 	index = pgd_index(addr);
+-	pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
++	pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP) & SATP_PPN) + index;
+ 	pgd_k = init_mm.pgd + index;
+
+ 	if (!pgd_present(*pgd_k))
|
|
|
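
The one-line change above matters because switch_mm() now writes an ASID into satp: on RV64 the register packs MODE in the top bits, the 16-bit ASID below it, and the PPN in the low 44 bits, so the vmalloc-fault path must mask off the upper fields before converting the value into a page-frame number. For reference, the mask assumed here (the authoritative definition is in asm/csr.h):

#define SATP_PPN	_AC(0x00000FFFFFFFFFFF, UL)	/* low 44 bits of satp */
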
+diff -Nur linux-5.4.36/arch/riscv/mm/init.c kernel/arch/riscv/mm/init.c
|
|
|
+--- linux-5.4.36/arch/riscv/mm/init.c 2020-04-29 14:33:25.000000000 +0000
|
|
|
++++ kernel/arch/riscv/mm/init.c 2020-09-03 06:01:13.908989796 +0000
|
|
|
+@@ -13,6 +13,8 @@
|
|
|
+ #include <linux/of_fdt.h>
|
|
|
+ #include <linux/libfdt.h>
|
|
|
+
|
|
|
++#include <linux/crash_dump.h>
|
|
|
++
|
|
|
+ #include <asm/fixmap.h>
|
|
|
+ #include <asm/tlbflush.h>
|
|
|
+ #include <asm/sections.h>
|
|
|
+@@ -27,6 +29,167 @@
|
|
|
+
|
|
|
+ extern char _start[];
|
|
|
+
|
|
|
++#ifdef CONFIG_KEXEC_CORE
|
|
|
++static void __init reserve_crashkernel(void)
|
|
|
++{
|
|
|
++ unsigned long long crash_base, crash_size;
|
|
|
++ int ret;
|
|
|
++
|
|
|
++ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
|
|
|
++ &crash_size, &crash_base);
|
|
|
++ if (ret || !crash_size)
|
|
|
++ return;
|
|
|
++
|
|
|
++ if (crash_base == 0) {
|
|
|
++ crash_base = memblock_find_in_range(0, __pfn_to_phys(max_low_pfn)-1,
|
|
|
++ crash_size, SZ_2M);
|
|
|
++ pr_debug("crash_base: 0x%llx\n", crash_base);
|
|
|
++ }
|
|
|
++
|
|
|
++ memblock_reserve(crash_base, crash_size);
|
|
|
++
|
|
|
++ pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
|
|
|
++ crash_base, crash_base + crash_size, crash_size >> 20);
|
|
|
++
|
|
|
++ crashk_res.start = crash_base;
|
|
|
++ crashk_res.end = crash_base + crash_size - 1;
|
|
|
++}
|
|
|
++#else
|
|
|
++static void __init reserve_crashkernel(void)
|
|
|
++{
|
|
|
++}
|
|
|
++#endif /* CONFIG_KEXEC_CORE */
|
|
|
++
|
|
|
++#ifdef CONFIG_CRASH_DUMP
|
|
|
++static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
|
|
|
++ const char *uname, int depth, void *data)
|
|
|
++{
|
|
|
++ const __be32 *reg;
|
|
|
++ int len;
|
|
|
++
|
|
|
++ if (depth != 1 || strcmp(uname, "chosen") != 0)
|
|
|
++ return 0;
|
|
|
++
|
|
|
++ reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
|
|
|
++ if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
|
|
|
++ return 1;
|
|
|
++
|
|
|
++ elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, ®);
|
|
|
++ elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, ®);
|
|
|
++
|
|
|
++ return 1;
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * reserve_elfcorehdr() - reserves memory for elf core header
|
|
|
++ *
|
|
|
++ * This function reserves the memory occupied by an elf core header
|
|
|
++ * described in the device tree. This region contains all the
|
|
|
++ * information about primary kernel's core image and is used by a dump
|
|
|
++ * capture kernel to access the system memory on primary kernel.
|
|
|
++ */
|
|
|
++static void __init reserve_elfcorehdr(void)
|
|
|
++{
|
|
|
++ of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);
|
|
|
++
|
|
|
++ if (!elfcorehdr_size)
|
|
|
++ return;
|
|
|
++
|
|
|
++ if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
|
|
|
++ pr_warn("elfcorehdr is overlapped\n");
|
|
|
++ return;
|
|
|
++ }
|
|
|
++
|
|
|
++ memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
|
|
|
++
|
|
|
++ pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
|
|
|
++ elfcorehdr_size >> 10, elfcorehdr_addr);
|
|
|
++}
|
|
|
++#else
|
|
|
++static void __init reserve_elfcorehdr(void)
|
|
|
++{
|
|
|
++}
|
|
|
++#endif /* CONFIG_CRASH_DUMP */
|
|
|
++
|
|
|
++/*
|
|
|
++ * Standard memory resources
|
|
|
++ */
|
|
|
++static struct resource mem_res[] = {
|
|
|
++ {
|
|
|
++ .name = "Kernel code",
|
|
|
++ .start = 0,
|
|
|
++ .end = 0,
|
|
|
++ .flags = IORESOURCE_SYSTEM_RAM
|
|
|
++ },
|
|
|
++ {
|
|
|
++ .name = "Kernel data",
|
|
|
++ .start = 0,
|
|
|
++ .end = 0,
|
|
|
++ .flags = IORESOURCE_SYSTEM_RAM
|
|
|
++ }
|
|
|
++};
|
|
|
++
|
|
|
++#define kernel_code mem_res[0]
|
|
|
++#define kernel_data mem_res[1]
|
|
|
++
|
|
|
++static int num_standard_resources;
|
|
|
++struct resource *standard_resources;
|
|
|
++
|
|
|
++extern char _start[];
|
|
|
++static void __init request_standard_resources(void)
|
|
|
++{
|
|
|
++ struct memblock_region *region;
|
|
|
++ struct resource *res;
|
|
|
++ unsigned long i = 0;
|
|
|
++ size_t res_size;
|
|
|
++
|
|
|
++ kernel_code.start = __pa_symbol(_start);
|
|
|
++ kernel_code.end = __pa_symbol(__init_end - 1);
|
|
|
++ kernel_data.start = __pa_symbol(_sdata);
|
|
|
++ kernel_data.end = __pa_symbol(_end - 1);
|
|
|
++
|
|
|
++ num_standard_resources = memblock.memory.cnt;
|
|
|
++ res_size = num_standard_resources * sizeof(*standard_resources);
|
|
|
++ standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
|
|
|
++ if (!standard_resources)
|
|
|
++ panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
|
|
|
++
|
|
|
++ for_each_memblock(memory, region) {
|
|
|
++ res = &standard_resources[i++];
|
|
|
++ if (memblock_is_nomap(region)) {
|
|
|
++ res->name = "reserved";
|
|
|
++ res->flags = IORESOURCE_MEM;
|
|
|
++ } else {
|
|
|
++ res->name = "System RAM";
|
|
|
++ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
|
|
|
++ }
|
|
|
++ res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
|
|
|
++ res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
|
|
|
++
|
|
|
++ request_resource(&iomem_resource, res);
|
|
|
++
|
|
|
++ if (kernel_code.start >= res->start &&
|
|
|
++ kernel_code.end <= res->end)
|
|
|
++ request_resource(res, &kernel_code);
|
|
|
++ if (kernel_data.start >= res->start &&
|
|
|
++ kernel_data.end <= res->end)
|
|
|
++ request_resource(res, &kernel_data);
|
|
|
++#ifdef CONFIG_KEXEC_CORE
|
|
|
++ /* Userspace will find "Crash kernel" region in /proc/iomem. */
|
|
|
++ if (crashk_res.end && crashk_res.start >= res->start &&
|
|
|
++ crashk_res.end <= res->end)
|
|
|
++ request_resource(res, &crashk_res);
|
|
|
++#endif
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++void __init riscv_kdump_crash(void)
|
|
|
++{
|
|
|
++ reserve_crashkernel();
|
|
|
++ reserve_elfcorehdr();
|
|
|
++ request_standard_resources();
|
|
|
++}
|
|
|
++
|
|
|
+ static void __init zone_sizes_init(void)
|
|
|
+ {
|
|
|
+ unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
|
|
|
+@@ -170,8 +333,8 @@
|
|
|
+ set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
|
|
|
+ } else {
|
|
|
+ pte_clear(&init_mm, addr, ptep);
|
|
|
+- local_flush_tlb_page(addr);
|
|
|
+ }
|
|
|
++ local_flush_tlb_page(addr);
|
|
|
+ }
|
|
|
+
|
|
|
+ static pte_t *__init get_pte_virt(phys_addr_t pa)
|
|
|
+@@ -457,6 +620,7 @@
|
|
|
+ sparse_init();
|
|
|
+ setup_zero_page();
|
|
|
+ zone_sizes_init();
|
|
|
++ riscv_kdump_crash();
|
|
|
+ }
|
|
|
+
|
|
|
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
|
|
|
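
The reservation above is driven by the standard crashkernel= parameter parsed by parse_crashkernel(): booting with crashkernel=128M, for example, reserves a 128 MB block (2 MB aligned, placed anywhere below max_low_pfn when no base is given), while crashkernel=128M@0x80000000 pins it to a fixed physical address. The reserved window is then published as the "Crash kernel" region in /proc/iomem by request_standard_resources(), which is how kexec-tools locates it when loading the dump-capture kernel.
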
+diff -Nur linux-5.4.36/arch/riscv/mm/ioremap.c kernel/arch/riscv/mm/ioremap.c
|
|
|
+--- linux-5.4.36/arch/riscv/mm/ioremap.c 2020-04-29 14:33:25.000000000 +0000
|
|
|
++++ kernel/arch/riscv/mm/ioremap.c 2020-09-03 06:01:13.908989796 +0000
|
|
|
+@@ -50,26 +50,19 @@
|
|
|
+ return (void __iomem *)(vaddr + offset);
|
|
|
+ }
|
|
|
+
|
|
|
+-/*
|
|
|
+- * ioremap - map bus memory into CPU space
|
|
|
+- * @offset: bus address of the memory
|
|
|
+- * @size: size of the resource to map
|
|
|
+- *
|
|
|
+- * ioremap performs a platform specific sequence of operations to
|
|
|
+- * make bus memory CPU accessible via the readb/readw/readl/writeb/
|
|
|
+- * writew/writel functions and the other mmio helpers. The returned
|
|
|
+- * address is not guaranteed to be usable directly as a virtual
|
|
|
+- * address.
|
|
|
+- *
|
|
|
+- * Must be freed with iounmap.
|
|
|
+- */
|
|
|
+-void __iomem *ioremap(phys_addr_t offset, unsigned long size)
|
|
|
++void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
|
|
|
+ {
|
|
|
+- return __ioremap_caller(offset, size, PAGE_KERNEL,
|
|
|
+- __builtin_return_address(0));
|
|
|
++ return __ioremap_caller(phys_addr, size, prot,
|
|
|
++ __builtin_return_address(0));
|
|
|
+ }
|
|
|
+-EXPORT_SYMBOL(ioremap);
|
|
|
++EXPORT_SYMBOL(__ioremap);
|
|
|
+
|
|
|
++void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
|
|
|
++{
|
|
|
++ return __ioremap_caller(phys_addr, size, PAGE_KERNEL,
|
|
|
++ __builtin_return_address(0));
|
|
|
++}
|
|
|
++EXPORT_SYMBOL(ioremap_cache);
|
|
|
+
|
|
|
+ /**
|
|
|
+ * iounmap - Free a IO remapping
|
|
|
+@@ -82,3 +75,16 @@
|
|
|
+ vunmap((void *)((unsigned long)addr & PAGE_MASK));
|
|
|
+ }
|
|
|
+ EXPORT_SYMBOL(iounmap);
|
|
|
++
|
|
|
++pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
|
|
|
++ unsigned long size, pgprot_t vma_prot)
|
|
|
++{
|
|
|
++ if (!pfn_valid(pfn)) {
|
|
|
++ return pgprot_noncached(vma_prot);
|
|
|
++ } else if (file->f_flags & O_SYNC) {
|
|
|
++ return pgprot_writecombine(vma_prot);
|
|
|
++ }
|
|
|
++
|
|
|
++ return vma_prot;
|
|
|
++}
|
|
|
++EXPORT_SYMBOL(phys_mem_access_prot);
|
|
|
+diff -Nur linux-5.4.36/arch/riscv/mm/Makefile kernel/arch/riscv/mm/Makefile
+--- linux-5.4.36/arch/riscv/mm/Makefile	2020-04-29 14:33:25.000000000 +0000
++++ kernel/arch/riscv/mm/Makefile	2020-09-03 06:01:13.908989796 +0000
+@@ -12,6 +12,9 @@
+ obj-y += cacheflush.o
+ obj-y += context.o
+ obj-y += sifive_l2_cache.o
++obj-y += dma-mapping.o
++obj-y += asid.o
++obj-y += context.o
+
+ ifeq ($(CONFIG_MMU),y)
+ obj-$(CONFIG_SMP) += tlbflush.o
+diff -Nur linux-5.4.36/arch/riscv/mm/tlbflush.c kernel/arch/riscv/mm/tlbflush.c
|
|
|
+--- linux-5.4.36/arch/riscv/mm/tlbflush.c 2020-04-29 14:33:25.000000000 +0000
|
|
|
++++ kernel/arch/riscv/mm/tlbflush.c 2020-09-03 06:01:13.908989796 +0000
|
|
|
+@@ -2,6 +2,106 @@
|
|
|
+
|
|
|
+ #include <linux/mm.h>
|
|
|
+ #include <linux/smp.h>
|
|
|
++
|
|
|
++#define XUANTIE
|
|
|
++#ifdef XUANTIE
|
|
|
++#include <asm/mmu_context.h>
|
|
|
++
|
|
|
++int c910_mmu_v1_flag = 0;
|
|
|
++
|
|
|
++void flush_tlb_all(void)
|
|
|
++{
|
|
|
++if (c910_mmu_v1_flag) {
|
|
|
++ sync_mmu_v1();
|
|
|
++ sync_mmu_v1();
|
|
|
++ sync_mmu_v1();
|
|
|
++}
|
|
|
++
|
|
|
++ __asm__ __volatile__ ("sfence.vma" : : : "memory");
|
|
|
++}
|
|
|
++
|
|
|
++void flush_tlb_mm(struct mm_struct *mm)
|
|
|
++{
|
|
|
++if (c910_mmu_v1_flag) {
|
|
|
++ int newpid = cpu_asid(mm);
|
|
|
++
|
|
|
++ __asm__ __volatile__ ("sfence.vma zero, %0"
|
|
|
++ :
|
|
|
++ : "r"(newpid)
|
|
|
++ : "memory");
|
|
|
++} else {
|
|
|
++ sync_mmu_v1();
|
|
|
++ sync_mmu_v1();
|
|
|
++ sync_mmu_v1();
|
|
|
++ __asm__ __volatile__ ("sfence.vma" : : : "memory");
|
|
|
++}
|
|
|
++}
|
|
|
++
|
|
|
++void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
|
|
|
++{
|
|
|
++if (c910_mmu_v1_flag) {
|
|
|
++ int newpid = cpu_asid(vma->vm_mm);
|
|
|
++
|
|
|
++ addr &= PAGE_MASK;
|
|
|
++
|
|
|
++ __asm__ __volatile__ ("sfence.vma %0, %1"
|
|
|
++ :
|
|
|
++ : "r"(addr), "r"(newpid)
|
|
|
++ : "memory");
|
|
|
++
|
|
|
++} else {
|
|
|
++ addr &= PAGE_MASK;
|
|
|
++
|
|
|
++ sync_mmu_v1();
|
|
|
++ sync_mmu_v1();
|
|
|
++ sync_mmu_v1();
|
|
|
++ __asm__ __volatile__ ("sfence.vma %0"
|
|
|
++ :
|
|
|
++ : "r"(addr)
|
|
|
++ : "memory");
|
|
|
++}
|
|
|
++}
|
|
|
++
|
|
|
++void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
|
|
|
++ unsigned long end)
|
|
|
++{
|
|
|
++ unsigned long newpid = cpu_asid(vma->vm_mm);
|
|
|
++
|
|
|
++ start &= PAGE_MASK;
|
|
|
++ end += PAGE_SIZE - 1;
|
|
|
++ end &= PAGE_MASK;
|
|
|
++
|
|
|
++if (c910_mmu_v1_flag) {
|
|
|
++ while (start < end) {
|
|
|
++ sync_mmu_v1();
|
|
|
++ sync_mmu_v1();
|
|
|
++ sync_mmu_v1();
|
|
|
++ __asm__ __volatile__ ("sfence.vma %0"
|
|
|
++ :
|
|
|
++ : "r"(start)
|
|
|
++ : "memory");
|
|
|
++ start += PAGE_SIZE;
|
|
|
++ }
|
|
|
++} else {
|
|
|
++ while (start < end) {
|
|
|
++ __asm__ __volatile__ ("sfence.vma %0, %1"
|
|
|
++ :
|
|
|
++ : "r"(start), "r"(newpid)
|
|
|
++ : "memory");
|
|
|
++ start += PAGE_SIZE;
|
|
|
++ }
|
|
|
++}
|
|
|
++}
|
|
|
++
|
|
|
++static int __init c910_mmu_v1(char *str)
|
|
|
++{
|
|
|
++ c910_mmu_v1_flag = 1;
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++early_param("c910_mmu_v1", c910_mmu_v1);
|
|
|
++EXPORT_SYMBOL(c910_mmu_v1_flag);
|
|
|
++
|
|
|
++#else
|
|
|
+ #include <asm/sbi.h>
|
|
|
+
|
|
|
+ void flush_tlb_all(void)
|
|
|
+@@ -33,3 +133,4 @@
|
|
|
+ {
|
|
|
+ __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
|
|
|
+ }
|
|
|
++#endif
|
|
|
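
The alternate paths above are selected at boot time: passing c910_mmu_v1 on the kernel command line sets c910_mmu_v1_flag through the early_param() hook (and the flag is exported with EXPORT_SYMBOL for use outside this file). The parameter switches the flush helpers between the ASID-targeted sfence.vma forms and the variants that issue three sync_mmu_v1 barriers before flushing; without it the flag stays 0 and the default branches are used.
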
+diff -Nur linux-5.4.36/drivers/i2c/busses/i2c-designware-master.c kernel/drivers/i2c/busses/i2c-designware-master.c
+--- linux-5.4.36/drivers/i2c/busses/i2c-designware-master.c	2020-04-29 14:33:25.000000000 +0000
++++ kernel/drivers/i2c/busses/i2c-designware-master.c	2020-09-03 06:01:15.387989734 +0000
+@@ -54,6 +54,10 @@
+ 	/* Calculate SCL timing parameters for standard mode if not set */
+ 	if (!dev->ss_hcnt || !dev->ss_lcnt) {
+ 		ic_clk = i2c_dw_clk_rate(dev);
++		/* FIXME begin: fall back to a default rate if ic_clk cannot be read from the devicetree */
++		if (ic_clk == 0)
++			ic_clk = 50000; /* unit: kHz */
++		/* FIXME end */
+ 		dev->ss_hcnt =
+ 			i2c_dw_scl_hcnt(ic_clk,
+ 				4000,	/* tHD;STA = tHIGH = 4.0 us */
+diff -Nur linux-5.4.36/drivers/mmc/host/Kconfig kernel/drivers/mmc/host/Kconfig
+--- linux-5.4.36/drivers/mmc/host/Kconfig	2020-04-29 14:33:25.000000000 +0000
++++ kernel/drivers/mmc/host/Kconfig	2020-09-03 06:01:15.973989709 +0000
+@@ -736,7 +736,7 @@
+
+ config MMC_DW
+ 	tristate "Synopsys DesignWare Memory Card Interface"
+-	depends on ARC || ARM || ARM64 || MIPS || COMPILE_TEST
++	depends on ARC || ARM || ARM64 || MIPS || RISCV || CSKY || COMPILE_TEST
+ 	help
+ 	  This selects support for the Synopsys DesignWare Mobile Storage IP
+ 	  block, this provides host support for SD and MMC interfaces, in both
+diff -Nur linux-5.4.36/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
|
|
|
+--- linux-5.4.36/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c 1970-01-01 00:00:00.000000000 +0000
|
|
|
++++ kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c 2020-09-03 06:01:16.358989693 +0000
|
|
|
+@@ -0,0 +1,602 @@
|
|
|
++// SPDX-License-Identifier: GPL-2.0
|
|
|
++
|
|
|
++#include <linux/module.h>
|
|
|
++#include <linux/of.h>
|
|
|
++#include <linux/of_net.h>
|
|
|
++#include <linux/platform_device.h>
|
|
|
++
|
|
|
++#include "stmmac_platform.h"
|
|
|
++
|
|
|
++/* clock registers */
|
|
|
++#define GMAC_CLK_CFG0 0x00
|
|
|
++#define GMAC_CLK_CFG1 0x04
|
|
|
++#define GMAC_CLK_CFG2 0x08
|
|
|
++#define GMAC_CLK_CFG3 0x0C
|
|
|
++#define GMAC_CLK_CFG4 0x10
|
|
|
++#define GMAC_CLK_CFG5 0x14
|
|
|
++#define GMAC_CLK_CFG6 0x18
|
|
|
++
|
|
|
++/* phy interface */
|
|
|
++#define DWMAC_PHYIF_MII_GMII 0
|
|
|
++#define DWMAC_PHYIF_RGMII 1
|
|
|
++#define DWMAC_PHYIF_RMII 4
|
|
|
++/* register bit fields, bit[3]: reserved, bit[2:0]: phy interface */
|
|
|
++#define DWMAC_PHYIF_MASK 0x7
|
|
|
++#define DWMAC_PHYIF_BIT_WIDTH 4
|
|
|
++
|
|
|
++/* TXCLK direction, 1:input, 0:output */
|
|
|
++#define TXCLK_DIR_OUTPUT 0
|
|
|
++#define TXCLK_DIR_INPUT 1
|
|
|
++
|
|
|
++#define GMAC_CLK_PLLOUT_250M 250000000
|
|
|
++#define GMAC_GMII_RGMII_RATE 125000000
|
|
|
++#define GMAC_MII_RATE 25000000
|
|
|
++/* clock divider for speed */
|
|
|
++#define GMAC_CLKDIV_125M (GMAC_CLK_PLLOUT_250M / GMAC_GMII_RGMII_RATE)
|
|
|
++#define GMAC_CLKDIV_25M (GMAC_CLK_PLLOUT_250M / GMAC_MII_RATE)
|
|
|
++
|
|
|
++struct thead_dwmac_priv_data {
|
|
|
++ int id;
|
|
|
++ void __iomem *phy_if_reg;
|
|
|
++ void __iomem *txclk_dir_reg;
|
|
|
++ void __iomem *gmac_clk_reg;
|
|
|
++ int interface;
|
|
|
++ struct clk *gmac_pll_clk;
|
|
|
++ unsigned int gmac_pll_clk_freq;
|
|
|
++};
|
|
|
++
|
|
|
++/* set GMAC PHY interface, 0:MII/GMII, 1:RGMII, 4:RMII */
|
|
|
++static void thead_dwmac_set_phy_if(struct platform_device *pdev,
|
|
|
++ void __iomem *phy_if_reg, int interface,
|
|
|
++ int devid)
|
|
|
++{
|
|
|
++ struct device *dev = &pdev->dev;
|
|
|
++ unsigned int phyif = PHY_INTERFACE_MODE_MII;
|
|
|
++ volatile uint32_t reg;
|
|
|
++
|
|
|
++ if (phy_if_reg == NULL)
|
|
|
++ return;
|
|
|
++
|
|
|
++ switch (interface)
|
|
|
++ {
|
|
|
++ case PHY_INTERFACE_MODE_MII:
|
|
|
++ case PHY_INTERFACE_MODE_GMII:
|
|
|
++ phyif = DWMAC_PHYIF_MII_GMII;
|
|
|
++ break;
|
|
|
++ case PHY_INTERFACE_MODE_RGMII:
|
|
|
++ case PHY_INTERFACE_MODE_RGMII_TXID:
|
|
|
++ case PHY_INTERFACE_MODE_RGMII_RXID:
|
|
|
++ case PHY_INTERFACE_MODE_RGMII_ID:
|
|
|
++ phyif = DWMAC_PHYIF_RGMII;
|
|
|
++ break;
|
|
|
++ case PHY_INTERFACE_MODE_RMII:
|
|
|
++ phyif = DWMAC_PHYIF_RMII;
|
|
|
++ break;
|
|
|
++ default:
|
|
|
++ dev_err(dev, "phy interface %d not supported\n", interface);
|
|
|
++ return;
|
|
|
++ };
|
|
|
++
|
|
|
++ reg = readl(phy_if_reg);
|
|
|
++ reg &= ~(DWMAC_PHYIF_MASK << (DWMAC_PHYIF_BIT_WIDTH * devid));
|
|
|
++ reg |= (phyif & DWMAC_PHYIF_MASK) << (DWMAC_PHYIF_BIT_WIDTH * devid);
|
|
|
++ writel(reg, phy_if_reg);
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * set GMAC TXCLK direction
|
|
|
++ * MII : TXCLK is input
|
|
|
++ * GMII/RGMII : TXCLK is output
|
|
|
++ */
|
|
|
++static void thead_dwmac_set_txclk_dir(struct platform_device *pdev,
|
|
|
++ void __iomem *txclk_dir_reg, int interface)
|
|
|
++{
|
|
|
++ struct device *dev = &pdev->dev;
|
|
|
++ unsigned int txclk_dir = TXCLK_DIR_INPUT;
|
|
|
++
|
|
|
++ if (txclk_dir_reg == NULL)
|
|
|
++ return;
|
|
|
++
|
|
|
++ switch (interface)
|
|
|
++ {
|
|
|
++ case PHY_INTERFACE_MODE_MII:
|
|
|
++ case PHY_INTERFACE_MODE_RMII:
|
|
|
++ txclk_dir = TXCLK_DIR_INPUT;
|
|
|
++ break;
|
|
|
++ case PHY_INTERFACE_MODE_GMII:
|
|
|
++ case PHY_INTERFACE_MODE_RGMII:
|
|
|
++ case PHY_INTERFACE_MODE_RGMII_TXID:
|
|
|
++ case PHY_INTERFACE_MODE_RGMII_RXID:
|
|
|
++ case PHY_INTERFACE_MODE_RGMII_ID:
|
|
|
++ txclk_dir = TXCLK_DIR_OUTPUT;
|
|
|
++ break;
|
|
|
++ default:
|
|
|
++ dev_err(dev, "phy interface %d not supported\n", interface);
|
|
|
++ return;
|
|
|
++ };
|
|
|
++
|
|
|
++ writel(txclk_dir, txclk_dir_reg);
|
|
|
++}
|
|
|
++
|
|
|
++static void thead_dwmac_set_clk_source(struct platform_device *pdev,
|
|
|
++ void __iomem *gmac_clk_reg, int interface)
|
|
|
++{
|
|
|
++ struct device *dev = &pdev->dev;
|
|
|
++ volatile uint32_t reg;
|
|
|
++
|
|
|
++ if (gmac_clk_reg == NULL)
|
|
|
++ return;
|
|
|
++
|
|
|
++ reg = readl(gmac_clk_reg + GMAC_CLK_CFG0);
|
|
|
++
|
|
|
++ /* RX clock source */
|
|
|
++ reg |= BIT(7); /* gmac_rx_clk_sel: extern pin */
|
|
|
++
|
|
|
++ /* TX clock source */
|
|
|
++ if (interface == PHY_INTERFACE_MODE_MII) {
|
|
|
++ reg |= BIT(1); /* gmac_tx_clk_sel: extern pin */
|
|
|
++ reg &= ~BIT(2); /* gmac_tx_clk_gbit_sel: u_tx_clk_mux */
|
|
|
++ } else if (interface == PHY_INTERFACE_MODE_GMII) {
|
|
|
++ reg &= ~BIT(5); /* gmac_tx_clk_out_sel: GMAC PLL */
|
|
|
++ reg |= BIT(2); /* gmac_tx_clk_gbit_sel: GMAC PLL */
|
|
|
++ } else if (interface == PHY_INTERFACE_MODE_RGMII
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_ID
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_RXID
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_TXID) {
|
|
|
++ reg &= ~BIT(5); /* gmac_tx_clk_out_sel: GMAC PLL */
|
|
|
++ reg |= BIT(2); /* gmac_tx_clk_gbit_sel: GMAC PLL */
|
|
|
++ } else {
|
|
|
++ dev_err(dev, "phy interface %d not supported\n", interface);
|
|
|
++ return;
|
|
|
++ }
|
|
|
++
|
|
|
++ writel(reg, gmac_clk_reg + GMAC_CLK_CFG0);
|
|
|
++}
|
|
|
++
|
|
|
++
|
|
|
++/* set clock source */
|
|
|
++static void thead_dwmac_set_clock_delay(struct platform_device *pdev,
|
|
|
++ void __iomem *gmac_clk_reg, int interface)
|
|
|
++{
|
|
|
++ unsigned int delay;
|
|
|
++
|
|
|
++ if (gmac_clk_reg == NULL)
|
|
|
++ return;
|
|
|
++
|
|
|
++ if (of_property_read_u32(pdev->dev.of_node, "rx-clk-delay",
|
|
|
++ &delay) == 0) {
|
|
|
++ /* RX clk delay */
|
|
|
++ writel(delay, gmac_clk_reg + GMAC_CLK_CFG1);
|
|
|
++ pr_info("RX clk delay: 0x%X\n", delay);
|
|
|
++ }
|
|
|
++
|
|
|
++ if (of_property_read_u32(pdev->dev.of_node, "tx-clk-delay",
|
|
|
++ &delay) == 0) {
|
|
|
++ /* TX clk delay */
|
|
|
++ writel(delay, gmac_clk_reg + GMAC_CLK_CFG2);
|
|
|
++ pr_info("TX clk delay: 0x%X\n", delay);
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++/* set gmac pll divider (u_pll_clk_div) to get 250MHz clock */
|
|
|
++static void thead_dwmac_set_pll_250M(void __iomem *gmac_clk_reg, int interface,
|
|
|
++ unsigned int src_freq)
|
|
|
++{
|
|
|
++ volatile unsigned int reg;
|
|
|
++ unsigned int div = 1;
|
|
|
++
|
|
|
++ if (gmac_clk_reg == NULL)
|
|
|
++ return;
|
|
|
++
|
|
|
++ if (interface == PHY_INTERFACE_MODE_MII) {
|
|
|
++ /* For MII, no internal PLL is used */
|
|
|
++ return;
|
|
|
++ } else if (interface == PHY_INTERFACE_MODE_GMII
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_ID
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_RXID
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_TXID) {
|
|
|
++
|
|
|
++ /* check clock */
|
|
|
++ if ((src_freq == 0) || (src_freq % GMAC_CLK_PLLOUT_250M != 0)) {
|
|
|
++ pr_err("error! invalid gmac pll freq %d\n", src_freq);
|
|
|
++ return;
|
|
|
++ }
|
|
|
++ div = src_freq / GMAC_CLK_PLLOUT_250M;
|
|
|
++
|
|
|
++ /* disable pll_clk_div */
|
|
|
++ reg = readl(gmac_clk_reg + GMAC_CLK_CFG3);
|
|
|
++ reg &= ~BIT(31);
|
|
|
++ writel(reg, gmac_clk_reg + GMAC_CLK_CFG3);
|
|
|
++
|
|
|
++ /* modify divider */
|
|
|
++ writel(div, gmac_clk_reg + GMAC_CLK_CFG3);
|
|
|
++
|
|
|
++ /* enable pll_clk_div */
|
|
|
++ reg = readl(gmac_clk_reg + GMAC_CLK_CFG3);
|
|
|
++ reg |= BIT(31);
|
|
|
++ writel(reg, gmac_clk_reg + GMAC_CLK_CFG3);
|
|
|
++ } else {
|
|
|
++ pr_err("phy interface %d not supported\n", interface);
|
|
|
++ return;
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++/* set gmac speed */
|
|
|
++static void thead_dwmac_set_speed(void __iomem *gmac_clk_reg, int interface,
|
|
|
++ unsigned int speed)
|
|
|
++{
|
|
|
++ volatile unsigned int reg;
|
|
|
++
|
|
|
++ if (gmac_clk_reg == NULL)
|
|
|
++ return;
|
|
|
++
|
|
|
++ if (interface == PHY_INTERFACE_MODE_MII) {
|
|
|
++ /* For MII, no internal PLL is used */
|
|
|
++ return;
|
|
|
++ } else if (interface == PHY_INTERFACE_MODE_GMII
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_ID
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_RXID
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_TXID) {
|
|
|
++
|
|
|
++ /* disable gtx_clk_div */
|
|
|
++ reg = readl(gmac_clk_reg + GMAC_CLK_CFG4);
|
|
|
++ reg &= ~BIT(31);
|
|
|
++ writel(reg, gmac_clk_reg + GMAC_CLK_CFG4);
|
|
|
++
|
|
|
++ /*
|
|
|
++ * modify divider
|
|
|
++ */
|
|
|
++ /* gtx_clk_div */
|
|
|
++ if (speed == SPEED_1000) {
|
|
|
++ writel(GMAC_CLKDIV_125M, gmac_clk_reg + GMAC_CLK_CFG4);
|
|
|
++ } else if (speed == SPEED_100) {
|
|
|
++ writel(GMAC_CLKDIV_25M, gmac_clk_reg + GMAC_CLK_CFG4);
|
|
|
++ } else {
|
|
|
++ writel(GMAC_CLKDIV_25M / 10, gmac_clk_reg + GMAC_CLK_CFG4);
|
|
|
++ }
|
|
|
++
|
|
|
++ /* enable gtx_clk_div */
|
|
|
++ reg = readl(gmac_clk_reg + GMAC_CLK_CFG4);
|
|
|
++ reg |= BIT(31);
|
|
|
++ writel(reg, gmac_clk_reg + GMAC_CLK_CFG4);
|
|
|
++ } else {
|
|
|
++ pr_err("phy interface %d not supported\n", interface);
|
|
|
++ return;
|
|
|
++ }
|
|
|
++}
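
Worked numbers for the divider programming above: the PLL branch is first brought down to GMAC_CLK_PLLOUT_250M (250 MHz) by thead_dwmac_set_pll_250M(), so thead_dwmac_set_speed() writes GMAC_CLKDIV_125M = 250/125 = 2 into GMAC_CLK_CFG4 for gigabit (a 125 MHz GTX clock) and GMAC_CLKDIV_25M = 250/25 = 10 for 100 Mbit (25 MHz); the remaining fallback branch writes GMAC_CLKDIV_25M / 10. In every case the divider is re-enabled afterwards by setting bit 31 of the register.
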
|
|
|
++
|
|
|
++/* enable gmac clock */
|
|
|
++static void thead_dwmac_enable_clock(struct platform_device *pdev,
|
|
|
++ void __iomem *gmac_clk_reg, int interface)
|
|
|
++{
|
|
|
++ struct device *dev = &pdev->dev;
|
|
|
++ volatile unsigned int reg;
|
|
|
++
|
|
|
++ if (gmac_clk_reg == NULL)
|
|
|
++ return;
|
|
|
++
|
|
|
++ reg = readl(gmac_clk_reg + GMAC_CLK_CFG0);
|
|
|
++
|
|
|
++ /* enable gmac_hclk */
|
|
|
++ reg |= BIT(14);
|
|
|
++
|
|
|
++ if (interface == PHY_INTERFACE_MODE_MII) {
|
|
|
++ reg |= BIT(8); /* enable gmac_rx_clk */
|
|
|
++ reg |= BIT(3); /* enable gmac_tx_clk */
|
|
|
++ } else if (interface == PHY_INTERFACE_MODE_GMII) {
|
|
|
++ reg |= BIT(8); /* enable gmac_rx_clk */
|
|
|
++ reg |= BIT(3); /* enable gmac_tx_clk */
|
|
|
++ reg |= BIT(6); /* enable gmac_tx_clk_out */
|
|
|
++ } else if (interface == PHY_INTERFACE_MODE_RGMII
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_ID
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_RXID
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_TXID) {
|
|
|
++ reg |= BIT(8); /* enable gmac_rx_clk */
|
|
|
++ reg |= BIT(3); /* enable gmac_tx_clk */
|
|
|
++ reg |= BIT(6); /* enable gmac_tx_clk_out */
|
|
|
++ reg |= BIT(9); /* enable gmac_rx_clk_n */
|
|
|
++ reg |= BIT(4); /* enable gmac_tx_clk_n */
|
|
|
++ } else {
|
|
|
++ dev_err(dev, "phy interface %d not supported\n", interface);
|
|
|
++ return;
|
|
|
++ }
|
|
|
++
|
|
|
++ writel(reg, gmac_clk_reg + GMAC_CLK_CFG0);
|
|
|
++}
|
|
|
++
|
|
|
++#if 0
|
|
|
++/* disable gmac clock */
|
|
|
++static void thead_dwmac_disable_clock(struct platform_device *pdev,
|
|
|
++ void __iomem *gmac_clk_reg, int interface)
|
|
|
++{
|
|
|
++ struct device *dev = &pdev->dev;
|
|
|
++ volatile unsigned int reg;
|
|
|
++
|
|
|
++ if (gmac_clk_reg == NULL)
|
|
|
++ return;
|
|
|
++
|
|
|
++ reg = readl(gmac_clk_reg + GMAC_CLK_CFG0);
|
|
|
++
|
|
|
++ /* disable gmac_hclk */
|
|
|
++ reg &= ~BIT(14);
|
|
|
++
|
|
|
++ if (interface == PHY_INTERFACE_MODE_MII) {
|
|
|
++ reg &= ~BIT(8); /* disable gmac_rx_clk */
|
|
|
++ reg &= ~BIT(3); /* disable gmac_tx_clk */
|
|
|
++ } else if (interface == PHY_INTERFACE_MODE_GMII) {
|
|
|
++ reg &= ~BIT(8); /* disable gmac_rx_clk */
|
|
|
++ reg &= ~BIT(3); /* disable gmac_tx_clk */
|
|
|
++ reg &= ~BIT(6); /* disable gmac_tx_clk_out */
|
|
|
++ } else if (interface == PHY_INTERFACE_MODE_RGMII
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_ID
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_RXID
|
|
|
++ || interface == PHY_INTERFACE_MODE_RGMII_TXID) {
|
|
|
++ reg &= ~BIT(8); /* disable gmac_rx_clk */
|
|
|
++ reg &= ~BIT(3); /* disable gmac_tx_clk */
|
|
|
++ reg &= ~BIT(6); /* disable gmac_tx_clk_out */
|
|
|
++ reg &= ~BIT(9); /* disable gmac_rx_clk_n */
|
|
|
++ reg &= ~BIT(4); /* disable gmac_tx_clk_n */
|
|
|
++ } else {
|
|
|
++ dev_err(dev, "phy interface %d not supported\n", interface);
|
|
|
++ return;
|
|
|
++ }
|
|
|
++
|
|
|
++ writel(reg, gmac_clk_reg + GMAC_CLK_CFG0);
|
|
|
++}
|
|
|
++#endif
|
|
|
++
|
|
|
++static int thead_dwmac_init(struct platform_device *pdev, void *bsp_priv)
|
|
|
++{
|
|
|
++ struct thead_dwmac_priv_data *thead_plat_dat = bsp_priv;
|
|
|
++ struct device *dev = &pdev->dev;
|
|
|
++ struct device_node *np = pdev->dev.of_node;
|
|
|
++ struct resource *res;
|
|
|
++ void __iomem *ptr;
|
|
|
++ struct clk *clktmp;
|
|
|
++ int ret;
|
|
|
++
|
|
|
++ thead_plat_dat->id = of_alias_get_id(np, "ethernet");
|
|
|
++ if (thead_plat_dat->id < 0) {
|
|
|
++ thead_plat_dat->id = 0;
|
|
|
++ }
|
|
|
++ dev_info(dev, "id: %d\n", thead_plat_dat->id);
|
|
|
++
|
|
|
++ thead_plat_dat->interface = of_get_phy_mode(dev->of_node);
|
|
|
++ dev_info(dev, "phy interface: %d\n", thead_plat_dat->interface);
|
|
|
++
|
|
|
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_if_reg");
|
|
|
++ if ((res != NULL) && (resource_type(res) == IORESOURCE_MEM)) {
|
|
|
++ ptr = devm_ioremap(dev, res->start, resource_size(res));
|
|
|
++ if (!ptr) {
|
|
|
++ dev_err(dev, "phy interface register not exist, skipped it\n");
|
|
|
++ } else {
|
|
|
++ thead_plat_dat->phy_if_reg = ptr;
|
|
|
++ }
|
|
|
++ }
|
|
|
++
|
|
|
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "txclk_dir_reg");
|
|
|
++ ptr = devm_ioremap_resource(dev, res);
|
|
|
++ if (IS_ERR(ptr)) {
|
|
|
++ dev_err(dev, "txclk_dir register not exist, skipped it\n");
|
|
|
++ } else {
|
|
|
++ thead_plat_dat->txclk_dir_reg = ptr;
|
|
|
++ }
|
|
|
++
|
|
|
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clk_mgr_reg");
|
|
|
++ ptr = devm_ioremap_resource(dev, res);
|
|
|
++ if (IS_ERR(ptr)) {
|
|
|
++ dev_err(dev, "gmac_clk register not exist, skipped it\n");
|
|
|
++ } else {
|
|
|
++ thead_plat_dat->gmac_clk_reg = ptr;
|
|
|
++ }
|
|
|
++
|
|
|
++ /* get gmac pll clk */
|
|
|
++ clktmp = devm_clk_get(dev, "gmac_pll_clk");
|
|
|
++ if (IS_ERR(clktmp)) {
|
|
|
++ dev_err(dev, "gmac_pll_clk not exist, skipped it\n");
|
|
|
++ } else {
|
|
|
++ thead_plat_dat->gmac_pll_clk = clktmp;
|
|
|
++
|
|
|
++ ret = clk_prepare_enable(thead_plat_dat->gmac_pll_clk);
|
|
|
++ if (ret) {
|
|
|
++ dev_err(dev, "Failed to enable clk 'gmac_pll_clk'\n");
|
|
|
++ return -1;
|
|
|
++ }
|
|
|
++
|
|
|
++ thead_plat_dat->gmac_pll_clk_freq =
|
|
|
++ clk_get_rate(thead_plat_dat->gmac_pll_clk);
|
|
|
++ }
|
|
|
++
|
|
|
++ thead_dwmac_set_phy_if(pdev, thead_plat_dat->phy_if_reg,
|
|
|
++ thead_plat_dat->interface, thead_plat_dat->id);
|
|
|
++
|
|
|
++ thead_dwmac_set_txclk_dir(pdev, thead_plat_dat->txclk_dir_reg,
|
|
|
++ thead_plat_dat->interface);
|
|
|
++
|
|
|
++ thead_dwmac_set_clk_source(pdev, thead_plat_dat->gmac_clk_reg,
|
|
|
++ thead_plat_dat->interface);
|
|
|
++ thead_dwmac_set_clock_delay(pdev, thead_plat_dat->gmac_clk_reg,
|
|
|
++ thead_plat_dat->interface);
|
|
|
++
|
|
|
++ thead_dwmac_set_pll_250M(thead_plat_dat->gmac_clk_reg,
|
|
|
++ thead_plat_dat->interface,
|
|
|
++ thead_plat_dat->gmac_pll_clk_freq);
|
|
|
++
|
|
|
++ /* default speed is 1Gbps */
|
|
|
++ thead_dwmac_set_speed(thead_plat_dat->gmac_clk_reg,
|
|
|
++ thead_plat_dat->interface, SPEED_1000);
|
|
|
++
|
|
|
++ thead_dwmac_enable_clock(pdev, thead_plat_dat->gmac_clk_reg,
|
|
|
++ thead_plat_dat->interface);
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++static void thead_dwmac_fix_speed(void *bsp_priv, unsigned int speed)
|
|
|
++{
|
|
|
++ struct thead_dwmac_priv_data *thead_plat_dat = bsp_priv;
|
|
|
++
|
|
|
++ thead_dwmac_set_speed(thead_plat_dat->gmac_clk_reg,
|
|
|
++ thead_plat_dat->interface, speed);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
|
|
|
++ * @mcast_bins: Multicast filtering bins
|
|
|
++ * Description:
|
|
|
++ * this function validates the number of Multicast filtering bins specified
|
|
|
++ * by the configuration through the device tree. The Synopsys GMAC supports
|
|
|
++ * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC
|
|
|
++ * number space. 64 bins correspond to 6 bits of the CRC, 128 corresponds
|
|
|
++ * to 7 bits, and 256 refers to 8 bits of the CRC. Any other setting is
|
|
|
++ * invalid and will cause the filtering algorithm to use Multicast
|
|
|
++ * promiscuous mode.
|
|
|
++ */
|
|
|
++static int dwmac1000_validate_mcast_bins(int mcast_bins)
|
|
|
++{
|
|
|
++ int x = mcast_bins;
|
|
|
++
|
|
|
++ switch (x) {
|
|
|
++ case HASH_TABLE_SIZE:
|
|
|
++ case 128:
|
|
|
++ case 256:
|
|
|
++ break;
|
|
|
++ default:
|
|
|
++ x = 0;
|
|
|
++ pr_info("Hash table entries set to unexpected value %d",
|
|
|
++ mcast_bins);
|
|
|
++ break;
|
|
|
++ }
|
|
|
++ return x;
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * dwmac1000_validate_ucast_entries - validate the Unicast address entries
|
|
|
++ * @ucast_entries: number of Unicast address entries
|
|
|
++ * Description:
|
|
|
++ * This function validates the number of Unicast address entries supported
|
|
|
++ * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
|
|
|
++ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
++ * logic. This function validates a valid, supported configuration is
++ * selected, and defaults to 1 Unicast address if an unsupported
++ * configuration is selected.
++ */
++static int dwmac1000_validate_ucast_entries(int ucast_entries)
++{
++ int x = ucast_entries;
++
++ switch (x) {
++ case 1 ... 32:
++ case 64:
++ case 128:
++ break;
++ default:
++ x = 1;
++ pr_info("Unicast table entries set to unexpected value %d\n",
++ ucast_entries);
++ break;
++ }
++ return x;
++}
++
++static int thead_dwmac_probe(struct platform_device *pdev)
++{
++ struct plat_stmmacenet_data *plat_dat;
++ struct stmmac_resources stmmac_res;
++ struct thead_dwmac_priv_data *thead_plat_dat;
++ struct device *dev = &pdev->dev;
++ struct device_node *np = pdev->dev.of_node;
++ int ret;
++
++ thead_plat_dat = devm_kzalloc(dev, sizeof(*thead_plat_dat), GFP_KERNEL);
++ if (thead_plat_dat == NULL) {
++ dev_err(&pdev->dev, "allocate memory failed\n");
++ return -ENOMEM;
++ }
++
++ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
++ if (ret)
++ return ret;
++
++ if (pdev->dev.of_node) {
++ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
++ if (IS_ERR(plat_dat)) {
++ dev_err(&pdev->dev, "dt configuration failed\n");
++ return PTR_ERR(plat_dat);
++ }
++ } else {
++ plat_dat = dev_get_platdata(&pdev->dev);
++ if (!plat_dat) {
++ dev_err(&pdev->dev, "no platform data provided\n");
++ return -EINVAL;
++ }
++
++ /* Set default value for multicast hash bins */
++ plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
++
++ /* Set default value for unicast filter entries */
++ plat_dat->unicast_filter_entries = 1;
++ }
++
++ /* Custom initialisation (if needed) */
++ if (plat_dat->init) {
++ ret = plat_dat->init(pdev, plat_dat->bsp_priv);
++ if (ret)
++ goto err_remove_config_dt;
++ }
++
++ /* populate bsp private data */
++ plat_dat->bsp_priv = thead_plat_dat;
++ plat_dat->fix_mac_speed = thead_dwmac_fix_speed;
++ of_property_read_u32(np, "max-frame-size", &plat_dat->maxmtu);
++ of_property_read_u32(np, "snps,multicast-filter-bins",
++ &plat_dat->multicast_filter_bins);
++ of_property_read_u32(np, "snps,perfect-filter-entries",
++ &plat_dat->unicast_filter_entries);
++ plat_dat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
++ plat_dat->unicast_filter_entries);
++ plat_dat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
++ plat_dat->multicast_filter_bins);
++ plat_dat->has_gmac = 1;
++ plat_dat->pmt = 1;
++
++ ret = thead_dwmac_init(pdev, plat_dat->bsp_priv);
++ if (ret)
++ goto err_exit;
++
++ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
++ if (ret)
++ goto err_exit;
++
++ return 0;
++
++err_exit:
++ if (plat_dat->exit)
++ plat_dat->exit(pdev, plat_dat->bsp_priv);
++err_remove_config_dt:
++ if (pdev->dev.of_node)
++ stmmac_remove_config_dt(pdev, plat_dat);
++
++ return ret;
++}
++
++static const struct of_device_id thead_dwmac_match[] = {
++ { .compatible = "thead,dwmac"},
++ { }
++};
++MODULE_DEVICE_TABLE(of, thead_dwmac_match);
++
++static struct platform_driver thead_dwmac_driver = {
++ .probe = thead_dwmac_probe,
++ .remove = stmmac_pltfr_remove,
++ .driver = {
++ .name = "thead_dwmac_eth",
++ .pm = &stmmac_pltfr_pm_ops,
++ .of_match_table = of_match_ptr(thead_dwmac_match),
++ },
++};
++module_platform_driver(thead_dwmac_driver);
++
++MODULE_DESCRIPTION("T-HEAD dwmac driver");
++MODULE_LICENSE("GPL v2");
+diff -Nur linux-5.4.36/drivers/net/ethernet/stmicro/stmmac/Makefile kernel/drivers/net/ethernet/stmicro/stmmac/Makefile
+--- linux-5.4.36/drivers/net/ethernet/stmicro/stmmac/Makefile 2020-04-29 14:33:25.000000000 +0000
++++ kernel/drivers/net/ethernet/stmicro/stmmac/Makefile 2020-09-03 06:01:16.356989693 +0000
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-obj-$(CONFIG_STMMAC_ETH) += stmmac.o
++obj-$(CONFIG_STMMAC_ETH) += stmmac.o dwmac-thead.o
+ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
+ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
+diff -Nur linux-5.4.36/drivers/perf/Kconfig kernel/drivers/perf/Kconfig
+--- linux-5.4.36/drivers/perf/Kconfig 2020-04-29 14:33:25.000000000 +0000
++++ kernel/drivers/perf/Kconfig 2020-09-03 06:01:16.736989677 +0000
+@@ -71,6 +71,14 @@
+ system, control logic. The PMU allows counting various events related
+ to DSU.
+
++config THEAD_XT_V1_PMU
++ bool "T-HEAD XuanTie v1 Performance Monitoring Unit"
++ depends on RISCV
++ def_bool y
++ help
++ The T-HEAD XuanTie PMU supports various hardware events, including
++ cycles, instructions, cache accesses/misses, LSU events, etc.
++
+ config FSL_IMX8_DDR_PMU
+ tristate "Freescale i.MX8 DDR perf monitor"
+ depends on ARCH_MXC
+diff -Nur linux-5.4.36/drivers/perf/Makefile kernel/drivers/perf/Makefile
+--- linux-5.4.36/drivers/perf/Makefile 2020-04-29 14:33:25.000000000 +0000
++++ kernel/drivers/perf/Makefile 2020-09-03 06:01:16.736989677 +0000
+@@ -12,3 +12,4 @@
+ obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
+ obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
+ obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
++obj-$(CONFIG_THEAD_XT_V1_PMU) += thead_xt_pmu_v1.o
+diff -Nur linux-5.4.36/drivers/perf/thead_xt_pmu_v1.c kernel/drivers/perf/thead_xt_pmu_v1.c
+--- linux-5.4.36/drivers/perf/thead_xt_pmu_v1.c 1970-01-01 00:00:00.000000000 +0000
++++ kernel/drivers/perf/thead_xt_pmu_v1.c 2020-09-03 06:01:16.738989677 +0000
+@@ -0,0 +1,768 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */
++
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/perf_event.h>
++#include <linux/platform_device.h>
++#include <linux/smp.h>
++#include <asm/perf_event.h>
++#include <asm/sbi.h>
++
++#define RISCV_PMU_CYCLE 0
++#define RISCV_PMU_TIME 1
++#undef RISCV_PMU_INSTRET
++#define RISCV_PMU_INSTRET 2
++#define RISCV_PMU_L1ICAC 3 /* ICache Access */
++#define RISCV_PMU_L1ICMC 4 /* ICache Miss */
++#define RISCV_PMU_IUTLBMC 5 /* I-UTLB Miss */
++#define RISCV_PMU_DUTLBMC 6 /* D-UTLB Miss */
++#define RISCV_PMU_JTLBMC 7 /* JTLB Miss Counter */
++
++#define RISCV_PMU_CBMC 8 /* Cond-br-mispredict */
++#define RISCV_PMU_CBIC 9 /* Cond-br-instruction */
++#define RISCV_PMU_IBMC 10 /* Indirect Branch Mispredict */
++#define RISCV_PMU_IBIC 11 /* Indirect Branch Instruction */
++#define RISCV_PMU_LSUSFC 12 /* LSU Spec Fail */
++#define RISCV_PMU_STC 13 /* Store Instruction */
++
++#define RISCV_PMU_L1DCRAC 14 /* L1 DCache Read Access */
++#define RISCV_PMU_L1DCRMC 15 /* L1 DCache Read Miss */
++#define RISCV_PMU_L1DCWAC 16 /* L1 DCache Write Access */
++#define RISCV_PMU_L1DCWMC 17 /* L1 DCache Write Miss */
++
++#define RISCV_PMU_L2CRAC 18 /* L2 Cache Read Access */
++#define RISCV_PMU_L2CRMC 19 /* L2 Cache Read Miss */
++#define RISCV_PMU_L2CWAC 20 /* L2 Cache Write Access */
++#define RISCV_PMU_L2CWMC 21 /* L2 Cache Write Miss */
++
++#define RISCV_PMU_RFLFC 22 /* RF Launch Fail */
++#define RISCV_PMU_RFRLFC 23 /* RF Reg Launch Fail */
++#define RISCV_PMU_RFIC 24 /* RF Instruction */
++
++#define RISCV_PMU_LSUC4SC 25 /* LSU Cross 4K Stall */
++#define RISCV_PMU_LSUOSC 26 /* LSU Other Stall */
++#define RISCV_PMU_LSUSQDC 27 /* LSU SQ Discard */
++#define RISCV_PMU_LSUSQDDC 28 /* LSU SQ Data Discard */
++
++#define SCOUNTERINTEN 0x5c4
++#define SCOUNTEROF 0x5c5
++#define SCOUNTERBASE 0x5e0
++
++#define WRITE_COUNTER(idx, value) \
++ csr_write(SCOUNTERBASE + idx, value)
++
++/* The events for a given PMU register set. */
++struct pmu_hw_events {
++ /*
++ * The events that are active on the PMU for the given index.
++ */
++ struct perf_event *events[RISCV_MAX_COUNTERS];
++
++ /*
++ * A 1 bit for an index indicates that the counter is being used for
++ * an event. A 0 means that the counter can be used.
++ */
++ unsigned long used_mask[BITS_TO_LONGS(RISCV_MAX_COUNTERS)];
++};
++
++static struct riscv_pmu_t {
++ struct pmu pmu;
++ struct pmu_hw_events __percpu *hw_events;
++ struct platform_device *plat_device;
++ u64 max_period;
++} riscv_pmu;
++
++/*
++ * Hardware & cache maps and their methods
++ */
++
++static const int riscv_hw_event_map[] = {
++ [PERF_COUNT_HW_CPU_CYCLES] = RISCV_PMU_CYCLE,
++ [PERF_COUNT_HW_INSTRUCTIONS] = RISCV_PMU_INSTRET,
++
++ [PERF_COUNT_HW_CACHE_REFERENCES] = RISCV_PMU_L1ICAC,
++ [PERF_COUNT_HW_CACHE_MISSES] = RISCV_PMU_L1ICMC,
++
++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = RISCV_PMU_CBIC,
++ [PERF_COUNT_HW_BRANCH_MISSES] = RISCV_PMU_CBMC,
++
++ [PERF_COUNT_HW_BUS_CYCLES] = RISCV_PMU_IBMC,
++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = RISCV_PMU_IBIC,
++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = RISCV_PMU_LSUSFC,
++ [PERF_COUNT_HW_REF_CPU_CYCLES] = RISCV_PMU_STC,
++};
++
++#define C(x) PERF_COUNT_HW_CACHE_##x
++static const int riscv_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
++[PERF_COUNT_HW_CACHE_OP_MAX]
++[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
++ [C(L1D)] = {
++ [C(OP_READ)] = {
++ [C(RESULT_ACCESS)] = RISCV_PMU_L1DCRAC,
++ [C(RESULT_MISS)] = RISCV_PMU_L1DCRMC,
++ },
++ [C(OP_WRITE)] = {
++ [C(RESULT_ACCESS)] = RISCV_PMU_L1DCWAC,
++ [C(RESULT_MISS)] = RISCV_PMU_L1DCWMC,
++ },
++ [C(OP_PREFETCH)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ },
++ [C(L1I)] = {
++ [C(OP_READ)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ [C(OP_WRITE)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ [C(OP_PREFETCH)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ },
++ [C(LL)] = {
++ [C(OP_READ)] = {
++ [C(RESULT_ACCESS)] = RISCV_PMU_L2CRAC,
++ [C(RESULT_MISS)] = RISCV_PMU_L2CRMC,
++ },
++ [C(OP_WRITE)] = {
++ [C(RESULT_ACCESS)] = RISCV_PMU_L2CWAC,
++ [C(RESULT_MISS)] = RISCV_PMU_L2CWMC,
++ },
++ [C(OP_PREFETCH)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ },
++ [C(DTLB)] = {
++ [C(OP_READ)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ [C(OP_WRITE)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ [C(OP_PREFETCH)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ },
++ [C(ITLB)] = {
++ [C(OP_READ)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ [C(OP_WRITE)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ [C(OP_PREFETCH)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ },
++ [C(BPU)] = {
++ [C(OP_READ)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ [C(OP_WRITE)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ [C(OP_PREFETCH)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ },
++ [C(NODE)] = {
++ [C(OP_READ)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ [C(OP_WRITE)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ [C(OP_PREFETCH)] = {
++ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
++ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
++ },
++ },
++};
++
++/*
++ * Low-level functions: reading/writing counters
++ */
++static inline u64 read_counter(int idx)
++{
++ u64 val = 0;
++
++ switch (idx) {
++ case RISCV_PMU_CYCLE:
++ val = csr_read(cycle);
++ break;
++ case RISCV_PMU_INSTRET:
++ val = csr_read(instret);
++ break;
++ case RISCV_PMU_L1ICAC:
++ val = csr_read(hpmcounter3);
++ break;
++ case RISCV_PMU_L1ICMC:
++ val = csr_read(hpmcounter4);
++ break;
++ case RISCV_PMU_IUTLBMC:
++ val = csr_read(hpmcounter5);
++ break;
++ case RISCV_PMU_DUTLBMC:
++ val = csr_read(hpmcounter6);
++ break;
++ case RISCV_PMU_JTLBMC:
++ val = csr_read(hpmcounter7);
++ break;
++ case RISCV_PMU_CBMC:
++ val = csr_read(hpmcounter8);
++ break;
++ case RISCV_PMU_CBIC:
++ val = csr_read(hpmcounter9);
++ break;
++ case RISCV_PMU_IBMC:
++ val = csr_read(hpmcounter10);
++ break;
++ case RISCV_PMU_IBIC:
++ val = csr_read(hpmcounter11);
++ break;
++ case RISCV_PMU_LSUSFC:
++ val = csr_read(hpmcounter12);
++ break;
++ case RISCV_PMU_STC:
++ val = csr_read(hpmcounter13);
++ break;
++ case RISCV_PMU_L1DCRAC:
++ val = csr_read(hpmcounter14);
++ break;
++ case RISCV_PMU_L1DCRMC:
++ val = csr_read(hpmcounter15);
++ break;
++ case RISCV_PMU_L1DCWAC:
++ val = csr_read(hpmcounter16);
++ break;
++ case RISCV_PMU_L1DCWMC:
++ val = csr_read(hpmcounter17);
++ break;
++ case RISCV_PMU_L2CRAC:
++ val = csr_read(hpmcounter18);
++ break;
++ case RISCV_PMU_L2CRMC:
++ val = csr_read(hpmcounter19);
++ break;
++ case RISCV_PMU_L2CWAC:
++ val = csr_read(hpmcounter20);
++ break;
++ case RISCV_PMU_L2CWMC:
++ val = csr_read(hpmcounter21);
++ break;
++ case RISCV_PMU_RFLFC:
++ val = csr_read(hpmcounter22);
++ break;
++ case RISCV_PMU_RFRLFC:
++ val = csr_read(hpmcounter23);
++ break;
++ case RISCV_PMU_RFIC:
++ val = csr_read(hpmcounter24);
++ break;
++ case RISCV_PMU_LSUC4SC:
++ val = csr_read(hpmcounter25);
++ break;
++ case RISCV_PMU_LSUOSC:
++ val = csr_read(hpmcounter26);
++ break;
++ case RISCV_PMU_LSUSQDC:
++ val = csr_read(hpmcounter27);
++ break;
++ case RISCV_PMU_LSUSQDDC:
++ val = csr_read(hpmcounter28);
++ break;
++ default:
++ WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
++ return -EINVAL;
++ }
++
++ return val;
++}
++
++static inline void write_counter(int idx, u64 value)
++{
++ switch (idx) {
++ case RISCV_PMU_CYCLE:
++ WRITE_COUNTER(RISCV_PMU_CYCLE, value);
++ break;
++ case RISCV_PMU_INSTRET:
++ WRITE_COUNTER(RISCV_PMU_INSTRET, value);
++ break;
++ case RISCV_PMU_L1ICAC:
++ WRITE_COUNTER(RISCV_PMU_L1ICAC, value);
++ break;
++ case RISCV_PMU_L1ICMC:
++ WRITE_COUNTER(RISCV_PMU_L1ICMC, value);
++ break;
++ case RISCV_PMU_IUTLBMC:
++ WRITE_COUNTER(RISCV_PMU_IUTLBMC, value);
++ break;
++ case RISCV_PMU_DUTLBMC:
++ WRITE_COUNTER(RISCV_PMU_DUTLBMC, value);
++ break;
++ case RISCV_PMU_JTLBMC:
++ WRITE_COUNTER(RISCV_PMU_JTLBMC, value);
++ break;
++ case RISCV_PMU_CBMC:
++ WRITE_COUNTER(RISCV_PMU_CBMC, value);
++ break;
++ case RISCV_PMU_CBIC:
++ WRITE_COUNTER(RISCV_PMU_CBIC, value);
++ break;
++ case RISCV_PMU_IBMC:
++ WRITE_COUNTER(RISCV_PMU_IBMC, value);
++ break;
++ case RISCV_PMU_IBIC:
++ WRITE_COUNTER(RISCV_PMU_IBIC, value);
++ break;
++ case RISCV_PMU_LSUSFC:
++ WRITE_COUNTER(RISCV_PMU_LSUSFC, value);
++ break;
++ case RISCV_PMU_STC:
++ WRITE_COUNTER(RISCV_PMU_STC, value);
++ break;
++ case RISCV_PMU_L1DCRAC:
++ WRITE_COUNTER(RISCV_PMU_L1DCRAC, value);
++ break;
++ case RISCV_PMU_L1DCRMC:
++ WRITE_COUNTER(RISCV_PMU_L1DCRMC, value);
++ break;
++ case RISCV_PMU_L1DCWAC:
++ WRITE_COUNTER(RISCV_PMU_L1DCWAC, value);
++ break;
++ case RISCV_PMU_L1DCWMC:
++ WRITE_COUNTER(RISCV_PMU_L1DCWMC, value);
++ break;
++ case RISCV_PMU_L2CRAC:
++ WRITE_COUNTER(RISCV_PMU_L2CRAC, value);
++ break;
++ case RISCV_PMU_L2CRMC:
++ WRITE_COUNTER(RISCV_PMU_L2CRMC, value);
++ break;
++ case RISCV_PMU_L2CWAC:
++ WRITE_COUNTER(RISCV_PMU_L2CWAC, value);
++ break;
++ case RISCV_PMU_L2CWMC:
++ WRITE_COUNTER(RISCV_PMU_L2CWMC, value);
++ break;
++ case RISCV_PMU_RFLFC:
++ WRITE_COUNTER(RISCV_PMU_RFLFC, value);
++ break;
++ case RISCV_PMU_RFRLFC:
++ WRITE_COUNTER(RISCV_PMU_RFRLFC, value);
++ break;
++ case RISCV_PMU_RFIC:
++ WRITE_COUNTER(RISCV_PMU_RFIC, value);
++ break;
++ case RISCV_PMU_LSUC4SC:
++ WRITE_COUNTER(RISCV_PMU_LSUC4SC, value);
++ break;
++ case RISCV_PMU_LSUOSC:
++ WRITE_COUNTER(RISCV_PMU_LSUOSC, value);
++ break;
++ case RISCV_PMU_LSUSQDC:
++ WRITE_COUNTER(RISCV_PMU_LSUSQDC, value);
++ break;
++ case RISCV_PMU_LSUSQDDC:
++ WRITE_COUNTER(RISCV_PMU_LSUSQDDC, value);
++ break;
++ default:
++ WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
++ }
++}
++
++int riscv_pmu_event_is_frequent(int idx)
++{
++ return idx >= RISCV_PMU_CYCLE &&
++ idx <= RISCV_PMU_L1DCWMC;
++}
++
++int riscv_pmu_event_set_period(struct perf_event *event)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ s64 left = local64_read(&hwc->period_left);
++ s64 period = hwc->sample_period;
++ int ret = 0;
++
++ if (period < 4096 && period != 0 &&
++ riscv_pmu_event_is_frequent(hwc->idx)) {
++ hwc->sample_period = period = 4096;
++ }
++
++ if (unlikely(left <= -period)) {
++ left = period;
++ local64_set(&hwc->period_left, left);
++ hwc->last_period = period;
++ ret = 1;
++ }
++
++ if (unlikely(left <= 0)) {
++ left += period;
++ local64_set(&hwc->period_left, left);
++ hwc->last_period = period;
++ ret = 1;
++ }
++
++ if (left < 0)
++ left = riscv_pmu.max_period;
++
++ /*
++ * The hw event starts counting from this event offset,
++ * mark it to be able to extract future "deltas":
++ */
++ local64_set(&hwc->prev_count, (u64)(-left));
++ csr_write(SCOUNTEROF, csr_read(SCOUNTEROF) & ~BIT(hwc->idx));
++ write_counter(hwc->idx, (u64)(-left));
++
++ perf_event_update_userpage(event);
++
++ return ret;
++}
++
++static void riscv_perf_event_update(struct perf_event *event,
++ struct hw_perf_event *hwc)
++{
++ uint64_t prev_raw_count = local64_read(&hwc->prev_count);
++ /*
++ * Sign extend count value to 64bit, otherwise delta calculation
++ * would be incorrect when overflow occurs.
++ */
++ uint64_t new_raw_count = read_counter(hwc->idx);
++ int64_t delta = new_raw_count - prev_raw_count;
++
++ /*
++ * We aren't afraid of hwc->prev_count changing beneath our feet
++ * because there's no way for us to re-enter this function anytime.
++ */
++ local64_set(&hwc->prev_count, new_raw_count);
++ local64_add(delta, &event->count);
++ local64_sub(delta, &hwc->period_left);
++}
++
++static void riscv_pmu_read(struct perf_event *event)
++{
++ riscv_perf_event_update(event, &event->hw);
++}
++
++static int riscv_pmu_cache_event(u64 config)
++{
++ unsigned int cache_type, cache_op, cache_result;
++
++ cache_type = (config >> 0) & 0xff;
++ cache_op = (config >> 8) & 0xff;
++ cache_result = (config >> 16) & 0xff;
++
++ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
++ return -EINVAL;
++ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
++ return -EINVAL;
++ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
++ return -EINVAL;
++
++ return riscv_cache_event_map[cache_type][cache_op][cache_result];
++}
++
++static int riscv_pmu_event_init(struct perf_event *event)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ int ret;
++
++ switch (event->attr.type) {
++ case PERF_TYPE_HARDWARE:
++ if (event->attr.config >= PERF_COUNT_HW_MAX)
++ return -ENOENT;
++ ret = riscv_hw_event_map[event->attr.config];
++ if (ret == RISCV_OP_UNSUPP)
++ return -ENOENT;
++ hwc->idx = ret;
++ break;
++ case PERF_TYPE_HW_CACHE:
++ ret = riscv_pmu_cache_event(event->attr.config);
++ if (ret == RISCV_OP_UNSUPP)
++ return -ENOENT;
++ hwc->idx = ret;
++ break;
++ case PERF_TYPE_RAW:
++ if (event->attr.config < 0 || event->attr.config >
++ RISCV_MAX_COUNTERS)
++ return -ENOENT;
++ hwc->idx = event->attr.config;
++ break;
++ default:
++ return -ENOENT;
++ }
++
++ return 0;
++}
++
++static void riscv_pmu_enable(struct pmu *pmu)
++{
++}
++
++/* stops all counters */
++static void riscv_pmu_disable(struct pmu *pmu)
++{
++}
++
++static void riscv_pmu_start(struct perf_event *event, int flags)
++{
++ unsigned long flg;
++ struct hw_perf_event *hwc = &event->hw;
++ int idx = hwc->idx;
++
++ if (WARN_ON_ONCE(idx == -1))
++ return;
++
++ if (flags & PERF_EF_RELOAD)
++ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
++
++ hwc->state = 0;
++
++ riscv_pmu_event_set_period(event);
++
++ local_irq_save(flg);
++
++ csr_write(SCOUNTERINTEN, BIT(idx) | csr_read(SCOUNTERINTEN));
++
++ local_irq_restore(flg);
++}
++
++static void riscv_pmu_stop_event(struct perf_event *event)
++{
++ unsigned long flg;
++ struct hw_perf_event *hwc = &event->hw;
++ int idx = hwc->idx;
++
++ local_irq_save(flg);
++
++ csr_write(SCOUNTERINTEN, ~BIT(idx) & csr_read(SCOUNTERINTEN));
++
++ local_irq_restore(flg);
++}
++
++static void riscv_pmu_stop(struct perf_event *event, int flags)
++{
++ if (!(event->hw.state & PERF_HES_STOPPED)) {
++ riscv_pmu_stop_event(event);
++ event->hw.state |= PERF_HES_STOPPED;
++ }
++
++ if ((flags & PERF_EF_UPDATE) &&
++ !(event->hw.state & PERF_HES_UPTODATE)) {
++ riscv_perf_event_update(event, &event->hw);
++ event->hw.state |= PERF_HES_UPTODATE;
++ }
++}
++
++static void riscv_pmu_del(struct perf_event *event, int flags)
++{
++ struct pmu_hw_events *hw_events = this_cpu_ptr(riscv_pmu.hw_events);
++ struct hw_perf_event *hwc = &event->hw;
++
++ riscv_pmu_stop(event, PERF_EF_UPDATE);
++
++ hw_events->events[hwc->idx] = NULL;
++
++ perf_event_update_userpage(event);
++}
++
++/* allocate hardware counter and optionally start counting */
++static int riscv_pmu_add(struct perf_event *event, int flags)
++{
++ struct pmu_hw_events *hw_events = this_cpu_ptr(riscv_pmu.hw_events);
++ struct hw_perf_event *hwc = &event->hw;
++
++ hw_events->events[hwc->idx] = event;
++
++ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
++
++ if (flags & PERF_EF_START)
++ riscv_pmu_start(event, PERF_EF_RELOAD);
++
++ perf_event_update_userpage(event);
++
++ return 0;
++}
++
++irqreturn_t riscv_pmu_handle_irq(void)
++{
++ struct perf_sample_data data;
++ struct pmu_hw_events *cpuc = this_cpu_ptr(riscv_pmu.hw_events);
++ struct pt_regs *regs;
++ int idx;
++
++ /*
++ * Did an overflow occur?
++ */
++ if (!csr_read(SCOUNTEROF))
++ return IRQ_NONE;
++
++ /*
++ * Handle the counter(s) overflow(s)
++ */
++ regs = get_irq_regs();
++
++ for (idx = 0; idx < RISCV_MAX_COUNTERS; ++idx) {
++ struct perf_event *event = cpuc->events[idx];
++ struct hw_perf_event *hwc;
++
++ /* Ignore if we don't have an event. */
++ if (!event)
++ continue;
++ /*
++ * We have a single interrupt for all counters. Check that
++ * each counter has overflowed before we process it.
++ */
++ if (!(csr_read(SCOUNTEROF) & BIT(idx)))
++ continue;
++
++ hwc = &event->hw;
++ riscv_perf_event_update(event, &event->hw);
++ perf_sample_data_init(&data, 0, hwc->last_period);
++ riscv_pmu_event_set_period(event);
++
++ if (perf_event_overflow(event, &data, regs))
++ riscv_pmu_stop_event(event);
++ }
++
++ /*
++ * Handle the pending perf events.
++ *
++ * Note: this call *must* be run with interrupts disabled. For
++ * platforms that can have the PMU interrupts raised as an NMI, this
++ * will not work.
++ */
++ irq_work_run();
++
++ return IRQ_HANDLED;
++}
++
++static void riscv_pmu_free_irq(void)
++{
++ int irq;
++ struct platform_device *pmu_device = riscv_pmu.plat_device;
++
++ irq = platform_get_irq(pmu_device, 0);
++ if (irq >= 0)
++ free_percpu_irq(irq, this_cpu_ptr(riscv_pmu.hw_events));
++}
++
++static int init_hw_perf_events(void)
++{
++ riscv_pmu.hw_events = alloc_percpu_gfp(struct pmu_hw_events,
++ GFP_KERNEL);
++ if (!riscv_pmu.hw_events) {
++ pr_info("failed to allocate per-cpu PMU data.\n");
++ return -ENOMEM;
++ }
++
++ riscv_pmu.pmu = (struct pmu) {
++ .pmu_enable = riscv_pmu_enable,
++ .pmu_disable = riscv_pmu_disable,
++ .event_init = riscv_pmu_event_init,
++ .add = riscv_pmu_add,
++ .del = riscv_pmu_del,
++ .start = riscv_pmu_start,
++ .stop = riscv_pmu_stop,
++ .read = riscv_pmu_read,
++ };
++
++ return 0;
++}
++
++static int riscv_pmu_starting_cpu(unsigned int cpu)
++{
++ sbi_set_pmu(1);
++ csr_set(sie, SIE_SMIE);
++ return 0;
++}
++
++static int riscv_pmu_dying_cpu(unsigned int cpu)
++{
++ csr_clear(sie, SIE_SMIE);
++ return 0;
++}
++
++int riscv_pmu_device_probe(struct platform_device *pdev,
++ const struct of_device_id *of_table)
++{
++ int ret;
++
++ ret = init_hw_perf_events();
++ if (ret) {
++ pr_notice("[perf] failed to probe PMU!\n");
++ return ret;
++ }
++ riscv_pmu.max_period = ULONG_MAX;
++ riscv_pmu.plat_device = pdev;
++
++ ret = cpuhp_setup_state(CPUHP_AP_PERF_RISCV_ONLINE, "perf riscv:online",
++ riscv_pmu_starting_cpu,
++ riscv_pmu_dying_cpu);
++ if (ret) {
++ riscv_pmu_free_irq();
++ free_percpu(riscv_pmu.hw_events);
++ return ret;
++ }
++
++ ret = perf_pmu_register(&riscv_pmu.pmu, "thead_xt_pmu", PERF_TYPE_RAW);
++ if (ret) {
++ riscv_pmu_free_irq();
++ free_percpu(riscv_pmu.hw_events);
++ }
++
++ return ret;
++}
++
++const static struct of_device_id riscv_pmu_of_device_ids[] = {
++ {.compatible = "riscv,thead_xt_pmu"},
++ {.compatible = "riscv,c910_pmu"},
++ {},
++};
++
++static int riscv_pmu_dev_probe(struct platform_device *pdev)
++{
++ return riscv_pmu_device_probe(pdev, riscv_pmu_of_device_ids);
++}
++
++static struct platform_driver riscv_pmu_driver = {
++ .driver = {
++ .name = "thead_xt_pmu",
++ .of_match_table = riscv_pmu_of_device_ids,
++ },
++ .probe = riscv_pmu_dev_probe,
++};
++
++int __init riscv_pmu_probe(void)
++{
++ int ret;
++
++ ret = platform_driver_register(&riscv_pmu_driver);
++ if (ret)
++ pr_notice("[perf] PMU initialization failed\n");
++ else
++ pr_notice("[perf] PMU initialization done\n");
++
++ return ret;
++}
++device_initcall(riscv_pmu_probe);
+diff -Nur linux-5.4.36/drivers/rtc/rtc-xgene.c kernel/drivers/rtc/rtc-xgene.c
+--- linux-5.4.36/drivers/rtc/rtc-xgene.c 2020-04-29 14:33:25.000000000 +0000
++++ kernel/drivers/rtc/rtc-xgene.c 2020-09-14 01:45:17.783702016 +0000
+@@ -26,11 +26,13 @@
+ #define RTC_CCR_MASK BIT(1)
+ #define RTC_CCR_EN BIT(2)
+ #define RTC_CCR_WEN BIT(3)
++#define RTC_CCR_PSCLR BIT(4)
+ #define RTC_STAT 0x10
+ #define RTC_STAT_BIT BIT(0)
+ #define RTC_RSTAT 0x14
+ #define RTC_EOI 0x18
+ #define RTC_VER 0x1C
++#define RTC_CPSR 0x20
+
+ struct xgene_rtc_dev {
+ struct rtc_device *rtc;
+@@ -140,6 +142,7 @@
+ struct resource *res;
+ int ret;
+ int irq;
++ u32 freq;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+@@ -175,8 +178,15 @@
+ if (ret)
+ return ret;
+
+- /* Turn on the clock and the crystal */
+- writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR);
++ freq = clk_get_rate(pdata->clk);
++ if (freq) {
++ writel(freq, pdata->csr_base + RTC_CPSR);
++ /* Turn on the clock and prescaler counter */
++ writel(RTC_CCR_EN | RTC_CCR_PSCLR, pdata->csr_base + RTC_CCR);
++ } else {
++ /* Turn on the clock and the crystal */
++ writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR);
++ }
+
+ ret = device_init_wakeup(&pdev->dev, 1);
+ if (ret) {
+diff -Nur linux-5.4.36/include/linux/cpuhotplug.h kernel/include/linux/cpuhotplug.h
+--- linux-5.4.36/include/linux/cpuhotplug.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/include/linux/cpuhotplug.h 2020-09-03 06:01:17.919989627 +0000
+@@ -174,6 +174,7 @@
+ CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
++ CPUHP_AP_PERF_RISCV_ONLINE,
+ CPUHP_AP_WATCHDOG_ONLINE,
+ CPUHP_AP_WORKQUEUE_ONLINE,
+ CPUHP_AP_RCUTREE_ONLINE,
+diff -Nur linux-5.4.36/include/uapi/linux/elf.h kernel/include/uapi/linux/elf.h
+--- linux-5.4.36/include/uapi/linux/elf.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/include/uapi/linux/elf.h 2020-09-03 06:01:18.158989617 +0000
+@@ -428,6 +428,7 @@
+ #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
+ #define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
+ #define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
++#define NT_RISCV_VECTOR 0x900 /* RISC-V vector registers */
+
+ /* Note header in a PT_NOTE section */
+ typedef struct elf32_note {
+diff -Nur linux-5.4.36/include/uapi/linux/kexec.h kernel/include/uapi/linux/kexec.h
+--- linux-5.4.36/include/uapi/linux/kexec.h 2020-04-29 14:33:25.000000000 +0000
++++ kernel/include/uapi/linux/kexec.h 2020-09-03 06:01:18.167989617 +0000
+@@ -42,6 +42,7 @@
+ #define KEXEC_ARCH_MIPS_LE (10 << 16)
+ #define KEXEC_ARCH_MIPS ( 8 << 16)
+ #define KEXEC_ARCH_AARCH64 (183 << 16)
++#define KEXEC_ARCH_RISCV (243 << 16)
+
+ /* The artificial cap on the number of segments passed to kexec_load. */
+ #define KEXEC_SEGMENT_MAX 16
+diff -Nur linux-5.4.36/kernel/Kconfig.hz kernel/kernel/Kconfig.hz
+--- linux-5.4.36/kernel/Kconfig.hz 2020-04-29 14:33:25.000000000 +0000
++++ kernel/kernel/Kconfig.hz 2020-09-03 06:01:18.218989615 +0000
+@@ -16,6 +16,8 @@
+ environment leading to NR_CPUS * HZ number of timer interrupts
+ per second.
+
++ config HZ_12
++ bool "12 HZ"
+
+ config HZ_100
+ bool "100 HZ"
+@@ -50,6 +52,7 @@
+
+ config HZ
+ int
++ default 12 if HZ_12
+ default 100 if HZ_100
+ default 250 if HZ_250
+ default 300 if HZ_300