
/*
 * Copyright (C) 2017-2019 Alibaba Group Holding Limited
 */
/******************************************************************************
 * @file     core_rv64.h
 * @brief    CSI RV64 Core Peripheral Access Layer Header File
 * @version  V1.0
 * @date     01. Sep 2018
 ******************************************************************************/

#ifndef __CORE_RV32_H_GENERIC
#define __CORE_RV32_H_GENERIC

#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

/*******************************************************************************
 *                              CSI definitions
 ******************************************************************************/
/**
  \ingroup RV32
  @{
 */

#ifndef __RV64
#define __RV64                 (0x01U)
#endif

/** __FPU_USED indicates whether an FPU is used or not.
    This core does not support an FPU at all.
*/
#define __FPU_USED             0U

#if defined ( __GNUC__ )
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#endif

#ifdef __cplusplus
}
#endif

#endif /* __CORE_RV32_H_GENERIC */

#ifndef __CSI_GENERIC

#ifndef __CORE_RV32_H_DEPENDANT
#define __CORE_RV32_H_DEPENDANT

#ifdef __cplusplus
extern "C" {
#endif

/* check device defines and use defaults */
#ifndef __RV64_REV
#define __RV64_REV             0x0000U
#endif

#ifndef __VIC_PRIO_BITS
#define __VIC_PRIO_BITS        2U
#endif

#ifndef __Vendor_SysTickConfig
#define __Vendor_SysTickConfig 1U
#endif

#ifndef __MPU_PRESENT
#define __MPU_PRESENT          1U
#endif

#ifndef __ICACHE_PRESENT
#define __ICACHE_PRESENT       1U
#endif

#ifndef __DCACHE_PRESENT
#define __DCACHE_PRESENT       1U
#endif

#ifndef __L2CACHE_PRESENT
#define __L2CACHE_PRESENT      1U
#endif

#include "csi_rv64_gcc.h"

/* IO definitions (access restrictions to peripheral registers) */
/**
  \defgroup CSI_glob_defs CSI Global Defines
  <strong>IO Type Qualifiers</strong> are used
  \li to specify the access to peripheral variables.
  \li for automatic generation of peripheral register debug information.
*/
#ifdef __cplusplus
#define __I     volatile              /*!< Defines 'read only' permissions */
#else
#define __I     volatile const        /*!< Defines 'read only' permissions */
#endif
#define __O     volatile              /*!< Defines 'write only' permissions */
#define __IO    volatile              /*!< Defines 'read / write' permissions */

/* The following defines should be used for structure members */
#define __IM    volatile const        /*!< Defines 'read only' structure member permissions */
#define __OM    volatile              /*!< Defines 'write only' structure member permissions */
#define __IOM   volatile              /*!< Defines 'read / write' structure member permissions */

/*@} end of group CSI_glob_defs */

/*******************************************************************************
 *                            Register Abstraction
  Core Register contains:
  - Core Register
  - Core CLINT Register
 ******************************************************************************/
/**
  \defgroup CSI_core_register Defines and Type Definitions
  \brief Type definitions and defines for RV64 processor based devices.
*/

/**
  \ingroup    CSI_core_register
  \defgroup   CSI_CORE  Status and Control Registers
  \brief      Core Register type definitions.
  @{
 */

/**
  \ingroup    CSI_core_register
  \defgroup   CSI_CLINT Core-Local Interrupt Controller (CLINT)
  \brief      Type definitions for the CLINT Registers
  @{
 */

/**
  \brief Access structure of the platform-level interrupt controller (PLIC).
 */
typedef struct {
    uint32_t RESERVED0;                 /*!< Offset: 0x000 (R/W) CLINT configure register */
    __IOM uint32_t PLIC_PRIO[1023];
    __IOM uint32_t PLIC_IP[32];
    uint32_t RESERVED1[3972 / 4 - 1];
    __IOM uint32_t PLIC_H0_MIE[32];
    __IOM uint32_t PLIC_H0_SIE[32];
    __IOM uint32_t PLIC_H1_MIE[32];
    __IOM uint32_t PLIC_H1_SIE[32];
    __IOM uint32_t PLIC_H2_MIE[32];
    __IOM uint32_t PLIC_H2_SIE[32];
    __IOM uint32_t PLIC_H3_MIE[32];
    __IOM uint32_t PLIC_H3_SIE[32];
    uint32_t RESERVED2[(0x01FFFFC - 0x00023FC) / 4 - 1];
    __IOM uint32_t PLIC_PER;
    __IOM uint32_t PLIC_H0_MTH;
    __IOM uint32_t PLIC_H0_MCLAIM;
    uint32_t RESERVED3[0xFFC / 4 - 1];
    __IOM uint32_t PLIC_H0_STH;
    __IOM uint32_t PLIC_H0_SCLAIM;
    uint32_t RESERVED4[0xFFC / 4 - 1];
    __IOM uint32_t PLIC_H1_MTH;
    __IOM uint32_t PLIC_H1_MCLAIM;
    uint32_t RESERVED5[0xFFC / 4 - 1];
    __IOM uint32_t PLIC_H1_STH;
    __IOM uint32_t PLIC_H1_SCLAIM;
    uint32_t RESERVED6[0xFFC / 4 - 1];
    __IOM uint32_t PLIC_H2_MTH;
    __IOM uint32_t PLIC_H2_MCLAIM;
    uint32_t RESERVED7[0xFFC / 4 - 1];
    __IOM uint32_t PLIC_H2_STH;
    __IOM uint32_t PLIC_H2_SCLAIM;
    uint32_t RESERVED8[0xFFC / 4 - 1];
    __IOM uint32_t PLIC_H3_MTH;
    __IOM uint32_t PLIC_H3_MCLAIM;
    uint32_t RESERVED9[0xFFC / 4 - 1];
    __IOM uint32_t PLIC_H3_STH;
    __IOM uint32_t PLIC_H3_SCLAIM;
    uint32_t RESERVED10[0xFFC / 4 - 1];
} PLIC_Type;
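
/*
 * Usage sketch (illustrative, not part of the vendor API): enabling external
 * interrupt source IRQn for hart 0 in machine mode. PLIC_BASE is a
 * board-specific address that this header does not define; it is assumed
 * here only for the example.
 *
 *     #define PLIC_BASE    0xF0000000UL                      // assumption: SoC-specific PLIC base
 *     PLIC_Type *plic = (PLIC_Type *)PLIC_BASE;
 *
 *     plic->PLIC_PRIO[IRQn - 1] = 1U;                        // PLIC_PRIO[0] holds the priority of source 1
 *     plic->PLIC_H0_MIE[IRQn >> 5] |= 1U << (IRQn & 0x1FU);  // set the machine-mode enable bit for source IRQn
 */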
/**
  \ingroup    CSI_core_register
  \defgroup   CSI_PMP Physical Memory Protection (PMP)
  \brief      Type definitions for the PMP Registers
  @{
 */

#define PMP_PMPCFG_R_Pos       0U                                   /*!< PMP PMPCFG: R Position */
#define PMP_PMPCFG_R_Msk       (0x1UL << PMP_PMPCFG_R_Pos)          /*!< PMP PMPCFG: R Mask */

#define PMP_PMPCFG_W_Pos       1U                                   /*!< PMP PMPCFG: W Position */
#define PMP_PMPCFG_W_Msk       (0x1UL << PMP_PMPCFG_W_Pos)          /*!< PMP PMPCFG: W Mask */

#define PMP_PMPCFG_X_Pos       2U                                   /*!< PMP PMPCFG: X Position */
#define PMP_PMPCFG_X_Msk       (0x1UL << PMP_PMPCFG_X_Pos)          /*!< PMP PMPCFG: X Mask */

#define PMP_PMPCFG_A_Pos       3U                                   /*!< PMP PMPCFG: A Position */
#define PMP_PMPCFG_A_Msk       (0x3UL << PMP_PMPCFG_A_Pos)          /*!< PMP PMPCFG: A Mask */

#define PMP_PMPCFG_L_Pos       7U                                   /*!< PMP PMPCFG: L Position */
#define PMP_PMPCFG_L_Msk       (0x1UL << PMP_PMPCFG_L_Pos)          /*!< PMP PMPCFG: L Mask */

typedef enum {
    REGION_SIZE_4B     = -1,
    REGION_SIZE_8B     = 0,
    REGION_SIZE_16B    = 1,
    REGION_SIZE_32B    = 2,
    REGION_SIZE_64B    = 3,
    REGION_SIZE_128B   = 4,
    REGION_SIZE_256B   = 5,
    REGION_SIZE_512B   = 6,
    REGION_SIZE_1KB    = 7,
    REGION_SIZE_2KB    = 8,
    REGION_SIZE_4KB    = 9,
    REGION_SIZE_8KB    = 10,
    REGION_SIZE_16KB   = 11,
    REGION_SIZE_32KB   = 12,
    REGION_SIZE_64KB   = 13,
    REGION_SIZE_128KB  = 14,
    REGION_SIZE_256KB  = 15,
    REGION_SIZE_512KB  = 16,
    REGION_SIZE_1MB    = 17,
    REGION_SIZE_2MB    = 18,
    REGION_SIZE_4MB    = 19,
    REGION_SIZE_8MB    = 20,
    REGION_SIZE_16MB   = 21,
    REGION_SIZE_32MB   = 22,
    REGION_SIZE_64MB   = 23,
    REGION_SIZE_128MB  = 24,
    REGION_SIZE_256MB  = 25,
    REGION_SIZE_512MB  = 26,
    REGION_SIZE_1GB    = 27,
    REGION_SIZE_2GB    = 28,
    REGION_SIZE_4GB    = 29,
    REGION_SIZE_8GB    = 30,
    REGION_SIZE_16GB   = 31
} region_size_e;

typedef enum {
    ADDRESS_MATCHING_TOR   = 1,
    ADDRESS_MATCHING_NAPOT = 3
} address_matching_e;

typedef struct {
    uint32_t r: 1;               /* read enable */
    uint32_t w: 1;               /* write enable */
    uint32_t x: 1;               /* execute enable */
    address_matching_e a: 2;     /* address matching mode */
    uint32_t reserved: 2;        /* reserved */
    uint32_t l: 1;               /* lock enable */
} mpu_region_attr_t;

/*@} end of group CSI_PMP */

/* CACHE Register Definitions */
#define CACHE_MHCR_L0BTB_Pos   12U                                  /*!< CACHE MHCR: L0BTB Position */
#define CACHE_MHCR_L0BTB_Msk   (0x1UL << CACHE_MHCR_L0BTB_Pos)      /*!< CACHE MHCR: L0BTB Mask */

#define CACHE_MHCR_WBR_Pos     8U                                   /*!< CACHE MHCR: WBR Position */
#define CACHE_MHCR_WBR_Msk     (0x1UL << CACHE_MHCR_WBR_Pos)        /*!< CACHE MHCR: WBR Mask */

#define CACHE_MHCR_IBPE_Pos    7U                                   /*!< CACHE MHCR: IBPE Position */
#define CACHE_MHCR_IBPE_Msk    (0x1UL << CACHE_MHCR_IBPE_Pos)       /*!< CACHE MHCR: IBPE Mask */

#define CACHE_MHCR_BTB_Pos     6U                                   /*!< CACHE MHCR: BTB Position */
#define CACHE_MHCR_BTB_Msk     (0x1UL << CACHE_MHCR_BTB_Pos)        /*!< CACHE MHCR: BTB Mask */

#define CACHE_MHCR_BPE_Pos     5U                                   /*!< CACHE MHCR: BPE Position */
#define CACHE_MHCR_BPE_Msk     (0x1UL << CACHE_MHCR_BPE_Pos)        /*!< CACHE MHCR: BPE Mask */

#define CACHE_MHCR_RS_Pos      4U                                   /*!< CACHE MHCR: RS Position */
#define CACHE_MHCR_RS_Msk      (0x1UL << CACHE_MHCR_RS_Pos)         /*!< CACHE MHCR: RS Mask */

#define CACHE_MHCR_WB_Pos      3U                                   /*!< CACHE MHCR: WB Position */
#define CACHE_MHCR_WB_Msk      (0x1UL << CACHE_MHCR_WB_Pos)         /*!< CACHE MHCR: WB Mask */

#define CACHE_MHCR_WA_Pos      2U                                   /*!< CACHE MHCR: WA Position */
#define CACHE_MHCR_WA_Msk      (0x1UL << CACHE_MHCR_WA_Pos)         /*!< CACHE MHCR: WA Mask */

#define CACHE_MHCR_DE_Pos      1U                                   /*!< CACHE MHCR: DE Position */
#define CACHE_MHCR_DE_Msk      (0x1UL << CACHE_MHCR_DE_Pos)         /*!< CACHE MHCR: DE Mask */

#define CACHE_MHCR_IE_Pos      0U                                   /*!< CACHE MHCR: IE Position */
#define CACHE_MHCR_IE_Msk      (0x1UL << CACHE_MHCR_IE_Pos)         /*!< CACHE MHCR: IE Mask */

#define CACHE_INV_ADDR_Pos     5U
#define CACHE_INV_ADDR_Msk     (0xFFFFFFFFUL << CACHE_INV_ADDR_Pos)

/*@} end of group CSI_CACHE */

// MSTATUS Register
#define MSTATUS_MPP_MASK       (3L << 11)    // mstatus.MPP [12:11]
#define MSTATUS_MPP_M          (3L << 11)    // Machine mode     11
#define MSTATUS_MPP_S          (1L << 11)    // Supervisor mode  01
#define MSTATUS_MPP_U          (0L << 11)    // User mode        00

// SSTATUS Register
#define SSTATUS_SPP_MASK       (3L << 8)     // sstatus.SPP [9:8]
#define SSTATUS_SPP_S          (1L << 8)     // Supervisor mode  01
#define SSTATUS_SPP_U          (0L << 8)     // User mode        00

typedef enum {
    USER_MODE       = 0,
    SUPERVISOR_MODE = 1,
    MACHINE_MODE    = 3,
} cpu_work_mode_t;

/**
  \ingroup    CSI_core_register
  \defgroup   CSI_SysTick System Tick Timer (CORET)
  \brief      Type definitions for the System Timer Registers.
  @{
 */

/**
  \brief The data structure for accessing the system timer.
 */
typedef struct {
    __IOM uint32_t MSIP0;
    __IOM uint32_t MSIP1;
    __IOM uint32_t MSIP2;
    __IOM uint32_t MSIP3;
    uint32_t RESERVED0[(0x4004000 - 0x400000C) / 4 - 1];
    __IOM uint32_t MTIMECMPL0;
    __IOM uint32_t MTIMECMPH0;
    __IOM uint32_t MTIMECMPL1;
    __IOM uint32_t MTIMECMPH1;
    __IOM uint32_t MTIMECMPL2;
    __IOM uint32_t MTIMECMPH2;
    __IOM uint32_t MTIMECMPL3;
    __IOM uint32_t MTIMECMPH3;
    uint32_t RESERVED1[(0x400C000 - 0x400401C) / 4 - 1];
    __IOM uint32_t SSIP0;
    __IOM uint32_t SSIP1;
    __IOM uint32_t SSIP2;
    __IOM uint32_t SSIP3;
    uint32_t RESERVED2[(0x400D000 - 0x400C00C) / 4 - 1];
    __IOM uint32_t STIMECMPL0;
    __IOM uint32_t STIMECMPH0;
    __IOM uint32_t STIMECMPL1;
    __IOM uint32_t STIMECMPH1;
    __IOM uint32_t STIMECMPL2;
    __IOM uint32_t STIMECMPH2;
    __IOM uint32_t STIMECMPL3;
    __IOM uint32_t STIMECMPH3;
} CORET_Type;

/*@} end of group CSI_SysTick */

/**
  \ingroup    CSI_core_register
  \defgroup   CSI_core_bitfield Core register bit field macros
  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
  @{
 */

/**
  \brief     Mask and shift a bit field value for use in a register bit range.
  \param[in] field  Name of the register bit field.
  \param[in] value  Value of the bit field.
  \return           Masked and shifted value.
*/
#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)

/**
  \brief     Mask and shift a register value to extract a bit field value.
  \param[in] field  Name of the register bit field.
  \param[in] value  Value of the register.
  \return           Masked and shifted bit field value.
*/
#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)

/*@} end of group CSI_core_bitfield */
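
/*
 * Usage sketch (illustrative): building a PMP configuration byte with
 * _VAL2FLD and reading the address-matching field back with _FLD2VAL.
 *
 *     uint8_t  cfg  = (uint8_t)(_VAL2FLD(PMP_PMPCFG_A, ADDRESS_MATCHING_NAPOT) |
 *                               PMP_PMPCFG_R_Msk | PMP_PMPCFG_X_Msk);
 *     uint32_t mode = _FLD2VAL(PMP_PMPCFG_A, cfg);           // == ADDRESS_MATCHING_NAPOT
 */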
/*******************************************************************************
 *                        Hardware Abstraction Layer
  Core Function Interface contains:
  - Core VIC Functions
  - Core CORET Functions
  - Core Register Access Functions
 ******************************************************************************/
/**
  \defgroup CSI_Core_FunctionInterface Functions and Instructions Reference
*/

/* ##########################   VIC functions  #################################### */
/**
  \ingroup  CSI_Core_FunctionInterface
  \defgroup CSI_Core_VICFunctions VIC Functions
  \brief    Functions that manage interrupts and exceptions via the VIC.
  @{
 */

/* The following MACROS handle generation of the register offset and byte masks */
#define _BIT_SHIFT(IRQn)    ( ((((uint32_t)(int32_t)(IRQn))       ) & 0x03UL) * 8UL)
#define _IP_IDX(IRQn)       ( (((uint32_t)(int32_t)(IRQn)) >> 5UL) )
#define _IP2_IDX(IRQn)      ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) )

/**
  \brief   Enable External Interrupt
  \details Enables a device-specific interrupt in the VIC interrupt controller.
  \param [in]  IRQn  External interrupt number. Value cannot be negative.
 */
__STATIC_INLINE void csi_vic_enable_irq(int32_t IRQn)
{
}

/**
  \brief   Disable External Interrupt
  \details Disables a device-specific interrupt in the VIC interrupt controller.
  \param [in]  IRQn  External interrupt number. Value cannot be negative.
 */
__STATIC_INLINE void csi_vic_disable_irq(int32_t IRQn)
{
}

/**
  \brief   Enable External Secure Interrupt
  \details Enables a secure device-specific interrupt in the VIC interrupt controller.
  \param [in]  IRQn  External interrupt number. Value cannot be negative.
 */
__STATIC_INLINE void csi_vic_enable_sirq(int32_t IRQn)
{
}

/**
  \brief   Disable External Secure Interrupt
  \details Disables a secure device-specific interrupt in the VIC interrupt controller.
  \param [in]  IRQn  External interrupt number. Value cannot be negative.
 */
__STATIC_INLINE void csi_vic_disable_sirq(int32_t IRQn)
{
}

/**
  \brief   Check whether an interrupt is enabled
  \details Reads the enable register in the VIC and returns the enable bit for the specified interrupt.
  \param [in]  IRQn  Interrupt number.
  \return  0  Interrupt is not enabled.
  \return  1  Interrupt is enabled.
 */
__STATIC_INLINE uint32_t csi_vic_get_enabled_irq(int32_t IRQn)
{
    return 0;
}

/**
  \brief   Check whether an interrupt is pending
  \details Reads the pending register in the VIC and returns the pending bit for the specified interrupt.
  \param [in]  IRQn  Interrupt number.
  \return  0  Interrupt is not pending.
  \return  1  Interrupt is pending.
 */
__STATIC_INLINE uint32_t csi_vic_get_pending_irq(int32_t IRQn)
{
    return 0;
}

/**
  \brief   Set Pending Interrupt
  \details Sets the pending bit of an external interrupt.
  \param [in]  IRQn  Interrupt number. Value cannot be negative.
 */
__STATIC_INLINE void csi_vic_set_pending_irq(int32_t IRQn)
{
}

/**
  \brief   Clear Pending Interrupt
  \details Clears the pending bit of an external interrupt.
  \param [in]  IRQn  External interrupt number. Value cannot be negative.
 */
__STATIC_INLINE void csi_vic_clear_pending_irq(int32_t IRQn)
{
}

/**
  \brief   Set Wake-up Interrupt
  \details Sets the wake-up bit of an external interrupt.
  \param [in]  IRQn  Interrupt number. Value cannot be negative.
 */
__STATIC_INLINE void csi_vic_set_wakeup_irq(int32_t IRQn)
{
}

/**
  \brief   Clear Wake-up Interrupt
  \details Clears the wake-up bit of an external interrupt.
  \param [in]  IRQn  External interrupt number. Value cannot be negative.
 */
__STATIC_INLINE void csi_vic_clear_wakeup_irq(int32_t IRQn)
{
}

/**
  \brief   Set Interrupt Priority
  \details Sets the priority of an interrupt.
  \note    The priority cannot be set for every core interrupt.
  \param [in]  IRQn      Interrupt number.
  \param [in]  priority  Priority to set.
 */
__STATIC_INLINE void csi_vic_set_prio(int32_t IRQn, uint32_t priority)
{
}

/**
  \brief   Get Interrupt Priority
  \details Reads the priority of an interrupt.
           The interrupt number can be positive to specify an external (device specific) interrupt,
           or negative to specify an internal (core) interrupt.
  \param [in]  IRQn  Interrupt number.
  \return  Interrupt Priority.
           Value is aligned automatically to the implemented priority bits of the microcontroller.
 */
__STATIC_INLINE uint32_t csi_vic_get_prio(int32_t IRQn)
{
    return 0;
}

/**
  \brief   Set interrupt handler
  \details Sets the interrupt handler for the given interrupt number; the handler address is written into the interrupt vector table.
  \param [in]  IRQn     Interrupt number.
  \param [in]  handler  Interrupt handler address.
 */
__STATIC_INLINE void csi_vic_set_vector(int32_t IRQn, uint64_t handler)
{
    if (IRQn >= 0 && IRQn < 1024) {
        uint64_t *vectors = (uint64_t *)__get_MTVT();
        vectors[IRQn] = handler;
    }
}
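
/*
 * Usage sketch (illustrative): installing and enabling an interrupt handler.
 * DEMO_IRQn and demo_irq_handler are assumptions made for the example only.
 *
 *     extern void demo_irq_handler(void);                    // hypothetical handler
 *     csi_vic_set_vector(DEMO_IRQn, (uint64_t)demo_irq_handler);
 *     csi_vic_enable_irq(DEMO_IRQn);
 */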
/**
  \brief   Get interrupt handler
  \details Gets the address of the interrupt handler for the given interrupt number.
  \param [in]  IRQn  Interrupt number.
  \return  Address of the interrupt handler, or 0 if IRQn is out of range.
 */
__STATIC_INLINE uint64_t csi_vic_get_vector(int32_t IRQn)
{
    if (IRQn >= 0 && IRQn < 1024) {
        uint64_t *vectors = (uint64_t *)__get_MTVT();
        return vectors[IRQn];
    }

    return 0;
}

/*@} end of CSI_Core_VICFunctions */

/* ##########################   PMP functions  #################################### */
/**
  \ingroup  CSI_Core_FunctionInterface
  \defgroup CSI_Core_PMPFunctions PMP Functions
  \brief    Functions that configure physical memory protection (PMP) regions.
  @{
 */

/**
  \brief   Configure a memory protected region.
  \details
  \param [in]  idx        Memory protected region index (0, 1, 2, ..., 15).
  \param [in]  base_addr  Base address; must be naturally aligned to the region size.
  \param [in]  size       \ref region_size_e. Memory protected region size.
  \param [in]  attr       \ref mpu_region_attr_t. Memory protected region attribute.
  \param [in]  enable     Enable or disable the memory protected region.
 */
__STATIC_INLINE void csi_mpu_config_region(uint32_t idx, uint64_t base_addr, region_size_e size,
                                           mpu_region_attr_t attr, uint32_t enable)
{
    uint8_t  pmpxcfg = 0;
    uint64_t addr = 0;

    if (idx > 15) {
        return;
    }

    if (!enable) {
        attr.a = 0;
    }

    if (attr.a == ADDRESS_MATCHING_TOR) {
        addr = base_addr >> 2;
    } else {
        if (size == REGION_SIZE_4B) {
            addr = base_addr >> 2;
            attr.a = 2;
        } else {
            /* NAPOT encoding: clear the size-alignment bits, then set 'size' trailing ones */
            addr = ((base_addr >> 2) & (0xFFFFFFFFFFFFFFFFULL - ((1ULL << (size + 1)) - 1))) | ((1ULL << size) - 1);
        }
    }

    __set_PMPADDRx(idx, addr);
    pmpxcfg |= (attr.r << PMP_PMPCFG_R_Pos) | (attr.w << PMP_PMPCFG_W_Pos) |
               (attr.x << PMP_PMPCFG_X_Pos) | (attr.a << PMP_PMPCFG_A_Pos) |
               (attr.l << PMP_PMPCFG_L_Pos);
    __set_PMPxCFG(idx, pmpxcfg);
}
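
/*
 * Usage sketch (illustrative): locking a 4 KB region as read/execute only in
 * NAPOT mode. The base address is an assumption for the example and must be
 * naturally aligned to the region size.
 *
 *     mpu_region_attr_t attr = {
 *         .r = 1, .w = 0, .x = 1,
 *         .a = ADDRESS_MATCHING_NAPOT,
 *         .l = 1,
 *     };
 *     csi_mpu_config_region(0U, 0x20000000ULL, REGION_SIZE_4KB, attr, 1U);
 */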
/**
  \brief   Disable a PMP region by index.
  \details
  \param [in]  idx  Memory protected region index (0, 1, 2, ..., 15).
 */
__STATIC_INLINE void csi_mpu_disable_region(uint32_t idx)
{
    __set_PMPxCFG(idx, __get_PMPxCFG(idx) & (~PMP_PMPCFG_A_Msk));
}

/*@} end of CSI_Core_PMPFunctions */

/* ##################################    SysTick function  ############################################ */
/**
  \ingroup  CSI_Core_FunctionInterface
  \defgroup CSI_Core_SysTickFunctions SysTick Functions
  \brief    Functions that configure the System Tick Timer.
  @{
 */

/**
  \brief   CORE timer Configuration
  \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
           The counter is in free-running mode to generate periodic interrupts.
  \param [in]  ticks  Number of ticks between two interrupts.
  \param [in]  IRQn   Core timer interrupt number.
  \return  0  Function succeeded.
  \return  1  Function failed.
  \note    When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
           function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
           must contain a vendor-specific implementation of this function.
 */
__STATIC_INLINE uint32_t csi_coret_config(uint32_t ticks, int32_t IRQn)
{
    return (0UL);
}

/**
  \brief   Get CORE timer reload value.
  \return  CORE timer reload value.
 */
__STATIC_INLINE uint64_t csi_coret_get_load(void)
{
    return 0;
}

/**
  \brief   Get the high word of the CORE timer reload value.
  \return  High word of the CORE timer reload value.
 */
__STATIC_INLINE uint32_t csi_coret_get_loadh(void)
{
    return 0;
}

/**
  \brief   Get CORE timer counter value.
  \return  CORE timer counter value.
 */
__STATIC_INLINE uint64_t csi_coret_get_value(void)
{
    uint64_t result;
    __ASM volatile("csrr %0, 0xc01" : "=r"(result));    /* 0xc01 is the time CSR */
    return result;
}

/**
  \brief   Get the high word of the CORE timer counter value.
  \return  High word of the CORE timer counter value.
 */
__STATIC_INLINE uint32_t csi_coret_get_valueh(void)
{
    uint64_t result;
    __ASM volatile("csrr %0, time" : "=r"(result));
    return (result >> 32) & 0xFFFFFFFF;
}
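
/*
 * Usage sketch (illustrative): busy-waiting for a number of timer ticks using
 * the free-running counter read by csi_coret_get_value(). The tick count is
 * an arbitrary value chosen for the example.
 *
 *     uint64_t start = csi_coret_get_value();
 *     while ((csi_coret_get_value() - start) < 1000ULL) {
 *         ;                                                  // wait for 1000 ticks to elapse
 *     }
 */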
/*@} end of CSI_Core_SysTickFunctions */

/* ##########################  Cache functions  #################################### */
/**
  \ingroup  CSI_Core_FunctionInterface
  \defgroup CSI_Core_CacheFunctions Cache Functions
  \brief    Functions that configure the Instruction and Data cache.
  @{
 */

/**
  \brief   Enable I-Cache
  \details Turns on the I-Cache.
 */
__STATIC_INLINE void csi_icache_enable(void)
{
#if (__ICACHE_PRESENT == 1U)
    uint32_t cache;
    __DSB();
    __ISB();
    __ICACHE_IALL();                /* invalidate all icache */
    cache = __get_MHCR();
    cache |= CACHE_MHCR_IE_Msk;     /* enable icache */
    __set_MHCR(cache);
    __DSB();
    __ISB();
#endif
}

/**
  \brief   Disable I-Cache
  \details Turns off the I-Cache.
 */
__STATIC_INLINE void csi_icache_disable(void)
{
#if (__ICACHE_PRESENT == 1U)
    uint32_t cache;
    __DSB();
    __ISB();
    cache = __get_MHCR();
    cache &= ~CACHE_MHCR_IE_Msk;    /* disable icache */
    __set_MHCR(cache);
    __ICACHE_IALL();                /* invalidate all icache */
    __DSB();
    __ISB();
#endif
}

/**
  \brief   Invalidate I-Cache
  \details Invalidates the I-Cache.
 */
__STATIC_INLINE void csi_icache_invalid(void)
{
#if (__ICACHE_PRESENT == 1U)
    __DSB();
    __ISB();
    __ICACHE_IALL();                /* invalidate all icache */
    __DSB();
    __ISB();
#endif
}

/**
  \brief   Enable D-Cache
  \details Turns on the D-Cache.
  \note    Besides the D-Cache enable bit, the write-back, write-allocate and
           branch-prediction related MHCR bits are also set.
 */
__STATIC_INLINE void csi_dcache_enable(void)
{
#if (__DCACHE_PRESENT == 1U)
    uint32_t cache;
    __DSB();
    __ISB();
    __DCACHE_IALL();                /* invalidate all dcache */
    cache = __get_MHCR();
    cache |= (CACHE_MHCR_DE_Msk | CACHE_MHCR_WB_Msk | CACHE_MHCR_WA_Msk | CACHE_MHCR_RS_Msk |
              CACHE_MHCR_BPE_Msk | CACHE_MHCR_BTB_Msk | CACHE_MHCR_IBPE_Msk |
              CACHE_MHCR_WBR_Msk | CACHE_MHCR_L0BTB_Msk);    /* enable dcache and acceleration features */
    __set_MHCR(cache);
    __DSB();
    __ISB();
#endif
}
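
/*
 * Usage sketch (illustrative): a typical cache bring-up sequence during early
 * boot, enabling the I-Cache before the D-Cache. This ordering is a common
 * convention, not a requirement imposed by this header.
 *
 *     csi_icache_enable();
 *     csi_dcache_enable();
 */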
/**
  \brief   Disable D-Cache
  \details Turns off the D-Cache.
  \note    Only the D-Cache enable bit of MHCR is cleared; other cache-related bits are left unchanged.
 */
__STATIC_INLINE void csi_dcache_disable(void)
{
#if (__DCACHE_PRESENT == 1U)
    uint32_t cache;
    __DSB();
    __ISB();
    cache = __get_MHCR();
    cache &= ~(uint32_t)CACHE_MHCR_DE_Msk;    /* disable dcache */
    __set_MHCR(cache);
    __DCACHE_IALL();                          /* invalidate all dcache */
    __DSB();
    __ISB();
#endif
}

/**
  \brief   Invalidate D-Cache
  \details Invalidates the D-Cache.
  \note    The I-Cache is also invalidated.
 */
__STATIC_INLINE void csi_dcache_invalid(void)
{
#if (__DCACHE_PRESENT == 1U)
    __DSB();
    __ISB();
    __DCACHE_IALL();                          /* invalidate all dcache */
    __DSB();
    __ISB();
#endif
}

/**
  \brief   Clean D-Cache
  \details Cleans the D-Cache.
  \note    The I-Cache is also cleaned.
 */
__STATIC_INLINE void csi_dcache_clean(void)
{
#if (__DCACHE_PRESENT == 1U)
    __DSB();
    __ISB();
    __DCACHE_CALL();                          /* clean all dcache */
    __DSB();
    __ISB();
#endif
}

/**
  \brief   Clean & Invalidate D-Cache
  \details Cleans and invalidates the D-Cache.
  \note    The I-Cache is also flushed.
 */
__STATIC_INLINE void csi_dcache_clean_invalid(void)
{
#if (__DCACHE_PRESENT == 1U)
    __DSB();
    __ISB();
    __DCACHE_CIALL();                         /* clean and invalidate all dcache */
    __DSB();
    __ISB();
#endif
}

/**
  \brief   Invalidate L2-Cache
  \details Invalidates the L2-Cache.
 */
__STATIC_INLINE void csi_l2cache_invalid(void)
{
#if (__L2CACHE_PRESENT == 1U)
    __DSB();
    __ISB();
    __L2CACHE_IALL();                         /* invalidate the l2 cache */
    __DSB();
    __ISB();
#endif
}

/**
  \brief   Clean L2-Cache
  \details Cleans the L2-Cache.
 */
__STATIC_INLINE void csi_l2cache_clean(void)
{
#if (__L2CACHE_PRESENT == 1U)
    __DSB();
    __ISB();
    __L2CACHE_CALL();                         /* clean the l2 cache */
    __DSB();
    __ISB();
#endif
}

/**
  \brief   Clean & Invalidate L2-Cache
  \details Cleans and invalidates the L2-Cache.
 */
__STATIC_INLINE void csi_l2cache_clean_invalid(void)
{
#if (__L2CACHE_PRESENT == 1U)
    __DSB();
    __ISB();
    __L2CACHE_CIALL();                        /* clean and invalidate the l2 cache */
    __DSB();
    __ISB();
#endif
}

/**
  \brief   D-Cache Invalidate by address
  \details Invalidates the D-Cache for the given address range.
  \param[in]  addr   Start address (should be aligned to a 64-byte boundary).
  \param[in]  dsize  Size of the memory block in bytes.
 */
__STATIC_INLINE void csi_dcache_invalid_range(uint64_t *addr, int64_t dsize)
{
#if (__DCACHE_PRESENT == 1U)
    int64_t  op_size = dsize + (uint64_t)addr % 64;    /* extend the size to cover a partial first line */
    uint64_t op_addr = (uint64_t)addr;
    int64_t  linesize = 64;                            /* cache line size in bytes */
    cpu_work_mode_t cpu_work_mode;
    cpu_work_mode = (cpu_work_mode_t)__get_CPU_WORK_MODE();
    __DSB();

    if (cpu_work_mode == MACHINE_MODE) {
        while (op_size > 0) {
            __DCACHE_IPA(op_addr);                     /* invalidate by physical address */
            op_addr += linesize;
            op_size -= linesize;
        }
    } else if (cpu_work_mode == SUPERVISOR_MODE) {
        while (op_size > 0) {
            __DCACHE_IVA(op_addr);                     /* invalidate by virtual address */
            op_addr += linesize;
            op_size -= linesize;
        }
    }

    __SYNC_IS();
    __DSB();
    __ISB();
#endif
}

/**
  \brief   D-Cache Clean by address
  \details Cleans the D-Cache for the given address range.
  \param[in]  addr   Start address (should be aligned to a 64-byte boundary).
  \param[in]  dsize  Size of the memory block in bytes.
 */
__STATIC_INLINE void csi_dcache_clean_range(uint64_t *addr, int64_t dsize)
{
#if (__DCACHE_PRESENT == 1)
    int64_t  op_size = dsize + (uint64_t)addr % 64;
    uint64_t op_addr = (uint64_t)addr & CACHE_INV_ADDR_Msk;
    int64_t  linesize = 64;
    cpu_work_mode_t cpu_work_mode;
    cpu_work_mode = (cpu_work_mode_t)__get_CPU_WORK_MODE();
    __DSB();

    if (cpu_work_mode == MACHINE_MODE) {
        while (op_size > 0) {
            __DCACHE_CPA(op_addr);                     /* clean by physical address */
            op_addr += linesize;
            op_size -= linesize;
        }
    } else if (cpu_work_mode == SUPERVISOR_MODE) {
        while (op_size > 0) {
            __DCACHE_CVA(op_addr);                     /* clean by virtual address */
            op_addr += linesize;
            op_size -= linesize;
        }
    }

    __SYNC_IS();
    __DSB();
    __ISB();
#endif
}

/**
  \brief   D-Cache Clean and Invalidate by address
  \details Cleans and invalidates the D-Cache for the given address range.
  \param[in]  addr   Start address (should be aligned to a 64-byte boundary).
  \param[in]  dsize  Size of the memory block in bytes (should be a multiple of 64).
 */
__STATIC_INLINE void csi_dcache_clean_invalid_range(uint64_t *addr, int64_t dsize)
{
#if (__DCACHE_PRESENT == 1U)
    int64_t  op_size = dsize + (uint64_t)addr % 64;
    uint64_t op_addr = (uint64_t)addr;
    int64_t  linesize = 64;
    cpu_work_mode_t cpu_work_mode;
    cpu_work_mode = (cpu_work_mode_t)__get_CPU_WORK_MODE();
    __DSB();

    if (cpu_work_mode == MACHINE_MODE) {
        while (op_size > 0) {
            __DCACHE_CIPA(op_addr);                    /* clean and invalidate by physical address */
            op_addr += linesize;
            op_size -= linesize;
        }
    } else if (cpu_work_mode == SUPERVISOR_MODE) {
        while (op_size > 0) {
            __DCACHE_CIVA(op_addr);                    /* clean and invalidate by virtual address */
            op_addr += linesize;
            op_size -= linesize;
        }
    }

    __SYNC_IS();
    __DSB();
    __ISB();
#endif
}
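
/*
 * Usage sketch (illustrative): cache maintenance around a DMA transfer.
 * dma_buf and its size are assumptions for the example; the buffer is aligned
 * to the 64-byte cache line as the range functions expect.
 *
 *     static uint64_t dma_buf[128] __attribute__((aligned(64)));
 *
 *     csi_dcache_clean_range(dma_buf, sizeof(dma_buf));      // flush CPU writes before the device reads the buffer
 *     // ... run the DMA transfer ...
 *     csi_dcache_invalid_range(dma_buf, sizeof(dma_buf));    // discard stale lines before the CPU reads device data
 */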
/**
  \brief   Set up a cacheable range.
  \details Sets up the cacheable address range of the cache.
 */
__STATIC_INLINE void csi_cache_set_range(uint32_t index, uint32_t baseAddr, uint32_t size, uint32_t enable)
{
    ;
}

/**
  \brief   Enable cache profiling.
  \details Turns on the cache profiling counters.
 */
__STATIC_INLINE void csi_cache_enable_profile(void)
{
    ;
}

/**
  \brief   Disable cache profiling.
  \details Turns off the cache profiling counters.
 */
__STATIC_INLINE void csi_cache_disable_profile(void)
{
    ;
}

/**
  \brief   Reset cache profiling.
  \details Resets the cache profiling counters.
 */
__STATIC_INLINE void csi_cache_reset_profile(void)
{
    ;
}

/**
  \brief   Cache access times
  \details Returns the cache access counter.
  \note    The counter increments once for every 256 accesses.
  \return  Cache access count; the actual number of accesses is this value multiplied by 256.
 */
__STATIC_INLINE uint32_t csi_cache_get_access_time(void)
{
    return 0;
}

/**
  \brief   Cache miss times
  \details Returns the cache miss counter.
  \note    The counter increments once for every 256 misses.
  \return  Cache miss count; the actual number of misses is this value multiplied by 256.
 */
__STATIC_INLINE uint32_t csi_cache_get_miss_time(void)
{
    return 0;
}

/*@} end of CSI_Core_CacheFunctions */

/*@} end of CSI_core_DebugFunctions */

/* ##################################  IRQ Functions  ############################################ */

/**
  \brief   Save the IRQ context
  \details Reads the status register, then disables interrupts and returns the saved state.
 */
__STATIC_INLINE uint32_t csi_irq_save(void)
{
    uint32_t result;
#ifdef CONFIG_MMU
    result = __get_SSTATUS();
    __disable_supervisor_irq();
#else
    result = __get_MSTATUS();
    __disable_irq();
#endif
    return (result);
}

/**
  \brief   Restore the IRQ context
  \details Restores the interrupt state saved by csi_irq_save().
  \param [in]  irq_state  Saved status register value.
 */
__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
{
#ifdef CONFIG_MMU
    __set_SSTATUS(irq_state);
#else
    __set_MSTATUS(irq_state);
#endif
}
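
/*
 * Usage sketch (illustrative): a short critical section protected by saving
 * and restoring the interrupt-enable state.
 *
 *     uint32_t irq_state = csi_irq_save();                   // disable interrupts, keep the old state
 *     // ... access data shared with interrupt handlers ...
 *     csi_irq_restore(irq_state);                            // restore the previous interrupt state
 */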
/*@} end of IRQ Functions */

#ifdef __cplusplus
}
#endif

#endif /* __CORE_RV32_H_DEPENDANT */

#endif /* __CSI_GENERIC */