thead_admin 1 year ago
Parent
Commit
02deb8b059
66 changed files with 32487 additions and 145 deletions
  1. board/thead/light-c910/board.c (+1 -1)
  2. board/thead/light-c910/boot.c (+169 -7)
  3. board/thead/light-c910/spl.c (+1 -1)
  4. lib/sec_library/include/aes.h (+131 -40)
  5. lib/sec_library/include/common.h (+144 -0)
  6. lib/sec_library/include/core/README.txt (+1 -0)
  7. lib/sec_library/include/core/cmsis/ARMCM0.h (+126 -0)
  8. lib/sec_library/include/core/cmsis/cmsis_compiler.h (+271 -0)
  9. lib/sec_library/include/core/cmsis/cmsis_gcc.h (+2101 -0)
  10. lib/sec_library/include/core/cmsis/cmsis_version.h (+39 -0)
  11. lib/sec_library/include/core/cmsis/core_cm0.h (+949 -0)
  12. lib/sec_library/include/core/cmsis/csi_core.h (+56 -0)
  13. lib/sec_library/include/core/cmsis/system_ARMCM0.h (+55 -0)
  14. lib/sec_library/include/core/core_801.h (+1103 -0)
  15. lib/sec_library/include/core/core_802.h (+1562 -0)
  16. lib/sec_library/include/core/core_803.h (+1592 -0)
  17. lib/sec_library/include/core/core_804.h (+1596 -0)
  18. lib/sec_library/include/core/core_805.h (+1591 -0)
  19. lib/sec_library/include/core/core_807.h (+1963 -0)
  20. lib/sec_library/include/core/core_810.h (+873 -0)
  21. lib/sec_library/include/core/core_ck610.h (+973 -0)
  22. lib/sec_library/include/core/core_ck801.h (+18 -0)
  23. lib/sec_library/include/core/core_ck802.h (+18 -0)
  24. lib/sec_library/include/core/core_ck803.h (+18 -0)
  25. lib/sec_library/include/core/core_ck807.h (+847 -0)
  26. lib/sec_library/include/core/core_ck810.h (+854 -0)
  27. lib/sec_library/include/core/core_rv32.h (+1109 -0)
  28. lib/sec_library/include/core/core_rv32_old.h (+1187 -0)
  29. lib/sec_library/include/core/core_rv64.h (+1119 -0)
  30. lib/sec_library/include/core/csi_gcc.h (+3279 -0)
  31. lib/sec_library/include/core/csi_rv32_gcc.h (+2830 -0)
  32. lib/sec_library/include/core/csi_rv64_gcc.h (+3271 -0)
  33. lib/sec_library/include/csi_core.h (+62 -0)
  34. lib/sec_library/include/csi_efuse_api.h (+3 -4)
  35. lib/sec_library/include/csi_sec_img_verify.h (+2 -2)
  36. lib/sec_library/include/dev_tag.h (+87 -0)
  37. lib/sec_library/include/device_types.h (+73 -0)
  38. lib/sec_library/include/ecc.h (+285 -0)
  39. lib/sec_library/include/ecdh.h (+41 -0)
  40. lib/sec_library/include/kdf.h (+20 -3)
  41. lib/sec_library/include/keyram.h (+5 -5)
  42. lib/sec_library/include/list.h (+367 -0)
  43. lib/sec_library/include/rambus.h (+149 -0)
  44. lib/sec_library/include/rng.h (+2 -2)
  45. lib/sec_library/include/rsa.h (+55 -3)
  46. lib/sec_library/include/sec_crypto_aes.h (+68 -25)
  47. lib/sec_library/include/sec_crypto_common.h (+1 -1)
  48. lib/sec_library/include/sec_crypto_ecc.h (+265 -0)
  49. lib/sec_library/include/sec_crypto_ecdh.h (+53 -0)
  50. lib/sec_library/include/sec_crypto_errcode.h (+2 -2)
  51. lib/sec_library/include/sec_crypto_kdf.h (+96 -0)
  52. lib/sec_library/include/sec_crypto_mac.h (+117 -0)
  53. lib/sec_library/include/sec_crypto_rng.h (+4 -4)
  54. lib/sec_library/include/sec_crypto_rsa.h (+37 -3)
  55. lib/sec_library/include/sec_crypto_sha.h (+39 -5)
  56. lib/sec_library/include/sec_crypto_sm2.h (+7 -1)
  57. lib/sec_library/include/sec_crypto_sm4.h (+8 -4)
  58. lib/sec_library/include/sec_include_config.h (+11 -0)
  59. lib/sec_library/include/sec_library.h (+15 -1)
  60. lib/sec_library/include/sha.h (+19 -6)
  61. lib/sec_library/include/sm2.h (+3 -3)
  62. lib/sec_library/include/sm3.h (+20 -16)
  63. lib/sec_library/include/sm4.h (+29 -6)
  64. lib/sec_library/include/soc.h (+484 -0)
  65. lib/sec_library/include/sys_clk.h (+211 -0)
  66. lib/sec_library/libsec_library.a (BIN)

+ 1 - 1
board/thead/light-c910/board.c

@@ -12,7 +12,7 @@
 
 #ifdef CONFIG_USB_DWC3
 static struct dwc3_device dwc3_device_data = {
-	.maximum_speed = USB_SPEED_HIGH,
+	.maximum_speed = USB_SPEED_SUPER,
 	.dr_mode = USB_DR_MODE_PERIPHERAL,
 	.index = 0,
 };
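
For context on this hunk, a minimal sketch of how dwc3_device_data is typically consumed, assuming U-Boot's dwc3_uboot_init()/dwc3_uboot_exit() helpers and an illustrative controller base address; the real board_usb_init() wiring is outside this diff.

#include <usb.h>
#include <dwc3-uboot.h>

/* Sketch only: register the controller described by dwc3_device_data.
 * The base address below is a placeholder, not taken from this commit. */
int board_usb_init(int index, enum usb_init_type init)
{
	dwc3_device_data.base = 0xffe7040000UL;	/* assumed USB3 DRD base */
	return dwc3_uboot_init(&dwc3_device_data);
}

int board_usb_cleanup(int index, enum usb_init_type init)
{
	dwc3_uboot_exit(index);
	return 0;
}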

+ 169 - 7
board/thead/light-c910/boot.c

@@ -7,24 +7,29 @@
 #include <dm.h>
 #include <fdt_support.h>
 #include <fdtdec.h>
+#include <mmc.h>
 #include <opensbi.h>
 #include <asm/csr.h>
 #include <asm/arch-thead/boot_mode.h>
 #include "../../../lib/sec_library/include/csi_efuse_api.h"
-
-
+#include "../../../lib/sec_library/include/sec_crypto_sha.h"
+#include "../../../lib/sec_library/include/kdf.h"
+#include "../../../lib/sec_library/include/sec_crypto_mac.h"
 
 #if CONFIG_IS_ENABLED(LIGHT_SEC_UPGRADE)
 
-/* The micro is used to enable NON-COT boot with non-signed image */
+/* The macro is used to enable NON-COT boot with non-signed image */
 #define LIGHT_NON_COT_BOOT	1
 
-/* The micro is used to enable uboot version in efuse */
+/* The macro is used to enable uboot version in efuse */
 #define	LIGHT_UBOOT_VERSION_IN_ENV	1
 
-/* The micro is used to enble RPMB ACCESS KEY from KDF */
+/* The macro is used to enble RPMB ACCESS KEY from KDF */
 //#define LIGHT_KDF_RPMB_KEY	1
 
+/* The macro is used to enable secure image version check in boot */
+//#define LIGHT_IMG_VERSION_CHECK_IN_BOOT	1
+
 /* the sample rpmb key is only used for testing */
 #ifndef LIGHT_KDF_RPMB_KEY 
 static const unsigned char emmc_rpmb_key_sample[32] = {0x33, 0x22, 0x11, 0x00, 0x77, 0x66, 0x55, 0x44, \
@@ -34,18 +39,87 @@ static const unsigned char emmc_rpmb_key_sample[32] = {0x33, 0x22, 0x11, 0x00, 0
 #endif
 static unsigned int upgrade_image_version = 0;
 
+#define RPMB_EMMC_CID_SIZE 16
+#define RPMB_CID_PRV_OFFSET             9
+#define RPMB_CID_CRC_OFFSET             15
+static int tee_rpmb_key_gen(uint8_t* key, uint32_t * length)
+{
+	uint32_t data[RPMB_EMMC_CID_SIZE / 4];
+    uint8_t huk[32];
+    uint32_t huk_len;
+	struct mmc *mmc = find_mmc_device(0);
+	int i;
+	sc_mac_t mac_handle;
+	int ret = 0;
+
+	if (!mmc)
+		return -1;
+
+	if (!mmc->ext_csd)
+		return -1;
+
+	for (i = 0; i < ARRAY_SIZE(mmc->cid); i++)
+		data[i] = cpu_to_be32(mmc->cid[i]);
+	/*
+	 * PRV/CRC would be changed when doing eMMC FFU
+	 * The following fields should be masked off when deriving RPMB key
+	 *
+	 * CID [55: 48]: PRV (Product revision)
+	 * CID [07: 01]: CRC (CRC7 checksum)
+	 * CID [00]: not used
+	 */
+	memset((void *)((uint64_t)data + RPMB_CID_PRV_OFFSET), 0, 1);
+	memset((void *)((uint64_t)data + RPMB_CID_CRC_OFFSET), 0, 1);
+
+    /* Step1: Derive HUK from KDF function */
+	ret = csi_kdf_gen_hmac_key(huk, &huk_len);
+	if (ret) {
+		printf("kdf gen hmac key faild[%d]\r\n", ret);
+		return -1;
+	}
+
+    /* Step2: Using HUK and data to generate RPMB key */
+	ret = sc_mac_init(&mac_handle, 0);
+	if (ret) {
+		printf("mac init faild[%d]\r\n", ret);
+		ret = -1;
+		return -1;
+	}
+
+	/* LSB 16 bytes are used as key */
+	ret = sc_mac_set_key(&mac_handle, huk, 16);
+	if (ret) {
+		printf("mac set key faild[%d]\r\n", ret);
+		ret = -1;
+		goto func_exit;
+	}
+
+	ret = sc_mac_calc(&mac_handle, SC_SHA_MODE_256, (uint8_t *)&data, sizeof(data), key, length);
+	if (ret) {
+		printf("mac calc faild[%d]\r\n", ret);
+		ret = -1;
+		goto func_exit;
+	}
+
+func_exit:
+	sc_mac_uninit(&mac_handle);
+
+	return ret;
+
+}
+
 int csi_rpmb_write_access_key(void) 
 {
     unsigned long *temp_rpmb_key_addr = NULL;
     char runcmd[64] = {0};
     uint8_t blkdata[256] = {0};
-    uint8_t kdf_rpmb_key[32];
+    __attribute__((__aligned__(8))) uint8_t kdf_rpmb_key[32];
 	uint32_t kdf_rpmb_key_length = 0;
 	int ret = 0;
 
 #ifdef LIGHT_KDF_RPMB_KEY
     /* Step1: retrive RPMB key from KDF function */
-	ret = csi_kdf_gen_hmac_key(kdf_rpmb_key, &kdf_rpmb_key_length);
+	ret = tee_rpmb_key_gen(kdf_rpmb_key, &kdf_rpmb_key_length);
 	if (ret != 0) {
 		return -1;
 	}
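
A short standalone sketch of the CID masking step in tee_rpmb_key_gen() above, with the byte offsets spelled out (the offsets mirror RPMB_CID_PRV_OFFSET/RPMB_CID_CRC_OFFSET; the helper name is illustrative).

#include <stdint.h>

/* The JEDEC eMMC CID is 16 bytes, byte n holding bits [127-8n : 120-8n]:
 *   PRV  = CID[55:48]            -> byte 9
 *   CRC7 = CID[7:1], bit 0 unused -> byte 15
 * Both are zeroed so an eMMC firmware update, which rewrites PRV and CRC,
 * does not change the RPMB key derived from the CID. */
static void mask_cid_for_rpmb(uint8_t cid[16])
{
	cid[9]  = 0;	/* PRV */
	cid[15] = 0;	/* CRC7 + unused bit */
}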
@@ -320,6 +394,79 @@ int verify_image_version_rule(unsigned int new_ver, unsigned int cur_ver)
 	return 0;
 }
 
+int check_image_version_rule(unsigned int new_ver, unsigned int cur_ver)
+{
+	unsigned char new_ver_x = 0, new_ver_y = 0;
+	unsigned char cur_ver_x = 0, cur_ver_y = 0;
+
+	/* Get secure version X from image version X.Y */
+	new_ver_x = (new_ver & 0xFF00) >> 8;
+	new_ver_y = new_ver & 0xFF;
+	cur_ver_x = (cur_ver & 0xFF00) >> 8;
+	cur_ver_y = cur_ver & 0xFF;
+
+	/* Ensure image version must be less than expected version */
+	if (new_ver_x < cur_ver_x) {
+		return -1;
+	}
+
+	return 0;
+}
+
+int check_tf_version_in_boot(unsigned long tf_addr)
+{
+	int ret = 0;
+	unsigned int img_version = 0;
+	unsigned int expected_img_version = 0;
+	
+	img_version = get_image_version(tf_addr);
+	if (img_version == 0) {
+		printf("get tf image version fail\n");
+		return -1;
+	}
+
+	ret = csi_tf_get_image_version(&expected_img_version);
+	if (ret != 0) {
+		printf("Get tf expected img version fail\n");
+		return -1;
+	}
+
+	ret = check_image_version_rule(img_version, expected_img_version);
+	if (ret != 0) {
+		printf("Image version breaks the rule\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+int check_tee_version_in_boot(unsigned long tee_addr)
+{
+	int ret = 0;
+	unsigned int img_version = 0;
+	unsigned int expected_img_version = 0;
+	
+	img_version = get_image_version(tee_addr);
+	if (img_version == 0) {
+		printf("get tee image version fail\n");
+		return -1;
+	}
+
+	ret = csi_tee_get_image_version(&expected_img_version);
+	if (ret != 0) {
+		printf("Get tee expected img version fail\n");
+		return -1;
+	}
+
+	ret = check_image_version_rule(img_version, expected_img_version);
+	if (ret != 0) {
+		printf("Image version breaks the rule\n");
+		return -1;
+	}
+
+	return 0;
+}
+
 int light_vimage(int argc, char *const argv[])
 {
 	int ret = 0;
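
A worked example of the X.Y version word used by check_image_version_rule() above (field layout taken from the shifts in this hunk; the sample values are illustrative).

#include <stdio.h>

/* Version word 0xXXYY: X = secure (major) version, Y = minor version.
 * Only X is compared; the image is rejected when its X is lower than
 * the expected X read back for that image type. */
int main(void)
{
	unsigned int new_ver = 0x0103;	/* image claims  1.3 */
	unsigned int cur_ver = 0x0200;	/* expected      2.0 */
	unsigned char new_x = (new_ver & 0xFF00) >> 8;	/* 1 */
	unsigned char cur_x = (cur_ver & 0xFF00) >> 8;	/* 2 */

	/* new_x < cur_x, so the rule fails and light_secboot() returns
	 * CMD_RET_FAILURE when LIGHT_IMG_VERSION_CHECK_IN_BOOT is enabled. */
	printf("reject: %d\n", new_x < cur_x);
	return 0;
}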
@@ -454,6 +601,13 @@ int light_secboot(int argc, char * const argv[])
 
 	/* Step1. Check and verify TF image */
 	if (image_have_head(LIGHT_TF_FW_TMP_ADDR) == 1) {
+#ifdef LIGHT_IMG_VERSION_CHECK_IN_BOOT
+		printf("check TF version in boot \n");
+		ret = check_tf_version_in_boot(LIGHT_TF_FW_TMP_ADDR);
+		if (ret != 0) {
+			return CMD_RET_FAILURE;
+		}
+#endif
 
 		printf("Process TF image verification ...\n");
 		ret = verify_customer_image(T_TF, LIGHT_TF_FW_TMP_ADDR);
@@ -479,6 +633,14 @@ int light_secboot(int argc, char * const argv[])
 
 	/* Step2. Check and verify TEE image */
 	if (image_have_head(tee_addr) == 1) {
+#ifdef LIGHT_IMG_VERSION_CHECK_IN_BOOT
+		printf("check TEE version in boot \n");
+		ret = check_tee_version_in_boot(tee_addr);
+		if (ret != 0) {
+			return CMD_RET_FAILURE;
+		}
+#endif
+
 		printf("Process TEE image verification ...\n");
 		ret = verify_customer_image(T_TEE, tee_addr);
 		if (ret != 0) {

+ 1 - 1
board/thead/light-c910/spl.c

@@ -306,7 +306,7 @@ void cpu_performance_enable(void)
 	csr_write(CSR_MCCR2, 0xe2490009);
 	csr_write(CSR_MHCR, 0x117f); // clear bit7 to disable indirect brantch prediction
 	csr_write(CSR_MXSTATUS, 0x638000);
-	csr_write(CSR_MHINT, 0x6e30c | (1<<22)); // set bit22 to close fence broadcast
+	csr_write(CSR_MHINT, 0x6e30c | (1<<21) | (1<<22)); // set bit21 & bit 22 to close tlb & fence broadcast
 }
 
 static int bl1_img_have_head(unsigned long img_src_addr)
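
The new MHINT value, spelled out (bit meanings per the inline comment; the register itself is T-Head C910 specific):

/* 0x0006e30c             previous hint bits
 * | 0x00200000 (1 << 21) disable TLB broadcast
 * | 0x00400000 (1 << 22) disable fence broadcast
 * = 0x0066e30c           value now written by cpu_performance_enable()
 */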

+ 131 - 40
lib/sec_library/include/aes.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 
 /******************************************************************************
@@ -14,7 +14,7 @@
 #define _DRV_AES_H_
 
 #include <stdint.h>
-#include <drv/common.h>
+#include "common.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -27,10 +27,70 @@ typedef enum {
     AES_KEY_LEN_BITS_256                   ///< 256 Data bits
 } csi_aes_key_bits_t;
 
+typedef enum{
+    AES_MODE_ECB = 0,
+    AES_MODE_CBC = 0x20000020,
+    AES_MODE_CTR = 0x200001c0,
+    AES_MODE_CFB = 0x20000400,
+    AES_MODE_GCM = 0x20030040,
+    AES_MODE_CCM = 0x21D40040,
+    AES_MODE_OFB = 0x24000000,
+} aes_mode_t;
+
+#define AES_KEY_LEN_BYTES_32 32
+#define AES_KEY_LEN_BYTES_24 24
+#define AES_KEY_LEN_BYTES_16 16
+
+#define AES_CRYPTO_CTRL_CBC_256 0x20000038
+#define AES_CRYPTO_CTRL_CBC_192 0x20000030
+#define AES_CRYPTO_CTRL_CBC_128 0x20000028
+#define AES_CRYPTO_CTRL_ECB_256 0x00000018
+#define AES_CRYPTO_CTRL_ECB_192 0x00000010
+#define AES_CRYPTO_CTRL_ECB_128 0x00000008
+
+#define AES_BLOCK_IV_SIZE  16
+#define AES_BLOCK_TAG_SIZE  16
+#define AES_BLOCK_CRYPTO_SIZE  16
+
+#define AES_DIR_ENCRYPT    1
+#define AES_DIR_DECRYPT    0
+
+#define KEY_128_BITS 0x8
+#define KEY_192_BITS 0x10
+#define KEY_256_BITS 0x18
+
+#define AES_DMA_ENABLE  1
+#define AES_DMA_DISABLE 0
+
+
+typedef enum{
+    AES_CRYPTO_ECB_256_MODE = 0,
+    AES_CRYPTO_ECB_192_MODE,
+    AES_CRYPTO_ECB_128_MODE,
+    AES_CRYPTO_CBC_256_MODE,
+    AES_CRYPTO_CBC_192_MODE,
+    AES_CRYPTO_CBC_128_MODE,
+} csi_aes_mode_t;
+
+typedef struct {
+    uint32_t busy             : 1;        ///< Calculate busy flag
+    uint32_t error            : 1;        ///< Calculate error flag
+} csi_aes_state_t;
+
+typedef struct {
+    uint32_t            key_len_byte;
+    uint8_t             key[32];          ///< Data block being processed
+    uint32_t            sca;
+    uint32_t            is_kdf;
+    uint32_t            is_dma;
+} csi_aes_context_t;
+
 /**
 \brief AES Ctrl Block
 */
 typedef struct {
+    csi_aes_state_t         state;
+    csi_aes_context_t       context;
     csi_dev_t               dev;
     void                    *priv;
 } csi_aes_t;
@@ -97,7 +157,7 @@ csi_error_t csi_aes_ecb_decrypt(csi_aes_t *aes, void *in, void *out, uint32_t si
   \param[in]   iv      Init vector
   \return      Error code \ref Csi_error_t
 */
-csi_error_t csi_aes_cbc_encrypt(csi_aes_t *aes, void *in, void *out, uint32_t size, void *iv);
+csi_error_t csi_aes_cbc_encrypt(csi_aes_t *aes, void *in, void *out, uint32_t size, void *iv) ;
 
 /**
   \brief       AES cbc decrypt
@@ -161,10 +221,9 @@ csi_error_t csi_aes_cfb8_decrypt(csi_aes_t *aes, void *in, void *out, uint32_t s
   \param[out]  out     Pointer to the result data
   \param[in]   size    The source data size
   \param[in]   iv      Init vector
-  \param[out]  num     The number of the 128-bit block we have used
   \return      Error code \ref csi_error_t
 */
-csi_error_t csi_aes_cfb128_decrypt(csi_aes_t *aes, void *in, void *out, uint32_t size, void *iv, uint32_t *num);
+csi_error_t csi_aes_cfb128_decrypt(csi_aes_t *aes, void *in, void *out, uint32_t size, void *iv);
 
 /**
   \brief       AES cfb128 encrypt
@@ -173,10 +232,9 @@ csi_error_t csi_aes_cfb128_decrypt(csi_aes_t *aes, void *in, void *out, uint32_t
   \param[out]  out     Pointer to the result data
   \param[in]   size    The source data size
   \param[in]   iv      Init vector
-  \param[out]  num     The number of the 128-bit block we have used
   \return      Error code \ref csi_error_t
 */
-csi_error_t csi_aes_cfb128_encrypt(csi_aes_t *aes, void *in, void *out, uint32_t size, void *iv, uint32_t *num);
+csi_error_t csi_aes_cfb128_encrypt(csi_aes_t *aes, void *in, void *out, uint32_t size, void *iv);
 
 /**
   \brief       AES ofb encrypt
@@ -185,22 +243,22 @@ csi_error_t csi_aes_cfb128_encrypt(csi_aes_t *aes, void *in, void *out, uint32_t
   \param[out]  out     Pointer to the result data
   \param[in]   size    The source data size
   \param[in]   iv      Init vector
-  \param[out]  num     The number of the 128-bit block we have used
+  \param[in]  key_len key bits
   \return      Error code \ref csi_error_t
 */
-csi_error_t csi_aes_ofb_encrypt(csi_aes_t *aes, void *in, void *out, uint32_t size, void *iv, uint32_t *num);
+csi_error_t csi_aes_ofb_encrypt(csi_aes_t *aes, void *in, void *out, uint32_t size, void *iv);
 
 /**
-  \brief       AES ofb decrypt
-  \param[in]   aes     Handle to operate
-  \param[in]   in      Pointer to the source data
-  \param[out]  out     Pointer to the result data
-  \param[in]   size    The source data size
-  \param[in]   iv      Init vector
-  \param[out]  num     The number of the 128-bit block we have used
-  \return      Error code \ref csi_error_t
+  \brief       Aes ofb decrypt
+  \param[in]   dev_aes     dev_aes handle to operate
+  \param[in]   in      Pointer to the Source data
+  \param[out]  out     Pointer to the Result data
+  \param[in]   size    the Source data size
+  \param[in]   iv      init vector
+  \param[in]  key_len key bits
+  \return      error code \ref csi_error_t
 */
-csi_error_t csi_aes_ofb_decrypt(csi_aes_t *aes, void *in, void *out, uint32_t size, void *iv, uint32_t *num);
+csi_error_t csi_aes_ofb_decrypt(csi_aes_t *aes, void *in, void *out,uint32_t size, void *iv);
 
 /**
   \brief       AES ctr encrypt
@@ -208,20 +266,10 @@ csi_error_t csi_aes_ofb_decrypt(csi_aes_t *aes, void *in, void *out, uint32_t si
   \param[in]   in               Pointer to the source data
   \param[out]  out              Pointer to the result data
   \param[in]   size             The source data size
-  \param[in]   nonce_counter    Pointer to the 128-bit nonce and counter
-  \param[in]   stream_block     Pointer to the saved stream-block for resuming
   \param[in]   iv               Init vector
-  \param[out]  num              The number of the 128-bit block we have used
   \return      Error code \ref csi_error_t
 */
-csi_error_t csi_aes_ctr_encrypt(csi_aes_t *aes,
-                                void *in,
-                                void *out,
-                                uint32_t size,
-                                uint8_t nonce_counter[16],
-                                uint8_t stream_block[16],
-                                void *iv,
-                                uint32_t *num);
+csi_error_t csi_aes_ctr_encrypt(csi_aes_t *aes,void *in,void *out,uint32_t size,void *iv);
 
 /**
   \brief       AES ctr decrypt
@@ -229,20 +277,56 @@ csi_error_t csi_aes_ctr_encrypt(csi_aes_t *aes,
   \param[in]   in               Pointer to the source data
   \param[out]  out              Pointer to the result data
   \param[in]   size             The source data size
-  \param[in]   nonce_counter    Pointer to the 128-bit nonce and counter
-  \param[in]   stream_block     Pointer to the saved stream-block for resuming
   \param[in]   iv               Init vecotr
-  \param[out]  num              The number of the 128-bit block we have used
   \return      Error code \ref csi_error_t
 */
-csi_error_t csi_aes_ctr_decrypt(csi_aes_t *aes,
-                                void *in,
-                                void *out,
-                                uint32_t size,
-                                uint8_t nonce_counter[16],
-                                uint8_t stream_block[16],
-                                void *iv,
-                                uint32_t *num);
+csi_error_t csi_aes_ctr_decrypt(csi_aes_t *aes,void *in,void *out,uint32_t size,void *iv);
+
+/**
+  \brief       Aes gcm encrypt
+  \param[in]   dev_aes              dev_aes handle to operate
+  \param[in]   in               Pointer to the Source data
+  \param[out]  out              Pointer to the Result data
+  \param[in]   size             the Source data size
+  \param[in]   iv               init vector
+  \return      error code \ref csi_error_t
+*/
+csi_error_t csi_aes_gcm_encrypt(csi_aes_t *aes, void *in, void *out,uint32_t size, uint32_t add_len, void *iv);
+
+/**
+  \brief       Aes gcm decrypt
+  \param[in]   dev_aes              dev_aes handle to operate
+  \param[in]   in               Pointer to the Source data.
+  \param[out]  out              Pointer to the Result data
+  \param[in]   size             the Source data size
+  \param[in]   iv               init vecotr
+  \return      error code \ref csi_error_t
+*/
+csi_error_t csi_aes_gcm_decrypt(csi_aes_t *aes, void *in, void *out,uint32_t size, uint32_t add_len, void *iv);
+
+/**
+  \brief       Aes ccm encrypt
+  \param[in]   dev_aes              dev_aes handle to operate
+  \param[in]   in               Pointer to the Source data
+  \param[out]  out              Pointer to the Result data
+  \param[in]   size             the Source data size
+  \param[in]   iv               init vector
+  \param[in]   tag_out          tag output
+  \return      error code \ref csi_error_t
+*/
+csi_error_t csi_aes_ccm_encrypt(csi_aes_t *aes, void *in, void *out,uint32_t size, uint32_t add_len, void *iv, uint8_t *tag_out);
+
+/**
+  \brief       Aes ccm decrypt
+  \param[in]   dev_aes              dev_aes handle to operate
+  \param[in]   in               Pointer to the Source data
+  \param[out]  out              Pointer to the Result data
+  \param[in]   size             the Source data size
+  \param[in]   iv               init vecotr
+  \param[in]   tag_out          tag output
+  \return      error code \ref csi_error_t
+*/
+csi_error_t csi_aes_ccm_decrypt(csi_aes_t *aes, void *in, void *out,uint32_t size, uint32_t add_len, void *iv, uint8_t *tag_out);
 
 /**
   \brief       Enable AES power manage
@@ -258,6 +342,13 @@ csi_error_t csi_aes_enable_pm(csi_aes_t *aes);
 */
 void csi_aes_disable_pm(csi_aes_t *aes);
 
+/**
+  \brief       Config AES mode dma or slave
+  \param[in]   dam_en    zero disable dma, not zero enable dma 
+  \return      None
+*/
+void csi_aes_dma_enable(csi_aes_t *aes, uint8_t dma_en);
+
 #ifdef __cplusplus
 }
 #endif
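
A minimal usage sketch for the reworked AES interface, assuming the csi_aes_init()/csi_aes_set_encrypt_key()/csi_aes_uninit() entry points and the AES_KEY_LEN_BITS_128 enum value from the parts of aes.h not visible in this diff.

/* Sketch: one-shot AES-128-CBC encrypt (len must be a multiple of 16). */
static int demo_cbc_encrypt(void *plain, void *cipher, uint32_t len)
{
	csi_aes_t aes;
	uint8_t iv[AES_BLOCK_IV_SIZE] = {0};
	uint8_t key[AES_KEY_LEN_BYTES_16] = {0};	/* sample 128-bit key */

	if (csi_aes_init(&aes, 0) != CSI_OK)
		return -1;
	csi_aes_set_encrypt_key(&aes, key, AES_KEY_LEN_BITS_128);
	csi_aes_cbc_encrypt(&aes, plain, cipher, len, iv);
	csi_aes_uninit(&aes);
	return 0;
}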

+ 144 - 0
lib/sec_library/include/common.h

@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
+ */
+
+/******************************************************************************
+ * @file     drv/common.h
+ * @brief    Header File for Common Driver
+ * @version  V1.0
+ * @date     31. March 2020
+ * @model    common
+ ******************************************************************************/
+
+#ifndef _DRV_COMMON_H_
+#define _DRV_COMMON_H_
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdbool.h>
+#include "list.h"
+#include "dev_tag.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef CONFIG_DEBUG_MODE
+#define CSI_ASSERT(expr)                            \
+    do {                                        \
+        if ((unsigned long)expr == (unsigned long)NULL) {   \
+            printf("PROGRAM ASSERT\n");         \
+            while(1);                           \
+        }                                       \
+    } while(0);
+#else
+#define CSI_ASSERT(expr)        ((void)0U)
+#endif
+
+#ifdef CONFIG_PARAM_NOT_CHECK
+#define CSI_PARAM_CHK(para, err)                    \
+    do {                                            \
+        if ((unsigned long)para == (unsigned long)NULL) {       \
+            return (err);                           \
+        }                                           \
+    } while (0)
+
+#define CSI_PARAM_CHK_NORETVAL(para)                \
+    do {                                            \
+        if ((unsigned long)para == (unsigned long)NULL) {       \
+            return;                                 \
+        }                                           \
+    } while (0)
+#else
+#define CSI_PARAM_CHK(para, err)
+#define CSI_PARAM_CHK_NORETVAL(para)
+#endif
+
+#define CSI_EXAMPLE_RESULT(val)                     \
+    do {                                            \
+        if(val>=0)                                  \
+        {                                           \
+            printf("-*success*-\n");                \
+        }                                           \
+        else                                        \
+        {                                           \
+            printf("-*fail*-\n");                   \
+        }                                           \
+    } while (0);
+
+typedef enum {
+    CSI_OK          =  0,
+    CSI_ERROR       = -1,
+    CSI_BUSY        = -2,
+    CSI_TIMEOUT     = -3,
+    CSI_UNSUPPORTED = -4
+} csi_error_t;
+
+typedef struct {
+    uint8_t    readable;
+    uint8_t    writeable;
+    uint8_t    error;
+} csi_state_t;
+
+typedef struct csi_dev csi_dev_t;
+
+#ifdef CONFIG_PM
+typedef enum {
+    PM_DEV_SUSPEND,
+    PM_DEV_RESUME,
+} csi_pm_dev_action_t;
+
+typedef enum {
+    PM_MODE_RUN                  = 0,   ///< Running mode
+    PM_MODE_SLEEP_1,                    ///< Sleep LV1 mode
+    PM_MODE_SLEEP_2,                    ///< Sleep LV2 mode
+    PM_MODE_DEEP_SLEEP_1,               ///< Deep sleep LV1 mode
+    PM_MODE_DEEP_SLEEP_2,               ///< Deep sleep LV2 mode
+    PM_MODE_DEEP_SLEEP_3,               ///< Deep sleep LV3 mode
+} csi_pm_mode_t;
+
+typedef struct {
+    slist_t     next;
+    csi_error_t (*pm_action)(csi_dev_t *dev, csi_pm_dev_action_t action);
+    uint32_t    *reten_mem;
+    uint32_t    size;
+} csi_pm_dev_t;
+#include <drv/pm.h>
+#endif
+
+struct csi_dev {
+    unsigned long reg_base;
+    uint8_t       irq_num;
+    uint8_t       idx;
+    uint16_t      dev_tag;
+    void          (*irq_handler)(void *);
+#ifdef CONFIG_PM
+    csi_pm_dev_t pm_dev;
+#endif
+};
+
+#define HANDLE_REG_BASE(handle)     (handle->dev.reg_base)
+#define HANDLE_IRQ_NUM(handle)      (handle->dev.irq_num)
+#define HANDLE_DEV_IDX(handle)      (handle->dev.idx)
+#define HANDLE_IRQ_HANDLER(handle)  (handle->dev.irq_handler)
+
+typedef struct {
+    unsigned long    reg_base;
+    uint8_t          irq_num;
+    uint8_t          idx;
+    uint16_t         dev_tag;
+} csi_perip_info_t;
+
+csi_error_t target_get(csi_dev_tag_t dev_tag, uint32_t idx, csi_dev_t *dev);
+csi_error_t target_get_optimal_dma_channel(void *dma_list, uint32_t ctrl_num, csi_dev_t *parent_dev, void *ch_info);
+//void mdelay(uint32_t ms);
+//void udelay(uint32_t us);
+//void msleep(uint32_t ms);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DRV_COMMON_H_ */
+
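
A short sketch of how the new parameter-check helpers are used inside a driver entry point (the function itself is illustrative).

/* CSI_PARAM_CHK returns `err` on a NULL argument; note that, as defined
 * above, the checks are compiled in only when CONFIG_PARAM_NOT_CHECK is
 * defined, and CSI_ASSERT only when CONFIG_DEBUG_MODE is defined. */
csi_error_t demo_read(csi_dev_t *dev, void *buf, uint32_t size)
{
    CSI_PARAM_CHK(dev, CSI_ERROR);
    CSI_PARAM_CHK(buf, CSI_ERROR);
    CSI_ASSERT(size > 0);

    /* ... access the device through dev->reg_base ... */
    return CSI_OK;
}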

+ 1 - 0
lib/sec_library/include/core/README.txt

@@ -0,0 +1 @@
+Just include csi_core.h!

+ 126 - 0
lib/sec_library/include/core/cmsis/ARMCM0.h

@@ -0,0 +1,126 @@
+/**************************************************************************//**
+ * @file     ARMCM0.h
+ * @brief    CMSIS Core Peripheral Access Layer Header File for
+ *           ARMCM0 Device
+ * @version  V5.3.1
+ * @date     09. July 2018
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ARMCM0_H
+#define ARMCM0_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* -------------------------  Interrupt Number Definition  ------------------------ */
+
+typedef enum IRQn
+{
+/* -------------------  Processor Exceptions Numbers  ----------------------------- */
+  NonMaskableInt_IRQn           = -14,     /*  2 Non Maskable Interrupt */
+  HardFault_IRQn                = -13,     /*  3 HardFault Interrupt */
+
+
+
+  SVCall_IRQn                   =  -5,     /* 11 SV Call Interrupt */
+
+  PendSV_IRQn                   =  -2,     /* 14 Pend SV Interrupt */
+  SysTick_IRQn                  =  -1,     /* 15 System Tick Interrupt */
+
+/* -------------------  Processor Interrupt Numbers  ------------------------------ */
+  Interrupt0_IRQn               =   0,
+  Interrupt1_IRQn               =   1,
+  Interrupt2_IRQn               =   2,
+  Interrupt3_IRQn               =   3,
+  Interrupt4_IRQn               =   4,
+  Interrupt5_IRQn               =   5,
+  Interrupt6_IRQn               =   6,
+  Interrupt7_IRQn               =   7,
+  Interrupt8_IRQn               =   8,
+  Interrupt9_IRQn               =   9
+  /* Interrupts 10 .. 31 are left out */
+} IRQn_Type;
+
+
+/* ================================================================================ */
+/* ================      Processor and Core Peripheral Section     ================ */
+/* ================================================================================ */
+
+/* -------  Start of section using anonymous unions and disabling warnings  ------- */
+#if   defined (__CC_ARM)
+  #pragma push
+  #pragma anon_unions
+#elif defined (__ICCARM__)
+  #pragma language=extended
+#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+  #pragma clang diagnostic push
+  #pragma clang diagnostic ignored "-Wc11-extensions"
+  #pragma clang diagnostic ignored "-Wreserved-id-macro"
+#elif defined (__GNUC__)
+  /* anonymous unions are enabled by default */
+#elif defined (__TMS470__)
+  /* anonymous unions are enabled by default */
+#elif defined (__TASKING__)
+  #pragma warning 586
+#elif defined (__CSMC__)
+  /* anonymous unions are enabled by default */
+#else
+  #warning Not supported compiler type
+#endif
+
+
+/* --------  Configuration of Core Peripherals  ----------------------------------- */
+#define __CM0_REV                 0x0000U   /* Core revision r0p0 */
+#define __MPU_PRESENT             0U        /* no MPU present */
+#define __VTOR_PRESENT            0U        /* no VTOR present */
+#define __NVIC_PRIO_BITS          2U        /* Number of Bits used for Priority Levels */
+#define __Vendor_SysTickConfig    0U        /* Set to 1 if different SysTick Config is used */
+
+#include "core_cm0.h"                       /* Processor and core peripherals */
+#include "system_ARMCM0.h"                  /* System Header */
+
+
+/* --------  End of section using anonymous unions and disabling warnings  -------- */
+#if   defined (__CC_ARM)
+  #pragma pop
+#elif defined (__ICCARM__)
+  /* leave anonymous unions enabled */
+#elif (defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050))
+  #pragma clang diagnostic pop
+#elif defined (__GNUC__)
+  /* anonymous unions are enabled by default */
+#elif defined (__TMS470__)
+  /* anonymous unions are enabled by default */
+#elif defined (__TASKING__)
+  #pragma warning restore
+#elif defined (__CSMC__)
+  /* anonymous unions are enabled by default */
+#else
+  #warning Not supported compiler type
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* ARMCM0_H */
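
This is the stock CMSIS-Core device header; a minimal sketch of how its IRQ numbers and __NVIC_PRIO_BITS feed the NVIC helpers pulled in from core_cm0.h (Interrupt0_IRQn is just a placeholder).

#include "ARMCM0.h"

/* __NVIC_PRIO_BITS is 2, so valid priority values are 0..3. */
void demo_irq_setup(void)
{
	NVIC_SetPriority(Interrupt0_IRQn, 1U);
	NVIC_EnableIRQ(Interrupt0_IRQn);
}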

+ 271 - 0
lib/sec_library/include/core/cmsis/cmsis_compiler.h

@@ -0,0 +1,271 @@
+/**************************************************************************//**
+ * @file     cmsis_compiler.h
+ * @brief    CMSIS compiler generic header file
+ * @version  V5.1.0
+ * @date     09. October 2018
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CMSIS_COMPILER_H
+#define __CMSIS_COMPILER_H
+
+#include <stdint.h>
+
+/*
+ * Arm Compiler 4/5
+ */
+#if   defined ( __CC_ARM )
+  #include "cmsis_armcc.h"
+
+
+/*
+ * Arm Compiler 6.6 LTM (armclang)
+ */
+#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) && (__ARMCC_VERSION < 6100100)
+  #include "cmsis_armclang_ltm.h"
+
+  /*
+ * Arm Compiler above 6.10.1 (armclang)
+ */
+#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100)
+  #include "cmsis_armclang.h"
+
+
+/*
+ * GNU Compiler
+ */
+#elif defined ( __GNUC__ )
+  #include "cmsis_gcc.h"
+
+
+/*
+ * IAR Compiler
+ */
+#elif defined ( __ICCARM__ )
+  #include <cmsis_iccarm.h>
+
+
+/*
+ * TI Arm Compiler
+ */
+#elif defined ( __TI_ARM__ )
+  #include <cmsis_ccs.h>
+
+  #ifndef   __ASM
+    #define __ASM                                  __asm
+  #endif
+  #ifndef   __INLINE
+    #define __INLINE                               inline
+  #endif
+  #ifndef   __STATIC_INLINE
+    #define __STATIC_INLINE                        static inline
+  #endif
+  #ifndef   __STATIC_FORCEINLINE
+    #define __STATIC_FORCEINLINE                   __STATIC_INLINE
+  #endif
+  #ifndef   __NO_RETURN
+    #define __NO_RETURN                            __attribute__((noreturn))
+  #endif
+  #ifndef   __USED
+    #define __USED                                 __attribute__((used))
+  #endif
+  #ifndef   __WEAK
+    #define __WEAK                                 __attribute__((weak))
+  #endif
+  #ifndef   __PACKED
+    #define __PACKED                               __attribute__((packed))
+  #endif
+  #ifndef   __PACKED_STRUCT
+    #define __PACKED_STRUCT                        struct __attribute__((packed))
+  #endif
+  #ifndef   __PACKED_UNION
+    #define __PACKED_UNION                         union __attribute__((packed))
+  #endif
+  #ifndef   __UNALIGNED_UINT32        /* deprecated */
+    struct __attribute__((packed)) T_UINT32 { uint32_t v; };
+    #define __UNALIGNED_UINT32(x)                  (((struct T_UINT32 *)(x))->v)
+  #endif
+  #ifndef   __UNALIGNED_UINT16_WRITE
+    __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
+    #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void*)(addr))->v) = (val))
+  #endif
+  #ifndef   __UNALIGNED_UINT16_READ
+    __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
+    #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
+  #endif
+  #ifndef   __UNALIGNED_UINT32_WRITE
+    __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
+    #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
+  #endif
+  #ifndef   __UNALIGNED_UINT32_READ
+    __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
+    #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
+  #endif
+  #ifndef   __ALIGNED
+    #define __ALIGNED(x)                           __attribute__((aligned(x)))
+  #endif
+  #ifndef   __RESTRICT
+    #define __RESTRICT                             __restrict
+  #endif
+
+
+/*
+ * TASKING Compiler
+ */
+#elif defined ( __TASKING__ )
+  /*
+   * The CMSIS functions have been implemented as intrinsics in the compiler.
+   * Please use "carm -?i" to get an up to date list of all intrinsics,
+   * Including the CMSIS ones.
+   */
+
+  #ifndef   __ASM
+    #define __ASM                                  __asm
+  #endif
+  #ifndef   __INLINE
+    #define __INLINE                               inline
+  #endif
+  #ifndef   __STATIC_INLINE
+    #define __STATIC_INLINE                        static inline
+  #endif
+  #ifndef   __STATIC_FORCEINLINE
+    #define __STATIC_FORCEINLINE                   __STATIC_INLINE
+  #endif
+  #ifndef   __NO_RETURN
+    #define __NO_RETURN                            __attribute__((noreturn))
+  #endif
+  #ifndef   __USED
+    #define __USED                                 __attribute__((used))
+  #endif
+  #ifndef   __WEAK
+    #define __WEAK                                 __attribute__((weak))
+  #endif
+  #ifndef   __PACKED
+    #define __PACKED                               __packed__
+  #endif
+  #ifndef   __PACKED_STRUCT
+    #define __PACKED_STRUCT                        struct __packed__
+  #endif
+  #ifndef   __PACKED_UNION
+    #define __PACKED_UNION                         union __packed__
+  #endif
+  #ifndef   __UNALIGNED_UINT32        /* deprecated */
+    struct __packed__ T_UINT32 { uint32_t v; };
+    #define __UNALIGNED_UINT32(x)                  (((struct T_UINT32 *)(x))->v)
+  #endif
+  #ifndef   __UNALIGNED_UINT16_WRITE
+    __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
+    #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
+  #endif
+  #ifndef   __UNALIGNED_UINT16_READ
+    __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
+    #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
+  #endif
+  #ifndef   __UNALIGNED_UINT32_WRITE
+    __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
+    #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
+  #endif
+  #ifndef   __UNALIGNED_UINT32_READ
+    __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
+    #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
+  #endif
+  #ifndef   __ALIGNED
+    #define __ALIGNED(x)              __align(x)
+  #endif
+  #ifndef   __RESTRICT
+    #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored.
+    #define __RESTRICT
+  #endif
+
+
+/*
+ * COSMIC Compiler
+ */
+#elif defined ( __CSMC__ )
+   #include <cmsis_csm.h>
+
+ #ifndef   __ASM
+    #define __ASM                                  _asm
+  #endif
+  #ifndef   __INLINE
+    #define __INLINE                               inline
+  #endif
+  #ifndef   __STATIC_INLINE
+    #define __STATIC_INLINE                        static inline
+  #endif
+  #ifndef   __STATIC_FORCEINLINE
+    #define __STATIC_FORCEINLINE                   __STATIC_INLINE
+  #endif
+  #ifndef   __NO_RETURN
+    // NO RETURN is automatically detected hence no warning here
+    #define __NO_RETURN
+  #endif
+  #ifndef   __USED
+    #warning No compiler specific solution for __USED. __USED is ignored.
+    #define __USED
+  #endif
+  #ifndef   __WEAK
+    #define __WEAK                                 __weak
+  #endif
+  #ifndef   __PACKED
+    #define __PACKED                               @packed
+  #endif
+  #ifndef   __PACKED_STRUCT
+    #define __PACKED_STRUCT                        @packed struct
+  #endif
+  #ifndef   __PACKED_UNION
+    #define __PACKED_UNION                         @packed union
+  #endif
+  #ifndef   __UNALIGNED_UINT32        /* deprecated */
+    @packed struct T_UINT32 { uint32_t v; };
+    #define __UNALIGNED_UINT32(x)                  (((struct T_UINT32 *)(x))->v)
+  #endif
+  #ifndef   __UNALIGNED_UINT16_WRITE
+    __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
+    #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
+  #endif
+  #ifndef   __UNALIGNED_UINT16_READ
+    __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
+    #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
+  #endif
+  #ifndef   __UNALIGNED_UINT32_WRITE
+    __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
+    #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
+  #endif
+  #ifndef   __UNALIGNED_UINT32_READ
+    __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
+    #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
+  #endif
+  #ifndef   __ALIGNED
+    #warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored.
+    #define __ALIGNED(x)
+  #endif
+  #ifndef   __RESTRICT
+    #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored.
+    #define __RESTRICT
+  #endif
+
+
+#else
+  #error Unknown compiler.
+#endif
+
+
+#endif /* __CMSIS_COMPILER_H */
+
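
All of the per-compiler branches above converge on the same portable helpers; a small sketch of the unaligned-access macros in use (the buffer and function are illustrative).

#include <stdint.h>
#include "cmsis_compiler.h"

/* Read/write a 32-bit value at an odd offset without an alignment fault;
 * each macro expands to an access through a packed struct. */
uint32_t demo_unaligned(uint8_t *frame)
{
	__UNALIGNED_UINT32_WRITE(&frame[1], 0x11223344U);
	return __UNALIGNED_UINT32_READ(&frame[1]);
}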

+ 2101 - 0
lib/sec_library/include/core/cmsis/cmsis_gcc.h

@@ -0,0 +1,2101 @@
+/**************************************************************************//**
+ * @file     cmsis_gcc.h
+ * @brief    CMSIS compiler GCC header file
+ * @version  V5.1.0
+ * @date     20. December 2018
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CMSIS_GCC_H
+#define __CMSIS_GCC_H
+
+/* ignore some GCC warnings */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsign-conversion"
+#pragma GCC diagnostic ignored "-Wconversion"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+
+/* Fallback for __has_builtin */
+#ifndef __has_builtin
+  #define __has_builtin(x) (0)
+#endif
+
+/* CMSIS compiler specific defines */
+#ifndef   __ASM
+  #define __ASM                                  __asm
+#endif
+#ifndef   __INLINE
+  #define __INLINE                               inline
+#endif
+#ifndef   __STATIC_INLINE
+  #define __STATIC_INLINE                        static inline
+#endif
+#ifndef   __STATIC_FORCEINLINE                 
+  #define __STATIC_FORCEINLINE                   __attribute__((always_inline)) static inline
+#endif                                           
+#ifndef   __NO_RETURN
+  #define __NO_RETURN                            __attribute__((__noreturn__))
+#endif
+#ifndef   __USED
+  #define __USED                                 __attribute__((used))
+#endif
+#ifndef   __WEAK
+  #define __WEAK                                 __attribute__((weak))
+#endif
+#ifndef   __PACKED
+  #define __PACKED                               __attribute__((packed, aligned(1)))
+#endif
+#ifndef   __PACKED_STRUCT
+  #define __PACKED_STRUCT                        struct __attribute__((packed, aligned(1)))
+#endif
+#ifndef   __PACKED_UNION
+  #define __PACKED_UNION                         union __attribute__((packed, aligned(1)))
+#endif
+#ifndef   __UNALIGNED_UINT32        /* deprecated */
+  #pragma GCC diagnostic push
+  #pragma GCC diagnostic ignored "-Wpacked"
+  #pragma GCC diagnostic ignored "-Wattributes"
+  struct __attribute__((packed)) T_UINT32 { uint32_t v; };
+  #pragma GCC diagnostic pop
+  #define __UNALIGNED_UINT32(x)                  (((struct T_UINT32 *)(x))->v)
+#endif
+#ifndef   __UNALIGNED_UINT16_WRITE
+  #pragma GCC diagnostic push
+  #pragma GCC diagnostic ignored "-Wpacked"
+  #pragma GCC diagnostic ignored "-Wattributes"
+  __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
+  #pragma GCC diagnostic pop
+  #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
+#endif
+#ifndef   __UNALIGNED_UINT16_READ
+  #pragma GCC diagnostic push
+  #pragma GCC diagnostic ignored "-Wpacked"
+  #pragma GCC diagnostic ignored "-Wattributes"
+  __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
+  #pragma GCC diagnostic pop
+  #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
+#endif
+#ifndef   __UNALIGNED_UINT32_WRITE
+  #pragma GCC diagnostic push
+  #pragma GCC diagnostic ignored "-Wpacked"
+  #pragma GCC diagnostic ignored "-Wattributes"
+  __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
+  #pragma GCC diagnostic pop
+  #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
+#endif
+#ifndef   __UNALIGNED_UINT32_READ
+  #pragma GCC diagnostic push
+  #pragma GCC diagnostic ignored "-Wpacked"
+  #pragma GCC diagnostic ignored "-Wattributes"
+  __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
+  #pragma GCC diagnostic pop
+  #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
+#endif
+#ifndef   __ALIGNED
+  #define __ALIGNED(x)                           __attribute__((aligned(x)))
+#endif
+#ifndef   __RESTRICT
+  #define __RESTRICT                             __restrict
+#endif
+
+
+/* ###########################  Core Function Access  ########################### */
+/** \ingroup  CMSIS_Core_FunctionInterface
+    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
+  @{
+ */
+
+/**
+  \brief   Enable IRQ Interrupts
+  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
+           Can only be executed in Privileged modes.
+ */
+__STATIC_FORCEINLINE void __enable_irq(void)
+{
+  __ASM volatile ("cpsie i" : : : "memory");
+}
+
+
+/**
+  \brief   Disable IRQ Interrupts
+  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
+           Can only be executed in Privileged modes.
+ */
+__STATIC_FORCEINLINE void __disable_irq(void)
+{
+  __ASM volatile ("cpsid i" : : : "memory");
+}
+
+
+/**
+  \brief   Get Control Register
+  \details Returns the content of the Control Register.
+  \return               Control Register value
+ */
+__STATIC_FORCEINLINE uint32_t __get_CONTROL(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, control" : "=r" (result) );
+  return(result);
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Get Control Register (non-secure)
+  \details Returns the content of the non-secure Control Register when in secure mode.
+  \return               non-secure Control Register value
+ */
+__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, control_ns" : "=r" (result) );
+  return(result);
+}
+#endif
+
+
+/**
+  \brief   Set Control Register
+  \details Writes the given value to the Control Register.
+  \param [in]    control  Control Register value to set
+ */
+__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control)
+{
+  __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Set Control Register (non-secure)
+  \details Writes the given value to the non-secure Control Register when in secure state.
+  \param [in]    control  Control Register value to set
+ */
+__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control)
+{
+  __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory");
+}
+#endif
+
+
+/**
+  \brief   Get IPSR Register
+  \details Returns the content of the IPSR Register.
+  \return               IPSR Register value
+ */
+__STATIC_FORCEINLINE uint32_t __get_IPSR(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
+  return(result);
+}
+
+
+/**
+  \brief   Get APSR Register
+  \details Returns the content of the APSR Register.
+  \return               APSR Register value
+ */
+__STATIC_FORCEINLINE uint32_t __get_APSR(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, apsr" : "=r" (result) );
+  return(result);
+}
+
+
+/**
+  \brief   Get xPSR Register
+  \details Returns the content of the xPSR Register.
+  \return               xPSR Register value
+ */
+__STATIC_FORCEINLINE uint32_t __get_xPSR(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
+  return(result);
+}
+
+
+/**
+  \brief   Get Process Stack Pointer
+  \details Returns the current value of the Process Stack Pointer (PSP).
+  \return               PSP Register value
+ */
+__STATIC_FORCEINLINE uint32_t __get_PSP(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, psp"  : "=r" (result) );
+  return(result);
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Get Process Stack Pointer (non-secure)
+  \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state.
+  \return               PSP Register value
+ */
+__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, psp_ns"  : "=r" (result) );
+  return(result);
+}
+#endif
+
+
+/**
+  \brief   Set Process Stack Pointer
+  \details Assigns the given value to the Process Stack Pointer (PSP).
+  \param [in]    topOfProcStack  Process Stack Pointer value to set
+ */
+__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack)
+{
+  __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : );
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Set Process Stack Pointer (non-secure)
+  \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state.
+  \param [in]    topOfProcStack  Process Stack Pointer value to set
+ */
+__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack)
+{
+  __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : );
+}
+#endif
+
+
+/**
+  \brief   Get Main Stack Pointer
+  \details Returns the current value of the Main Stack Pointer (MSP).
+  \return               MSP Register value
+ */
+__STATIC_FORCEINLINE uint32_t __get_MSP(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, msp" : "=r" (result) );
+  return(result);
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Get Main Stack Pointer (non-secure)
+  \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state.
+  \return               MSP Register value
+ */
+__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, msp_ns" : "=r" (result) );
+  return(result);
+}
+#endif
+
+
+/**
+  \brief   Set Main Stack Pointer
+  \details Assigns the given value to the Main Stack Pointer (MSP).
+  \param [in]    topOfMainStack  Main Stack Pointer value to set
+ */
+__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack)
+{
+  __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : );
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Set Main Stack Pointer (non-secure)
+  \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state.
+  \param [in]    topOfMainStack  Main Stack Pointer value to set
+ */
+__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack)
+{
+  __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : );
+}
+#endif
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Get Stack Pointer (non-secure)
+  \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state.
+  \return               SP Register value
+ */
+__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, sp_ns" : "=r" (result) );
+  return(result);
+}
+
+
+/**
+  \brief   Set Stack Pointer (non-secure)
+  \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state.
+  \param [in]    topOfStack  Stack Pointer value to set
+ */
+__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack)
+{
+  __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : );
+}
+#endif
+
+
+/**
+  \brief   Get Priority Mask
+  \details Returns the current state of the priority mask bit from the Priority Mask Register.
+  \return               Priority Mask value
+ */
+__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, primask" : "=r" (result) :: "memory");
+  return(result);
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Get Priority Mask (non-secure)
+  \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state.
+  \return               Priority Mask value
+ */
+__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, primask_ns" : "=r" (result) :: "memory");
+  return(result);
+}
+#endif
+
+
+/**
+  \brief   Set Priority Mask
+  \details Assigns the given value to the Priority Mask Register.
+  \param [in]    priMask  Priority Mask
+ */
+__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask)
+{
+  __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Set Priority Mask (non-secure)
+  \details Assigns the given value to the non-secure Priority Mask Register when in secure state.
+  \param [in]    priMask  Priority Mask
+ */
+__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask)
+{
+  __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory");
+}
+#endif
+
+
+#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
+     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
+     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
+/**
+  \brief   Enable FIQ
+  \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
+           Can only be executed in Privileged modes.
+ */
+__STATIC_FORCEINLINE void __enable_fault_irq(void)
+{
+  __ASM volatile ("cpsie f" : : : "memory");
+}
+
+
+/**
+  \brief   Disable FIQ
+  \details Disables FIQ interrupts by setting the F-bit in the CPSR.
+           Can only be executed in Privileged modes.
+ */
+__STATIC_FORCEINLINE void __disable_fault_irq(void)
+{
+  __ASM volatile ("cpsid f" : : : "memory");
+}
+
+
+/**
+  \brief   Get Base Priority
+  \details Returns the current value of the Base Priority register.
+  \return               Base Priority register value
+ */
+__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, basepri" : "=r" (result) );
+  return(result);
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Get Base Priority (non-secure)
+  \details Returns the current value of the non-secure Base Priority register when in secure state.
+  \return               Base Priority register value
+ */
+__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) );
+  return(result);
+}
+#endif
+
+
+/**
+  \brief   Set Base Priority
+  \details Assigns the given value to the Base Priority register.
+  \param [in]    basePri  Base Priority value to set
+ */
+__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri)
+{
+  __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory");
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Set Base Priority (non-secure)
+  \details Assigns the given value to the non-secure Base Priority register when in secure state.
+  \param [in]    basePri  Base Priority value to set
+ */
+__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri)
+{
+  __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory");
+}
+#endif
+
+
+/**
+  \brief   Set Base Priority with condition
+  \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
+           or the new value increases the BASEPRI priority level.
+  \param [in]    basePri  Base Priority value to set
+ */
+__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri)
+{
+  __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory");
+}
+
+
+/**
+  \brief   Get Fault Mask
+  \details Returns the current value of the Fault Mask register.
+  \return               Fault Mask register value
+ */
+__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
+  return(result);
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Get Fault Mask (non-secure)
+  \details Returns the current value of the non-secure Fault Mask register when in secure state.
+  \return               Fault Mask register value
+ */
+__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void)
+{
+  uint32_t result;
+
+  __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) );
+  return(result);
+}
+#endif
+
+
+/**
+  \brief   Set Fault Mask
+  \details Assigns the given value to the Fault Mask register.
+  \param [in]    faultMask  Fault Mask value to set
+ */
+__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask)
+{
+  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Set Fault Mask (non-secure)
+  \details Assigns the given value to the non-secure Fault Mask register when in secure state.
+  \param [in]    faultMask  Fault Mask value to set
+ */
+__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask)
+{
+  __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory");
+}
+#endif
+
+#endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
+           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
+           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */
+
+
+#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
+     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )
+
+/**
+  \brief   Get Process Stack Pointer Limit
+  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
+  Stack Pointer Limit register, hence zero is always returned in non-secure
+  mode.
+  
+  \details Returns the current value of the Process Stack Pointer Limit (PSPLIM).
+  \return               PSPLIM Register value
+ */
+__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void)
+{
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+    (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
+    // without main extensions, the non-secure PSPLIM is RAZ/WI
+  return 0U;
+#else
+  uint32_t result;
+  __ASM volatile ("MRS %0, psplim"  : "=r" (result) );
+  return result;
+#endif
+}
+
+#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3))
+/**
+  \brief   Get Process Stack Pointer Limit (non-secure)
+  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
+  Stack Pointer Limit register, hence zero is always returned.
+
+  \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
+  \return               PSPLIM Register value
+ */
+__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void)
+{
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
+  // without main extensions, the non-secure PSPLIM is RAZ/WI
+  return 0U;
+#else
+  uint32_t result;
+  __ASM volatile ("MRS %0, psplim_ns"  : "=r" (result) );
+  return result;
+#endif
+}
+#endif
+
+
+/**
+  \brief   Set Process Stack Pointer Limit
+  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
+  Stack Pointer Limit register, hence the write is silently ignored in non-secure
+  mode.
+  
+  \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM).
+  \param [in]    ProcStackPtrLimit  Process Stack Pointer Limit value to set
+ */
+__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit)
+{
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+    (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
+  // without main extensions, the non-secure PSPLIM is RAZ/WI
+  (void)ProcStackPtrLimit;
+#else
+  __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit));
+#endif
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE  ) && (__ARM_FEATURE_CMSE   == 3))
+/**
+  \brief   Set Process Stack Pointer Limit (non-secure)
+  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
+  Stack Pointer Limit register, hence the write is silently ignored.
+
+  \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
+  \param [in]    ProcStackPtrLimit  Process Stack Pointer Limit value to set
+ */
+__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit)
+{
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
+  // without main extensions, the non-secure PSPLIM is RAZ/WI
+  (void)ProcStackPtrLimit;
+#else
+  __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit));
+#endif
+}
+#endif
+
+
+/**
+  \brief   Get Main Stack Pointer Limit
+  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
+  Stack Pointer Limit register, hence zero is always returned in non-secure
+  mode.
+
+  \details Returns the current value of the Main Stack Pointer Limit (MSPLIM).
+  \return               MSPLIM Register value
+ */
+__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void)
+{
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+    (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
+  // without main extensions, the non-secure MSPLIM is RAZ/WI
+  return 0U;
+#else
+  uint32_t result;
+  __ASM volatile ("MRS %0, msplim" : "=r" (result) );
+  return result;
+#endif
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE  ) && (__ARM_FEATURE_CMSE   == 3))
+/**
+  \brief   Get Main Stack Pointer Limit (non-secure)
+  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
+  Stack Pointer Limit register, hence zero is always returned.
+
+  \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state.
+  \return               MSPLIM Register value
+ */
+__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void)
+{
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
+  // without main extensions, the non-secure MSPLIM is RAZ/WI
+  return 0U;
+#else
+  uint32_t result;
+  __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) );
+  return result;
+#endif
+}
+#endif
+
+
+/**
+  \brief   Set Main Stack Pointer Limit
+  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
+  Stack Pointer Limit register, hence the write is silently ignored in non-secure
+  mode.
+
+  \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM).
+  \param [in]    MainStackPtrLimit  Main Stack Pointer Limit value to set
+ */
+__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit)
+{
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+    (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
+  // without main extensions, the non-secure MSPLIM is RAZ/WI
+  (void)MainStackPtrLimit;
+#else
+  __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit));
+#endif
+}
+
+
+#if (defined (__ARM_FEATURE_CMSE  ) && (__ARM_FEATURE_CMSE   == 3))
+/**
+  \brief   Set Main Stack Pointer Limit (non-secure)
+  Devices without ARMv8-M Main Extensions (e.g. Cortex-M23) lack the non-secure
+  Stack Pointer Limit register, hence the write is silently ignored.
+
+  \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
+  \param [in]    MainStackPtrLimit  Main Stack Pointer Limit value to set
+ */
+__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
+{
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
+  // without main extensions, the non-secure MSPLIM is RAZ/WI
+  (void)MainStackPtrLimit;
+#else
+  __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit));
+#endif
+}
+#endif
+
+#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
+           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */
+
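On ARMv8-M parts an RTOS would typically program PSPLIM just before switching onto a task's process stack, so that an overflow faults instead of silently corrupting memory. A hedged sketch; task_stack and STACK_WORDS are hypothetical, and __set_PSP is the process stack pointer setter defined earlier in this file:

/* Illustrative only: guard the bottom of a task stack with PSPLIM. */
extern uint32_t task_stack[];              /* hypothetical task stack array */
#define STACK_WORDS   256U                 /* hypothetical stack size in words */

static void start_task_stack(void)
{
  __set_PSPLIM((uint32_t)&task_stack[0]);             /* lowest valid stack address */
  __set_PSP   ((uint32_t)&task_stack[STACK_WORDS]);   /* initial top of stack */
}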
+
+/**
+  \brief   Get FPSCR
+  \details Returns the current value of the Floating Point Status/Control register.
+  \return               Floating Point Status/Control register value
+ */
+__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
+{
+#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
+     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
+#if __has_builtin(__builtin_arm_get_fpscr) 
+// Re-enable using built-in when GCC has been fixed
+// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
+  /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
+  return __builtin_arm_get_fpscr();
+#else
+  uint32_t result;
+
+  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
+  return(result);
+#endif
+#else
+  return(0U);
+#endif
+}
+
+
+/**
+  \brief   Set FPSCR
+  \details Assigns the given value to the Floating Point Status/Control register.
+  \param [in]    fpscr  Floating Point Status/Control value to set
+ */
+__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
+{
+#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
+     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
+#if __has_builtin(__builtin_arm_set_fpscr)
+// Re-enable using built-in when GCC has been fixed
+// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
+  /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
+  __builtin_arm_set_fpscr(fpscr);
+#else
+  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory");
+#endif
+#else
+  (void)fpscr;
+#endif
+}
+
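As a usage note, the two FPSCR accessors are commonly paired to clear the sticky cumulative exception flags after a computation. A minimal sketch, assuming the standard Cortex-M FPSCR layout in which the cumulative flags sit in bits 0-4 and bit 7:

/* Illustrative only: read-modify-write FPSCR to clear IOC/DZC/OFC/UFC/IXC/IDC. */
static inline void fpu_clear_exceptions(void)
{
  uint32_t fpscr = __get_FPSCR();
  __set_FPSCR(fpscr & ~0x9FU);   /* clear bits 0-4 and 7 */
}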
+
+/*@} end of CMSIS_Core_RegAccFunctions */
+
+
+/* ##########################  Core Instruction Access  ######################### */
+/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
+  Access to dedicated instructions
+  @{
+*/
+
+/* Define macros for porting to both thumb1 and thumb2.
+ * For thumb1, use low register (r0-r7), specified by constraint "l"
+ * Otherwise, use general registers, specified by constraint "r" */
+#if defined (__thumb__) && !defined (__thumb2__)
+#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
+#define __CMSIS_GCC_RW_REG(r) "+l" (r)
+#define __CMSIS_GCC_USE_REG(r) "l" (r)
+#else
+#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
+#define __CMSIS_GCC_RW_REG(r) "+r" (r)
+#define __CMSIS_GCC_USE_REG(r) "r" (r)
+#endif
+
+/**
+  \brief   No Operation
+  \details No Operation does nothing. This instruction can be used for code alignment purposes.
+ */
+#define __NOP()                             __ASM volatile ("nop")
+
+/**
+  \brief   Wait For Interrupt
+  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
+ */
+#define __WFI()                             __ASM volatile ("wfi")
+
+
+/**
+  \brief   Wait For Event
+  \details Wait For Event is a hint instruction that permits the processor to enter
+           a low-power state until one of a number of events occurs.
+ */
+#define __WFE()                             __ASM volatile ("wfe")
+
+
+/**
+  \brief   Send Event
+  \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
+ */
+#define __SEV()                             __ASM volatile ("sev")
+
+
+/**
+  \brief   Instruction Synchronization Barrier
+  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
+           so that all instructions following the ISB are fetched from cache or memory,
+           after the instruction has been completed.
+ */
+__STATIC_FORCEINLINE void __ISB(void)
+{
+  __ASM volatile ("isb 0xF":::"memory");
+}
+
+
+/**
+  \brief   Data Synchronization Barrier
+  \details Acts as a special kind of Data Memory Barrier.
+           It completes when all explicit memory accesses before this instruction complete.
+ */
+__STATIC_FORCEINLINE void __DSB(void)
+{
+  __ASM volatile ("dsb 0xF":::"memory");
+}
+
+
+/**
+  \brief   Data Memory Barrier
+  \details Ensures the apparent order of the explicit memory operations before
+           and after the instruction, without ensuring their completion.
+ */
+__STATIC_FORCEINLINE void __DMB(void)
+{
+  __ASM volatile ("dmb 0xF":::"memory");
+}
+
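A typical use of __DMB is ordering a data write ahead of the flag that publishes it to another observer, such as an interrupt handler. A hedged sketch; shared_buf and data_ready are hypothetical shared variables:

/* Illustrative only: make the buffered data visible before raising the flag. */
volatile uint32_t shared_buf;    /* hypothetical shared data word */
volatile uint32_t data_ready;    /* hypothetical ready flag */

static void publish(uint32_t value)
{
  shared_buf = value;
  __DMB();                       /* order the data write before the flag write */
  data_ready = 1U;
}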
+
+/**
+  \brief   Reverse byte order (32 bit)
+  \details Reverses the byte order in an unsigned integer value. For example, 0x12345678 becomes 0x78563412.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
+{
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+  return __builtin_bswap32(value);
+#else
+  uint32_t result;
+
+  __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+  return result;
+#endif
+}
+
+
+/**
+  \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
+{
+  uint32_t result;
+
+  __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+  return result;
+}
+
+
+/**
+  \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
+{
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+  return (int16_t)__builtin_bswap16(value);
+#else
+  int16_t result;
+
+  __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+  return result;
+#endif
+}
+
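On little-endian Cortex-M targets __REV is the natural building block for endianness conversion, e.g. between host and network byte order. A minimal sketch:

/* Illustrative only: host (little-endian) to network (big-endian) conversion. */
static inline uint32_t to_network_order(uint32_t host_value)
{
  return __REV(host_value);      /* 0x12345678 -> 0x78563412 */
}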
+
+/**
+  \brief   Rotate Right in unsigned value (32 bit)
+  \details Rotate Right provides the value of the contents of a register rotated by a variable number of bits.
+  \param [in]    op1  Value to rotate
+  \param [in]    op2  Number of Bits to rotate
+  \return               Rotated value
+ */
+__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
+{
+  op2 %= 32U;
+  if (op2 == 0U)
+  {
+    return op1;
+  }
+  return (op1 >> op2) | (op1 << (32U - op2));
+}
+
+
+/**
+  \brief   Breakpoint
+  \details Causes the processor to enter Debug state.
+           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+  \param [in]    value  is ignored by the processor.
+                 If required, a debugger can use it to store additional information about the breakpoint.
+ */
+#define __BKPT(value)                       __ASM volatile ("bkpt "#value)
+
+
+/**
+  \brief   Reverse bit order of value
+  \details Reverses the bit order of the given value.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
+{
+  uint32_t result;
+
+#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
+     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
+     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
+   __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
+#else
+  uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */
+
+  result = value;                      /* r will be reversed bits of v; first get LSB of v */
+  for (value >>= 1U; value != 0U; value >>= 1U)
+  {
+    result <<= 1U;
+    result |= value & 1U;
+    s--;
+  }
+  result <<= s;                        /* shift when v's highest bits are zero */
+#endif
+  return result;
+}
+
+
+/**
+  \brief   Count leading zeros
+  \details Counts the number of leading zeros of a data value.
+  \param [in]  value  Value to count the leading zeros
+  \return             number of leading zeros in value
+ */
+__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
+{
+  /* Even though __builtin_clz produces a CLZ instruction on ARM, formally
+     __builtin_clz(0) is undefined behaviour, so handle this case specially.
+     This guarantees ARM-compatible results if happening to compile on a non-ARM
+     target, and ensures the compiler doesn't decide to activate any
+     optimisations using the logic "value was passed to __builtin_clz, so it
+     is non-zero".
+     ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
+     single CLZ instruction.
+   */
+  if (value == 0U)
+  {
+    return 32U;
+  }
+  return __builtin_clz(value);
+}
+
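A common use of __CLZ is finding the index of the most significant set bit, i.e. an integer log2. A minimal sketch:

/* Illustrative only: returns floor(log2(value)); returns 0 for value == 0. */
static inline uint32_t highest_set_bit(uint32_t value)
{
  return (value == 0U) ? 0U : (31U - (uint32_t)__CLZ(value));
}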
+
+#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
+     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
+     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
+     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )
+/**
+  \brief   LDR Exclusive (8 bit)
+  \details Executes an exclusive LDR instruction for an 8-bit value.
+  \param [in]    ptr  Pointer to data
+  \return             value of type uint8_t at (*ptr)
+ */
+__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr)
+{
+    uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+   __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0], which is not
+       accepted by the assembler, so the following less efficient pattern
+       has to be used.
+    */
+   __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+   return ((uint8_t) result);    /* Add explicit type cast here */
+}
+
+
+/**
+  \brief   LDR Exclusive (16 bit)
+  \details Executes an exclusive LDR instruction for 16-bit values.
+  \param [in]    ptr  Pointer to data
+  \return        value of type uint16_t at (*ptr)
+ */
+__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr)
+{
+    uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+   __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0], which is not
+       accepted by the assembler, so the following less efficient pattern
+       has to be used.
+    */
+   __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+   return ((uint16_t) result);    /* Add explicit type cast here */
+}
+
+
+/**
+  \brief   LDR Exclusive (32 bit)
+  \details Executes an exclusive LDR instruction for 32-bit values.
+  \param [in]    ptr  Pointer to data
+  \return        value of type uint32_t at (*ptr)
+ */
+__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr)
+{
+    uint32_t result;
+
+   __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
+   return(result);
+}
+
+
+/**
+  \brief   STR Exclusive (8 bit)
+  \details Executes an exclusive STR instruction for 8-bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+  \return          0  Function succeeded
+  \return          1  Function failed
+ */
+__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
+{
+   uint32_t result;
+
+   __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
+   return(result);
+}
+
+
+/**
+  \brief   STR Exclusive (16 bit)
+  \details Executes an exclusive STR instruction for 16-bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+  \return          0  Function succeeded
+  \return          1  Function failed
+ */
+__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
+{
+   uint32_t result;
+
+   __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
+   return(result);
+}
+
+
+/**
+  \brief   STR Exclusive (32 bit)
+  \details Executes an exclusive STR instruction for 32-bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+  \return          0  Function succeeded
+  \return          1  Function failed
+ */
+__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
+{
+   uint32_t result;
+
+   __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
+   return(result);
+}
+
+
+/**
+  \brief   Remove the exclusive lock
+  \details Removes the exclusive lock which is created by LDREX.
+ */
+__STATIC_FORCEINLINE void __CLREX(void)
+{
+  __ASM volatile ("clrex" ::: "memory");
+}
+
+#endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
+           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
+           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
+           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */
+
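The exclusive load/store pair above is the canonical building block for lock-free read-modify-write sequences: load with LDREX, compute, attempt the store with STREX, and retry while STREX reports failure. A hedged sketch of an atomic increment (atomic_inc is a hypothetical helper, not part of CMSIS):

/* Illustrative only: atomically increment a 32-bit counter.
   __STREXW returns 0 on success and 1 if the exclusive store failed. */
static inline void atomic_inc(volatile uint32_t *counter)
{
  uint32_t value;
  do {
    value = __LDREXW(counter);
  } while (__STREXW(value + 1U, counter) != 0U);
  __DMB();                       /* order the update before later accesses */
}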
+
+#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
+     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
+     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
+/**
+  \brief   Signed Saturate
+  \details Saturates a signed value.
+  \param [in]  ARG1  Value to be saturated
+  \param [in]  ARG2  Bit position to saturate to (1..32)
+  \return             Saturated value
+ */
+#define __SSAT(ARG1,ARG2) \
+__extension__ \
+({                          \
+  int32_t __RES, __ARG1 = (ARG1); \
+  __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
+  __RES; \
+ })
+
+
+/**
+  \brief   Unsigned Saturate
+  \details Saturates an unsigned value.
+  \param [in]  ARG1  Value to be saturated
+  \param [in]  ARG2  Bit position to saturate to (0..31)
+  \return             Saturated value
+ */
+#define __USAT(ARG1,ARG2) \
+ __extension__ \
+({                          \
+  uint32_t __RES, __ARG1 = (ARG1); \
+  __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
+  __RES; \
+ })
+
+
+/**
+  \brief   Rotate Right with Extend (32 bit)
+  \details Moves each bit of a bitstring right by one bit.
+           The carry input is shifted in at the left end of the bitstring.
+  \param [in]    value  Value to rotate
+  \return               Rotated value
+ */
+__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value)
+{
+  uint32_t result;
+
+  __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+  return(result);
+}
+
+
+/**
+  \brief   LDRT Unprivileged (8 bit)
+  \details Executes an Unprivileged LDRT instruction for an 8-bit value.
+  \param [in]    ptr  Pointer to data
+  \return             value of type uint8_t at (*ptr)
+ */
+__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr)
+{
+    uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+   __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) );
+#else
+    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0], which is not
+       accepted by the assembler, so the following less efficient pattern
+       has to be used.
+    */
+   __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" );
+#endif
+   return ((uint8_t) result);    /* Add explicit type cast here */
+}
+
+
+/**
+  \brief   LDRT Unprivileged (16 bit)
+  \details Executes an Unprivileged LDRT instruction for 16-bit values.
+  \param [in]    ptr  Pointer to data
+  \return        value of type uint16_t at (*ptr)
+ */
+__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr)
+{
+    uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+   __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) );
+#else
+    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0], which is not
+       accepted by the assembler, so the following less efficient pattern
+       has to be used.
+    */
+   __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" );
+#endif
+   return ((uint16_t) result);    /* Add explicit type cast here */
+}
+
+
+/**
+  \brief   LDRT Unprivileged (32 bit)
+  \details Executes an Unprivileged LDRT instruction for 32-bit values.
+  \param [in]    ptr  Pointer to data
+  \return        value of type uint32_t at (*ptr)
+ */
+__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr)
+{
+    uint32_t result;
+
+   __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) );
+   return(result);
+}
+
+
+/**
+  \brief   STRT Unprivileged (8 bit)
+  \details Executes an Unprivileged STRT instruction for 8-bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+ */
+__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr)
+{
+   __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
+}
+
+
+/**
+  \brief   STRT Unprivileged (16 bit)
+  \details Executes an Unprivileged STRT instruction for 16-bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+ */
+__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr)
+{
+   __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
+}
+
+
+/**
+  \brief   STRT Unprivileged (32 bit)
+  \details Executes an Unprivileged STRT instruction for 32-bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+ */
+__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr)
+{
+   __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) );
+}
+
+#else  /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
+           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
+           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */
+
+/**
+  \brief   Signed Saturate
+  \details Saturates a signed value.
+  \param [in]  value  Value to be saturated
+  \param [in]    sat  Bit position to saturate to (1..32)
+  \return             Saturated value
+ */
+__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat)
+{
+  if ((sat >= 1U) && (sat <= 32U))
+  {
+    const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
+    const int32_t min = -1 - max ;
+    if (val > max)
+    {
+      return max;
+    }
+    else if (val < min)
+    {
+      return min;
+    }
+  }
+  return val;
+}
+
+/**
+  \brief   Unsigned Saturate
+  \details Saturates an unsigned value.
+  \param [in]  value  Value to be saturated
+  \param [in]    sat  Bit position to saturate to (0..31)
+  \return             Saturated value
+ */
+__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat)
+{
+  if (sat <= 31U)
+  {
+    const uint32_t max = ((1U << sat) - 1U);
+    if (val > (int32_t)max)
+    {
+      return max;
+    }
+    else if (val < 0)
+    {
+      return 0U;
+    }
+  }
+  return (uint32_t)val;
+}
+
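Either variant of __SSAT is handy for clamping a wide intermediate result into a narrower fixed-point type; note that the instruction-backed macro above requires the saturation position to be a compile-time constant. A minimal sketch:

/* Illustrative only: clamp a 32-bit accumulator into the signed 16-bit (Q15) range. */
static inline int16_t clamp_q15(int32_t acc)
{
  return (int16_t)__SSAT(acc, 16);   /* saturates to [-32768, 32767] */
}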
+#endif /* ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
+           (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
+           (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    ) */
+
+
+#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
+     (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    )
+/**
+  \brief   Load-Acquire (8 bit)
+  \details Executes an LDAB instruction for an 8-bit value.
+  \param [in]    ptr  Pointer to data
+  \return             value of type uint8_t at (*ptr)
+ */
+__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr)
+{
+    uint32_t result;
+
+   __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) );
+   return ((uint8_t) result);
+}
+
+
+/**
+  \brief   Load-Acquire (16 bit)
+  \details Executes an LDAH instruction for 16-bit values.
+  \param [in]    ptr  Pointer to data
+  \return        value of type uint16_t at (*ptr)
+ */
+__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr)
+{
+    uint32_t result;
+
+   __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) );
+   return ((uint16_t) result);
+}
+
+
+/**
+  \brief   Load-Acquire (32 bit)
+  \details Executes an LDA instruction for 32-bit values.
+  \param [in]    ptr  Pointer to data
+  \return        value of type uint32_t at (*ptr)
+ */
+__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr)
+{
+    uint32_t result;
+
+   __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) );
+   return(result);
+}
+
+
+/**
+  \brief   Store-Release (8 bit)
+  \details Executes an STLB instruction for 8-bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+ */
+__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr)
+{
+   __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
+}
+
+
+/**
+  \brief   Store-Release (16 bit)
+  \details Executes an STLH instruction for 16-bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+ */
+__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr)
+{
+   __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
+}
+
+
+/**
+  \brief   Store-Release (32 bit)
+  \details Executes an STL instruction for 32-bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+ */
+__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr)
+{
+   __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) );
+}
+
+
+/**
+  \brief   Load-Acquire Exclusive (8 bit)
+  \details Executes an LDAB exclusive instruction for an 8-bit value.
+  \param [in]    ptr  Pointer to data
+  \return             value of type uint8_t at (*ptr)
+ */
+__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr)
+{
+    uint32_t result;
+
+   __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) );
+   return ((uint8_t) result);
+}
+
+
+/**
+  \brief   Load-Acquire Exclusive (16 bit)
+  \details Executes an LDAH exclusive instruction for 16-bit values.
+  \param [in]    ptr  Pointer to data
+  \return        value of type uint16_t at (*ptr)
+ */
+__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr)
+{
+    uint32_t result;
+
+   __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) );
+   return ((uint16_t) result);
+}
+
+
+/**
+  \brief   Load-Acquire Exclusive (32 bit)
+  \details Executes an LDA exclusive instruction for 32-bit values.
+  \param [in]    ptr  Pointer to data
+  \return        value of type uint32_t at (*ptr)
+ */
+__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr)
+{
+    uint32_t result;
+
+   __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) );
+   return(result);
+}
+
+
+/**
+  \brief   Store-Release Exclusive (8 bit)
+  \details Executes an STLB exclusive instruction for 8-bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+  \return          0  Function succeeded
+  \return          1  Function failed
+ */
+__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr)
+{
+   uint32_t result;
+
+   __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) );
+   return(result);
+}
+
+
+/**
+  \brief   Store-Release Exclusive (16 bit)
+  \details Executes an STLH exclusive instruction for 16-bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+  \return          0  Function succeeded
+  \return          1  Function failed
+ */
+__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr)
+{
+   uint32_t result;
+
+   __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) );
+   return(result);
+}
+
+
+/**
+  \brief   Store-Release Exclusive (32 bit)
+  \details Executes an STL exclusive instruction for 32-bit values.
+  \param [in]  value  Value to store
+  \param [in]    ptr  Pointer to location
+  \return          0  Function succeeded
+  \return          1  Function failed
+ */
+__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr)
+{
+   uint32_t result;
+
+   __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) );
+   return(result);
+}
+
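On ARMv8-M the acquire/release exclusives can express a small spinlock without separate barriers, since LDAEX orders later accesses after the acquire and STL orders earlier accesses before the release. A hedged sketch; spin_lock/spin_unlock are hypothetical helpers, not part of CMSIS:

/* Illustrative only: minimal spinlock built on LDAEX/STLEX/STL. */
static inline void spin_lock(volatile uint32_t *lock)
{
  while ((__LDAEX(lock) != 0U) || (__STLEX(1U, lock) != 0U)) {
    /* retry until the lock word read back as 0 and the claim succeeded */
  }
}

static inline void spin_unlock(volatile uint32_t *lock)
{
  __STL(0U, lock);               /* store-release clears the lock */
}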
+#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
+           (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))    ) */
+
+/*@}*/ /* end of group CMSIS_Core_InstructionInterface */
+
+
+/* ###################  Compiler specific Intrinsics  ########################### */
+/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
+  Access to dedicated SIMD instructions
+  @{
+*/
+
+#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))
+
+__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+
+__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+
+__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
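These byte-wise helpers operate on four lanes per call; __USADA8, for example, accumulates the sum of absolute differences of four packed unsigned bytes at a time. A hedged sketch; sad_u8, src and ref are hypothetical, and the length is assumed to be given in 32-bit words:

/* Illustrative only: sum of absolute differences over packed byte buffers. */
static uint32_t sad_u8(const uint32_t *src, const uint32_t *ref, uint32_t words)
{
  uint32_t acc = 0U;
  for (uint32_t i = 0U; i < words; i++) {
    acc = __USADA8(src[i], ref[i], acc);   /* acc += four byte-wise |src - ref| */
  }
  return acc;
}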
+#define __SSAT16(ARG1,ARG2) \
+({                          \
+  int32_t __RES, __ARG1 = (ARG1); \
+  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
+  __RES; \
+ })
+
+#define __USAT16(ARG1,ARG2) \
+({                          \
+  uint32_t __RES, __ARG1 = (ARG1); \
+  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
+  __RES; \
+ })
+
+__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1)
+{
+  uint32_t result;
+
+  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1)
+{
+  uint32_t result;
+
+  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+  union llreg_u{
+    uint32_t w32[2];
+    uint64_t w64;
+  } llr;
+  llr.w64 = acc;
+
+#ifndef __ARMEB__   /* Little endian */
+  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else               /* Big endian */
+  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+  return(llr.w64);
+}
+
+__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+  union llreg_u{
+    uint32_t w32[2];
+    uint64_t w64;
+  } llr;
+  llr.w64 = acc;
+
+#ifndef __ARMEB__   /* Little endian */
+  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else               /* Big endian */
+  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+  return(llr.w64);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+  union llreg_u{
+    uint32_t w32[2];
+    uint64_t w64;
+  } llr;
+  llr.w64 = acc;
+
+#ifndef __ARMEB__   /* Little endian */
+  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else               /* Big endian */
+  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+  return(llr.w64);
+}
+
+__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+  union llreg_u{
+    uint32_t w32[2];
+    uint64_t w64;
+  } llr;
+  llr.w64 = acc;
+
+#ifndef __ARMEB__   /* Little endian */
+  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else               /* Big endian */
+  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+  return(llr.w64);
+}
+
+__STATIC_FORCEINLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE  int32_t __QADD( int32_t op1,  int32_t op2)
+{
+  int32_t result;
+
+  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__STATIC_FORCEINLINE  int32_t __QSUB( int32_t op1,  int32_t op2)
+{
+  int32_t result;
+
+  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+#if 0
+#define __PKHBT(ARG1,ARG2,ARG3) \
+({                          \
+  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
+  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
+  __RES; \
+ })
+
+#define __PKHTB(ARG1,ARG2,ARG3) \
+({                          \
+  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
+  if (ARG3 == 0) \
+    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
+  else \
+    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
+  __RES; \
+ })
+#endif
+
+#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
+                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )
+
+#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
+                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )
+
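The packing macros are typically used to assemble two 16-bit lanes into one 32-bit operand for the halfword SIMD intrinsics above. A minimal sketch; pack_q15_pair is a hypothetical helper:

/* Illustrative only: pack two signed 16-bit samples, low sample in the bottom
   halfword and high sample in the top halfword. */
static inline uint32_t pack_q15_pair(int16_t low, int16_t high)
{
  return __PKHBT((uint32_t)(uint16_t)low, (uint32_t)(uint16_t)high, 16);
}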
+__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
+{
+ int32_t result;
+
+ __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r"  (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+#endif /* (__ARM_FEATURE_DSP == 1) */
+/*@} end of group CMSIS_SIMD_intrinsics */
+
+
+#pragma GCC diagnostic pop
+
+#endif /* __CMSIS_GCC_H */

+ 39 - 0
lib/sec_library/include/core/cmsis/cmsis_version.h

@@ -0,0 +1,39 @@
+/**************************************************************************//**
+ * @file     cmsis_version.h
+ * @brief    CMSIS Core(M) Version definitions
+ * @version  V5.0.2
+ * @date     19. April 2017
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if   defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header   /* treat file as system include file */
+#endif
+
+#ifndef __CMSIS_VERSION_H
+#define __CMSIS_VERSION_H
+
+/*  CMSIS Version definitions */
+#define __CM_CMSIS_VERSION_MAIN  ( 5U)                                      /*!< [31:16] CMSIS Core(M) main version */
+#define __CM_CMSIS_VERSION_SUB   ( 1U)                                      /*!< [15:0]  CMSIS Core(M) sub version */
+#define __CM_CMSIS_VERSION       ((__CM_CMSIS_VERSION_MAIN << 16U) | \
+                                   __CM_CMSIS_VERSION_SUB           )       /*!< CMSIS Core(M) version number */
+#endif
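Downstream code can gate on the packed version word at compile time; a minimal sketch (the 5.1 threshold is only an example value):

/* Illustrative only: refuse to build against an older CMSIS Core(M). */
#if (__CM_CMSIS_VERSION < ((5UL << 16U) | 1UL))
  #error "This code assumes CMSIS Core(M) 5.1 or newer"
#endif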

+ 949 - 0
lib/sec_library/include/core/cmsis/core_cm0.h

@@ -0,0 +1,949 @@
+/**************************************************************************//**
+ * @file     core_cm0.h
+ * @brief    CMSIS Cortex-M0 Core Peripheral Access Layer Header File
+ * @version  V5.0.6
+ * @date     13. March 2019
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if   defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header   /* treat file as system include file */
+#endif
+
+#ifndef __CORE_CM0_H_GENERIC
+#define __CORE_CM0_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+  \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions
+  CMSIS violates the following MISRA-C:2004 rules:
+
+   \li Required Rule 8.5, object/function definition in header file.<br>
+     Function definitions in header files are used to allow 'inlining'.
+
+   \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
+     Unions are used for effective representation of core registers.
+
+   \li Advisory Rule 19.7, Function-like macro defined.<br>
+     Function-like macros are used to allow more efficient code.
+ */
+
+
+/*******************************************************************************
+ *                 CMSIS definitions
+ ******************************************************************************/
+/**
+  \ingroup Cortex_M0
+  @{
+ */
+
+#include "cmsis_version.h"
+ 
+/*  CMSIS CM0 definitions */
+#define __CM0_CMSIS_VERSION_MAIN  (__CM_CMSIS_VERSION_MAIN)              /*!< \deprecated [31:16] CMSIS HAL main version */
+#define __CM0_CMSIS_VERSION_SUB   (__CM_CMSIS_VERSION_SUB)               /*!< \deprecated [15:0]  CMSIS HAL sub version */
+#define __CM0_CMSIS_VERSION       ((__CM0_CMSIS_VERSION_MAIN << 16U) | \
+                                    __CM0_CMSIS_VERSION_SUB           )  /*!< \deprecated CMSIS HAL version number */
+
+#define __CORTEX_M                (0U)                                   /*!< Cortex-M Core */
+
+/** __FPU_USED indicates whether an FPU is used or not.
+    This core does not support an FPU at all
+*/
+#define __FPU_USED       0U
+
+#if defined ( __CC_ARM )
+  #if defined __TARGET_FPU_VFP
+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+  #endif
+
+#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+  #if defined __ARM_FP
+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+  #endif
+
+#elif defined ( __GNUC__ )
+  #if defined (__VFP_FP__) && !defined(__SOFTFP__)
+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+  #endif
+
+#elif defined ( __ICCARM__ )
+  #if defined __ARMVFP__
+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+  #endif
+
+#elif defined ( __TI_ARM__ )
+  #if defined __TI_VFP_SUPPORT__
+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+  #endif
+
+#elif defined ( __TASKING__ )
+  #if defined __FPU_VFP__
+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+  #endif
+
+#elif defined ( __CSMC__ )
+  #if ( __CSMC__ & 0x400U)
+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+  #endif
+
+#endif
+
+#include "cmsis_compiler.h"               /* CMSIS compiler specific defines */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CM0_H_GENERIC */
+
+#ifndef __CMSIS_GENERIC
+
+#ifndef __CORE_CM0_H_DEPENDANT
+#define __CORE_CM0_H_DEPENDANT
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#if defined __CHECK_DEVICE_DEFINES
+  #ifndef __CM0_REV
+    #define __CM0_REV               0x0000U
+    #warning "__CM0_REV not defined in device header file; using default!"
+  #endif
+
+  #ifndef __NVIC_PRIO_BITS
+    #define __NVIC_PRIO_BITS          2U
+    #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
+  #endif
+
+  #ifndef __Vendor_SysTickConfig
+    #define __Vendor_SysTickConfig    0U
+    #warning "__Vendor_SysTickConfig not defined in device header file; using default!"
+  #endif
+#endif
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CMSIS_glob_defs CMSIS Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+  #define   __I     volatile             /*!< Defines 'read only' permissions */
+#else
+  #define   __I     volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O     volatile             /*!< Defines 'write only' permissions */
+#define     __IO    volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const      /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile            /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile            /*! Defines 'read / write' structure member permissions */
+
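These qualifiers are what device headers use when declaring memory-mapped register blocks, mirroring the hardware access rules in the type system. A hedged sketch of a made-up peripheral; MY_TIMER and its base address are hypothetical:

/* Illustrative only: a fictional peripheral declared with the CMSIS IO qualifiers. */
typedef struct
{
  __IM  uint32_t STATUS;         /* read-only status register  */
  __OM  uint32_t CLEAR;          /* write-only clear register  */
  __IOM uint32_t COUNT;          /* read/write counter value   */
} MY_TIMER_Type;

#define MY_TIMER_BASE   (0x40000000UL)                  /* hypothetical address */
#define MY_TIMER        ((MY_TIMER_Type *) MY_TIMER_BASE)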
+/*@} end of group Cortex_M0 */
+
+
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+  - Core NVIC Register
+  - Core SCB Register
+  - Core SysTick Register
+ ******************************************************************************/
+/**
+  \defgroup CMSIS_core_register Defines and Type Definitions
+  \brief Type definitions and defines for Cortex-M processor based devices.
+*/
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   CMSIS_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \brief  Union type to access the Application Program Status Register (APSR).
+ */
+typedef union
+{
+  struct
+  {
+    uint32_t _reserved0:28;              /*!< bit:  0..27  Reserved */
+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag */
+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag */
+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag */
+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag */
+  } b;                                   /*!< Structure used for bit  access */
+  uint32_t w;                            /*!< Type      used for word access */
+} APSR_Type;
+
+/* APSR Register Definitions */
+#define APSR_N_Pos                         31U                                            /*!< APSR: N Position */
+#define APSR_N_Msk                         (1UL << APSR_N_Pos)                            /*!< APSR: N Mask */
+
+#define APSR_Z_Pos                         30U                                            /*!< APSR: Z Position */
+#define APSR_Z_Msk                         (1UL << APSR_Z_Pos)                            /*!< APSR: Z Mask */
+
+#define APSR_C_Pos                         29U                                            /*!< APSR: C Position */
+#define APSR_C_Msk                         (1UL << APSR_C_Pos)                            /*!< APSR: C Mask */
+
+#define APSR_V_Pos                         28U                                            /*!< APSR: V Position */
+#define APSR_V_Msk                         (1UL << APSR_V_Pos)                            /*!< APSR: V Mask */
+
+
+/**
+  \brief  Union type to access the Interrupt Program Status Register (IPSR).
+ */
+typedef union
+{
+  struct
+  {
+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number */
+    uint32_t _reserved0:23;              /*!< bit:  9..31  Reserved */
+  } b;                                   /*!< Structure used for bit  access */
+  uint32_t w;                            /*!< Type      used for word access */
+} IPSR_Type;
+
+/* IPSR Register Definitions */
+#define IPSR_ISR_Pos                        0U                                            /*!< IPSR: ISR Position */
+#define IPSR_ISR_Msk                       (0x1FFUL /*<< IPSR_ISR_Pos*/)                  /*!< IPSR: ISR Mask */
+
+
+/**
+  \brief  Union type to access the Special-Purpose Program Status Registers (xPSR).
+ */
+typedef union
+{
+  struct
+  {
+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number */
+    uint32_t _reserved0:15;              /*!< bit:  9..23  Reserved */
+    uint32_t T:1;                        /*!< bit:     24  Thumb bit        (read 0) */
+    uint32_t _reserved1:3;               /*!< bit: 25..27  Reserved */
+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag */
+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag */
+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag */
+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag */
+  } b;                                   /*!< Structure used for bit  access */
+  uint32_t w;                            /*!< Type      used for word access */
+} xPSR_Type;
+
+/* xPSR Register Definitions */
+#define xPSR_N_Pos                         31U                                            /*!< xPSR: N Position */
+#define xPSR_N_Msk                         (1UL << xPSR_N_Pos)                            /*!< xPSR: N Mask */
+
+#define xPSR_Z_Pos                         30U                                            /*!< xPSR: Z Position */
+#define xPSR_Z_Msk                         (1UL << xPSR_Z_Pos)                            /*!< xPSR: Z Mask */
+
+#define xPSR_C_Pos                         29U                                            /*!< xPSR: C Position */
+#define xPSR_C_Msk                         (1UL << xPSR_C_Pos)                            /*!< xPSR: C Mask */
+
+#define xPSR_V_Pos                         28U                                            /*!< xPSR: V Position */
+#define xPSR_V_Msk                         (1UL << xPSR_V_Pos)                            /*!< xPSR: V Mask */
+
+#define xPSR_T_Pos                         24U                                            /*!< xPSR: T Position */
+#define xPSR_T_Msk                         (1UL << xPSR_T_Pos)                            /*!< xPSR: T Mask */
+
+#define xPSR_ISR_Pos                        0U                                            /*!< xPSR: ISR Position */
+#define xPSR_ISR_Msk                       (0x1FFUL /*<< xPSR_ISR_Pos*/)                  /*!< xPSR: ISR Mask */
+
+
+/**
+  \brief  Union type to access the Control Registers (CONTROL).
+ */
+typedef union
+{
+  struct
+  {
+    uint32_t _reserved0:1;               /*!< bit:      0  Reserved */
+    uint32_t SPSEL:1;                    /*!< bit:      1  Stack to be used */
+    uint32_t _reserved1:30;              /*!< bit:  2..31  Reserved */
+  } b;                                   /*!< Structure used for bit  access */
+  uint32_t w;                            /*!< Type      used for word access */
+} CONTROL_Type;
+
+/* CONTROL Register Definitions */
+#define CONTROL_SPSEL_Pos                   1U                                            /*!< CONTROL: SPSEL Position */
+#define CONTROL_SPSEL_Msk                  (1UL << CONTROL_SPSEL_Pos)                     /*!< CONTROL: SPSEL Mask */
+
+/*@} end of group CMSIS_CORE */
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   CMSIS_NVIC  Nested Vectored Interrupt Controller (NVIC)
+  \brief      Type definitions for the NVIC Registers
+  @{
+ */
+
+/**
+  \brief  Structure type to access the Nested Vectored Interrupt Controller (NVIC).
+ */
+typedef struct
+{
+  __IOM uint32_t ISER[1U];               /*!< Offset: 0x000 (R/W)  Interrupt Set Enable Register */
+        uint32_t RESERVED0[31U];
+  __IOM uint32_t ICER[1U];               /*!< Offset: 0x080 (R/W)  Interrupt Clear Enable Register */
+        uint32_t RESERVED1[31U];
+  __IOM uint32_t ISPR[1U];               /*!< Offset: 0x100 (R/W)  Interrupt Set Pending Register */
+        uint32_t RESERVED2[31U];
+  __IOM uint32_t ICPR[1U];               /*!< Offset: 0x180 (R/W)  Interrupt Clear Pending Register */
+        uint32_t RESERVED3[31U];
+        uint32_t RESERVED4[64U];
+  __IOM uint32_t IP[8U];                 /*!< Offset: 0x300 (R/W)  Interrupt Priority Register */
+}  NVIC_Type;
+
+/*@} end of group CMSIS_NVIC */
+
+
+/**
+  \ingroup  CMSIS_core_register
+  \defgroup CMSIS_SCB     System Control Block (SCB)
+  \brief    Type definitions for the System Control Block Registers
+  @{
+ */
+
+/**
+  \brief  Structure type to access the System Control Block (SCB).
+ */
+typedef struct
+{
+  __IM  uint32_t CPUID;                  /*!< Offset: 0x000 (R/ )  CPUID Base Register */
+  __IOM uint32_t ICSR;                   /*!< Offset: 0x004 (R/W)  Interrupt Control and State Register */
+        uint32_t RESERVED0;
+  __IOM uint32_t AIRCR;                  /*!< Offset: 0x00C (R/W)  Application Interrupt and Reset Control Register */
+  __IOM uint32_t SCR;                    /*!< Offset: 0x010 (R/W)  System Control Register */
+  __IOM uint32_t CCR;                    /*!< Offset: 0x014 (R/W)  Configuration Control Register */
+        uint32_t RESERVED1;
+  __IOM uint32_t SHP[2U];                /*!< Offset: 0x01C (R/W)  System Handlers Priority Registers. [0] is RESERVED */
+  __IOM uint32_t SHCSR;                  /*!< Offset: 0x024 (R/W)  System Handler Control and State Register */
+} SCB_Type;
+
+/* SCB CPUID Register Definitions */
+#define SCB_CPUID_IMPLEMENTER_Pos          24U                                            /*!< SCB CPUID: IMPLEMENTER Position */
+#define SCB_CPUID_IMPLEMENTER_Msk          (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos)          /*!< SCB CPUID: IMPLEMENTER Mask */
+
+#define SCB_CPUID_VARIANT_Pos              20U                                            /*!< SCB CPUID: VARIANT Position */
+#define SCB_CPUID_VARIANT_Msk              (0xFUL << SCB_CPUID_VARIANT_Pos)               /*!< SCB CPUID: VARIANT Mask */
+
+#define SCB_CPUID_ARCHITECTURE_Pos         16U                                            /*!< SCB CPUID: ARCHITECTURE Position */
+#define SCB_CPUID_ARCHITECTURE_Msk         (0xFUL << SCB_CPUID_ARCHITECTURE_Pos)          /*!< SCB CPUID: ARCHITECTURE Mask */
+
+#define SCB_CPUID_PARTNO_Pos                4U                                            /*!< SCB CPUID: PARTNO Position */
+#define SCB_CPUID_PARTNO_Msk               (0xFFFUL << SCB_CPUID_PARTNO_Pos)              /*!< SCB CPUID: PARTNO Mask */
+
+#define SCB_CPUID_REVISION_Pos              0U                                            /*!< SCB CPUID: REVISION Position */
+#define SCB_CPUID_REVISION_Msk             (0xFUL /*<< SCB_CPUID_REVISION_Pos*/)          /*!< SCB CPUID: REVISION Mask */
+
+/* SCB Interrupt Control State Register Definitions */
+#define SCB_ICSR_NMIPENDSET_Pos            31U                                            /*!< SCB ICSR: NMIPENDSET Position */
+#define SCB_ICSR_NMIPENDSET_Msk            (1UL << SCB_ICSR_NMIPENDSET_Pos)               /*!< SCB ICSR: NMIPENDSET Mask */
+
+#define SCB_ICSR_PENDSVSET_Pos             28U                                            /*!< SCB ICSR: PENDSVSET Position */
+#define SCB_ICSR_PENDSVSET_Msk             (1UL << SCB_ICSR_PENDSVSET_Pos)                /*!< SCB ICSR: PENDSVSET Mask */
+
+#define SCB_ICSR_PENDSVCLR_Pos             27U                                            /*!< SCB ICSR: PENDSVCLR Position */
+#define SCB_ICSR_PENDSVCLR_Msk             (1UL << SCB_ICSR_PENDSVCLR_Pos)                /*!< SCB ICSR: PENDSVCLR Mask */
+
+#define SCB_ICSR_PENDSTSET_Pos             26U                                            /*!< SCB ICSR: PENDSTSET Position */
+#define SCB_ICSR_PENDSTSET_Msk             (1UL << SCB_ICSR_PENDSTSET_Pos)                /*!< SCB ICSR: PENDSTSET Mask */
+
+#define SCB_ICSR_PENDSTCLR_Pos             25U                                            /*!< SCB ICSR: PENDSTCLR Position */
+#define SCB_ICSR_PENDSTCLR_Msk             (1UL << SCB_ICSR_PENDSTCLR_Pos)                /*!< SCB ICSR: PENDSTCLR Mask */
+
+#define SCB_ICSR_ISRPREEMPT_Pos            23U                                            /*!< SCB ICSR: ISRPREEMPT Position */
+#define SCB_ICSR_ISRPREEMPT_Msk            (1UL << SCB_ICSR_ISRPREEMPT_Pos)               /*!< SCB ICSR: ISRPREEMPT Mask */
+
+#define SCB_ICSR_ISRPENDING_Pos            22U                                            /*!< SCB ICSR: ISRPENDING Position */
+#define SCB_ICSR_ISRPENDING_Msk            (1UL << SCB_ICSR_ISRPENDING_Pos)               /*!< SCB ICSR: ISRPENDING Mask */
+
+#define SCB_ICSR_VECTPENDING_Pos           12U                                            /*!< SCB ICSR: VECTPENDING Position */
+#define SCB_ICSR_VECTPENDING_Msk           (0x1FFUL << SCB_ICSR_VECTPENDING_Pos)          /*!< SCB ICSR: VECTPENDING Mask */
+
+#define SCB_ICSR_VECTACTIVE_Pos             0U                                            /*!< SCB ICSR: VECTACTIVE Position */
+#define SCB_ICSR_VECTACTIVE_Msk            (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/)       /*!< SCB ICSR: VECTACTIVE Mask */
+
+/* SCB Application Interrupt and Reset Control Register Definitions */
+#define SCB_AIRCR_VECTKEY_Pos              16U                                            /*!< SCB AIRCR: VECTKEY Position */
+#define SCB_AIRCR_VECTKEY_Msk              (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos)            /*!< SCB AIRCR: VECTKEY Mask */
+
+#define SCB_AIRCR_VECTKEYSTAT_Pos          16U                                            /*!< SCB AIRCR: VECTKEYSTAT Position */
+#define SCB_AIRCR_VECTKEYSTAT_Msk          (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos)        /*!< SCB AIRCR: VECTKEYSTAT Mask */
+
+#define SCB_AIRCR_ENDIANESS_Pos            15U                                            /*!< SCB AIRCR: ENDIANESS Position */
+#define SCB_AIRCR_ENDIANESS_Msk            (1UL << SCB_AIRCR_ENDIANESS_Pos)               /*!< SCB AIRCR: ENDIANESS Mask */
+
+#define SCB_AIRCR_SYSRESETREQ_Pos           2U                                            /*!< SCB AIRCR: SYSRESETREQ Position */
+#define SCB_AIRCR_SYSRESETREQ_Msk          (1UL << SCB_AIRCR_SYSRESETREQ_Pos)             /*!< SCB AIRCR: SYSRESETREQ Mask */
+
+#define SCB_AIRCR_VECTCLRACTIVE_Pos         1U                                            /*!< SCB AIRCR: VECTCLRACTIVE Position */
+#define SCB_AIRCR_VECTCLRACTIVE_Msk        (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos)           /*!< SCB AIRCR: VECTCLRACTIVE Mask */
+
+/* SCB System Control Register Definitions */
+#define SCB_SCR_SEVONPEND_Pos               4U                                            /*!< SCB SCR: SEVONPEND Position */
+#define SCB_SCR_SEVONPEND_Msk              (1UL << SCB_SCR_SEVONPEND_Pos)                 /*!< SCB SCR: SEVONPEND Mask */
+
+#define SCB_SCR_SLEEPDEEP_Pos               2U                                            /*!< SCB SCR: SLEEPDEEP Position */
+#define SCB_SCR_SLEEPDEEP_Msk              (1UL << SCB_SCR_SLEEPDEEP_Pos)                 /*!< SCB SCR: SLEEPDEEP Mask */
+
+#define SCB_SCR_SLEEPONEXIT_Pos             1U                                            /*!< SCB SCR: SLEEPONEXIT Position */
+#define SCB_SCR_SLEEPONEXIT_Msk            (1UL << SCB_SCR_SLEEPONEXIT_Pos)               /*!< SCB SCR: SLEEPONEXIT Mask */
+
+/* SCB Configuration Control Register Definitions */
+#define SCB_CCR_STKALIGN_Pos                9U                                            /*!< SCB CCR: STKALIGN Position */
+#define SCB_CCR_STKALIGN_Msk               (1UL << SCB_CCR_STKALIGN_Pos)                  /*!< SCB CCR: STKALIGN Mask */
+
+#define SCB_CCR_UNALIGN_TRP_Pos             3U                                            /*!< SCB CCR: UNALIGN_TRP Position */
+#define SCB_CCR_UNALIGN_TRP_Msk            (1UL << SCB_CCR_UNALIGN_TRP_Pos)               /*!< SCB CCR: UNALIGN_TRP Mask */
+
+/* SCB System Handler Control and State Register Definitions */
+#define SCB_SHCSR_SVCALLPENDED_Pos         15U                                            /*!< SCB SHCSR: SVCALLPENDED Position */
+#define SCB_SHCSR_SVCALLPENDED_Msk         (1UL << SCB_SHCSR_SVCALLPENDED_Pos)            /*!< SCB SHCSR: SVCALLPENDED Mask */
+
+/*@} end of group CMSIS_SCB */
+
+
+/**
+  \ingroup  CMSIS_core_register
+  \defgroup CMSIS_SysTick     System Tick Timer (SysTick)
+  \brief    Type definitions for the System Timer Registers.
+  @{
+ */
+
+/**
+  \brief  Structure type to access the System Timer (SysTick).
+ */
+typedef struct
+{
+  __IOM uint32_t CTRL;                   /*!< Offset: 0x000 (R/W)  SysTick Control and Status Register */
+  __IOM uint32_t LOAD;                   /*!< Offset: 0x004 (R/W)  SysTick Reload Value Register */
+  __IOM uint32_t VAL;                    /*!< Offset: 0x008 (R/W)  SysTick Current Value Register */
+  __IM  uint32_t CALIB;                  /*!< Offset: 0x00C (R/ )  SysTick Calibration Register */
+} SysTick_Type;
+
+/* SysTick Control / Status Register Definitions */
+#define SysTick_CTRL_COUNTFLAG_Pos         16U                                            /*!< SysTick CTRL: COUNTFLAG Position */
+#define SysTick_CTRL_COUNTFLAG_Msk         (1UL << SysTick_CTRL_COUNTFLAG_Pos)            /*!< SysTick CTRL: COUNTFLAG Mask */
+
+#define SysTick_CTRL_CLKSOURCE_Pos          2U                                            /*!< SysTick CTRL: CLKSOURCE Position */
+#define SysTick_CTRL_CLKSOURCE_Msk         (1UL << SysTick_CTRL_CLKSOURCE_Pos)            /*!< SysTick CTRL: CLKSOURCE Mask */
+
+#define SysTick_CTRL_TICKINT_Pos            1U                                            /*!< SysTick CTRL: TICKINT Position */
+#define SysTick_CTRL_TICKINT_Msk           (1UL << SysTick_CTRL_TICKINT_Pos)              /*!< SysTick CTRL: TICKINT Mask */
+
+#define SysTick_CTRL_ENABLE_Pos             0U                                            /*!< SysTick CTRL: ENABLE Position */
+#define SysTick_CTRL_ENABLE_Msk            (1UL /*<< SysTick_CTRL_ENABLE_Pos*/)           /*!< SysTick CTRL: ENABLE Mask */
+
+/* SysTick Reload Register Definitions */
+#define SysTick_LOAD_RELOAD_Pos             0U                                            /*!< SysTick LOAD: RELOAD Position */
+#define SysTick_LOAD_RELOAD_Msk            (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/)    /*!< SysTick LOAD: RELOAD Mask */
+
+/* SysTick Current Register Definitions */
+#define SysTick_VAL_CURRENT_Pos             0U                                            /*!< SysTick VAL: CURRENT Position */
+#define SysTick_VAL_CURRENT_Msk            (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/)    /*!< SysTick VAL: CURRENT Mask */
+
+/* SysTick Calibration Register Definitions */
+#define SysTick_CALIB_NOREF_Pos            31U                                            /*!< SysTick CALIB: NOREF Position */
+#define SysTick_CALIB_NOREF_Msk            (1UL << SysTick_CALIB_NOREF_Pos)               /*!< SysTick CALIB: NOREF Mask */
+
+#define SysTick_CALIB_SKEW_Pos             30U                                            /*!< SysTick CALIB: SKEW Position */
+#define SysTick_CALIB_SKEW_Msk             (1UL << SysTick_CALIB_SKEW_Pos)                /*!< SysTick CALIB: SKEW Mask */
+
+#define SysTick_CALIB_TENMS_Pos             0U                                            /*!< SysTick CALIB: TENMS Position */
+#define SysTick_CALIB_TENMS_Msk            (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/)    /*!< SysTick CALIB: TENMS Mask */
+
+/*@} end of group CMSIS_SysTick */
+
+
+/**
+  \ingroup  CMSIS_core_register
+  \defgroup CMSIS_CoreDebug       Core Debug Registers (CoreDebug)
+  \brief    Cortex-M0 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor.
+            Therefore they are not covered by the Cortex-M0 header file.
+  @{
+ */
+/*@} end of group CMSIS_CoreDebug */
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   CMSIS_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field. This parameter is interpreted as a uint32_t type.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register. This parameter is interpreted as a uint32_t type.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
+
+/*@} end of group CMSIS_core_bitfield */
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   CMSIS_core_base     Core Definitions
+  \brief      Definitions for base addresses, unions, and structures.
+  @{
+ */
+
+/* Memory mapping of Core Hardware */
+#define SCS_BASE            (0xE000E000UL)                            /*!< System Control Space Base Address */
+#define SysTick_BASE        (SCS_BASE +  0x0010UL)                    /*!< SysTick Base Address */
+#define NVIC_BASE           (SCS_BASE +  0x0100UL)                    /*!< NVIC Base Address */
+#define SCB_BASE            (SCS_BASE +  0x0D00UL)                    /*!< System Control Block Base Address */
+
+#define SCB                 ((SCB_Type       *)     SCB_BASE      )   /*!< SCB configuration struct */
+#define SysTick             ((SysTick_Type   *)     SysTick_BASE  )   /*!< SysTick configuration struct */
+#define NVIC                ((NVIC_Type      *)     NVIC_BASE     )   /*!< NVIC configuration struct */
+
+
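+/* Usage sketch (illustrative helper, not a required API): reading the CPUID part
+   number with the _FLD2VAL helper defined above. */
+__STATIC_INLINE uint32_t SCB_GetPartNo_Example(void)
+{
+  return _FLD2VAL(SCB_CPUID_PARTNO, SCB->CPUID);   /* mask the field, then shift it down to bit 0 */
+}
+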
+/*@} */
+
+
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core NVIC Functions
+  - Core SysTick Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+/**
+  \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference
+*/
+
+
+
+/* ##########################   NVIC functions  #################################### */
+/**
+  \ingroup  CMSIS_Core_FunctionInterface
+  \defgroup CMSIS_Core_NVICFunctions NVIC Functions
+  \brief    Functions that manage interrupts and exceptions via the NVIC.
+  @{
+ */
+
+#ifdef CMSIS_NVIC_VIRTUAL
+  #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE
+    #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h"
+  #endif
+  #include CMSIS_NVIC_VIRTUAL_HEADER_FILE
+#else
+  #define NVIC_SetPriorityGrouping    __NVIC_SetPriorityGrouping
+  #define NVIC_GetPriorityGrouping    __NVIC_GetPriorityGrouping
+  #define NVIC_EnableIRQ              __NVIC_EnableIRQ
+  #define NVIC_GetEnableIRQ           __NVIC_GetEnableIRQ
+  #define NVIC_DisableIRQ             __NVIC_DisableIRQ
+  #define NVIC_GetPendingIRQ          __NVIC_GetPendingIRQ
+  #define NVIC_SetPendingIRQ          __NVIC_SetPendingIRQ
+  #define NVIC_ClearPendingIRQ        __NVIC_ClearPendingIRQ
+/*#define NVIC_GetActive              __NVIC_GetActive             not available for Cortex-M0 */
+  #define NVIC_SetPriority            __NVIC_SetPriority
+  #define NVIC_GetPriority            __NVIC_GetPriority
+  #define NVIC_SystemReset            __NVIC_SystemReset
+#endif /* CMSIS_NVIC_VIRTUAL */
+
+#ifdef CMSIS_VECTAB_VIRTUAL
+  #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE
+    #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h"
+  #endif
+  #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE
+#else
+  #define NVIC_SetVector              __NVIC_SetVector
+  #define NVIC_GetVector              __NVIC_GetVector
+#endif  /* (CMSIS_VECTAB_VIRTUAL) */
+
+#define NVIC_USER_IRQ_OFFSET          16
+
+
+/* The following EXC_RETURN values are saved to the LR on exception entry */
+#define EXC_RETURN_HANDLER         (0xFFFFFFF1UL)     /* return to Handler mode, uses MSP after return                               */
+#define EXC_RETURN_THREAD_MSP      (0xFFFFFFF9UL)     /* return to Thread mode, uses MSP after return                                */
+#define EXC_RETURN_THREAD_PSP      (0xFFFFFFFDUL)     /* return to Thread mode, uses PSP after return                                */
+
+
+/* Interrupt Priorities are WORD accessible only under Armv6-M                  */
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _SHP_IDX(IRQn)           ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >>    2UL)      )
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
+
+#define __NVIC_SetPriorityGrouping(X) (void)(X)
+#define __NVIC_GetPriorityGrouping()  (0U)
+
+/**
+  \brief   Enable Interrupt
+  \details Enables a device specific interrupt in the NVIC interrupt controller.
+  \param [in]      IRQn  Device specific interrupt number.
+  \note    IRQn must not be negative.
+ */
+__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn)
+{
+  if ((int32_t)(IRQn) >= 0)
+  {
+    NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
+  }
+}
+
+
+/**
+  \brief   Get Interrupt Enable status
+  \details Returns a device specific interrupt enable status from the NVIC interrupt controller.
+  \param [in]      IRQn  Device specific interrupt number.
+  \return             0  Interrupt is not enabled.
+  \return             1  Interrupt is enabled.
+  \note    IRQn must not be negative.
+ */
+__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn)
+{
+  if ((int32_t)(IRQn) >= 0)
+  {
+    return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
+  }
+  else
+  {
+    return(0U);
+  }
+}
+
+
+/**
+  \brief   Disable Interrupt
+  \details Disables a device specific interrupt in the NVIC interrupt controller.
+  \param [in]      IRQn  Device specific interrupt number.
+  \note    IRQn must not be negative.
+ */
+__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn)
+{
+  if ((int32_t)(IRQn) >= 0)
+  {
+    NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
+    __DSB();
+    __ISB();
+  }
+}
+
+
+/**
+  \brief   Get Pending Interrupt
+  \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt.
+  \param [in]      IRQn  Device specific interrupt number.
+  \return             0  Interrupt status is not pending.
+  \return             1  Interrupt status is pending.
+  \note    IRQn must not be negative.
+ */
+__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn)
+{
+  if ((int32_t)(IRQn) >= 0)
+  {
+    return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
+  }
+  else
+  {
+    return(0U);
+  }
+}
+
+
+/**
+  \brief   Set Pending Interrupt
+  \details Sets the pending bit of a device specific interrupt in the NVIC pending register.
+  \param [in]      IRQn  Device specific interrupt number.
+  \note    IRQn must not be negative.
+ */
+__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn)
+{
+  if ((int32_t)(IRQn) >= 0)
+  {
+    NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
+  }
+}
+
+
+/**
+  \brief   Clear Pending Interrupt
+  \details Clears the pending bit of a device specific interrupt in the NVIC pending register.
+  \param [in]      IRQn  Device specific interrupt number.
+  \note    IRQn must not be negative.
+ */
+__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn)
+{
+  if ((int32_t)(IRQn) >= 0)
+  {
+    NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
+  }
+}
+
+
+/**
+  \brief   Set Interrupt Priority
+  \details Sets the priority of a device specific interrupt or a processor exception.
+           The interrupt number can be positive to specify a device specific interrupt,
+           or negative to specify a processor exception.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]  priority  Priority to set.
+  \note    The priority cannot be set for every processor exception.
+ */
+__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
+{
+  if ((int32_t)(IRQn) >= 0)
+  {
+    NVIC->IP[_IP_IDX(IRQn)]  = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)]  & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
+       (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
+  }
+  else
+  {
+    SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
+       (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
+  }
+}
+
+
+/**
+  \brief   Get Interrupt Priority
+  \details Reads the priority of a device specific interrupt or a processor exception.
+           The interrupt number can be positive to specify a device specific interrupt,
+           or negative to specify a processor exception.
+  \param [in]   IRQn  Interrupt number.
+  \return             Interrupt Priority.
+                      Value is aligned automatically to the implemented priority bits of the microcontroller.
+ */
+__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn)
+{
+
+  if ((int32_t)(IRQn) >= 0)
+  {
+    return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
+  }
+  else
+  {
+    return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
+  }
+}
+
+
+/**
+  \brief   Encode Priority
+  \details Encodes the priority for an interrupt with the given priority group,
+           preemptive priority value, and subpriority value.
+           In case of a conflict between priority grouping and available
+           priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.
+  \param [in]     PriorityGroup  Used priority group.
+  \param [in]   PreemptPriority  Preemptive priority value (starting from 0).
+  \param [in]       SubPriority  Subpriority value (starting from 0).
+  \return                        Encoded priority. Value can be used in the function \ref NVIC_SetPriority().
+ */
+__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)
+{
+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used          */
+  uint32_t PreemptPriorityBits;
+  uint32_t SubPriorityBits;
+
+  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
+  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
+
+  return (
+           ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |
+           ((SubPriority     & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL)))
+         );
+}
+
+
+/**
+  \brief   Decode Priority
+  \details Decodes an interrupt priority value with a given priority group to
+           preemptive priority value and subpriority value.
+           In case of a conflict between priority grouping and available
+           priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set.
+  \param [in]         Priority   Priority value, which can be retrieved with the function \ref NVIC_GetPriority().
+  \param [in]     PriorityGroup  Used priority group.
+  \param [out] pPreemptPriority  Preemptive priority value (starting from 0).
+  \param [out]     pSubPriority  Subpriority value (starting from 0).
+ */
+__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority)
+{
+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used          */
+  uint32_t PreemptPriorityBits;
+  uint32_t SubPriorityBits;
+
+  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
+  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
+
+  *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);
+  *pSubPriority     = (Priority                   ) & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL);
+}
+
+
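+/* Usage sketch: round-trip a priority through the encode/decode helpers above.
+   With the two priority bits of the Cortex-M0 (__NVIC_PRIO_BITS == 2) and
+   priority group 0, the decoded preemptive priority equals the encoded one. */
+__STATIC_INLINE uint32_t NVIC_PriorityRoundTrip_Example(void)
+{
+  uint32_t preempt, sub;
+  uint32_t prio = NVIC_EncodePriority(0U, 1U, 0U);   /* preemptive priority 1, subpriority 0 */
+  NVIC_DecodePriority(prio, 0U, &preempt, &sub);     /* recover both components */
+  return preempt;                                    /* 1U */
+}
+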
+
+/**
+  \brief   Set Interrupt Vector
+  \details Sets an interrupt vector in SRAM based interrupt vector table.
+           The interrupt number can be positive to specify a device specific interrupt,
+           or negative to specify a processor exception.
+           Address 0 must be mapped to SRAM.
+  \param [in]   IRQn      Interrupt number
+  \param [in]   vector    Address of interrupt handler function
+ */
+__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
+{
+  uint32_t vectors = 0x0U;
+  (* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4)) = vector;
+}
+
+
+/**
+  \brief   Get Interrupt Vector
+  \details Reads an interrupt vector from interrupt vector table.
+           The interrupt number can be positive to specify a device specific interrupt,
+           or negative to specify a processor exception.
+  \param [in]   IRQn      Interrupt number.
+  \return                 Address of interrupt handler function
+ */
+__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
+{
+  uint32_t vectors = 0x0U;
+  return (uint32_t)(* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4));
+}
+
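+/* Usage sketch: exchange the handler of an interrupt at run time and return the
+   previous one. Only meaningful when the vector table at address 0 is in SRAM. */
+__STATIC_INLINE uint32_t NVIC_SwapVector_Example(IRQn_Type IRQn, uint32_t new_vector)
+{
+  uint32_t old_vector = NVIC_GetVector(IRQn);   /* read the current table entry */
+  NVIC_SetVector(IRQn, new_vector);             /* patch the SRAM vector table */
+  return old_vector;
+}
+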
+
+/**
+  \brief   System Reset
+  \details Initiates a system reset request to reset the MCU.
+ */
+__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void)
+{
+  __DSB();                                                          /* Ensure all outstanding memory accesses including
+                                                                       buffered writes are completed before reset */
+  SCB->AIRCR  = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
+                 SCB_AIRCR_SYSRESETREQ_Msk);
+  __DSB();                                                          /* Ensure completion of memory access */
+
+  for(;;)                                                           /* wait until reset */
+  {
+    __NOP();
+  }
+}
+
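+/* Usage sketch: typical bring-up of a device interrupt with the functions above.
+   The interrupt number and priority are supplied by the device header / caller. */
+__STATIC_INLINE void NVIC_SetupIRQ_Example(IRQn_Type IRQn, uint32_t priority)
+{
+  NVIC_SetPriority(IRQn, priority);   /* program the priority (truncated to __NVIC_PRIO_BITS) */
+  NVIC_ClearPendingIRQ(IRQn);         /* discard any stale pending request */
+  NVIC_EnableIRQ(IRQn);               /* unmask the interrupt in the NVIC */
+}
+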
+/*@} end of CMSIS_Core_NVICFunctions */
+
+
+/* ##########################  FPU functions  #################################### */
+/**
+  \ingroup  CMSIS_Core_FunctionInterface
+  \defgroup CMSIS_Core_FpuFunctions FPU Functions
+  \brief    Function that provides FPU type.
+  @{
+ */
+
+/**
+  \brief   get FPU type
+  \details returns the FPU type
+  \returns
+   - \b  0: No FPU
+   - \b  1: Single precision FPU
+   - \b  2: Double + Single precision FPU
+ */
+__STATIC_INLINE uint32_t SCB_GetFPUType(void)
+{
+    return 0U;           /* No FPU */
+}
+
+
+/*@} end of CMSIS_Core_FpuFunctions */
+
+
+
+/* ##################################    SysTick function  ############################################ */
+/**
+  \ingroup  CMSIS_Core_FunctionInterface
+  \defgroup CMSIS_Core_SysTickFunctions SysTick Functions
+  \brief    Functions that configure the System.
+  @{
+ */
+
+#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U)
+
+/**
+  \brief   System Tick Configuration
+  \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
+           Counter is in free running mode to generate periodic interrupts.
+  \param [in]  ticks  Number of ticks between two interrupts.
+  \return          0  Function succeeded.
+  \return          1  Function failed.
+  \note    When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
+           function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
+           must contain a vendor-specific implementation of this function.
+ */
+__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
+{
+  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
+  {
+    return (1UL);                                                   /* Reload value impossible */
+  }
+
+  SysTick->LOAD  = (uint32_t)(ticks - 1UL);                         /* set reload register */
+  NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
+  SysTick->VAL   = 0UL;                                             /* Load the SysTick Counter Value */
+  SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |
+                   SysTick_CTRL_TICKINT_Msk   |
+                   SysTick_CTRL_ENABLE_Msk;                         /* Enable SysTick IRQ and SysTick Timer */
+  return (0UL);                                                     /* Function successful */
+}
+
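+/* Usage sketch: derive the reload value for a 1 ms tick from a core clock value
+   supplied by the caller (for example SystemCoreClock from the system header). */
+__STATIC_INLINE uint32_t SysTick_Config1ms_Example(uint32_t core_clock_hz)
+{
+  return SysTick_Config(core_clock_hz / 1000U);   /* 0 on success, 1 if the reload does not fit in 24 bits */
+}
+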
+#endif
+
+/*@} end of CMSIS_Core_SysTickFunctions */
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CM0_H_DEPENDANT */
+
+#endif /* __CMSIS_GENERIC */

+ 56 - 0
lib/sec_library/include/core/cmsis/csi_core.h

@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     csi_core.h
+ * @brief    Header File for csi_core
+ * @version  V1.0
+ * @date     12. June 2019
+ ******************************************************************************/
+#ifndef _CSI_CORE_H_
+#define _CSI_CORE_H_
+
+#include <stddef.h>
+#include <cmsis_gcc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __GNUC__
+__STATIC_INLINE size_t csi_irq_save(void)
+{
+    uint32_t result;
+    result = __get_PRIMASK();
+    __disable_irq();
+    return (result);
+}
+
+__STATIC_INLINE void csi_irq_restore(size_t irq_state)
+{
+    __set_PRIMASK(irq_state);
+}
+#else
+static inline __asm size_t csi_irq_save(void)
+{
+    MRS     R0, PRIMASK
+    CPSID   I
+    BX      LR
+
+    return 0;
+}
+
+static inline __asm void csi_irq_restore(size_t irq_state)
+{
+    MSR     PRIMASK, R0
+    BX      LR
+}
+#endif
+
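+/* Usage sketch: a critical section built from the two primitives above. */
+__STATIC_INLINE void csi_critical_section_example(void)
+{
+    size_t flags = csi_irq_save();     /* mask interrupts, remember the previous PRIMASK */
+    /* ... touch data shared with interrupt handlers here ... */
+    csi_irq_restore(flags);            /* restore the previous interrupt state */
+}
+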
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CSI_CORE_H_ */

+ 55 - 0
lib/sec_library/include/core/cmsis/system_ARMCM0.h

@@ -0,0 +1,55 @@
+/**************************************************************************//**
+ * @file     system_ARMCM0.h
+ * @brief    CMSIS Device System Header File for
+ *           ARMCM0 Device
+ * @version  V5.3.1
+ * @date     09. July 2018
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SYSTEM_ARMCM0_H
+#define SYSTEM_ARMCM0_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern uint32_t SystemCoreClock;     /*!< System Clock Frequency (Core Clock) */
+
+
+/**
+  \brief Setup the microcontroller system.
+
+   Initialize the System and update the SystemCoreClock variable.
+ */
+extern void SystemInit (void);
+
+
+/**
+  \brief  Update SystemCoreClock variable.
+
+   Updates SystemCoreClock with the current core clock retrieved from CPU registers.
+ */
+extern void SystemCoreClockUpdate (void);
+
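+/* Usage sketch: the usual call order from startup code; SystemInit() runs before
+   main(), SystemCoreClockUpdate() after any later clock reconfiguration. */
+static inline void SystemBringUp_Example(void)
+{
+  SystemInit();                  /* initialize the system and the clock tree */
+  SystemCoreClockUpdate();       /* recompute SystemCoreClock from the hardware */
+}
+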
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SYSTEM_ARMCM0_H */

+ 1103 - 0
lib/sec_library/include/core/core_801.h

@@ -0,0 +1,1103 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_801.h
+ * @brief    CSI 801 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     02. June 2017
+ ******************************************************************************/
+
+#ifndef __CORE_801_H_GENERIC
+#define __CORE_801_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup Ck801
+  @{
+ */
+
+/*  CSI CK801 definitions */
+#define __CK801_CSI_VERSION_MAIN  (0x04U)                                      /*!< [31:16] CSI HAL main version */
+#define __CK801_CSI_VERSION_SUB   (0x1EU)                                      /*!< [15:0]  CSI HAL sub version */
+#define __CK801_CSI_VERSION       ((__CK801_CSI_VERSION_MAIN << 16U) | \
+                                   __CK801_CSI_VERSION_SUB           )        /*!< CSI HAL version number */
+
+#ifndef __CK80X
+#define __CK80X                   (0x01U)                                         /*!< CK80X Core */
+#endif
+
+/** __FPU_USED indicates whether an FPU is used or not.
+    This core does not support an FPU at all
+*/
+#define __FPU_USED       0U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_801_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_CK801_H_DEPENDANT
+#define __CORE_CK801_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __CK801_REV
+#define __CK801_REV               0x0000U
+#endif
+
+#ifndef __VIC_PRIO_BITS
+#define __VIC_PRIO_BITS           2U
+#endif
+
+#ifndef __Vendor_SysTickConfig
+#define __Vendor_SysTickConfig    1U
+#endif
+
+#ifndef __GSR_GCR_PRESENT
+#define __GSR_GCR_PRESENT         0U
+#endif
+
+#ifndef __MPU_PRESENT
+#define __MPU_PRESENT             1U
+#endif
+
+#include <core/csi_gcc.h>
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group CK801 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Registers contain:
+  - Core Register
+  - Core VIC Register
+  - Core Cache Register
+  - Core CoreTIM Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK80X processor based devices.
+*/
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \brief  Access Processor Status Register(PSR)struct definition.
+ */
+typedef union {
+    struct {
+        uint32_t C: 1;                       /*!< bit:      0  Condition code/Carry flag */
+        uint32_t _reserved0: 5;              /*!< bit:  1.. 5  Reserved */
+        uint32_t IE: 1;                      /*!< bit:      6  Interrupt enable control bit */
+        uint32_t IC: 1;                      /*!< bit:      7  Interrupt control bit */
+        uint32_t EE: 1;                      /*!< bit:      8  Exception enable control bit */
+        uint32_t MM: 1;                      /*!< bit:      9  Unsymmetrical masking bit */
+        uint32_t _reserved1: 6;              /*!< bit: 10..15  Reserved */
+        uint32_t VEC: 8;                     /*!< bit: 16..23  Exception vector value */
+        uint32_t _reserved2: 7;              /*!< bit: 24..30  Reserved */
+        uint32_t S: 1;                       /*!< bit:     31  Superuser mode set bit */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PSR_Type;
+
+/* PSR Register Definitions */
+#define PSR_S_Pos                          31U                                            /*!< PSR: S Position */
+#define PSR_S_Msk                          (1UL << PSR_S_Pos)                             /*!< PSR: S Mask */
+
+#define PSR_VEC_Pos                        16U                                            /*!< PSR: VEC Position */
+#define PSR_VEC_Msk                        (0x7FUL << PSR_VEC_Pos)                        /*!< PSR: VEC Mask */
+
+#define PSR_MM_Pos                         9U                                             /*!< PSR: MM Position */
+#define PSR_MM_Msk                         (1UL << PSR_MM_Pos)                            /*!< PSR: MM Mask */
+
+#define PSR_EE_Pos                         8U                                             /*!< PSR: EE Position */
+#define PSR_EE_Msk                         (1UL << PSR_EE_Pos)                            /*!< PSR: EE Mask */
+
+#define PSR_IC_Pos                         7U                                             /*!< PSR: IC Position */
+#define PSR_IC_Msk                         (1UL << PSR_IC_Pos)                            /*!< PSR: IC Mask */
+
+#define PSR_IE_Pos                         6U                                             /*!< PSR: IE Position */
+#define PSR_IE_Msk                         (1UL << PSR_IE_Pos)                            /*!< PSR: IE Mask */
+
+#define PSR_C_Pos                          0U                                             /*!< PSR: C Position */
+#define PSR_C_Msk                          (1UL << PSR_C_Pos)                             /*!< PSR: C Mask */
+
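+/* Usage sketch: decode the exception vector field from a raw PSR word, for
+   example one saved on the stack by an exception handler. */
+static inline uint32_t csi_psr_get_vec_example(uint32_t psr_word)
+{
+    return (psr_word & PSR_VEC_Msk) >> PSR_VEC_Pos;   /* isolate VEC and shift it down to bit 0 */
+}
+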
+/**
+  \brief Consortium definition for accessing Cache Configuration Registers(CCR, CR<18, 0>).
+ */
+typedef union {
+    struct {
+        uint32_t MP: 1;                      /*!< bit:      0  memory protection settings */
+        uint32_t _reserved0: 6;              /*!< bit:  1.. 6  Reserved */
+        uint32_t BE: 1;                      /*!< bit:      7  Endian mode */
+        uint32_t SCK: 3;                     /*!< bit:  8..10  the clock ratio of the system and the processor */
+        uint32_t _reserved1: 2;              /*!< bit: 11..12  Reserved */
+        uint32_t BE_V2: 1;                   /*!< bit:     13  V2 Endian mode */
+        uint32_t _reserved2: 18;             /*!< bit: 14..31  Reserved */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CCR_Type;
+
+/* CCR Register Definitions */
+#define CCR_BE_V2_Pos                     13U                                            /*!< CCR: BE_V2 Position */
+#define CCR_BE_V2_Msk                     (0x1UL << CCR_BE_V2_Pos)                       /*!< CCR: BE_V2 Mask */
+
+#define CCR_SCK_Pos                       8U                                             /*!< CCR: SCK Position */
+#define CCR_SCK_Msk                       (0x3UL << CCR_SCK_Pos)                         /*!< CCR: SCK Mask */
+
+#define CCR_BE_Pos                        7U                                             /*!< CCR: BE Position */
+#define CCR_BE_Msk                        (0x1UL << CCR_BE_Pos)                          /*!< CCR: BE Mask */
+
+#define CCR_MP_Pos                        0U                                             /*!< CCR: MP Position */
+#define CCR_MP_Msk                        (0x1UL << CCR_MP_Pos)                          /*!< CCR: MP Mask */
+
+/**
+  \brief  Consortium definition for accessing the access permission configuration register (CAPR, CR<19,0>)
+ */
+typedef union {
+    struct {
+        uint32_t X0: 1;                      /*!< bit:      0  Non executable attribute setting */
+        uint32_t X1: 1;                      /*!< bit:      1  Non executable attribute setting */
+        uint32_t X2: 1;                      /*!< bit:      2  Non executable attribute setting */
+        uint32_t X3: 1;                      /*!< bit:      3  Non executable attribute setting */
+        uint32_t X4: 1;                      /*!< bit:      4  Non executable attribute setting */
+        uint32_t X5: 1;                      /*!< bit:      5  Non executable attribute setting */
+        uint32_t X6: 1;                      /*!< bit:      6  Non executable attribute setting */
+        uint32_t X7: 1;                      /*!< bit:      7  Non executable attribute setting */
+        uint32_t AP0: 2;                     /*!< bit:  8.. 9  access permissions settings bit */
+        uint32_t AP1: 2;                     /*!< bit: 10..11  access permissions settings bit */
+        uint32_t AP2: 2;                     /*!< bit: 12..13  access permissions settings bit */
+        uint32_t AP3: 2;                     /*!< bit: 14..15  access permissions settings bit */
+        uint32_t AP4: 2;                     /*!< bit: 16..17  access permissions settings bit */
+        uint32_t AP5: 2;                     /*!< bit: 18..19  access permissions settings bit */
+        uint32_t AP6: 2;                     /*!< bit: 20..21  access permissions settings bit */
+        uint32_t AP7: 2;                     /*!< bit: 22..23  access permissions settings bit */
+        uint32_t S0: 1;                      /*!< bit:     24  Security property settings */
+        uint32_t S1: 1;                      /*!< bit:     25  Security property settings */
+        uint32_t S2: 1;                      /*!< bit:     26  Security property settings */
+        uint32_t S3: 1;                      /*!< bit:     27  Security property settings */
+        uint32_t S4: 1;                      /*!< bit:     28  Security property settings */
+        uint32_t S5: 1;                      /*!< bit:     29  Security property settings */
+        uint32_t S6: 1;                      /*!< bit:     30  Security property settings */
+        uint32_t S7: 1;                      /*!< bit:     31  Security property settings */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CAPR_Type;
+
+/* CAPR Register Definitions */
+#define CAPR_S7_Pos                        31U                                            /*!< CAPR: S7 Position */
+#define CAPR_S7_Msk                        (1UL << CAPR_S7_Pos)                           /*!< CAPR: S7 Mask */
+
+#define CAPR_S6_Pos                        30U                                            /*!< CAPR: S6 Position */
+#define CAPR_S6_Msk                        (1UL << CAPR_S6_Pos)                           /*!< CAPR: S6 Mask */
+
+#define CAPR_S5_Pos                        29U                                            /*!< CAPR: S5 Position */
+#define CAPR_S5_Msk                        (1UL << CAPR_S5_Pos)                           /*!< CAPR: S5 Mask */
+
+#define CAPR_S4_Pos                        28U                                            /*!< CAPR: S4 Position */
+#define CAPR_S4_Msk                        (1UL << CAPR_S4_Pos)                           /*!< CAPR: S4 Mask */
+
+#define CAPR_S3_Pos                        27U                                            /*!< CAPR: S3 Position */
+#define CAPR_S3_Msk                        (1UL << CAPR_S3_Pos)                           /*!< CAPR: S3 Mask */
+
+#define CAPR_S2_Pos                        26U                                            /*!< CAPR: S2 Position */
+#define CAPR_S2_Msk                        (1UL << CAPR_S2_Pos)                           /*!< CAPR: S2 Mask */
+
+#define CAPR_S1_Pos                        25U                                            /*!< CAPR: S1 Position */
+#define CAPR_S1_Msk                        (1UL << CAPR_S1_Pos)                           /*!< CAPR: S1 Mask */
+
+#define CAPR_S0_Pos                        24U                                            /*!< CAPR: S0 Position */
+#define CAPR_S0_Msk                        (1UL << CAPR_S0_Pos)                           /*!< CAPR: S0 Mask */
+
+#define CAPR_AP7_Pos                       22U                                            /*!< CAPR: AP7 Position */
+#define CAPR_AP7_Msk                       (0x3UL << CAPR_AP7_Pos)                        /*!< CAPR: AP7 Mask */
+
+#define CAPR_AP6_Pos                       20U                                            /*!< CAPR: AP6 Position */
+#define CAPR_AP6_Msk                       (0x3UL << CAPR_AP6_Pos)                        /*!< CAPR: AP6 Mask */
+
+#define CAPR_AP5_Pos                       18U                                            /*!< CAPR: AP5 Position */
+#define CAPR_AP5_Msk                       (0x3UL << CAPR_AP5_Pos)                        /*!< CAPR: AP5 Mask */
+
+#define CAPR_AP4_Pos                       16U                                            /*!< CAPR: AP4 Position */
+#define CAPR_AP4_Msk                       (0x3UL << CAPR_AP4_Pos)                        /*!< CAPR: AP4 Mask */
+
+#define CAPR_AP3_Pos                       14U                                            /*!< CAPR: AP3 Position */
+#define CAPR_AP3_Msk                       (0x3UL << CAPR_AP3_Pos)                        /*!< CAPR: AP3 Mask */
+
+#define CAPR_AP2_Pos                       12U                                            /*!< CAPR: AP2 Position */
+#define CAPR_AP2_Msk                       (0x3UL << CAPR_AP2_Pos)                        /*!< CAPR: AP2 Mask */
+
+#define CAPR_AP1_Pos                       10U                                            /*!< CAPR: AP1 Position */
+#define CAPR_AP1_Msk                       (0x3UL << CAPR_AP1_Pos)                        /*!< CAPR: AP1 Mask */
+
+#define CAPR_AP0_Pos                       8U                                             /*!< CAPR: AP0 Position */
+#define CAPR_AP0_Msk                       (0x3UL << CAPR_AP0_Pos)                        /*!< CAPR: AP0 Mask */
+
+#define CAPR_X7_Pos                        7U                                             /*!< CAPR: X7 Position */
+#define CAPR_X7_Msk                        (0x1UL << CAPR_X7_Pos)                         /*!< CAPR: X7 Mask */
+
+#define CAPR_X6_Pos                        6U                                             /*!< CAPR: X6 Position */
+#define CAPR_X6_Msk                        (0x1UL << CAPR_X6_Pos)                         /*!< CAPR: X6 Mask */
+
+#define CAPR_X5_Pos                        5U                                             /*!< CAPR: X5 Position */
+#define CAPR_X5_Msk                        (0x1UL << CAPR_X5_Pos)                         /*!< CAPR: X5 Mask */
+
+#define CAPR_X4_Pos                        4U                                             /*!< CAPR: X4 Position */
+#define CAPR_X4_Msk                        (0x1UL << CAPR_X4_Pos)                         /*!< CAPR: X4 Mask */
+
+#define CAPR_X3_Pos                        3U                                             /*!< CAPR: X3 Position */
+#define CAPR_X3_Msk                        (0x1UL << CAPR_X3_Pos)                         /*!< CAPR: X3 Mask */
+
+#define CAPR_X2_Pos                        2U                                             /*!< CAPR: X2 Position */
+#define CAPR_X2_Msk                        (0x1UL << CAPR_X2_Pos)                         /*!< CAPR: X2 Mask */
+
+#define CAPR_X1_Pos                        1U                                             /*!< CAPR: X1 Position */
+#define CAPR_X1_Msk                        (0x1UL << CAPR_X1_Pos)                         /*!< CAPR: X1 Mask */
+
+#define CAPR_X0_Pos                        0U                                             /*!< CAPR: X0 Position */
+#define CAPR_X0_Msk                        (0x1UL << CAPR_X0_Pos)                         /*!< CAPR: X0 Mask */
+
+/**
+  \brief  Consortium definition for accessing control register(PACR, CR<20,0>).
+ */
+typedef union {
+    struct {
+        uint32_t E: 1;                       /*!< bit:      0  Protected area enable bit */
+        uint32_t Size: 5;                    /*!< bit:  1.. 5  Size of protected area */
+        uint32_t _reserved0: 1;              /*!< bit:      6  Reserved */
+        uint32_t base_addr: 25;              /*!< bit: 7..31   High-order bits of the protected area base address */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PACR_Type;
+
+/* PACR Register Definitions */
+#define PACR_BASE_ADDR_Pos                 7U                                              /*!< PACR: base_addr Position */
+#define PACR_BASE_ADDR_Msk                 (0x1FFFFFFUL << PACR_BASE_ADDR_Pos)             /*!< PACR: base_addr Mask */
+
+#define PACR_SIZE_Pos                      1U                                              /*!< PACR: Size Position */
+#define PACR_SIZE_Msk                      (0x1FUL << PACR_SIZE_Pos)                       /*!< PACR: Size Mask */
+
+#define PACR_E_Pos                         0U                                              /*!< PACR: E Position */
+#define PACR_E_Msk                         (0x1UL << PACR_E_Pos)                           /*!< PACR: E Mask */
+
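+/* Usage sketch: compose a PACR value that enables one protected area. The size
+   encoding is a placeholder; the CK801 user manual defines the mapping from
+   region length to the Size field. */
+static inline uint32_t csi_pacr_compose_example(uint32_t base_addr, uint32_t size_enc)
+{
+    return (base_addr & PACR_BASE_ADDR_Msk)                    /* high bits of the region base */
+         | ((size_enc << PACR_SIZE_Pos) & PACR_SIZE_Msk)       /* encoded region size */
+         | PACR_E_Msk;                                         /* enable the region */
+}
+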
+/**
+  \brief  Consortium definition for accessing protection area selection register(PRSR,CR<21,0>).
+ */
+typedef union {
+    struct {
+        uint32_t RID: 3;                     /*!< bit:  0.. 2  Protected area index value */
+        uint32_t _reserved0: 29;             /*!< bit:  3..31  Reserved */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PRSR_Type;
+
+/* PRSR Register Definitions */
+#define PRSR_RID_Pos                       0U                                            /*!< PRSR: RID Position */
+#define PRSR_RID_Msk                       (0x7UL << PRSR_RID_Pos)                       /*!< PRSR: RID Mask */
+
+/**
+  \brief  Consortium definition for CPU Hint Register(CHR, CR<31,0>).
+ */
+typedef union {
+    struct {
+        uint32_t _reserved0: 4;              /*!< bit:  0.. 3  Reserved */
+        uint32_t IAE: 1;                     /*!< bit:  4      Interrupt response acceleration enable */
+        uint32_t _reserved1: 11;             /*!< bit:  5..15  Reserved */
+        uint32_t SRST_VAL: 16;               /*!< bit: 16..31  Software reset decision value */
+    } b;
+    uint32_t w;
+} CHR_Type;
+
+/* CHR Register Definitions */
+#define CHR_IAE_Pos                        4U                                            /*!< CHR: IAE Position */
+#define CHR_IAE_Msk                        (0x1UL << CHR_IAE_Pos)                        /*!< CHR: IAE Mask */
+#define CHR_SRST_VAL_Pos                   16U                                           /*!< CHR: SRST_VAL Position */
+#define CHR_SRST_VAL_Mask                  (0xFFFFUL << CHR_SRST_VAL_Pos)                /*!< CHR: SRST_VAL Mask */
+
+/*@} end of group CSI_CORE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_VIC Vectored Interrupt Controller (VIC)
+  \brief      Type definitions for the VIC Registers
+  @{
+ */
+
+/**
+  \brief Access to the structure of a vector interrupt controller.
+ */
+typedef struct {
+    __IOM uint32_t ISER[1U];               /*!< Offset: 0x000 (R/W)  Interrupt set enable register */
+    uint32_t RESERVED0[15U];
+    __IOM uint32_t IWER[1U];               /*!< Offset: 0x040 (R/W)  Interrupt wake-up set register */
+    uint32_t RESERVED1[15U];
+    __IOM uint32_t ICER[1U];               /*!< Offset: 0x080 (R/W)  Interrupt clear enable register */
+    uint32_t RESERVED2[15U];
+    __IOM uint32_t IWDR[1U];               /*!< Offset: 0x0c0 (R/W)  Interrupt wake-up clear register */
+    uint32_t RESERVED3[15U];
+    __IOM uint32_t ISPR[1U];               /*!< Offset: 0x100 (R/W)  Interrupt set pend register */
+    uint32_t RESERVED4[15U];
+    __IOM uint32_t ISSR[1U];               /*!< Offset: 0x140 (R/W)  Security interrupt set register */
+    uint32_t RESERVED5[15U];
+    __IOM uint32_t ICPR[1U];               /*!< Offset: 0x180 (R/W)  Interrupt clear pend register */
+    uint32_t RESERVED6[15U];
+    __IOM uint32_t ICSR[1U];               /*!< Offset: 0x1c0 (R/W)  Security interrupt clear register */
+    uint32_t RESERVED7[15U];
+    __IOM uint32_t IABR[1U];               /*!< Offset: 0x200 (R/W)  Interrupt answer state register */
+    uint32_t RESERVED8[63U];
+    __IOM uint32_t IPR[8U];                /*!< Offset: 0x300 (R/W)  Interrupt priority register */
+    uint32_t RESERVED9[504U];
+    __IM  uint32_t ISR;                    /*!< Offset: 0xB00 (R/ )  Interrupt state register */
+    __IOM uint32_t IPTR;                   /*!< Offset: 0xB04 (R/W)  Interrupt priority threshold register */
+} VIC_Type;
+
+/*@} end of group CSI_VIC */
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_SysTick     System Tick Timer (CORET)
+  \brief    Type definitions for the System Timer Registers.
+  @{
+ */
+
+/**
+  \brief  The data structure of the access system timer.
+ */
+typedef struct {
+    __IOM uint32_t CTRL;                   /*!< Offset: 0x000 (R/W)  Control register */
+    __IOM uint32_t LOAD;                   /*!< Offset: 0x004 (R/W)  Backfill register */
+    __IOM uint32_t VAL;                    /*!< Offset: 0x008 (R/W)  Current register */
+    __IM  uint32_t CALIB;                  /*!< Offset: 0x00C (R/ )  Calibration register */
+} CORET_Type;
+
+/* CORET Control / Status Register Definitions */
+#define CORET_CTRL_COUNTFLAG_Pos           16U                                            /*!< CORET CTRL: COUNTFLAG Position */
+#define CORET_CTRL_COUNTFLAG_Msk           (1UL << CORET_CTRL_COUNTFLAG_Pos)              /*!< CORET CTRL: COUNTFLAG Mask */
+
+#define CORET_CTRL_CLKSOURCE_Pos            2U                                            /*!< CORET CTRL: CLKSOURCE Position */
+#define CORET_CTRL_CLKSOURCE_Msk           (1UL << CORET_CTRL_CLKSOURCE_Pos)              /*!< CORET CTRL: CLKSOURCE Mask */
+
+#define CORET_CTRL_TICKINT_Pos              1U                                            /*!< CORET CTRL: TICKINT Position */
+#define CORET_CTRL_TICKINT_Msk             (1UL << CORET_CTRL_TICKINT_Pos)                /*!< CORET CTRL: TICKINT Mask */
+
+#define CORET_CTRL_ENABLE_Pos               0U                                            /*!< CORET CTRL: ENABLE Position */
+#define CORET_CTRL_ENABLE_Msk              (1UL /*<< CORET_CTRL_ENABLE_Pos*/)             /*!< CORET CTRL: ENABLE Mask */
+
+/* CORET Reload Register Definitions */
+#define CORET_LOAD_RELOAD_Pos               0U                                            /*!< CORET LOAD: RELOAD Position */
+#define CORET_LOAD_RELOAD_Msk              (0xFFFFFFUL /*<< CORET_LOAD_RELOAD_Pos*/)      /*!< CORET LOAD: RELOAD Mask */
+
+/* CORET Current Register Definitions */
+#define CORET_VAL_CURRENT_Pos               0U                                            /*!< CORET VAL: CURRENT Position */
+#define CORET_VAL_CURRENT_Msk              (0xFFFFFFUL /*<< CORET_VAL_CURRENT_Pos*/)      /*!< CORET VAL: CURRENT Mask */
+
+/* CORET Calibration Register Definitions */
+#define CORET_CALIB_NOREF_Pos               31U                                           /*!< CORET CALIB: NOREF Position */
+#define CORET_CALIB_NOREF_Msk              (1UL << CORET_CALIB_NOREF_Pos)                 /*!< CORET CALIB: NOREF Mask */
+
+#define CORET_CALIB_SKEW_Pos                30U                                           /*!< CORET CALIB: SKEW Position */
+#define CORET_CALIB_SKEW_Msk               (1UL << CORET_CALIB_SKEW_Pos)                  /*!< CORET CALIB: SKEW Mask */
+
+#define CORET_CALIB_TENMS_Pos               0U                                            /*!< CORET CALIB: TENMS Position */
+#define CORET_CALIB_TENMS_Msk              (0xFFFFFFUL /*<< CORET_CALIB_TENMS_Pos*/)      /*!< CORET CALIB: TENMS Mask */
+
+/*@} end of group CSI_SysTick */
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_DCC
+  \brief    Type definitions for the DCC.
+  @{
+ */
+
+/**
+  \brief  Access to the data structure of DCC.
+ */
+typedef struct {
+    uint32_t RESERVED0[13U];
+    __IOM uint32_t HCR;                      /*!< Offset: 0x034 (R/W) */
+    __IM uint32_t EHSR;                      /*!< Offset: 0x03C (R/ ) */
+    uint32_t RESERVED1[6U];
+    union {
+        __IM uint32_t DERJW;                 /*!< Offset: 0x058 (R/ )  Data exchange register, CPU read */
+        __OM uint32_t DERJR;                 /*!< Offset: 0x058 ( /W)  Data exchange register, CPU write */
+    };
+
+} DCC_Type;
+
+#define DCC_HCR_JW_Pos                   18U                                            /*!< DCC HCR: jw_int_en Position */
+#define DCC_HCR_JW_Msk                   (1UL << DCC_HCR_JW_Pos)                        /*!< DCC HCR: jw_int_en Mask */
+
+#define DCC_HCR_JR_Pos                   19U                                            /*!< DCC HCR: jr_int_en Position */
+#define DCC_HCR_JR_Msk                   (1UL << DCC_HCR_JR_Pos)                        /*!< DCC HCR: jr_int_en Mask */
+
+#define DCC_EHSR_JW_Pos                  1U                                             /*!< DCC EHSR: jw_vld Position */
+#define DCC_EHSR_JW_Msk                  (1UL << DCC_EHSR_JW_Pos)                       /*!< DCC EHSR: jw_vld Mask */
+
+#define DCC_EHSR_JR_Pos                  2U                                             /*!< DCC EHSR: jr_vld Position */
+#define DCC_EHSR_JR_Msk                  (1UL << DCC_EHSR_JR_Pos)                       /*!< DCC EHSR: jr_vld Mask */
+
+/*@} end of group CSI_DCC */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)
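+
+/*
+  Usage sketch (illustrative only, not part of the original header): an xxx_Pos/xxx_Msk
+  pair, here the CHR IAE field, packed with _VAL2FLD and read back with _FLD2VAL.
+
+    CHR_Type chr;
+    chr.w = __get_CHR();
+    chr.w = (chr.w & ~CHR_IAE_Msk) | _VAL2FLD(CHR_IAE, 1U);    // set IAE (bit 4)
+    uint32_t iae = _FLD2VAL(CHR_IAE, chr.w);                   // reads back 1
+*/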
+
+/*@} end of group CSI_core_bitfield */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_base     Core Definitions
+  \brief      Definitions for base addresses, unions, and structures.
+  @{
+ */
+
+/* Memory mapping of CK801 Hardware */
+#define TCIP_BASE           (0xE000E000UL)                            /*!< Tightly Coupled IP Base Address */
+#define CORET_BASE          (TCIP_BASE +  0x0010UL)                   /*!< CORET Base Address */
+#define VIC_BASE            (TCIP_BASE +  0x0100UL)                   /*!< VIC Base Address */
+#define DCC_BASE            (0xE0011000UL)                            /*!< DCC Base Address */
+
+#define CORET               ((CORET_Type   *)     CORET_BASE  )       /*!< SysTick configuration struct */
+#define VIC                 ((VIC_Type    *)     VIC_BASE   )         /*!< VIC configuration struct */
+#define DCC                 ((DCC_Type     *)     DCC_BASE    )       /*!< DCC configuration struct */
+
+/*@} */
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+/**
+  \defgroup CSI_Core_FunctionInterface Functions and Instructions Reference
+*/
+
+/* ##########################   VIC functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_VICFunctions VIC Functions
+  \brief    Functions that manage interrupts and exceptions via the VIC.
+  @{
+ */
+
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
+
+/**
+  \brief   Enable External Interrupt
+  \details Enable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_irq(int32_t IRQn)
+{
+    VIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}
+
+/**
+  \brief   Disable External Interrupt
+  \details Disable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_irq(int32_t IRQn)
+{
+    VIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}
+
+/**
+  \brief   Enable External Secure Interrupt
+  \details Enable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_sirq(int32_t IRQn)
+{
+    VIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+    VIC->ISSR[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}
+
+/**
+  \brief   Disable External Secure Interrupt
+  \details Disable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_sirq(int32_t IRQn)
+{
+    VIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+    VIC->ICSR[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}
+
+/**
+  \brief   Check Interrupt is Enabled or not
+  \details Read the enable register in the VIC and return the enable bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt is not enabled.
+  \return             1  Interrupt is enabled.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_enabled_irq(int32_t IRQn)
+{
+    return ((uint32_t)(((VIC->ISER[0U] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Check Interrupt is Pending or not
+  \details Read the pending register in the VIC and returns the pending bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not pending.
+  \return             1  Interrupt status is pending.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_pending_irq(int32_t IRQn)
+{
+    return ((uint32_t)(((VIC->ISPR[0U] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Pending Interrupt
+  \details Set the pending bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_pending_irq(int32_t IRQn)
+{
+    VIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}
+
+/**
+  \brief   Clear Pending Interrupt
+  \details Clear the pending bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_pending_irq(int32_t IRQn)
+{
+    VIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}
+
+/**
+  \brief   Check Interrupt is Wakeup or not
+  \details Read the wake-up register in the VIC and return the wake-up bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt is not set as wake up interrupt.
+  \return             1  Interrupt is set as wake up interrupt.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_wakeup_irq(int32_t IRQn)
+{
+    return ((uint32_t)(((VIC->IWER[0U] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Wakeup Interrupt
+  \details Set the wake up bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_wakeup_irq(int32_t IRQn)
+{
+    VIC->IWER[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}
+
+/**
+  \brief   Clear Wake up Interrupt
+  \details Clear the wake up bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_wakeup_irq(int32_t IRQn)
+{
+    VIC->IWDR[0U] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}
+
+/**
+  \brief   Get Interrupt is Active or not
+  \details Read the active register in the VIC and returns the active bit for the device specific interrupt.
+  \param [in]      IRQn  Device specific interrupt number.
+  \return             0  Interrupt status is not active.
+  \return             1  Interrupt status is active.
+  \note    IRQn must not be negative.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_active(int32_t IRQn)
+{
+    return ((uint32_t)(((VIC->IABR[0] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Threshold register
+  \details Set the threshold register in the VIC.
+  \param [in]      VectThreshold  specific vector threshold.
+  \param [in]      PrioThreshold  specific priority threshold.
+ */
+__STATIC_INLINE void csi_vic_set_threshold(uint32_t VectThreshold, uint32_t PrioThreshold)
+{
+    VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0x3) << 6);
+}
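+
+/*
+  Illustrative call (assumed application-chosen values, not mandated by this header):
+  program a vector threshold of 10 and a priority threshold of 2. The function adds
+  the 32-entry external-vector offset and sets the IPTR enable bit (bit 31) itself.
+
+    csi_vic_set_threshold(10U, 2U);
+*/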
+
+/**
+  \brief   Set Interrupt Priority
+  \details Set the priority of an interrupt.
+  \note    The priority cannot be set for every core interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]  priority  Priority to set.
+ */
+__STATIC_INLINE void csi_vic_set_prio(int32_t IRQn, uint32_t priority)
+{
+    VIC->IPR[_IP_IDX(IRQn)]  = ((uint32_t)(VIC->IPR[_IP_IDX(IRQn)]  & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
+                                 (((priority << (8U - __VIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
+}
+
+/**
+  \brief   Get Interrupt Priority
+  \details Read the priority of an interrupt.
+           The interrupt number can be positive to specify an external (device specific) interrupt,
+           or negative to specify an internal (core) interrupt.
+  \param [in]   IRQn  Interrupt number.
+  \return             Interrupt Priority.
+                      Value is aligned automatically to the implemented priority bits of the microcontroller.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_prio(int32_t IRQn)
+{
+    return ((uint32_t)(((VIC->IPR[_IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn)) & (uint32_t)0xFFUL) >> (8U - __VIC_PRIO_BITS)));
+}
+
+/**
+  \brief   Set interrupt handler
+  \details Set the interrupt handler for the given interrupt number; the handler address is written into the IRQ vector table.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]   handler  Interrupt handler.
+ */
+__STATIC_INLINE void csi_vic_set_vector(int32_t IRQn, uint32_t handler)
+{
+    if (IRQn >= 0 && IRQn < 32) {
+        uint32_t *vectors = (uint32_t *)__get_VBR();
+        vectors[32 + IRQn] = handler;
+    }
+}
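+
+/*
+  Typical registration sequence (illustrative sketch; TIM0_IRQn and tim0_irq_handler
+  are hypothetical device-level names, not defined in this header):
+
+    csi_vic_set_vector(TIM0_IRQn, (uint32_t)tim0_irq_handler);
+    csi_vic_set_prio(TIM0_IRQn, 1U);
+    csi_vic_enable_irq(TIM0_IRQn);
+*/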
+
+/**
+  \brief   Get interrupt handler
+  \details Get the address of interrupt handler function.
+  \param [in]      IRQn  Interrupt number.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_vector(int32_t IRQn)
+{
+    if (IRQn >= 0 && IRQn < 32) {
+        uint32_t *vectors = (uint32_t *)__get_VBR();
+        return (uint32_t)vectors[32 + IRQn];
+    }
+
+    return 0;
+}
+
+/*@} end of CSI_Core_VICFunctions */
+
+/* ##################################    SysTick function  ############################################ */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_SysTickFunctions SysTick Functions
+  \brief    Functions that configure the System.
+  @{
+ */
+
+
+/**
+  \brief   CORE timer Configuration
+  \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
+           Counter is in free running mode to generate periodic interrupts.
+  \param [in]  ticks  Number of ticks between two interrupts.
+  \param [in]  IRQn   core timer Interrupt number.
+  \return          0  Function succeeded.
+  \return          1  Function failed.
+  \note    When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
+           function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
+           must contain a vendor-specific implementation of this function.
+ */
+__STATIC_INLINE uint32_t csi_coret_config(uint32_t ticks, int32_t IRQn)
+{
+    (void)IRQn;
+
+    if ((ticks - 1UL) > CORET_LOAD_RELOAD_Msk) {
+        return (1UL);                                                   /* Reload value impossible */
+    }
+
+    CORET->LOAD  = (uint32_t)(ticks - 1UL);                           /* set reload register */
+    CORET->VAL   = 0UL;                                               /* Load the CORET Counter Value */
+    CORET->CTRL  = CORET_CTRL_CLKSOURCE_Msk |
+                   CORET_CTRL_TICKINT_Msk |
+                   CORET_CTRL_ENABLE_Msk;                           /* Enable CORET IRQ and CORET Timer */
+    return (0UL);                                                     /* Function successful */
+}
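+
+/*
+  Example (illustrative sketch): a 1 ms tick, assuming a hypothetical 24 MHz system
+  clock and a device-defined CORET_IRQn.
+
+    #define SYSTEM_CLOCK  24000000UL                  // assumed clock in Hz, not defined here
+    csi_coret_config(SYSTEM_CLOCK / 1000U, CORET_IRQn);
+*/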
+
+/**
+  \brief   get CORE timer reload value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_load(void)
+{
+    return CORET->LOAD;
+}
+
+/**
+  \brief   get CORE timer counter value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_value(void)
+{
+    return CORET->VAL;
+}
+
+/**
+  \brief   clean CORE timer interrupt flag
+ */
+__STATIC_INLINE void csi_coret_clear_irq(void)
+{
+    CORET->CTRL;                           /* reading the CTRL register clears the COUNTFLAG bit */
+}
+
+/*@} end of CSI_Core_SysTickFunctions */
+
+
+/* ##################################### DCC function ########################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_core_DebugFunctions HAD Functions
+  \brief    Functions that access the HAD debug interface.
+  @{
+ */
+
+/**
+  \brief   HAD Send Character
+  \details Transmits a character via the HAD channel 0, and
+           \li returns immediately when no debugger that has claimed the output is connected.
+           \li blocks while a debugger is connected and the previously sent character has not yet been transmitted.
+  \param [in]     ch  Character to transmit.
+  \returns            Character that was transmitted.
+ */
+__STATIC_INLINE uint32_t csi_had_send_char(uint32_t ch)
+{
+    DCC->DERJR = (uint8_t)ch;
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Receive Character
+  \details Receives a character from the HAD data exchange register (DERJW).
+  \return             Received character.
+  \return         -1  No character pending.
+ */
+__STATIC_INLINE int32_t csi_had_receive_char(void)
+{
+    int32_t ch = -1;                           /* no character available */
+
+    if (_FLD2VAL(DCC_EHSR_JW, DCC->EHSR)) {
+        ch = DCC->DERJW;
+    }
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Check Character
+  \details Check whether a character is pending to be read from the HAD data exchange register.
+  \return          0  No character available.
+  \return          1  Character available.
+ */
+__STATIC_INLINE int32_t csi_had_check_char(void)
+{
+    return _FLD2VAL(DCC_EHSR_JW, DCC->EHSR);                              /* 1: character available, 0: no character available */
+}
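+
+/*
+  Polling sketch (illustrative only): echo characters received over the HAD debug
+  channel back to the debugger.
+
+    for (;;) {
+        if (csi_had_check_char()) {
+            int32_t c = csi_had_receive_char();
+            if (c >= 0) {
+                csi_had_send_char((uint32_t)c);
+            }
+        }
+    }
+*/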
+
+/*@} end of CSI_core_DebugFunctions */
+
+/* ##########################  MPU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_MPUFunctions MPU Functions
+  \brief    Functions that configure MPU.
+  @{
+ */
+
+typedef enum {
+    REGION_SIZE_128B     = 0x6,
+    REGION_SIZE_256B     = 0x7,
+    REGION_SIZE_512B     = 0x8,
+    REGION_SIZE_1KB      = 0x9,
+    REGION_SIZE_2KB      = 0xA,
+    REGION_SIZE_4KB      = 0xB,
+    REGION_SIZE_8KB      = 0xC,
+    REGION_SIZE_16KB     = 0xD,
+    REGION_SIZE_32KB     = 0xE,
+    REGION_SIZE_64KB     = 0xF,
+    REGION_SIZE_128KB    = 0x10,
+    REGION_SIZE_256KB    = 0x11,
+    REGION_SIZE_512KB    = 0x12,
+    REGION_SIZE_1MB      = 0x13,
+    REGION_SIZE_2MB      = 0x14,
+    REGION_SIZE_4MB      = 0x15,
+    REGION_SIZE_8MB      = 0x16,
+    REGION_SIZE_16MB     = 0x17,
+    REGION_SIZE_32MB     = 0x18,
+    REGION_SIZE_64MB     = 0x19,
+    REGION_SIZE_128MB    = 0x1A,
+    REGION_SIZE_256MB    = 0x1B,
+    REGION_SIZE_512MB    = 0x1C,
+    REGION_SIZE_1GB      = 0x1D,
+    REGION_SIZE_2GB      = 0x1E,
+    REGION_SIZE_4GB      = 0x1F
+} region_size_e;
+
+typedef enum {
+    AP_BOTH_INACCESSIBLE = 0,
+    AP_SUPER_RW_USER_INACCESSIBLE,
+    AP_SUPER_RW_USER_RDONLY,
+    AP_BOTH_RW
+} access_permission_e;
+
+typedef struct {
+    uint32_t nx: 1;    /* non-executable attribute setting */
+    access_permission_e ap: 2;    /* supervisor and user access permissions */
+    uint32_t s: 1;    /* security */
+} mpu_region_attr_t;
+
+/**
+  \brief  enable mpu
+  \details
+  */
+__STATIC_INLINE void csi_mpu_enable(void)
+{
+    __set_CCR(__get_CCR() | CCR_MP_Msk);
+}
+
+/**
+  \brief  disable mpu
+  \details
+  */
+__STATIC_INLINE void csi_mpu_disable(void)
+{
+    __set_CCR(__get_CCR() & (~CCR_MP_Msk));
+}
+
+/**
+  \brief  configure memory protected region.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  \param [in]  base_addr  base address must be aligned with page size.
+  \param [in]  size       \ref region_size_e. memory protected region size.
+  \param [in]  attr       \ref mpu_region_attr_t. memory protected region attribute.
+  \param [in]  enable     enable or disable memory protected region.
+  */
+__STATIC_INLINE void csi_mpu_config_region(uint32_t idx, uint32_t base_addr, region_size_e size,
+                                           mpu_region_attr_t attr, uint32_t enable)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    CAPR_Type capr;
+    PACR_Type pacr;
+    PRSR_Type prsr;
+
+    capr.w = __get_CAPR();
+    pacr.w = __get_PACR();
+    prsr.w = __get_PRSR();
+
+    pacr.b.base_addr = (base_addr >> PACR_BASE_ADDR_Pos) & (0x3FFFFFF);
+
+    prsr.b.RID = idx;
+    __set_PRSR(prsr.w);
+
+    if (size != REGION_SIZE_128B) {
+        /* clear the base-address bits below the region size so the base stays size-aligned */
+        pacr.w &= ~(((1u << (size - 6)) - 1) << 7);
+    }
+
+    pacr.b.Size = size;
+
+    /* clear and then set the X, AP and S fields of CAPR for this region */
+    capr.w &= ~((0x1 << idx) | (0x3 << (idx * 2 + 8)) | (0x1 << (idx + 24)));
+    capr.w = (capr.w | (attr.nx << idx) | (attr.ap << (idx * 2 + 8)) | (attr.s << (idx + 24)));
+    __set_CAPR(capr.w);
+
+    pacr.b.E = enable;
+    __set_PACR(pacr.w);
+}
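+
+/*
+  Configuration sketch (illustrative only; base address and attributes are assumptions,
+  not values mandated by this header): make region 0 a 4 KB, supervisor read/write,
+  user no-access, executable, non-secure region at 0x20000000.
+
+    mpu_region_attr_t attr;
+    attr.nx = 0U;                             // keep the region executable
+    attr.ap = AP_SUPER_RW_USER_INACCESSIBLE;  // supervisor RW, user no access
+    attr.s  = 0U;                             // non-secure
+    csi_mpu_config_region(0U, 0x20000000U, REGION_SIZE_4KB, attr, 1U);
+    csi_mpu_enable();
+*/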
+
+/**
+  \brief  enable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  */
+__STATIC_INLINE void csi_mpu_enable_region(uint32_t idx)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | idx);
+    __set_PACR(__get_PACR() | PACR_E_Msk);
+}
+
+/**
+  \brief  disable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  */
+__STATIC_INLINE void csi_mpu_disable_region(uint32_t idx)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | idx);
+    __set_PACR(__get_PACR() & (~PACR_E_Msk));
+}
+
+/*@} end of CSI_Core_MPUFunctions */
+
+/* ##################################    IRQ Functions  ############################################ */
+
+/**
+  \brief   Save the IRQ context
+  \details Save the PSR value before disabling interrupts.
+  \return  Saved PSR value, to be passed to csi_irq_restore().
+ */
+__STATIC_INLINE uint32_t csi_irq_save(void)
+{
+    uint32_t result;
+    result = __get_PSR();
+    __disable_irq();
+    return(result);
+}
+
+/**
+  \brief   Restore the IRQ context
+  \details Restore the PSR state saved by csi_irq_save().
+  \param [in]      irq_state  saved PSR value.
+ */
+__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
+{
+    __set_PSR(irq_state);
+}
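+
+/*
+  Critical-section sketch (illustrative only): protect a short non-reentrant update
+  by saving the PSR, disabling interrupts, and restoring the saved state afterwards.
+
+    uint32_t state = csi_irq_save();
+    // ... access data shared with interrupt handlers ...
+    csi_irq_restore(state);
+*/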
+
+/*@} end of IRQ Functions */
+
+/**
+  \brief   System Reset
+  \details Initiates a system reset request to reset the MCU.
+ */
+__STATIC_INLINE void csi_system_reset(void)
+{
+    CHR_Type chr;
+
+    chr.w = __get_CHR();
+#ifdef __RESET_CONST
+    chr.b.SRST_VAL = __RESET_CONST;
+#else
+    chr.b.SRST_VAL = 0xABCD;
+#endif
+
+    __DSB();                                                          /* Ensure all outstanding memory accesses, including
+                                                                         buffered writes, are completed before reset */
+    __set_CHR(chr.w);
+
+    __DSB();                                                          /* Ensure completion of memory access */
+
+    for(;;)                                                           /* wait until reset */
+    {
+        __NOP();
+    }
+}
+
+/* ##################################    Old Interfaces  ############################################ */
+
+/* These interfaces are deprecated */
+#define NVIC_EnableIRQ(IRQn)                            csi_vic_enable_irq(IRQn)
+#define NVIC_DisableIRQ(IRQn)                           csi_vic_disable_irq(IRQn)
+#define NVIC_GetPendingIRQ(IRQn)                        csi_vic_get_pending_irq(IRQn)
+#define NVIC_SetPendingIRQ(IRQn)                        csi_vic_set_pending_irq(IRQn)
+#define NVIC_ClearPendingIRQ(IRQn)                      csi_vic_clear_pending_irq(IRQn)
+#define NVIC_GetWakeupIRQ(IRQn)                         csi_vic_get_wakeup_irq(IRQn)
+#define NVIC_SetWakeupIRQ(IRQn)                         csi_vic_set_wakeup_irq(IRQn)
+#define NVIC_ClearWakeupIRQ(IRQn)                       csi_vic_clear_wakeup_irq(IRQn)
+#define NVIC_GetActive(IRQn)                            csi_vic_get_active(IRQn)
+#define NVIC_SetThreshold(VectThreshold, PrioThreshold) csi_vic_set_threshold(VectThreshold, PrioThreshold)
+#define NVIC_SetPriority(IRQn, priority)                csi_vic_set_prio(IRQn, priority)
+#define NVIC_GetPriority(IRQn)                          csi_vic_get_prio(IRQn)
+#define NVIC_SystemReset()                              csi_system_reset()
+
+#define SysTick_Config(ticks)                           csi_coret_config(ticks, CORET_IRQn)
+#define CORET_Config(ticks)                             csi_coret_config(ticks, CORET_IRQn)
+
+/*@} end of Old Interfaces */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_801_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 1562 - 0
lib/sec_library/include/core/core_802.h

@@ -0,0 +1,1562 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_802.h
+ * @brief    CSI 802 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     02. June 2017
+ ******************************************************************************/
+
+#ifndef __CORE_802_H_GENERIC
+#define __CORE_802_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup Ck802
+  @{
+ */
+
+/*  CSI CK802 definitions */
+#define __CK802_CSI_VERSION_MAIN  (0x04U)                                      /*!< [31:16] CSI HAL main version */
+#define __CK802_CSI_VERSION_SUB   (0x1EU)                                      /*!< [15:0]  CSI HAL sub version */
+#define __CK802_CSI_VERSION       ((__CK802_CSI_VERSION_MAIN << 16U) | \
+                                   __CK802_CSI_VERSION_SUB           )         /*!< CSI HAL version number */
+#ifndef __CK80X
+#define __CK80X                (0x02U)                                         /*!< CK80X Core */
+#endif
+
+/** __FPU_USED indicates whether an FPU is used or not.
+    This core does not support an FPU at all
+*/
+#define __FPU_USED       0U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_802_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_CK802_H_DEPENDANT
+#define __CORE_CK802_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __CK802_REV
+#define __CK802_REV               0x0000U
+#endif
+
+#ifndef __VIC_PRIO_BITS
+#define __VIC_PRIO_BITS           2U
+#endif
+
+#ifndef __Vendor_SysTickConfig
+#define __Vendor_SysTickConfig    1U
+#endif
+
+#ifndef __GSR_GCR_PRESENT
+#define __GSR_GCR_PRESENT         0U
+#endif
+
+#ifndef __MPU_PRESENT
+#define __MPU_PRESENT             1U
+#endif
+
+#ifndef __ICACHE_PRESENT
+#define __ICACHE_PRESENT          1U
+#endif
+
+#ifndef __DCACHE_PRESENT
+#define __DCACHE_PRESENT          1U
+#endif
+
+#include <core/csi_gcc.h>
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group CK802 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+  - Core VIC Register
+  - Core Cache Register
+  - Core CoreTIM Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK80X processor based devices.
+*/
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \brief  Access Processor Status Register(PSR)struct definition.
+ */
+typedef union {
+    struct {
+        uint32_t C: 1;                       /*!< bit:      0  Conditional code/Carry flag */
+        uint32_t _reserved0: 5;              /*!< bit:  1.. 5  Reserved */
+        uint32_t IE: 1;                      /*!< bit:      6  Interrupt effective control bit */
+        uint32_t IC: 1;                      /*!< bit:      7  Interrupt control bit */
+        uint32_t EE: 1;                      /*!< bit:      8  Exception effective control bit */
+        uint32_t MM: 1;                      /*!< bit:      9  Unsymmetrical masking bit */
+        uint32_t _reserved1: 6;              /*!< bit: 10..15  Reserved */
+        uint32_t VEC: 8;                     /*!< bit: 16..23  Exception event vector value */
+        uint32_t _reserved2: 3;              /*!< bit: 24..26  Reserved */
+        uint32_t SC: 1;                      /*!< bit:     27  Secure call bit */
+        uint32_t HS: 1;                      /*!< bit:     28  Hardware stacked bit */
+        uint32_t SP: 1;                      /*!< bit:     29  Secure pending bit */
+        uint32_t T: 1;                       /*!< bit:     30  TEE mode bit */
+        uint32_t S: 1;                       /*!< bit:     31  Superuser mode set bit */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PSR_Type;
+
+/* PSR Register Definitions */
+#define PSR_S_Pos                          31U                                            /*!< PSR: S Position */
+#define PSR_S_Msk                          (1UL << PSR_S_Pos)                             /*!< PSR: S Mask */
+
+#define PSR_T_Pos                          30U                                            /*!< PSR: T Position */
+#define PSR_T_Msk                          (1UL << PSR_T_Pos)                             /*!< PSR: T Mask */
+
+#define PSR_VEC_Pos                        16U                                            /*!< PSR: VEC Position */
+#define PSR_VEC_Msk                        (0x7FUL << PSR_VEC_Pos)                        /*!< PSR: VEC Mask */
+
+#define PSR_MM_Pos                         9U                                             /*!< PSR: MM Position */
+#define PSR_MM_Msk                         (1UL << PSR_MM_Pos)                            /*!< PSR: MM Mask */
+
+#define PSR_EE_Pos                         8U                                             /*!< PSR: EE Position */
+#define PSR_EE_Msk                         (1UL << PSR_EE_Pos)                            /*!< PSR: EE Mask */
+
+#define PSR_IC_Pos                         7U                                             /*!< PSR: IC Position */
+#define PSR_IC_Msk                         (1UL << PSR_IC_Pos)                            /*!< PSR: IC Mask */
+
+#define PSR_IE_Pos                         6U                                             /*!< PSR: IE Position */
+#define PSR_IE_Msk                         (1UL << PSR_IE_Pos)                            /*!< PSR: IE Mask */
+
+#define PSR_C_Pos                          0U                                             /*!< PSR: C Position */
+#define PSR_C_Msk                          (1UL << PSR_C_Pos)                             /*!< PSR: C Mask */
+
+/**
+  \brief Consortium definition for accessing Cache Configuration Registers(CCR, CR<18, 0>).
+ */
+typedef union {
+    struct {
+        uint32_t MP: 2;                      /*!< bit:  0.. 1  memory protection settings */
+        uint32_t _reserved0: 5;              /*!< bit:  2.. 6  Reserved */
+        uint32_t BE: 1;                      /*!< bit:      7  Endian mode */
+        uint32_t SCK: 3;                     /*!< bit:  8..10  the clock ratio of the system and the processor */
+        uint32_t _reserved1: 2;              /*!< bit: 11..12  Reserved */
+        uint32_t BE_V2: 1;                   /*!< bit:     13  V2 Endian mode */
+        uint32_t _reserved2: 18;             /*!< bit: 14..31  Reserved */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CCR_Type;
+
+/* CCR Register Definitions */
+#define CCR_BE_V2_Pos                     13U                                            /*!< CCR: BE_V2 Position */
+#define CCR_BE_V2_Msk                     (0x1UL << CCR_BE_V2_Pos)                       /*!< CCR: BE_V2 Mask */
+
+#define CCR_SCK_Pos                       8U                                             /*!< CCR: SCK Position */
+#define CCR_SCK_Msk                       (0x3UL << CCR_SCK_Pos)                         /*!< CCR: SCK Mask */
+
+#define CCR_BE_Pos                        7U                                             /*!< CCR: BE Position */
+#define CCR_BE_Msk                        (0x1UL << CCR_BE_Pos)                          /*!< CCR: BE Mask */
+
+#define CCR_MP_Pos                        0U                                             /*!< CCR: MP Position */
+#define CCR_MP_Msk                        (0x3UL << CCR_MP_Pos)                          /*!< CCR: MP Mask */
+
+/**
+  \brief  Consortium definition for accessing the access permission configuration register (CAPR, CR<19,0>).
+ */
+typedef union {
+    struct {
+        uint32_t X0: 1;                      /*!< bit:      0  Non executable attribute setting */
+        uint32_t X1: 1;                      /*!< bit:      1  Non executable attribute setting */
+        uint32_t X2: 1;                      /*!< bit:      2  Non executable attribute setting */
+        uint32_t X3: 1;                      /*!< bit:      3  Non executable attribute setting */
+        uint32_t X4: 1;                      /*!< bit:      4  Non executable attribute setting */
+        uint32_t X5: 1;                      /*!< bit:      5  Non executable attribute setting */
+        uint32_t X6: 1;                      /*!< bit:      6  Non executable attribute setting */
+        uint32_t X7: 1;                      /*!< bit:      7  Non executable attribute setting */
+        uint32_t AP0: 2;                     /*!< bit:  8.. 9  access permissions settings bit */
+        uint32_t AP1: 2;                     /*!< bit: 10..11  access permissions settings bit */
+        uint32_t AP2: 2;                     /*!< bit: 12..13  access permissions settings bit */
+        uint32_t AP3: 2;                     /*!< bit: 14..15  access permissions settings bit */
+        uint32_t AP4: 2;                     /*!< bit: 16..17  access permissions settings bit */
+        uint32_t AP5: 2;                     /*!< bit: 18..19  access permissions settings bit */
+        uint32_t AP6: 2;                     /*!< bit: 20..21  access permissions settings bit */
+        uint32_t AP7: 2;                     /*!< bit: 22..23  access permissions settings bit */
+        uint32_t S0: 1;                      /*!< bit:     24  Security property settings */
+        uint32_t S1: 1;                      /*!< bit:     25  Security property settings */
+        uint32_t S2: 1;                      /*!< bit:     26  Security property settings */
+        uint32_t S3: 1;                      /*!< bit:     27  Security property settings */
+        uint32_t S4: 1;                      /*!< bit:     28  Security property settings */
+        uint32_t S5: 1;                      /*!< bit:     29  Security property settings */
+        uint32_t S6: 1;                      /*!< bit:     30  Security property settings */
+        uint32_t S7: 1;                      /*!< bit:     31  Security property settings */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CAPR_Type;
+
+/* CAPR Register Definitions */
+#define CAPR_S7_Pos                        31U                                            /*!< CAPR: S7 Position */
+#define CAPR_S7_Msk                        (1UL << CAPR_S7_Pos)                           /*!< CAPR: S7 Mask */
+
+#define CAPR_S6_Pos                        30U                                            /*!< CAPR: S6 Position */
+#define CAPR_S6_Msk                        (1UL << CAPR_S6_Pos)                           /*!< CAPR: S6 Mask */
+
+#define CAPR_S5_Pos                        29U                                            /*!< CAPR: S5 Position */
+#define CAPR_S5_Msk                        (1UL << CAPR_S5_Pos)                           /*!< CAPR: S5 Mask */
+
+#define CAPR_S4_Pos                        28U                                            /*!< CAPR: S4 Position */
+#define CAPR_S4_Msk                        (1UL << CAPR_S4_Pos)                           /*!< CAPR: S4 Mask */
+
+#define CAPR_S3_Pos                        27U                                            /*!< CAPR: S3 Position */
+#define CAPR_S3_Msk                        (1UL << CAPR_S3_Pos)                           /*!< CAPR: S3 Mask */
+
+#define CAPR_S2_Pos                        26U                                            /*!< CAPR: S2 Position */
+#define CAPR_S2_Msk                        (1UL << CAPR_S2_Pos)                           /*!< CAPR: S2 Mask */
+
+#define CAPR_S1_Pos                        25U                                            /*!< CAPR: S1 Position */
+#define CAPR_S1_Msk                        (1UL << CAPR_S1_Pos)                           /*!< CAPR: S1 Mask */
+
+#define CAPR_S0_Pos                        24U                                            /*!< CAPR: S0 Position */
+#define CAPR_S0_Msk                        (1UL << CAPR_S0_Pos)                           /*!< CAPR: S0 Mask */
+
+#define CAPR_AP7_Pos                       22U                                            /*!< CAPR: AP7 Position */
+#define CAPR_AP7_Msk                       (0x3UL << CAPR_AP7_Pos)                        /*!< CAPR: AP7 Mask */
+
+#define CAPR_AP6_Pos                       20U                                            /*!< CAPR: AP6 Position */
+#define CAPR_AP6_Msk                       (0x3UL << CAPR_AP6_Pos)                        /*!< CAPR: AP6 Mask */
+
+#define CAPR_AP5_Pos                       18U                                            /*!< CAPR: AP5 Position */
+#define CAPR_AP5_Msk                       (0x3UL << CAPR_AP5_Pos)                        /*!< CAPR: AP5 Mask */
+
+#define CAPR_AP4_Pos                       16U                                            /*!< CAPR: AP4 Position */
+#define CAPR_AP4_Msk                       (0x3UL << CAPR_AP4_Pos)                        /*!< CAPR: AP4 Mask */
+
+#define CAPR_AP3_Pos                       14U                                            /*!< CAPR: AP3 Position */
+#define CAPR_AP3_Msk                       (0x3UL << CAPR_AP3_Pos)                        /*!< CAPR: AP3 Mask */
+
+#define CAPR_AP2_Pos                       12U                                            /*!< CAPR: AP2 Position */
+#define CAPR_AP2_Msk                       (0x3UL << CAPR_AP2_Pos)                        /*!< CAPR: AP2 Mask */
+
+#define CAPR_AP1_Pos                       10U                                            /*!< CAPR: AP1 Position */
+#define CAPR_AP1_Msk                       (0x3UL << CAPR_AP1_Pos)                        /*!< CAPR: AP1 Mask */
+
+#define CAPR_AP0_Pos                       8U                                             /*!< CAPR: AP0 Position */
+#define CAPR_AP0_Msk                       (0x3UL << CAPR_AP0_Pos)                        /*!< CAPR: AP0 Mask */
+
+#define CAPR_X7_Pos                        7U                                             /*!< CAPR: X7 Position */
+#define CAPR_X7_Msk                        (0x1UL << CAPR_X7_Pos)                         /*!< CAPR: X7 Mask */
+
+#define CAPR_X6_Pos                        6U                                             /*!< CAPR: X6 Position */
+#define CAPR_X6_Msk                        (0x1UL << CAPR_X6_Pos)                         /*!< CAPR: X6 Mask */
+
+#define CAPR_X5_Pos                        5U                                             /*!< CAPR: X5 Position */
+#define CAPR_X5_Msk                        (0x1UL << CAPR_X5_Pos)                         /*!< CAPR: X5 Mask */
+
+#define CAPR_X4_Pos                        4U                                             /*!< CAPR: X4 Position */
+#define CAPR_X4_Msk                        (0x1UL << CAPR_X4_Pos)                         /*!< CAPR: X4 Mask */
+
+#define CAPR_X3_Pos                        3U                                             /*!< CAPR: X3 Position */
+#define CAPR_X3_Msk                        (0x1UL << CAPR_X3_Pos)                         /*!< CAPR: X3 Mask */
+
+#define CAPR_X2_Pos                        2U                                             /*!< CAPR: X2 Position */
+#define CAPR_X2_Msk                        (0x1UL << CAPR_X2_Pos)                         /*!< CAPR: X2 Mask */
+
+#define CAPR_X1_Pos                        1U                                             /*!< CAPR: X1 Position */
+#define CAPR_X1_Msk                        (0x1UL << CAPR_X1_Pos)                         /*!< CAPR: X1 Mask */
+
+#define CAPR_X0_Pos                        0U                                             /*!< CAPR: X0 Position */
+#define CAPR_X0_Msk                        (0x1UL << CAPR_X0_Pos)                         /*!< CAPR: X0 Mask */
+
+/**
+  \brief  Consortium definition for accessing control register(PACR, CR<20,0>).
+ */
+typedef union {
+    struct {
+        uint32_t E: 1;                       /*!< bit:      0  Effective setting of protected area */
+        uint32_t size: 5;                    /*!< bit:  1.. 5  Size of protected area */
+        uint32_t _reserved0: 1;              /*!< bit:      6  Reserved */
+        uint32_t base_addr: 25;              /*!< bit:  7..31  High-order bits of the protected area base address */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PACR_Type;
+
+/* PACR Register Definitions */
+#define PACR_BASE_ADDR_Pos                 7U                                              /*!< PACR: base_addr Position */
+#define PACR_BASE_ADDR_Msk                 (0x1FFFFFFUL << PACR_BASE_ADDR_Pos)             /*!< PACR: base_addr Mask */
+
+#define PACR_SIZE_Pos                      1U                                              /*!< PACR: Size Position */
+#define PACR_SIZE_Msk                      (0x1FUL << PACR_SIZE_Pos)                       /*!< PACR: Size Mask */
+
+#define PACR_E_Pos                         0U                                              /*!< PACR: E Position */
+#define PACR_E_Msk                         (0x1UL << PACR_E_Pos)                           /*!< PACR: E Mask */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register(PRSR,CR<21,0>).
+ */
+typedef union {
+    struct {
+        uint32_t RID: 3;                     /*!< bit:  0.. 2  Protected area index value */
+        uint32_t _reserved0: 29;             /*!< bit:  3..31  Reserved */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PRSR_Type;
+
+/* PRSR Register Definitions */
+#define PRSR_RID_Pos                       0U                                            /*!< PRSR: RID Position */
+#define PRSR_RID_Msk                       (0x7UL << PRSR_RID_Pos)                       /*!< PRSR: RID Mask */
+
+/**
+  \brief  Consortium definition for CPU Hint Register(CHR, CR<31,0>).
+ */
+typedef union {
+    struct {
+        uint32_t _reserved0: 4;              /*!< bit:  0.. 3  Reserved */
+        uint32_t IAE: 1;                     /*!< bit:  4      Interrupt response acceleration enable */
+        uint32_t _reserved1: 9;              /*!< bit:  5..13  Reserved */
+        uint32_t ISE: 1;                     /*!< bit: 14      Interrupt SP enable */
+        uint32_t HS_EXP: 1;                  /*!< bit: 15      Exception bit for TEE world switch */
+        uint32_t SRST_VAL: 16;               /*!< bit: 16..31  Software reset decision value */
+    } b;
+    uint32_t w;
+} CHR_Type;
+
+/* CHR Register Definitions */
+#define CHR_IAE_Pos                        4U                                            /*!< CHR: IAE Position */
+#define CHR_IAE_Msk                        (0x1UL << CHR_IAE_Pos)                        /*!< CHR: IAE Mask */
+#define CHR_ISE_Pos                        14U                                           /*!< CHR: ISE Position */
+#define CHR_ISE_Msk                        (0x1UL << CHR_ISE_Pos)                        /*!< CHR: ISE Mask */
+#define CHR_HS_EXP_Pos                     15U                                           /*!< CHR: HS_EXP Position */
+#define CHR_HS_EXP_Msk                     (0x1UL << CHR_HS_EXP_Pos)                     /*!< CHR: HS_EXP Mask */
+#define CHR_SRST_VAL_Pos                   16U                                           /*!< CHR: SRST_VAL Position */
+#define CHR_SRST_VAL_Mask                  (0xFFFFUL << CHR_SRST_VAL_Pos)                /*!< CHR: SRST_VAL Mask */
+
+/*@} end of group CSI_CORE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_VIC Vectored Interrupt Controller (VIC)
+  \brief      Type definitions for the VIC Registers
+  @{
+ */
+
+/**
+  \brief Access to the structure of a vector interrupt controller.
+ */
+typedef struct {
+    __IOM uint32_t ISER[4U];               /*!< Offset: 0x000 (R/W)  Interrupt set enable register */
+    uint32_t RESERVED0[12U];
+    __IOM uint32_t IWER[4U];               /*!< Offset: 0x040 (R/W)  Interrupt wake-up set register */
+    uint32_t RESERVED1[12U];
+    __IOM uint32_t ICER[4U];               /*!< Offset: 0x080 (R/W)  Interrupt clear enable register */
+    uint32_t RESERVED2[12U];
+    __IOM uint32_t IWDR[4U];               /*!< Offset: 0x0c0 (R/W)  Interrupt wake-up clear register */
+    uint32_t RESERVED3[12U];
+    __IOM uint32_t ISPR[4U];               /*!< Offset: 0x100 (R/W)  Interrupt set pend register */
+    uint32_t RESERVED4[12U];
+    __IOM uint32_t ISSR[4U];               /*!< Offset: 0x140 (R/W)  Security interrupt set register */
+    uint32_t RESERVED5[12U];
+    __IOM uint32_t ICPR[4U];               /*!< Offset: 0x180 (R/W)  Interrupt clear pend register */
+    uint32_t RESERVED6[12U];
+    __IOM uint32_t ICSR[4U];               /*!< Offset: 0x1c0 (R/W)  Security interrupt clear register */
+    uint32_t RESERVED7[12U];
+    __IOM uint32_t IABR[4U];               /*!< Offset: 0x200 (R/W)  Interrupt answer state register */
+    uint32_t RESERVED8[60U];
+    __IOM uint32_t IPR[32U];               /*!< Offset: 0x300 (R/W)  Interrupt priority register */
+    uint32_t RESERVED9[480U];
+    __IM  uint32_t ISR;                    /*!< Offset: 0xB00 (R/ )  Interrupt state register */
+    __IOM uint32_t IPTR;                   /*!< Offset: 0xB04 (R/W)  Interrupt priority threshold register */
+    __IOM uint32_t TSPEND;                 /*!< Offset: 0xB08 (R/W)  Task pending register */
+    __IOM uint32_t TSABR;                  /*!< Offset: 0xB0c (R/W)  Tspend acknowledge register */
+    __IOM uint32_t TSPR;                   /*!< Offset: 0xB10 (R/W)  Tspend priority register */
+} VIC_Type;
+
+/*@} end of group CSI_VIC */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+
+/**
+  \brief On chip cache structure.
+ */
+typedef struct
+{
+    __IOM uint32_t CER;                    /*!< Offset: 0x000 (R/W)  Cache enable register */
+    __IOM uint32_t CIR;                    /*!< Offset: 0x004 (R/W)  Cache invalid register */
+    __IOM uint32_t CRCR[4U];               /*!< Offset: 0x008 (R/W)  Cache Configuration register */
+          uint32_t RSERVED0[1015U];
+    __IOM uint32_t CPFCR;                  /*!< Offset: 0xFF4 (R/W)  Cache performance analysis control register */
+    __IOM uint32_t CPFATR;                 /*!< Offset: 0xFF8 (R/W)  Cache access times register */
+    __IOM uint32_t CPFMTR;                 /*!< Offset: 0xFFC (R/W)  Cache missing times register */
+} CACHE_Type;
+
+/* CACHE Register Definitions */
+#define CACHE_CER_EN_Pos                       0U                                            /*!< CACHE CER: EN Position */
+#define CACHE_CER_EN_Msk                       (0x1UL << CACHE_CER_EN_Pos)                   /*!< CACHE CER: EN Mask */
+
+#define CACHE_CER_CFIG_Pos                     1U                                            /*!< CACHE CER: CFIG Position */
+#define CACHE_CER_CFIG_Msk                     (0x1UL << CACHE_CER_CFIG_Pos)                 /*!< CACHE CER: CFIG Mask */
+
+#define CACHE_CIR_INV_ALL_Pos                  0U                                            /*!< CACHE CIR: INV_ALL Position */
+#define CACHE_CIR_INV_ALL_Msk                  (0x1UL << CACHE_CIR_INV_ALL_Pos)              /*!< CACHE CIR: INV_ALL Mask */
+
+#define CACHE_CIR_INV_ONE_Pos                  1U                                            /*!< CACHE CIR: INV_ONE Position */
+#define CACHE_CIR_INV_ONE_Msk                  (0x1UL << CACHE_CIR_INV_ONE_Pos)              /*!< CACHE CIR: INV_ONE Mask */
+
+#define CACHE_CIR_CLR_ALL_Pos                  2U                                            /*!< CACHE CIR: CLR_ALL Position */
+#define CACHE_CIR_CLR_ALL_Msk                  (0x1UL << CACHE_CIR_CLR_ALL_Pos)              /*!< CACHE CIR: CLR_ALL Mask */
+
+#define CACHE_CIR_CLR_ONE_Pos                  3U                                            /*!< CACHE CIR: CLR_ONE Position */
+#define CACHE_CIR_CLR_ONE_Msk                  (0x1UL << CACHE_CIR_CLR_ONE_Pos)              /*!< CACHE CIR: CLR_ONE Mask */
+
+#define CACHE_CIR_INV_ADDR_Pos                 4U                                            /*!< CACHE CIR: INV_ADDR Position */
+#define CACHE_CIR_INV_ADDR_Msk                 (0xFFFFFFFUL << CACHE_CIR_INV_ADDR_Pos)       /*!< CACHE CIR: INV_ADDR Mask */
+
+#define CACHE_CRCR_EN_Pos                      0U                                            /*!< CACHE CRCR: EN Position */
+#define CACHE_CRCR_EN_Msk                      (0x1UL << CACHE_CRCR_EN_Pos)                  /*!< CACHE CRCR: EN Mask */
+
+#define CACHE_CRCR_SIZE_Pos                    1U                                            /*!< CACHE CRCR: Size Position */
+#define CACHE_CRCR_SIZE_Msk                    (0x1FUL << CACHE_CRCR_SIZE_Pos)               /*!< CACHE CRCR: Size Mask */
+
+#define CACHE_CRCR_BASE_ADDR_Pos               10U                                           /*!< CACHE CRCR: base addr Position */
+#define CACHE_CRCR_BASE_ADDR_Msk               (0x3FFFFFUL << CACHE_CRCR_BASE_ADDR_Pos)      /*!< CACHE CRCR: base addr Mask */
+
+#define CACHE_CPFCR_PFEN_Pos                   0U                                            /*!< CACHE CPFCR: PFEN Position */
+#define CACHE_CPFCR_PFEN_Msk                   (0x1UL << CACHE_CPFCR_PFEN_Pos)               /*!< CACHE CPFCR: PFEN Mask */
+
+#define CACHE_CPFCR_PFRST_Pos                  1U                                            /*!< CACHE CPFCR: PFRST Position */
+#define CACHE_CPFCR_PFRST_Msk                  (0x1UL << CACHE_CPFCR_PFRST_Pos)              /*!< CACHE CPFCR: PFRST Mask */
+
+#define CACHE_CRCR_4K                          0xB                                           /* 01011 */
+#define CACHE_CRCR_8K                          0xC                                           /* 01100 */
+#define CACHE_CRCR_16K                         0xD                                           /* 01101 */
+#define CACHE_CRCR_32K                         0xE                                           /* 01110 */
+#define CACHE_CRCR_64K                         0xF                                           /* 01111 */
+#define CACHE_CRCR_128K                        0x10                                          /* 10000 */
+#define CACHE_CRCR_256K                        0x11                                          /* 10001 */
+#define CACHE_CRCR_512K                        0x12                                          /* 10010 */
+#define CACHE_CRCR_1M                          0x13                                          /* 10011 */
+#define CACHE_CRCR_2M                          0x14                                          /* 10100 */
+#define CACHE_CRCR_4M                          0x15                                          /* 10101 */
+#define CACHE_CRCR_8M                          0x16                                          /* 10110 */
+#define CACHE_CRCR_16M                         0x17                                          /* 10111 */
+#define CACHE_CRCR_32M                         0x18                                          /* 11000 */
+#define CACHE_CRCR_64M                         0x19                                          /* 11001 */
+#define CACHE_CRCR_128M                        0x1A                                          /* 11010 */
+#define CACHE_CRCR_256M                        0x1B                                          /* 11011 */
+#define CACHE_CRCR_512M                        0x1C                                          /* 11100 */
+#define CACHE_CRCR_1G                          0x1D                                          /* 11101 */
+#define CACHE_CRCR_2G                          0x1E                                          /* 11110 */
+#define CACHE_CRCR_4G                          0x1F                                          /* 11111 */
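+
+/*
+  Cacheable-region sketch (illustrative only; the window address is an assumption):
+  mark a 16 MB window at 0x18000000 as cacheable through CRCR[0], composing the value
+  from the field definitions above (EN in bit 0, size code in bits 1..5, base-address
+  bits kept in bits 10..31). "cache" stands for a device-specific pointer to the
+  CACHE_Type register block, which this header does not define.
+
+    cache->CRCR[0] = (0x18000000UL & CACHE_CRCR_BASE_ADDR_Msk)
+                   | ((uint32_t)CACHE_CRCR_16M << CACHE_CRCR_SIZE_Pos)
+                   | CACHE_CRCR_EN_Msk;
+*/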
+
+/*@} end of group CSI_CACHE */
+
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_SysTick     System Tick Timer (CORET)
+  \brief    Type definitions for the System Timer Registers.
+  @{
+ */
+
+/**
+  \brief  The data structure of the access system timer.
+ */
+typedef struct {
+    __IOM uint32_t CTRL;                   /*!< Offset: 0x000 (R/W)  Control register */
+    __IOM uint32_t LOAD;                   /*!< Offset: 0x004 (R/W)  Reload value register */
+    __IOM uint32_t VAL;                    /*!< Offset: 0x008 (R/W)  Current register */
+    __IM  uint32_t CALIB;                  /*!< Offset: 0x00C (R/ )  Calibration register */
+} CORET_Type;
+
+/* CORET Control / Status Register Definitions */
+#define CORET_CTRL_COUNTFLAG_Pos           16U                                            /*!< CORET CTRL: COUNTFLAG Position */
+#define CORET_CTRL_COUNTFLAG_Msk           (1UL << CORET_CTRL_COUNTFLAG_Pos)              /*!< CORET CTRL: COUNTFLAG Mask */
+
+#define CORET_CTRL_CLKSOURCE_Pos           2U                                             /*!< CORET CTRL: CLKSOURCE Position */
+#define CORET_CTRL_CLKSOURCE_Msk           (1UL << CORET_CTRL_CLKSOURCE_Pos)              /*!< CORET CTRL: CLKSOURCE Mask */
+
+#define CORET_CTRL_TICKINT_Pos             1U                                             /*!< CORET CTRL: TICKINT Position */
+#define CORET_CTRL_TICKINT_Msk             (1UL << CORET_CTRL_TICKINT_Pos)                /*!< CORET CTRL: TICKINT Mask */
+
+#define CORET_CTRL_ENABLE_Pos              0U                                             /*!< CORET CTRL: ENABLE Position */
+#define CORET_CTRL_ENABLE_Msk              (1UL /*<< CORET_CTRL_ENABLE_Pos*/)             /*!< CORET CTRL: ENABLE Mask */
+
+    /* CORET Reload Register Definitions */
+#define CORET_LOAD_RELOAD_Pos              0U                                             /*!< CORET LOAD: RELOAD Position */
+#define CORET_LOAD_RELOAD_Msk              (0xFFFFFFUL /*<< CORET_LOAD_RELOAD_Pos*/)      /*!< CORET LOAD: RELOAD Mask */
+
+    /* CORET Current Register Definitions */
+#define CORET_VAL_CURRENT_Pos              0U                                             /*!< CORET VAL: CURRENT Position */
+#define CORET_VAL_CURRENT_Msk              (0xFFFFFFUL /*<< CORET_VAL_CURRENT_Pos*/)      /*!< CORET VAL: CURRENT Mask */
+
+    /* CORET Calibration Register Definitions */
+#define CORET_CALIB_NOREF_Pos              31U                                            /*!< CORET CALIB: NOREF Position */
+#define CORET_CALIB_NOREF_Msk              (1UL << CORET_CALIB_NOREF_Pos)                 /*!< CORET CALIB: NOREF Mask */
+
+#define CORET_CALIB_SKEW_Pos               30U                                            /*!< CORET CALIB: SKEW Position */
+#define CORET_CALIB_SKEW_Msk               (1UL << CORET_CALIB_SKEW_Pos)                  /*!< CORET CALIB: SKEW Mask */
+
+#define CORET_CALIB_TENMS_Pos              0U                                             /*!< CORET CALIB: TENMS Position */
+#define CORET_CALIB_TENMS_Msk              (0xFFFFFFUL /*<< CORET_CALIB_TENMS_Pos*/)      /*!< CORET CALIB: TENMS Mask */
+
+/*@} end of group CSI_SysTick */
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_DCC
+  \brief    Type definitions for the DCC.
+  @{
+ */
+
+/**
+  \brief  Access to the data structure of DCC.
+ */
+typedef struct {
+    uint32_t RESERVED0[13U];
+    __IOM uint32_t HCR;                    /*!< Offset: 0x034 (R/W) */
+    __IM  uint32_t EHSR;                   /*!< Offset: 0x03C (R/ ) */
+    uint32_t RESERVED1[6U];
+    union {
+        __IM uint32_t DERJW;               /*!< Offset: 0x058 (R/ )  Data exchange register CPU read*/
+        __OM uint32_t DERJR;               /*!< Offset: 0x058 ( /W)  Data exchange register CPU write*/
+    };
+
+} DCC_Type;
+
+#define DCC_HCR_JW_Pos                   18U                                            /*!< DCC HCR: jw_int_en Position */
+#define DCC_HCR_JW_Msk                   (1UL << DCC_HCR_JW_Pos)                        /*!< DCC HCR: jw_int_en Mask */
+
+#define DCC_HCR_JR_Pos                   19U                                            /*!< DCC HCR: jr_int_en Position */
+#define DCC_HCR_JR_Msk                   (1UL << DCC_HCR_JR_Pos)                        /*!< DCC HCR: jr_int_en Mask */
+
+#define DCC_EHSR_JW_Pos                  1U                                             /*!< DCC EHSR: jw_vld Position */
+#define DCC_EHSR_JW_Msk                  (1UL << DCC_EHSR_JW_Pos)                       /*!< DCC EHSR: jw_vld Mask */
+
+#define DCC_EHSR_JR_Pos                  2U                                             /*!< DCC EHSR: jr_vld Position */
+#define DCC_EHSR_JR_Msk                  (1UL << DCC_EHSR_JR_Pos)                       /*!< DCC EHSR: jr_vld Mask */
+
+/*@} end of group CSI_DCC */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)
+
+/*@} end of group CSI_core_bitfield */
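+
+/*
+ * Usage sketch (added for illustration; not part of the original header): the
+ * _VAL2FLD/_FLD2VAL helpers pack a value into a register field and extract it
+ * again using the xxx_Pos/xxx_Msk pairs. The local variable below stands in for
+ * a register value.
+ *
+ *   uint32_t ctrl = 0U;
+ *   ctrl |= _VAL2FLD(CORET_CTRL_CLKSOURCE, 1U);               // set bit 2
+ *   ctrl |= _VAL2FLD(CORET_CTRL_ENABLE, 1U);                  // set bit 0
+ *   uint32_t clksrc = _FLD2VAL(CORET_CTRL_CLKSOURCE, ctrl);   // reads back 1
+ */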
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_base     Core Definitions
+  \brief      Definitions for base addresses, unions, and structures.
+  @{
+ */
+
+/* Memory mapping of CK802 Hardware */
+#define TCIP_BASE           (0xE000E000UL)                            /*!< Tightly Coupled IP Base Address */
+#define CORET_BASE          (TCIP_BASE +  0x0010UL)                   /*!< CORET Base Address */
+#define VIC_BASE            (TCIP_BASE +  0x0100UL)                   /*!< VIC Base Address */
+#define DCC_BASE            (0xE0011000UL)                            /*!< DCC Base Address */
+#define CACHE_BASE          (TCIP_BASE +  0x1000UL)                   /*!< CACHE Base Address */
+
+#define CORET               ((CORET_Type   *)     CORET_BASE  )       /*!< SysTick configuration struct */
+#define VIC                 ((VIC_Type     *)     VIC_BASE    )       /*!< VIC configuration struct */
+#define DCC                 ((DCC_Type     *)     DCC_BASE    )       /*!< DCC configuration struct */
+#define CACHE               ((CACHE_Type   *)     CACHE_BASE  )       /*!< cache configuration struct */
+
+/*@} */
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+/**
+  \defgroup CSI_Core_FunctionInterface Functions and Instructions Reference
+*/
+
+/* ##########################   VIC functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_VICFunctions VIC Functions
+  \brief    Functions that manage interrupts and exceptions via the VIC.
+  @{
+ */
+
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _IR_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    5UL)      )
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
+
+/**
+  \brief   Enable External Interrupt
+  \details Enable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ISER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Disable External Interrupt
+  \details Disable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ICER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Enable External Secure Interrupt
+  \details Enable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_sirq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ISER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+    VIC->ISSR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Disable External Secure Interrupt
+  \details Disable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_sirq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ICER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+    VIC->ICSR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Check Interrupt is Enabled or not
+  \details Read the enable register in the VIC and return the enable bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not enabled.
+  \return             1  Interrupt status is enabled.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_enabled_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->ISER[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Check Interrupt is Pending or not
+  \details Read the pending register in the VIC and returns the pending bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not pending.
+  \return             1  Interrupt status is pending.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_pending_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->ISPR[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Pending Interrupt
+  \details Set the pending bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_pending_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ISPR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Clear Pending Interrupt
+  \details Clear the pending bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_pending_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ICPR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Check Interrupt is Wakeup or not
+  \details Read the wake-up register in the VIC and return the wake-up bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt is not set as wake up interrupt.
+  \return             1  Interrupt is set as wake up interrupt.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_wakeup_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->IWER[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Wake up Interrupt
+  \details Set the wake up bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_wakeup_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->IWER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Clear Wake up Interrupt
+  \details Clear the wake up bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_wakeup_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->IWDR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Get Interrupt is Active or not
+  \details Read the active register in the VIC and returns the active bit for the device specific interrupt.
+  \param [in]      IRQn  Device specific interrupt number.
+  \return             0  Interrupt status is not active.
+  \return             1  Interrupt status is active.
+  \note    IRQn must not be negative.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_active(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->IABR[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Threshold register
+  \details set the threshold register in the VIC.
+  \param [in]      VectThreshold  specific vector threshold.
+  \param [in]      PrioThreshold  specific priority threshold.
+ */
+__STATIC_INLINE void csi_vic_set_threshold(uint32_t VectThreshold, uint32_t PrioThreshold)
+{
+    VectThreshold &= 0x7FUL;
+
+    if (VectThreshold <= 31) {
+        VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0x3) << 6);
+    }
+
+    if (VectThreshold > 31 && VectThreshold < 96) {
+        VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0x7) << 5);
+    }
+
+    if (VectThreshold > 95) {
+        VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0xF) << 4);
+    }
+}
+
+/**
+  \brief   Set Interrupt Priority
+  \details Set the priority of an interrupt.
+  \note    The priority cannot be set for every core interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]  priority  Priority to set.
+ */
+__STATIC_INLINE void csi_vic_set_prio(int32_t IRQn, uint32_t priority)
+{
+    VIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(VIC->IPR[_IP_IDX(IRQn)]  & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
+                                 (((priority << (8U - __VIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
+}
+
+/**
+  \brief   Get Interrupt Priority
+  \details Read the priority of an interrupt.
+           The interrupt number can be positive to specify an external (device specific) interrupt,
+           or negative to specify an internal (core) interrupt.
+  \param [in]   IRQn  Interrupt number.
+  \return             Interrupt Priority.
+                      Value is aligned automatically to the implemented priority bits of the microcontroller.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_prio(int32_t IRQn)
+{
+    return ((uint32_t)(((VIC->IPR[_IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn)) & (uint32_t)0xFFUL) >> (8U - __VIC_PRIO_BITS)));
+}
+
+/**
+  \brief   Set interrupt handler
+  \details Set the interrupt handler for the given interrupt number; the handler address is written into the interrupt vector table.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]   handler  Interrupt handler.
+ */
+__STATIC_INLINE void csi_vic_set_vector(int32_t IRQn, uint32_t handler)
+{
+    if (IRQn >= 0 && IRQn < 128) {
+        uint32_t *vectors = (uint32_t *)__get_VBR();
+        vectors[32 + IRQn] = handler;
+    }
+}
+
+/**
+  \brief   Get interrupt handler
+  \details Get the address of interrupt handler function.
+  \param [in]      IRQn  Interrupt number.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_vector(int32_t IRQn)
+{
+    if (IRQn >= 0 && IRQn < 128) {
+        uint32_t *vectors = (uint32_t *)__get_VBR();
+        return (uint32_t)vectors[32 + IRQn];
+    }
+
+    return 0;
+}
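+
+/*
+ * Usage sketch (added for illustration): install a handler and enable an external
+ * interrupt. TIM0_IRQn and tim0_irq_handler are hypothetical names taken from a
+ * device header, not defined here.
+ *
+ *   extern void tim0_irq_handler(void);
+ *
+ *   csi_vic_set_prio(TIM0_IRQn, 1U);                           // priority, limited by __VIC_PRIO_BITS
+ *   csi_vic_set_vector(TIM0_IRQn, (uint32_t)tim0_irq_handler); // write handler into the vector table
+ *   csi_vic_enable_irq(TIM0_IRQn);                             // unmask the interrupt
+ */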
+
+/*@} end of CSI_Core_VICFunctions */
+
+/* ##################################    SysTick function  ############################################ */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_SysTickFunctions SysTick Functions
+  \brief    Functions that configure the System.
+  @{
+ */
+
+
+/**
+  \brief   CORE timer Configuration
+  \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
+           Counter is in free running mode to generate periodic interrupts.
+  \param [in]  ticks  Number of ticks between two interrupts.
+  \param [in]  IRQn   core timer Interrupt number.
+  \return          0  Function succeeded.
+  \return          1  Function failed.
+  \note    When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
+           function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
+           must contain a vendor-specific implementation of this function.
+ */
+__STATIC_INLINE uint32_t csi_coret_config(uint32_t ticks, int32_t IRQn)
+{
+    if ((ticks - 1UL) > CORET_LOAD_RELOAD_Msk) {
+        return (1UL);                                                   /* Reload value impossible */
+    }
+
+    CORET->LOAD = (uint32_t)(ticks - 1UL);                              /* set reload register */
+    CORET->VAL  = 0UL;                                                  /* Load the CORET Counter Value */
+    CORET->CTRL = CORET_CTRL_CLKSOURCE_Msk |
+                  CORET_CTRL_TICKINT_Msk |
+                  CORET_CTRL_ENABLE_Msk;                                /* Enable CORET IRQ and CORET Timer */
+    return (0UL);                                                       /* Function successful */
+}
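+
+/*
+ * Usage sketch (added for illustration): program a 10 ms periodic tick.
+ * SYSTEM_CLOCK and CORET_IRQn are assumed to come from the device header.
+ *
+ *   if (csi_coret_config(SYSTEM_CLOCK / 100U, CORET_IRQn) != 0U) {
+ *       // reload value does not fit into the 24-bit LOAD register
+ *   }
+ */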
+
+/**
+  \brief   get CORE timer reload value
+  \return          CORE timer reload value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_load(void)
+{
+    return CORET->LOAD;
+}
+
+/**
+  \brief   get CORE timer counter value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_value(void)
+{
+    return CORET->VAL;
+}
+
+/**
+  \brief   clean CORE timer interrupt flag
+ */
+__STATIC_INLINE void csi_coret_clear_irq(void)
+{
+    CORET->CTRL;                           /* a read of CTRL clears the COUNTFLAG bit */
+}
+
+/*@} end of CSI_Core_SysTickFunctions */
+
+/* ##################################### DCC function ########################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_core_DebugFunctions HAD Functions
+  \brief    Functions that access the HAD debug interface.
+  @{
+ */
+
+/**
+  \brief   HAD Send Character
+  \details Transmits a character via the HAD channel 0, and
+           \li Just returns when no debugger is connected that has booked the output.
+           \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted.
+  \param [in]     ch  Character to transmit.
+  \returns            Character to transmit.
+ */
+__STATIC_INLINE uint32_t csi_had_send_char(uint32_t ch)
+{
+    DCC->DERJR = (uint8_t)ch;
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Receive Character
+  \details Receives a character via the HAD channel.
+  \return             Received character.
+  \return         -1  No character pending.
+ */
+__STATIC_INLINE int32_t csi_had_receive_char(void)
+{
+    int32_t ch = -1;                           /* no character available */
+
+    if (_FLD2VAL(DCC_EHSR_JW, DCC->EHSR)) {
+        ch = DCC->DERJW;
+    }
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Check Character
+  \details Check whether a character is pending for reading on the HAD channel.
+  \return          0  No character available.
+  \return          1  Character available.
+ */
+__STATIC_INLINE int32_t csi_had_check_char(void)
+{
+    return _FLD2VAL(DCC_EHSR_JW, DCC->EHSR);                              /* 1: character available, 0: no character available */
+}
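+
+/*
+ * Usage sketch (added for illustration): poll the HAD channel and echo any
+ * received character back to the debugger.
+ *
+ *   if (csi_had_check_char()) {
+ *       int32_t c = csi_had_receive_char();
+ *       if (c >= 0) {
+ *           csi_had_send_char((uint32_t)c);
+ *       }
+ *   }
+ */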
+
+/*@} end of CSI_core_DebugFunctions */
+
+/* ##########################  Cache functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_CacheFunctions Cache Functions
+  \brief    Functions that configure Instruction and Data cache.
+  @{
+ */
+
+/**
+  \brief   Enable I-Cache
+  \details Turns on I-Cache
+  */
+__STATIC_INLINE void csi_icache_enable (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+    CACHE->CER |=  (uint32_t)(CACHE_CER_EN_Msk | CACHE_CER_CFIG_Msk);  /* enable all Cache */
+#endif
+}
+
+
+/**
+  \brief   Disable I-Cache
+  \details Turns off I-Cache
+  */
+__STATIC_INLINE void csi_icache_disable (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    CACHE->CER &=  ~(uint32_t)(CACHE_CER_EN_Msk | CACHE_CER_CFIG_Msk);  /* disable all Cache */
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;          /* invalidate all Cache */
+#endif
+}
+
+
+/**
+  \brief   Invalidate I-Cache
+  \details Invalidates I-Cache
+  */
+__STATIC_INLINE void csi_icache_invalid (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+#endif
+}
+
+
+/**
+  \brief   Enable D-Cache
+  \details Turns on D-Cache
+  \note    I-Cache also turns on.
+  */
+__STATIC_INLINE void csi_dcache_enable (void)
+{
+  #if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+    CACHE->CER =  (uint32_t)(CACHE_CER_EN_Msk & (~CACHE_CER_CFIG_Msk));  /* enable all Cache */
+  #endif
+}
+
+
+/**
+  \brief   Disable D-Cache
+  \details Turns off D-Cache
+  \note    I-Cache also turns off.
+  */
+__STATIC_INLINE void csi_dcache_disable (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CER &=  ~(uint32_t)CACHE_CER_EN_Msk;  /* disable all Cache */
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;          /* invalidate all Cache */
+#endif
+}
+
+
+/**
+  \brief   Invalidate D-Cache
+  \details Invalidates D-Cache
+  \note    I-Cache is also invalidated.
+  */
+__STATIC_INLINE void csi_dcache_invalid (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+#endif
+}
+
+
+/**
+  \brief   Clean D-Cache
+  \details Cleans D-Cache
+  \note    I-Cache is also cleaned.
+  */
+__STATIC_INLINE void csi_dcache_clean (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = _VAL2FLD(CACHE_CIR_CLR_ALL, 1);         /* clean all Cache */
+#endif
+}
+
+
+/**
+  \brief   Clean & Invalidate D-Cache
+  \details Cleans and Invalidates D-Cache
+  \note    I-Cache is also flushed.
+  */
+__STATIC_INLINE void csi_dcache_clean_invalid (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = _VAL2FLD(CACHE_CIR_INV_ALL, 1) | _VAL2FLD(CACHE_CIR_CLR_ALL, 1);         /* clean and inv all Cache */
+#endif
+}
+
+
+/**
+  \brief   D-Cache Invalidate by address
+  \details Invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_invalid_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_INV_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= _VAL2FLD(CACHE_CIR_INV_ONE, 1);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean by address
+  \details Cleans D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_INV_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= _VAL2FLD(CACHE_CIR_CLR_ONE, 1);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean and Invalidate by address
+  \details Cleans and invalidates D_Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_invalid_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_INV_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= _VAL2FLD(CACHE_CIR_CLR_ONE, 1) | _VAL2FLD(CACHE_CIR_INV_ONE, 1);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
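+
+/*
+ * Usage sketch (added for illustration; dma_buf is a hypothetical buffer): clean
+ * before a device reads the buffer, invalidate before the CPU reads data written
+ * by a device. Address and size should be 16-byte aligned, as noted above.
+ *
+ *   static uint32_t dma_buf[64] __attribute__((aligned(16)));
+ *
+ *   csi_dcache_clean_range(dma_buf, sizeof(dma_buf));     // CPU wrote, device will read
+ *   csi_dcache_invalid_range(dma_buf, sizeof(dma_buf));   // device wrote, CPU will read
+ */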
+
+/**
+  \brief   Set up a cacheable address range
+  \details Configure cache range register CRCR[index] with the base address, the size encoding and the enable bit.
+  */
+__STATIC_INLINE void csi_cache_set_range (uint32_t index, uint32_t baseAddr, uint32_t size, uint32_t enable)
+{
+    CACHE->CRCR[index] =  ((baseAddr & CACHE_CRCR_BASE_ADDR_Msk) |
+                           (_VAL2FLD(CACHE_CRCR_SIZE, size)) |
+                           (_VAL2FLD(CACHE_CRCR_EN, enable)));
+}
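+
+/*
+ * Usage sketch (added for illustration; the base address is an assumption): mark
+ * a 64 KB region starting at 0x20000000 as cacheable via range register 0, using
+ * the CACHE_CRCR_* size encodings defined earlier in this file.
+ *
+ *   csi_cache_set_range(0U, 0x20000000U, CACHE_CRCR_64K, 1U);
+ */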
+
+/**
+  \brief   Enable cache profile
+  \details Turns on Cache profile
+  */
+__STATIC_INLINE void csi_cache_enable_profile (void)
+{
+    CACHE->CPFCR |=  (uint32_t)CACHE_CPFCR_PFEN_Msk;
+}
+
+/**
+  \brief   Disable cache profile
+  \details Turns off Cache profile
+  */
+__STATIC_INLINE void csi_cache_disable_profile (void)
+{
+    CACHE->CPFCR &=  ~(uint32_t)CACHE_CPFCR_PFEN_Msk;
+}
+
+/**
+  \brief   Reset cache profile
+  \details Reset Cache profile
+  */
+__STATIC_INLINE void csi_cache_reset_profile (void)
+{
+    CACHE->CPFCR |=  (uint32_t)CACHE_CPFCR_PFRST_Msk;
+}
+
+/**
+  \brief   cache access times
+  \details Cache access times
+  \note    the counter increments once every 256 accesses.
+  \return          cache access count; multiply by 256 for the actual number of accesses
+  */
+__STATIC_INLINE uint32_t csi_cache_get_access_time (void)
+{
+    return CACHE->CPFATR;
+}
+
+/**
+  \brief   cache miss times
+  \details Cache miss times
+  \note    the counter increments once every 256 misses.
+  \return          cache miss count; multiply by 256 for the actual number of misses
+  */
+__STATIC_INLINE uint32_t csi_cache_get_miss_time (void)
+{
+    return CACHE->CPFMTR;
+}
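+
+/*
+ * Usage sketch (added for illustration; run_workload is a hypothetical function):
+ * sample the profiling counters around a workload. Both counters count in units
+ * of 256 events, so the scaling cancels out of the miss ratio.
+ *
+ *   csi_cache_reset_profile();
+ *   csi_cache_enable_profile();
+ *   run_workload();
+ *   uint32_t access = csi_cache_get_access_time();
+ *   uint32_t miss   = csi_cache_get_miss_time();
+ *   uint32_t miss_percent = access ? (miss * 100U) / access : 0U;
+ */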
+
+/*@} end of CSI_Core_CacheFunctions */
+
+
+/* ##########################  MPU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_MPUFunctions MPU Functions
+  \brief    Functions that configure MPU.
+  @{
+ */
+
+typedef enum {
+    REGION_SIZE_128B     = 0x6,
+    REGION_SIZE_256B     = 0x7,
+    REGION_SIZE_512B     = 0x8,
+    REGION_SIZE_1KB      = 0x9,
+    REGION_SIZE_2KB      = 0xA,
+    REGION_SIZE_4KB      = 0xB,
+    REGION_SIZE_8KB      = 0xC,
+    REGION_SIZE_16KB     = 0xD,
+    REGION_SIZE_32KB     = 0xE,
+    REGION_SIZE_64KB     = 0xF,
+    REGION_SIZE_128KB    = 0x10,
+    REGION_SIZE_256KB    = 0x11,
+    REGION_SIZE_512KB    = 0x12,
+    REGION_SIZE_1MB      = 0x13,
+    REGION_SIZE_2MB      = 0x14,
+    REGION_SIZE_4MB      = 0x15,
+    REGION_SIZE_8MB      = 0x16,
+    REGION_SIZE_16MB     = 0x17,
+    REGION_SIZE_32MB     = 0x18,
+    REGION_SIZE_64MB     = 0x19,
+    REGION_SIZE_128MB    = 0x1A,
+    REGION_SIZE_256MB    = 0x1B,
+    REGION_SIZE_512MB    = 0x1C,
+    REGION_SIZE_1GB      = 0x1D,
+    REGION_SIZE_2GB      = 0x1E,
+    REGION_SIZE_4GB      = 0x1F
+} region_size_e;
+
+typedef enum {
+    AP_BOTH_INACCESSIBLE = 0,
+    AP_SUPER_RW_USER_INACCESSIBLE,
+    AP_SUPER_RW_USER_RDONLY,
+    AP_BOTH_RW
+} access_permission_e;
+
+typedef struct {
+    uint32_t nx: 1;               /* non-executable: instruction fetch/execution is disallowed when set */
+    access_permission_e ap: 2;    /* super user and normal user access.*/
+    uint32_t s: 1;                /* security */
+} mpu_region_attr_t;
+
+/**
+  \brief  enable mpu
+  \details
+  */
+__STATIC_INLINE void csi_mpu_enable(void)
+{
+    __set_CCR(__get_CCR() | CCR_MP_Msk);
+}
+
+/**
+  \brief  disable mpu
+  \details
+  */
+__STATIC_INLINE void csi_mpu_disable(void)
+{
+    __set_CCR(__get_CCR() & (~CCR_MP_Msk));
+}
+
+/**
+  \brief  configure memory protected region.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  \param [in]  base_addr  base address must be aligned with page size.
+  \param [in]  size       \ref region_size_e. memory protected region size.
+  \param [in]  attr       \ref mpu_region_attr_t. memory protected region attribute.
+  \param [in]  enable     enable or disable memory protected region.
+  */
+__STATIC_INLINE void csi_mpu_config_region(uint32_t idx, uint32_t base_addr, region_size_e size,
+                                           mpu_region_attr_t attr, uint32_t enable)
+{
+    CAPR_Type capr;
+    PACR_Type pacr;
+    PRSR_Type prsr;
+
+    if (idx > 7) {
+        return;
+    }
+
+    capr.w = __get_CAPR();
+    pacr.w = __get_PACR();
+    prsr.w = __get_PRSR();
+
+    pacr.b.base_addr = (base_addr >> PACR_BASE_ADDR_Pos) & (0x3FFFFFF);
+
+    prsr.b.RID = idx;
+    __set_PRSR(prsr.w);
+
+    if (size != REGION_SIZE_128B) {
+        pacr.w &= ~(((1u << (size -6)) - 1) << 7);
+    }
+
+    pacr.b.size = size;
+
+    capr.w &= ~((0x1 << idx) | (0x3 << (idx * 2 + 8)) | (0x1 << (idx + 24)));
+    capr.w = (capr.w | (attr.nx << idx) | (attr.ap << (idx * 2 + 8)) | (attr.s << (idx + 24)));
+    __set_CAPR(capr.w);
+
+    pacr.b.E = enable;
+    __set_PACR(pacr.w);
+}
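+
+/*
+ * Usage sketch (added for illustration; the address is an assumption): make a
+ * 1 MB region at 0x20000000 readable/writable and executable for both supervisor
+ * and user mode, then enable the MPU.
+ *
+ *   mpu_region_attr_t attr = { .nx = 0U, .ap = AP_BOTH_RW, .s = 0U };
+ *   csi_mpu_config_region(0U, 0x20000000U, REGION_SIZE_1MB, attr, 1U);
+ *   csi_mpu_enable();
+ */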
+
+/**
+  \brief  enable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  */
+__STATIC_INLINE void csi_mpu_enable_region(uint32_t idx)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | idx);
+    __set_PACR(__get_PACR() | PACR_E_Msk);
+}
+
+/**
+  \brief  disable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  */
+__STATIC_INLINE void csi_mpu_disable_region(uint32_t idx)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | idx);
+    __set_PACR(__get_PACR() & (~PACR_E_Msk));
+}
+
+/*@} end of CSI_Core_MPUFunctions */
+
+
+/* ##################################    IRQ Functions  ############################################ */
+
+/**
+  \brief   Save the Irq context
+  \details Save the PSR value before disabling interrupts.
+ */
+__STATIC_INLINE uint32_t csi_irq_save(void)
+{
+    uint32_t result;
+    result = __get_PSR();
+    __disable_irq();
+    return(result);
+}
+
+/**
+  \brief   Restore the Irq context
+  \details Restore the saved PSR interrupt state.
+  \param [in]      irq_state  psr irq state.
+ */
+__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
+{
+    __set_PSR(irq_state);
+}
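+
+/*
+ * Usage sketch (added for illustration): protect a short critical section by
+ * saving and restoring the interrupt state held in PSR.
+ *
+ *   uint32_t state = csi_irq_save();
+ *   // ... code that must not be interrupted ...
+ *   csi_irq_restore(state);
+ */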
+
+/*@} end of IRQ Functions */
+
+/**
+  \brief   System Reset
+  \details Initiates a system reset request to reset the MCU.
+ */
+__STATIC_INLINE void csi_system_reset(void)
+{
+    CHR_Type chr;
+
+    chr.w = __get_CHR();
+#ifdef __RESET_CONST
+    chr.b.SRST_VAL = __RESET_CONST;
+#else
+    chr.b.SRST_VAL = 0xABCD;
+#endif
+
+    __DSB();                                                          /* Ensure all outstanding memory accesses included
+                                                                         buffered write are completed before reset */
+    __set_CHR(chr.w);
+
+    __DSB();                                                          /* Ensure completion of memory access */
+
+    for(;;)                                                           /* wait until reset */
+    {
+        __NOP();
+    }
+}
+
+/* ##################################    Old Interfaces  ############################################ */
+
+/* These interfaces are deprecated */
+#define NVIC_EnableIRQ(IRQn)                            csi_vic_enable_irq(IRQn)
+#define NVIC_DisableIRQ(IRQn)                           csi_vic_disable_irq(IRQn)
+#define NVIC_GetPendingIRQ(IRQn)                        csi_vic_get_pending_irq(IRQn)
+#define NVIC_SetPendingIRQ(IRQn)                        csi_vic_set_pending_irq(IRQn)
+#define NVIC_ClearPendingIRQ(IRQn)                      csi_vic_clear_pending_irq(IRQn)
+#define NVIC_GetWakeupIRQ(IRQn)                         csi_vic_get_wakeup_irq(IRQn)
+#define NVIC_SetWakeupIRQ(IRQn)                         csi_vic_set_wakeup_irq(IRQn)
+#define NVIC_ClearWakeupIRQ(IRQn)                       csi_vic_clear_wakeup_irq(IRQn)
+#define NVIC_GetActive(IRQn)                            csi_vic_get_active(IRQn)
+#define NVIC_SetThreshold(VectThreshold, PrioThreshold) csi_vic_set_threshold(VectThreshold, PrioThreshold)
+#define NVIC_SetPriority(IRQn, priority)                csi_vic_set_prio(IRQn, priority)
+#define NVIC_GetPriority(IRQn)                          csi_vic_get_prio(IRQn)
+#define NVIC_SystemReset()                              csi_system_reset()
+
+#define SysTick_Config(ticks)                           csi_coret_config(ticks, CORET_IRQn)
+#define CORET_Config(ticks)                             csi_coret_config(ticks, CORET_IRQn)
+
+#define HAD_SendChar(ch)                                csi_had_send_char(ch)
+#define HAD_ReceiveChar()                               csi_had_receive_char()
+#define HAD_CheckChar()                                 csi_had_check_char()
+
+/*@} end of Old Interfaces */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_802_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 1592 - 0
lib/sec_library/include/core/core_803.h

@@ -0,0 +1,1592 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_803.h
+ * @brief    CSI 803 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     02. June 2017
+ ******************************************************************************/
+
+#ifndef __CORE_803_H_GENERIC
+#define __CORE_803_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup Ck803
+  @{
+ */
+
+/*  CSI CK803 definitions */
+#define __CK803_CSI_VERSION_MAIN  (0x04U)                                      /*!< [31:16] CSI HAL main version */
+#define __CK803_CSI_VERSION_SUB   (0x1EU)                                      /*!< [15:0]  CSI HAL sub version */
+#define __CK803_CSI_VERSION       ((__CK803_CSI_VERSION_MAIN << 16U) | \
+                                   __CK803_CSI_VERSION_SUB           )         /*!< CSI HAL version number */
+
+#ifndef __CK80X
+#define __CK80X                (0x03U)                                         /*!< CK80X Core */
+#endif
+
+/* __FPU_USED indicates whether an FPU is used or not. */
+#define __FPU_USED       1U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CK803_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_CK803_H_DEPENDANT
+#define __CORE_CK803_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __CK803_REV
+#define __CK803_REV               0x0000U
+#endif
+
+#ifndef __VIC_PRIO_BITS
+#define __VIC_PRIO_BITS           2U
+#endif
+
+#ifndef __Vendor_SysTickConfig
+#define __Vendor_SysTickConfig    1U
+#endif
+
+#ifndef __GSR_GCR_PRESENT
+#define __GSR_GCR_PRESENT         0U
+#endif
+
+#ifndef __MPU_PRESENT
+#define __MPU_PRESENT             1U
+#endif
+
+#ifndef __ICACHE_PRESENT
+#define __ICACHE_PRESENT          1U
+#endif
+
+#ifndef __DCACHE_PRESENT
+#define __DCACHE_PRESENT          1U
+#endif
+
+#include <core/csi_gcc.h>
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group CK803 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+  - Core VIC Register
+  - Core Cache Register
+  - Core CoreTIM Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK80X processor based devices.
+*/
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \brief  Access Processor Status Register (PSR) struct definition.
+ */
+typedef union {
+    struct {
+        uint32_t C: 1;                       /*!< bit:      0  Conditional code/Carry flag */
+        uint32_t _reserved0: 5;              /*!< bit:  1.. 5  Reserved */
+        uint32_t IE: 1;                      /*!< bit:      6  Interrupt effective control bit */
+        uint32_t IC: 1;                      /*!< bit:      7  Interrupt control bit */
+        uint32_t EE: 1;                      /*!< bit:      8  Abnormally effective control bit */
+        uint32_t MM: 1;                      /*!< bit:      9  Unsymmetrical masking bit */
+        uint32_t _reserved1: 6;              /*!< bit: 10..15  Reserved */
+        uint32_t VEC: 8;                     /*!< bit: 16..23  Abnormal event vector value */
+        uint32_t _reserved2: 1;              /*!< bit:     24  Reserved */
+        uint32_t SV: 1;                      /*!< bit:     25  Stacked valid */
+        uint32_t SD: 1;                      /*!< bit:     26  Stacked dirty */
+        uint32_t SC: 1;                      /*!< bit:     27  Secure call bit */
+        uint32_t HS: 1;                      /*!< bit:     28  Hardware stacked bit */
+        uint32_t SP: 1;                      /*!< bit:     29  Secure pending bit */
+        uint32_t T: 1;                       /*!< bit:     30  TEE mode bit */
+        uint32_t S: 1;                       /*!< bit:     31  Superuser mode set bit */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PSR_Type;
+
+/* PSR Register Definitions */
+#define PSR_S_Pos                          31U                                            /*!< PSR: S Position */
+#define PSR_S_Msk                          (1UL << PSR_S_Pos)                             /*!< PSR: S Mask */
+
+#define PSR_VEC_Pos                        16U                                            /*!< PSR: VEC Position */
+#define PSR_VEC_Msk                        (0x7FUL << PSR_VEC_Pos)                        /*!< PSR: VEC Mask */
+
+#define PSR_MM_Pos                         9U                                             /*!< PSR: MM Position */
+#define PSR_MM_Msk                         (1UL << PSR_MM_Pos)                            /*!< PSR: MM Mask */
+
+#define PSR_EE_Pos                         8U                                             /*!< PSR: EE Position */
+#define PSR_EE_Msk                         (1UL << PSR_EE_Pos)                            /*!< PSR: EE Mask */
+
+#define PSR_IC_Pos                         7U                                             /*!< PSR: IC Position */
+#define PSR_IC_Msk                         (1UL << PSR_IC_Pos)                            /*!< PSR: IC Mask */
+
+#define PSR_IE_Pos                         6U                                             /*!< PSR: IE Position */
+#define PSR_IE_Msk                         (1UL << PSR_IE_Pos)                            /*!< PSR: IE Mask */
+
+#define PSR_C_Pos                          0U                                             /*!< PSR: C Position */
+#define PSR_C_Msk                          (1UL << PSR_C_Pos)                             /*!< PSR: C Mask */
+
+/**
+  \brief Consortium definition for accessing Cache Configuration Registers(CCR, CR<18, 0>).
+ */
+typedef union {
+    struct {
+        uint32_t MP: 1;                      /*!< bit:  0.. 1  memory protection settings */
+        uint32_t _reserved0: 6;              /*!< bit:  2.. 6  Reserved */
+        uint32_t BE: 1;                      /*!< bit:      7  Endian mode */
+        uint32_t SCK: 3;                     /*!< bit:  8..10  the clock ratio of the system and the processor */
+        uint32_t _reserved1: 2;              /*!< bit: 11..12  Reserved */
+        uint32_t BE_V2: 1;                   /*!< bit:     13  V2 Endian mode */
+        uint32_t _reserved2: 18;             /*!< bit: 14..31  Reserved */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CCR_Type;
+
+/* CCR Register Definitions */
+#define CCR_BE_V2_Pos                     13U                                            /*!< CCR: BE_V2 Position */
+#define CCR_BE_V2_Msk                     (0x1UL << CCR_BE_V2_Pos)                       /*!< CCR: BE_V2 Mask */
+
+#define CCR_SCK_Pos                       8U                                             /*!< CCR: SCK Position */
+#define CCR_SCK_Msk                       (0x3UL << CCR_SCK_Pos)                         /*!< CCR: SCK Mask */
+
+#define CCR_BE_Pos                        7U                                             /*!< CCR: BE Position */
+#define CCR_BE_Msk                        (0x1UL << CCR_BE_Pos)                          /*!< CCR: BE Mask */
+
+#define CCR_MP_Pos                        0U                                             /*!< CCR: MP Position */
+#define CCR_MP_Msk                        (0x3UL << CCR_MP_Pos)                          /*!< CCR: MP Mask */
+
+/**
+  \brief  Consortium definition for accessing the access permission configuration register (CAPR, CR<19,0>).
+ */
+typedef union {
+    struct {
+        uint32_t X0: 1;                      /*!< bit:      0  Non executable attribute setting */
+        uint32_t X1: 1;                      /*!< bit:      1  Non executable attribute setting */
+        uint32_t X2: 1;                      /*!< bit:      2  Non executable attribute setting */
+        uint32_t X3: 1;                      /*!< bit:      3  Non executable attribute setting */
+        uint32_t X4: 1;                      /*!< bit:      4  Non executable attribute setting */
+        uint32_t X5: 1;                      /*!< bit:      5  Non executable attribute setting */
+        uint32_t X6: 1;                      /*!< bit:      6  Non executable attribute setting */
+        uint32_t X7: 1;                      /*!< bit:      7  Non executable attribute setting */
+        uint32_t AP0: 2;                     /*!< bit:  8.. 9  access permissions settings bit */
+        uint32_t AP1: 2;                     /*!< bit: 10..11  access permissions settings bit */
+        uint32_t AP2: 2;                     /*!< bit: 12..13  access permissions settings bit */
+        uint32_t AP3: 2;                     /*!< bit: 14..15  access permissions settings bit */
+        uint32_t AP4: 2;                     /*!< bit: 16..17  access permissions settings bit */
+        uint32_t AP5: 2;                     /*!< bit: 18..19  access permissions settings bit */
+        uint32_t AP6: 2;                     /*!< bit: 20..21  access permissions settings bit */
+        uint32_t AP7: 2;                     /*!< bit: 22..23  access permissions settings bit */
+        uint32_t S0: 1;                      /*!< bit:     24  Security property settings */
+        uint32_t S1: 1;                      /*!< bit:     25  Security property settings */
+        uint32_t S2: 1;                      /*!< bit:     26  Security property settings */
+        uint32_t S3: 1;                      /*!< bit:     27  Security property settings */
+        uint32_t S4: 1;                      /*!< bit:     28  Security property settings */
+        uint32_t S5: 1;                      /*!< bit:     29  Security property settings */
+        uint32_t S6: 1;                      /*!< bit:     30  Security property settings */
+        uint32_t S7: 1;                      /*!< bit:     31  Security property settings */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CAPR_Type;
+
+/* CAPR Register Definitions */
+#define CAPR_S7_Pos                        31U                                            /*!< CAPR: S7 Position */
+#define CAPR_S7_Msk                        (1UL << CAPR_S7_Pos)                           /*!< CAPR: S7 Mask */
+
+#define CAPR_S6_Pos                        30U                                            /*!< CAPR: S6 Position */
+#define CAPR_S6_Msk                        (1UL << CAPR_S6_Pos)                           /*!< CAPR: S6 Mask */
+
+#define CAPR_S5_Pos                        29U                                            /*!< CAPR: S5 Position */
+#define CAPR_S5_Msk                        (1UL << CAPR_S5_Pos)                           /*!< CAPR: S5 Mask */
+
+#define CAPR_S4_Pos                        28U                                            /*!< CAPR: S4 Position */
+#define CAPR_S4_Msk                        (1UL << CAPR_S4_Pos)                           /*!< CAPR: S4 Mask */
+
+#define CAPR_S3_Pos                        27U                                            /*!< CAPR: S3 Position */
+#define CAPR_S3_Msk                        (1UL << CAPR_S3_Pos)                           /*!< CAPR: S3 Mask */
+
+#define CAPR_S2_Pos                        26U                                            /*!< CAPR: S2 Position */
+#define CAPR_S2_Msk                        (1UL << CAPR_S2_Pos)                           /*!< CAPR: S2 Mask */
+
+#define CAPR_S1_Pos                        25U                                            /*!< CAPR: S1 Position */
+#define CAPR_S1_Msk                        (1UL << CAPR_S1_Pos)                           /*!< CAPR: S1 Mask */
+
+#define CAPR_S0_Pos                        24U                                            /*!< CAPR: S0 Position */
+#define CAPR_S0_Msk                        (1UL << CAPR_S0_Pos)                           /*!< CAPR: S0 Mask */
+
+#define CAPR_AP7_Pos                       22U                                            /*!< CAPR: AP7 Position */
+#define CAPR_AP7_Msk                       (0x3UL << CAPR_AP7_Pos)                        /*!< CAPR: AP7 Mask */
+
+#define CAPR_AP6_Pos                       20U                                            /*!< CAPR: AP6 Position */
+#define CAPR_AP6_Msk                       (0x3UL << CAPR_AP6_Pos)                        /*!< CAPR: AP6 Mask */
+
+#define CAPR_AP5_Pos                       18U                                            /*!< CAPR: AP5 Position */
+#define CAPR_AP5_Msk                       (0x3UL << CAPR_AP5_Pos)                        /*!< CAPR: AP5 Mask */
+
+#define CAPR_AP4_Pos                       16U                                            /*!< CAPR: AP4 Position */
+#define CAPR_AP4_Msk                       (0x3UL << CAPR_AP4_Pos)                        /*!< CAPR: AP4 Mask */
+
+#define CAPR_AP3_Pos                       14U                                            /*!< CAPR: AP3 Position */
+#define CAPR_AP3_Msk                       (0x3UL << CAPR_AP3_Pos)                        /*!< CAPR: AP3 Mask */
+
+#define CAPR_AP2_Pos                       12U                                            /*!< CAPR: AP2 Position */
+#define CAPR_AP2_Msk                       (0x3UL << CAPR_AP2_Pos)                        /*!< CAPR: AP2 Mask */
+
+#define CAPR_AP1_Pos                       10U                                            /*!< CAPR: AP1 Position */
+#define CAPR_AP1_Msk                       (0x3UL << CAPR_AP1_Pos)                        /*!< CAPR: AP1 Mask */
+
+#define CAPR_AP0_Pos                       8U                                             /*!< CAPR: AP0 Position */
+#define CAPR_AP0_Msk                       (0x3UL << CAPR_AP0_Pos)                        /*!< CAPR: AP0 Mask */
+
+#define CAPR_X7_Pos                        7U                                             /*!< CAPR: X7 Position */
+#define CAPR_X7_Msk                        (0x1UL << CAPR_X7_Pos)                         /*!< CAPR: X7 Mask */
+
+#define CAPR_X6_Pos                        6U                                             /*!< CAPR: X6 Position */
+#define CAPR_X6_Msk                        (0x1UL << CAPR_X6_Pos)                         /*!< CAPR: X6 Mask */
+
+#define CAPR_X5_Pos                        5U                                             /*!< CAPR: X5 Position */
+#define CAPR_X5_Msk                        (0x1UL << CAPR_X5_Pos)                         /*!< CAPR: X5 Mask */
+
+#define CAPR_X4_Pos                        4U                                             /*!< CAPR: X4 Position */
+#define CAPR_X4_Msk                        (0x1UL << CAPR_X4_Pos)                         /*!< CAPR: X4 Mask */
+
+#define CAPR_X3_Pos                        3U                                             /*!< CAPR: X3 Position */
+#define CAPR_X3_Msk                        (0x1UL << CAPR_X3_Pos)                         /*!< CAPR: X3 Mask */
+
+#define CAPR_X2_Pos                        2U                                             /*!< CAPR: X2 Position */
+#define CAPR_X2_Msk                        (0x1UL << CAPR_X2_Pos)                         /*!< CAPR: X2 Mask */
+
+#define CAPR_X1_Pos                        1U                                             /*!< CAPR: X1 Position */
+#define CAPR_X1_Msk                        (0x1UL << CAPR_X1_Pos)                         /*!< CAPR: X1 Mask */
+
+#define CAPR_X0_Pos                        0U                                             /*!< CAPR: X0 Position */
+#define CAPR_X0_Msk                        (0x1UL << CAPR_X0_Pos)                         /*!< CAPR: X0 Mask */
+
+/**
+  \brief  Consortium definition for accessing control register(PACR, CR<20,0>).
+ */
+typedef union {
+    struct {
+        uint32_t E: 1;                       /*!< bit:      0  Effective setting of protected area */
+        uint32_t size: 5;                    /*!< bit:  1.. 5  Size of protected area */
+        uint32_t _reserved0: 6;              /*!< bit:  6.. 11 Reserved */
+        uint32_t base_addr: 20;              /*!< bit: 12..31  The high bits of the base address of a protected area */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PACR_Type;
+
+/* PACR Register Definitions */
+#define PACR_BASE_ADDR_Pos                 12U                                            /*!< PACR: base_addr Position */
+#define PACR_BASE_ADDR_Msk                 (0xFFFFFUL << PACR_BASE_ADDR_Pos)              /*!< PACR: base_addr Mask */
+
+#define PACR_SIZE_Pos                      1U                                             /*!< PACR: Size Position */
+#define PACR_SIZE_Msk                      (0x1FUL << PACR_SIZE_Pos)                      /*!< PACR: Size Mask */
+
+#define PACR_E_Pos                         0U                                             /*!< PACR: E Position */
+#define PACR_E_Msk                         (0x1UL << PACR_E_Pos)                          /*!< PACR: E Mask */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register(PRSR,CR<21,0>).
+ */
+typedef union {
+    struct {
+        uint32_t RID: 3;                     /*!< bit:  0.. 2  Protected area index value */
+        uint32_t _reserved0: 29;             /*!< bit:  3..31  Reserved */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} PRSR_Type;
+
+/* PRSR Register Definitions */
+#define PRSR_RID_Pos                       0U                                            /*!< PRSR: RID Position */
+#define PRSR_RID_Msk                       (0x7UL << PRSR_RID_Pos)                       /*!< PRSR: RID Mask */
+
+/**
+  \brief  Consortium definition for CPU Hint Register(CHR, CR<31,0>).
+ */
+typedef union {
+    struct {
+        uint32_t _reserved0: 1;              /*!< bit:  0      Reserved */
+        uint32_t BE: 1;                      /*!< bit:  1      System bus burst transfer support */
+        uint32_t IPE: 1;                     /*!< bit:  2      Instruction prefetch function enable */
+        uint32_t RPE: 1;                     /*!< bit:  3      Speculative execution of the function return instruction (RTS) enable */
+        uint32_t IAE: 1;                     /*!< bit:  4      Interrupt response acceleration enable */
+        uint32_t _reserved1: 9;              /*!< bit:  5..13  Reserved */
+        uint32_t ISE: 1;                     /*!< bit: 14      Interrupt SP enable */
+        uint32_t HS_EXP: 1;                  /*!< bit: 15      Exception bit for TEE world switch */
+        uint32_t SRST_VAL: 16;               /*!< bit: 16..31  Software reset decision value */
+    } b;
+    uint32_t w;
+} CHR_Type;
+
+/* CHR Register Definitions */
+#define CHR_BE_Pos                         1U                                            /*!< CHR: BE Position */
+#define CHR_BE_Msk                         (1UL << CHR_BE_Pos)                           /*!< CHR: BE Mask */
+#define CHR_IPE_Pos                        2U                                            /*!< CHR: IPE Position */
+#define CHR_IPE_Msk                        (1UL << CHR_IPE_Pos)                          /*!< CHR: IPE Mask */
+#define CHR_RPE_Pos                        3U                                            /*!< CHR: RPE Position */
+#define CHR_RPE_Msk                        (1UL << CHR_RPE_Pos)                          /*!< CHR: RPE Mask */
+#define CHR_IAE_Pos                        4U                                            /*!< CHR: IAE Position */
+#define CHR_IAE_Msk                        (0x1UL << CHR_IAE_Pos)                        /*!< CHR: IAE Mask */
+#define CHR_ISE_Pos                        14U                                           /*!< CHR: ISE Position */
+#define CHR_ISE_Msk                        (0x1UL << CHR_ISE_Pos)                        /*!< CHR: ISE Mask */
+#define CHR_HS_EXP_Pos                     15U                                           /*!< CHR: HS_EXP Position */
+#define CHR_HS_EXP_Msk                     (0x1UL << CHR_HS_EXP_Pos)                     /*!< CHR: HS_EXP Mask */
+#define CHR_SRST_VAL_Pos                   16U                                           /*!< CHR: SRST_VAL Position */
+#define CHR_SRST_VAL_Msk                   (0xFFFFUL << CHR_SRST_VAL_Pos)                /*!< CHR: SRST_VAL Mask */
+
+/*@} end of group CSI_CORE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_VIC Vectored Interrupt Controller (VIC)
+  \brief      Type definitions for the VIC Registers
+  @{
+ */
+
+/**
+  \brief Access to the structure of a vector interrupt controller.
+ */
+typedef struct {
+    __IOM uint32_t ISER[4U];               /*!< Offset: 0x000 (R/W)  Interrupt set enable register */
+    uint32_t RESERVED0[12U];
+    __IOM uint32_t IWER[4U];               /*!< Offset: 0x040 (R/W)  Interrupt wake-up set register */
+    uint32_t RESERVED1[12U];
+    __IOM uint32_t ICER[4U];               /*!< Offset: 0x080 (R/W)  Interrupt clear enable register */
+    uint32_t RESERVED2[12U];
+    __IOM uint32_t IWDR[4U];               /*!< Offset: 0x0c0 (R/W)  Interrupt wake-up clear register */
+    uint32_t RESERVED3[12U];
+    __IOM uint32_t ISPR[4U];               /*!< Offset: 0x100 (R/W)  Interrupt set pend register */
+    uint32_t RESERVED4[12U];
+    __IOM uint32_t ISSR[4U];               /*!< Offset: 0x140 (R/W)  Security interrupt set register */
+    uint32_t RESERVED5[12U];
+    __IOM uint32_t ICPR[4U];               /*!< Offset: 0x180 (R/W)  Interrupt clear pend register */
+    uint32_t RESERVED6[12U];
+    __IOM uint32_t ICSR[4U];               /*!< Offset: 0x1c0 (R/W)  Security interrupt clear register */
+    uint32_t RESERVED7[12U];
+    __IOM uint32_t IABR[4U];               /*!< Offset: 0x200 (R/W)  Interrupt answer state register */
+    uint32_t RESERVED8[60U];
+    __IOM uint32_t IPR[32U];               /*!< Offset: 0x300 (R/W)  Interrupt priority register */
+    uint32_t RESERVED9[480U];
+    __IM  uint32_t ISR;                    /*!< Offset: 0xB00 (R/ )  Interrupt state register */
+    __IOM uint32_t IPTR;                   /*!< Offset: 0xB04 (R/W)  Interrupt priority threshold register */
+    __IOM uint32_t TSPEND;                 /*!< Offset: 0xB08 (R/W)  Task pending register */
+    __IOM uint32_t TSABR;                  /*!< Offset: 0xB0c (R/W)  Tspend acknowledge register */
+    __IOM uint32_t TSPR;                   /*!< Offset: 0xB10 (R/W)  Tspend priority register */
+} VIC_Type;
+
+/*@} end of group CSI_VIC */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+
+/**
+  \brief On chip cache structure.
+ */
+typedef struct
+{
+  __IOM uint32_t CER;                    /*!< Offset: 0x000 (R/W)  Cache enable register */
+  __IOM uint32_t CIR;                    /*!< Offset: 0x004 (R/W)  Cache invalid register */
+  __IOM uint32_t CRCR[4U];               /*!< Offset: 0x008 (R/W)  Cache Configuration register */
+        uint32_t RESERVED0[1015U];
+  __IOM uint32_t CPFCR;                  /*!< Offset: 0xFF4 (R/W)  Cache performance analysis control register */
+  __IOM uint32_t CPFATR;                 /*!< Offset: 0xFF8 (R/W)  Cache access times register */
+  __IOM uint32_t CPFMTR;                 /*!< Offset: 0xFFC (R/W)  Cache missing times register */
+} CACHE_Type;
+
+/* CACHE Register Definitions */
+#define CACHE_CER_EN_Pos                       0U                                            /*!< CACHE CER: EN Position */
+#define CACHE_CER_EN_Msk                       (0x1UL << CACHE_CER_EN_Pos)                   /*!< CACHE CER: EN Mask */
+
+#define CACHE_CER_CFIG_Pos                     1U                                            /*!< CACHE CER: CFIG Position */
+#define CACHE_CER_CFIG_Msk                     (0x1UL << CACHE_CER_CFIG_Pos)                 /*!< CACHE CER: CFIG Mask */
+
+#define CACHE_CER_WB_Pos                       2U                                            /*!< CACHE CER: WB Position */
+#define CACHE_CER_WB_Msk                       (0x1UL << CACHE_CER_WB_Pos)                   /*!< CACHE CER: WB Mask */
+
+#define CACHE_CER_DCW_Pos                      4U                                            /*!< CACHE CER: DCW Position */
+#define CACHE_CER_DCW_Msk                      (0x1UL << CACHE_CER_DCW_Pos)                  /*!< CACHE CER: DCW Mask */
+
+#define CACHE_CER_WA_Pos                       5U                                            /*!< CACHE CER: WA Position */
+#define CACHE_CER_WA_Msk                       (0x1UL << CACHE_CER_WA_Pos)                   /*!< CACHE CER: WA Mask */
+
+#define CACHE_CIR_INV_ALL_Pos                  0U                                            /*!< CACHE CIR: INV_ALL Position */
+#define CACHE_CIR_INV_ALL_Msk                  (0x1UL << CACHE_CIR_INV_ALL_Pos)              /*!< CACHE CIR: INV_ALL Mask */
+
+#define CACHE_CIR_INV_ONE_Pos                  1U                                            /*!< CACHE CIR: INV_ONE Position */
+#define CACHE_CIR_INV_ONE_Msk                  (0x1UL << CACHE_CIR_INV_ONE_Pos)              /*!< CACHE CIR: INV_ONE Mask */
+
+#define CACHE_CIR_CLR_ALL_Pos                  2U                                            /*!< CACHE CIR: CLR_ALL Position */
+#define CACHE_CIR_CLR_ALL_Msk                  (0x1UL << CACHE_CIR_CLR_ALL_Pos)              /*!< CACHE CIR: CLR_ALL Mask */
+
+#define CACHE_CIR_CLR_ONE_Pos                  3U                                            /*!< CACHE CIR: CLR_ONE Position */
+#define CACHE_CIR_CLR_ONE_Msk                  (0x1UL << CACHE_CIR_CLR_ONE_Pos)              /*!< CACHE CIR: CLR_ONE Mask */
+
+#define CACHE_CIR_INV_ADDR_Pos                 4U                                            /*!< CACHE CIR: INV_ADDR Position */
+#define CACHE_CIR_INV_ADDR_Msk                 (0xFFFFFFFUL << CACHE_CIR_INV_ADDR_Pos)       /*!< CACHE CIR: INV_ADDR Mask */
+
+#define CACHE_CRCR_EN_Pos                      0U                                            /*!< CACHE CRCR: EN Position */
+#define CACHE_CRCR_EN_Msk                      (0x1UL << CACHE_CRCR_EN_Pos)                  /*!< CACHE CRCR: EN Mask */
+
+#define CACHE_CRCR_SIZE_Pos                    1U                                            /*!< CACHE CRCR: Size Position */
+#define CACHE_CRCR_SIZE_Msk                    (0x1FUL << CACHE_CRCR_SIZE_Pos)               /*!< CACHE CRCR: Size Mask */
+
+#define CACHE_CRCR_BASE_ADDR_Pos               10U                                           /*!< CACHE CRCR: base addr Position */
+#define CACHE_CRCR_BASE_ADDR_Msk               (0x3FFFFFUL << CACHE_CRCR_BASE_ADDR_Pos)      /*!< CACHE CRCR: base addr Mask */
+
+#define CACHE_CPFCR_PFEN_Pos                   0U                                            /*!< CACHE CPFCR: PFEN Position */
+#define CACHE_CPFCR_PFEN_Msk                   (0x1UL << CACHE_CPFCR_PFEN_Pos)               /*!< CACHE CPFCR: PFEN Mask */
+
+#define CACHE_CPFCR_PFRST_Pos                  1U                                            /*!< CACHE CPFCR: PFRST Position */
+#define CACHE_CPFCR_PFRST_Msk                  (0x1UL << CACHE_CPFCR_PFRST_Pos)              /*!< CACHE CPFCR: PFRST Mask */
+
+#define CACHE_CRCR_4K                          0xB                                           /* 01011 */
+#define CACHE_CRCR_8K                          0xC                                           /* 01100 */
+#define CACHE_CRCR_16K                         0xD                                           /* 01101 */
+#define CACHE_CRCR_32K                         0xE                                           /* 01110 */
+#define CACHE_CRCR_64K                         0xF                                           /* 01111 */
+#define CACHE_CRCR_128K                        0x10                                          /* 10000 */
+#define CACHE_CRCR_256K                        0x11                                          /* 10001 */
+#define CACHE_CRCR_512K                        0x12                                          /* 10010 */
+#define CACHE_CRCR_1M                          0x13                                          /* 10011 */
+#define CACHE_CRCR_2M                          0x14                                          /* 10100 */
+#define CACHE_CRCR_4M                          0x15                                          /* 10101 */
+#define CACHE_CRCR_8M                          0x16                                          /* 10110 */
+#define CACHE_CRCR_16M                         0x17                                          /* 10111 */
+#define CACHE_CRCR_32M                         0x18                                          /* 11000 */
+#define CACHE_CRCR_64M                         0x19                                          /* 11001 */
+#define CACHE_CRCR_128M                        0x1A                                          /* 11010 */
+#define CACHE_CRCR_256M                        0x1B                                          /* 11011 */
+#define CACHE_CRCR_512M                        0x1C                                          /* 11100 */
+#define CACHE_CRCR_1G                          0x1D                                          /* 11101 */
+#define CACHE_CRCR_2G                          0x1E                                          /* 11110 */
+#define CACHE_CRCR_4G                          0x1F                                          /* 11111 */
+
+/*@} end of group CSI_CACHE */
+
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_SysTick     System Tick Timer (CORET)
+  \brief    Type definitions for the System Timer Registers.
+  @{
+ */
+
+/**
+  \brief  The data structure for accessing the system timer.
+ */
+typedef struct {
+    __IOM uint32_t CTRL;                   /*!< Offset: 0x000 (R/W)  Control register */
+    __IOM uint32_t LOAD;                   /*!< Offset: 0x004 (R/W)  Reload register */
+    __IOM uint32_t VAL;                    /*!< Offset: 0x008 (R/W)  Current register */
+    __IM  uint32_t CALIB;                  /*!< Offset: 0x00C (R/ )  Calibration register */
+} CORET_Type;
+
+/* CORET Control / Status Register Definitions */
+#define CORET_CTRL_COUNTFLAG_Pos           16U                                            /*!< CORET CTRL: COUNTFLAG Position */
+#define CORET_CTRL_COUNTFLAG_Msk           (1UL << CORET_CTRL_COUNTFLAG_Pos)              /*!< CORET CTRL: COUNTFLAG Mask */
+
+#define CORET_CTRL_CLKSOURCE_Pos           2U                                             /*!< CORET CTRL: CLKSOURCE Position */
+#define CORET_CTRL_CLKSOURCE_Msk           (1UL << CORET_CTRL_CLKSOURCE_Pos)              /*!< CORET CTRL: CLKSOURCE Mask */
+
+#define CORET_CTRL_TICKINT_Pos             1U                                             /*!< CORET CTRL: TICKINT Position */
+#define CORET_CTRL_TICKINT_Msk             (1UL << CORET_CTRL_TICKINT_Pos)                /*!< CORET CTRL: TICKINT Mask */
+
+#define CORET_CTRL_ENABLE_Pos              0U                                             /*!< CORET CTRL: ENABLE Position */
+#define CORET_CTRL_ENABLE_Msk              (1UL /*<< CORET_CTRL_ENABLE_Pos*/)             /*!< CORET CTRL: ENABLE Mask */
+
+    /* CORET Reload Register Definitions */
+#define CORET_LOAD_RELOAD_Pos              0U                                             /*!< CORET LOAD: RELOAD Position */
+#define CORET_LOAD_RELOAD_Msk              (0xFFFFFFUL /*<< CORET_LOAD_RELOAD_Pos*/)      /*!< CORET LOAD: RELOAD Mask */
+
+    /* CORET Current Register Definitions */
+#define CORET_VAL_CURRENT_Pos              0U                                             /*!< CORET VAL: CURRENT Position */
+#define CORET_VAL_CURRENT_Msk              (0xFFFFFFUL /*<< CORET_VAL_CURRENT_Pos*/)      /*!< CORET VAL: CURRENT Mask */
+
+    /* CORET Calibration Register Definitions */
+#define CORET_CALIB_NOREF_Pos              31U                                            /*!< CORET CALIB: NOREF Position */
+#define CORET_CALIB_NOREF_Msk              (1UL << CORET_CALIB_NOREF_Pos)                 /*!< CORET CALIB: NOREF Mask */
+
+#define CORET_CALIB_SKEW_Pos               30U                                            /*!< CORET CALIB: SKEW Position */
+#define CORET_CALIB_SKEW_Msk               (1UL << CORET_CALIB_SKEW_Pos)                  /*!< CORET CALIB: SKEW Mask */
+
+#define CORET_CALIB_TENMS_Pos              0U                                             /*!< CORET CALIB: TENMS Position */
+#define CORET_CALIB_TENMS_Msk              (0xFFFFFFUL /*<< CORET_CALIB_TENMS_Pos*/)      /*!< CORET CALIB: TENMS Mask */
+
+/*@} end of group CSI_SysTick */
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_DCC
+  \brief    Type definitions for the DCC.
+  @{
+ */
+
+/**
+  \brief  Access to the data structure of DCC.
+ */
+typedef struct {
+    uint32_t RESERVED0[13U];
+    __IOM uint32_t HCR;                    /*!< Offset: 0x034 (R/W) */
+    __IM uint32_t EHSR;                    /*!< Offset: 0x03C (R/ ) */
+    uint32_t RESERVED1[6U];
+    union {
+        __IM uint32_t DERJW;               /*!< Offset: 0x058 (R/ )  Data exchange register, CPU read */
+        __OM uint32_t DERJR;               /*!< Offset: 0x058 ( /W)  Data exchange register, CPU write */
+    };
+
+} DCC_Type;
+
+#define DCC_HCR_JW_Pos                   18U                                            /*!< DCC HCR: jw_int_en Position */
+#define DCC_HCR_JW_Msk                   (1UL << DCC_HCR_JW_Pos)                        /*!< DCC HCR: jw_int_en Mask */
+
+#define DCC_HCR_JR_Pos                   19U                                            /*!< DCC HCR: jr_int_en Position */
+#define DCC_HCR_JR_Msk                   (1UL << DCC_HCR_JR_Pos)                        /*!< DCC HCR: jr_int_en Mask */
+
+#define DCC_EHSR_JW_Pos                  1U                                             /*!< DCC EHSR: jw_vld Position */
+#define DCC_EHSR_JW_Msk                  (1UL << DCC_EHSR_JW_Pos)                       /*!< DCC EHSR: jw_vld Mask */
+
+#define DCC_EHSR_JR_Pos                  2U                                             /*!< DCC EHSR: jr_vld Position */
+#define DCC_EHSR_JR_Msk                  (1UL << DCC_EHSR_JR_Pos)                       /*!< DCC EHSR: jr_vld Mask */
+
+/*@} end of group CSI_DCC */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)
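+
+/*
+  Usage sketch (illustrative, not part of the original header): packing a value
+  into a bit field and extracting it again with the helpers above, using one of
+  the CACHE field definitions from this file.
+
+      uint32_t cir = _VAL2FLD(CACHE_CIR_CLR_ALL, 1U);   // 1 shifted into the CLR_ALL field
+      uint32_t val = _FLD2VAL(CACHE_CIR_CLR_ALL, cir);  // recovers 1U
+*/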
+
+/*@} end of group CSI_core_bitfield */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_base     Core Definitions
+  \brief      Definitions for base addresses, unions, and structures.
+  @{
+ */
+
+/* Memory mapping of CK803 Hardware */
+#define TCIP_BASE           (0xE000E000UL)                            /*!< Tightly Coupled IP Base Address */
+#define CORET_BASE          (TCIP_BASE +  0x0010UL)                   /*!< CORET Base Address */
+#define VIC_BASE            (TCIP_BASE +  0x0100UL)                   /*!< VIC Base Address */
+#define DCC_BASE            (0xE0011000UL)                            /*!< DCC Base Address */
+#define CACHE_BASE          (TCIP_BASE +  0x1000UL)                   /*!< CACHE Base Address */
+
+#define CORET               ((CORET_Type   *)     CORET_BASE  )       /*!< SysTick configuration struct */
+#define VIC                 ((VIC_Type    *)     VIC_BASE   )         /*!< VIC configuration struct */
+#define DCC                 ((DCC_Type     *)     DCC_BASE    )       /*!< DCC configuration struct */
+#define CACHE               ((CACHE_Type   *)     CACHE_BASE  )       /*!< cache configuration struct */
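+
+/*
+  Access sketch (illustrative only): the typed pointers above let register fields
+  be read and written directly, e.g. reading the system timer's current count.
+
+      uint32_t now = CORET->VAL;      // current CORET down-counter value
+*/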
+
+/*@} */
+
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+/**
+  \defgroup CSI_Core_FunctionInterface Functions and Instructions Reference
+*/
+
+/* ##########################   VIC functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_VICFunctions VIC Functions
+  \brief    Functions that manage interrupts and exceptions via the VIC.
+  @{
+ */
+
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _IR_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    5UL)      )
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
+
+/**
+  \brief   Enable External Interrupt
+  \details Enable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ISER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Disable External Interrupt
+  \details Disable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ICER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Enable External Secure Interrupt
+  \details Enable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_sirq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ISER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+    VIC->ISSR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Disable External Secure Interrupt
+  \details Disable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_sirq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ICER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+    VIC->ICSR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Check Interrupt is Enabled or not
+  \details Read the enable register in the VIC and return the enable bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not enabled.
+  \return             1  Interrupt status is enabled.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_enabled_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->ISER[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Check Interrupt is Pending or not
+  \details Read the pending register in the VIC and returns the pending bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not pending.
+  \return             1  Interrupt status is pending.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_pending_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->ISPR[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Pending Interrupt
+  \details Set the pending bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_pending_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ISPR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Clear Pending Interrupt
+  \details Clear the pending bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_pending_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ICPR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Check Interrupt is Wakeup or not
+  \details Read the wake-up register in the VIC and return the wake-up bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt is not set as wake up interrupt.
+  \return             1  Interrupt is set as wake up interrupt.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_wakeup_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->IWER[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Wake up Interrupt
+  \details Set the wake up bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_wakeup_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->IWER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Clear Wake up Interrupt
+  \details Clear the wake up bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_wakeup_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->IWDR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Get Interrupt is Active or not
+  \details Read the active register in the VIC and returns the active bit for the device specific interrupt.
+  \param [in]      IRQn  Device specific interrupt number.
+  \return             0  Interrupt status is not active.
+  \return             1  Interrupt status is active.
+  \note    IRQn must not be negative.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_active(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->IABR[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Threshold register
+  \details set the threshold register in the VIC.
+  \param [in]      VectThreshold  specific vector threshold.
+  \param [in]      PrioThreshold  specific priority threshold.
+ */
+__STATIC_INLINE void csi_vic_set_threshold(uint32_t VectThreshold, uint32_t PrioThreshold)
+{
+    VectThreshold &= 0x7FUL;
+
+    if (VectThreshold <= 31) {
+        VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0x3) << 6);
+    }
+
+    if (VectThreshold > 31 && VectThreshold < 96) {
+        VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0x7) << 5);
+    }
+
+    if (VectThreshold > 95) {
+        VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0xF) << 4);
+    }
+}
+
+/**
+  \brief   Set Interrupt Priority
+  \details Set the priority of an interrupt.
+  \note    The priority cannot be set for every core interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]  priority  Priority to set.
+ */
+__STATIC_INLINE void csi_vic_set_prio(int32_t IRQn, uint32_t priority)
+{
+    VIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(VIC->IPR[_IP_IDX(IRQn)]  & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
+                                 (((priority << (8U - __VIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
+}
+
+/**
+  \brief   Get Interrupt Priority
+  \details Read the priority of an interrupt.
+           The interrupt number can be positive to specify an external (device specific) interrupt,
+           or negative to specify an internal (core) interrupt.
+  \param [in]   IRQn  Interrupt number.
+  \return             Interrupt Priority.
+                      Value is aligned automatically to the implemented priority bits of the microcontroller.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_prio(int32_t IRQn)
+{
+    return ((uint32_t)(((VIC->IPR[_IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn)) & (uint32_t)0xFFUL) >> (8U - __VIC_PRIO_BITS)));
+}
+
+/**
+  \brief   Set interrupt handler
+  \details Set the interrupt handler according to the interrupt num, the handler will be filled in irq vectors.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]   handler  Interrupt handler.
+ */
+__STATIC_INLINE void csi_vic_set_vector(int32_t IRQn, uint32_t handler)
+{
+    if (IRQn >= 0 && IRQn < 128) {
+        uint32_t *vectors = (uint32_t *)__get_VBR();
+        vectors[32 + IRQn] = handler;
+    }
+}
+
+/**
+  \brief   Get interrupt handler
+  \details Get the address of interrupt handler function.
+  \param [in]      IRQn  Interrupt number.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_vector(int32_t IRQn)
+{
+    if (IRQn >= 0 && IRQn < 128) {
+        uint32_t *vectors = (uint32_t *)__get_VBR();
+        return (uint32_t)vectors[32 + IRQn];
+    }
+
+    return 0;
+}
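+
+/*
+  Typical bring-up sequence for one interrupt (illustrative sketch only; the IRQ
+  number 3 and the handler name are assumptions, not defined by this header):
+
+      extern void example_irq_handler(void);
+
+      csi_vic_set_prio(3, 1U);                                  // priority within __VIC_PRIO_BITS
+      csi_vic_set_vector(3, (uint32_t)example_irq_handler);     // install handler in the vector table
+      csi_vic_enable_irq(3);                                    // unmask the interrupt
+*/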
+
+/*@} end of CSI_Core_VICFunctions */
+
+/* ##################################    SysTick function  ############################################ */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_SysTickFunctions SysTick Functions
+  \brief    Functions that configure the System.
+  @{
+ */
+
+
+/**
+  \brief   CORE timer Configuration
+  \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
+           Counter is in free running mode to generate periodic interrupts.
+  \param [in]  ticks  Number of ticks between two interrupts.
+  \param [in]  IRQn   core timer Interrupt number.
+  \return          0  Function succeeded.
+  \return          1  Function failed.
+  \note    When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
+           function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
+           must contain a vendor-specific implementation of this function.
+ */
+__STATIC_INLINE uint32_t csi_coret_config(uint32_t ticks, int32_t IRQn)
+{
+    if ((ticks - 1UL) > CORET_LOAD_RELOAD_Msk) {
+        return (1UL);                                                   /* Reload value impossible */
+    }
+
+    CORET->LOAD = (uint32_t)(ticks - 1UL);                              /* set reload register */
+    CORET->VAL  = 0UL;                                                  /* Load the CORET Counter Value */
+    CORET->CTRL = CORET_CTRL_CLKSOURCE_Msk |
+                   CORET_CTRL_TICKINT_Msk |
+                   CORET_CTRL_ENABLE_Msk;                               /* Enable CORET IRQ and CORET Timer */
+    return (0UL);                                                       /* Function successful */
+}
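+
+/*
+  Usage sketch (illustrative; SYSTEM_CLOCK and CORET_IRQn are device-specific
+  symbols assumed to be provided elsewhere): a 10 ms periodic tick.
+
+      if (csi_coret_config(SYSTEM_CLOCK / 100U, CORET_IRQn) != 0U) {
+          // reload value does not fit in the 24-bit LOAD register
+      }
+*/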
+
+/**
+  \brief   get CORE timer reload value
+  \return          CORE timer reload value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_load(void)
+{
+    return CORET->LOAD;
+}
+
+/**
+  \brief   get CORE timer counter value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_value(void)
+{
+    return CORET->VAL;
+}
+
+/**
+  \brief   clear CORE timer interrupt flag
+ */
+__STATIC_INLINE void csi_coret_clear_irq(void)
+{
+    (void)CORET->CTRL;                     /* reading CTRL clears the COUNTFLAG bit */
+}
+
+/*@} end of CSI_Core_SysTickFunctions */
+
+/* ##################################### DCC function ########################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_core_DebugFunctions HAD Functions
+  \brief    Functions that access the HAD debug interface.
+  @{
+ */
+
+/**
+  \brief   HAD Send Character
+  \details Transmits a character via the HAD channel 0, and
+           \li Just returns when no debugger is connected that has booked the output.
+           \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted.
+  \param [in]     ch  Character to transmit.
+  \returns            Character to transmit.
+ */
+__STATIC_INLINE uint32_t csi_had_send_char(uint32_t ch)
+{
+    DCC->DERJR = (uint8_t)ch;
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Receive Character
+  \details Receives a character via the HAD/DCC data exchange register.
+  \return             Received character.
+  \return         -1  No character pending.
+ */
+__STATIC_INLINE int32_t csi_had_receive_char(void)
+{
+    int32_t ch = -1;                           /* no character available */
+
+    if (_FLD2VAL(DCC_EHSR_JW, DCC->EHSR)) {
+        ch = DCC->DERJW;
+    }
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Check Character
+  \details Check whether a character is pending in the HAD/DCC data exchange register.
+  \return          0  No character available.
+  \return          1  Character available.
+ */
+__STATIC_INLINE int32_t csi_had_check_char(void)
+{
+    return _FLD2VAL(DCC_EHSR_JW, DCC->EHSR);                              /* 1: character available, 0: none */
+}
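+
+/*
+  Polling sketch (illustrative only): drain any characters the debugger has
+  written through the HAD/DCC channel.
+
+      while (csi_had_check_char()) {
+          int32_t c = csi_had_receive_char();
+          // process c
+      }
+*/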
+
+/*@} end of CSI_core_DebugFunctions */
+
+/* ##########################  Cache functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_CacheFunctions Cache Functions
+  \brief    Functions that configure Instruction and Data cache.
+  @{
+ */
+
+/**
+  \brief   Enable I-Cache
+  \details Turns on I-Cache
+  */
+__STATIC_INLINE void csi_icache_enable (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+    CACHE->CER |=  (uint32_t)(CACHE_CER_EN_Msk | CACHE_CER_CFIG_Msk);  /* enable all Cache */
+#endif
+}
+
+
+/**
+  \brief   Disable I-Cache
+  \details Turns off I-Cache
+  */
+__STATIC_INLINE void csi_icache_disable (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    CACHE->CER &=  ~(uint32_t)(CACHE_CER_EN_Msk | CACHE_CER_CFIG_Msk);  /* disable all Cache */
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;          /* invalidate all Cache */
+#endif
+}
+
+
+/**
+  \brief   Invalidate I-Cache
+  \details Invalidates I-Cache
+  */
+__STATIC_INLINE void csi_icache_invalid (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+#endif
+}
+
+
+/**
+  \brief   Enable D-Cache
+  \details Turns on D-Cache
+  \note    I-Cache also turns on.
+  */
+__STATIC_INLINE void csi_dcache_enable (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+    CACHE->CER =  (uint32_t)(CACHE_CER_EN_Msk | CACHE_CER_WB_Msk | CACHE_CER_DCW_Msk) & (~CACHE_CER_CFIG_Msk);  /* enable all Cache */
+#endif
+}
+
+
+/**
+  \brief   Disable D-Cache
+  \details Turns off D-Cache
+  \note    I-Cache also turns off.
+  */
+__STATIC_INLINE void csi_dcache_disable (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CER &=  ~(uint32_t)CACHE_CER_EN_Msk;  /* disable all Cache */
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;          /* invalidate all Cache */
+#endif
+}
+
+
+/**
+  \brief   Invalidate D-Cache
+  \details Invalidates D-Cache
+  \note    I-Cache is also invalidated.
+  */
+__STATIC_INLINE void csi_dcache_invalid (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+#endif
+}
+
+
+/**
+  \brief   Clean D-Cache
+  \details Cleans D-Cache
+  \note    I-Cache is also cleaned.
+  */
+__STATIC_INLINE void csi_dcache_clean (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = _VAL2FLD(CACHE_CIR_CLR_ALL, 1);         /* clean all Cache */
+#endif
+}
+
+
+/**
+  \brief   Clean & Invalidate D-Cache
+  \details Cleans and Invalidates D-Cache
+  \note    I-Cache is also flushed.
+  */
+__STATIC_INLINE void csi_dcache_clean_invalid (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = _VAL2FLD(CACHE_CIR_INV_ALL, 1) | _VAL2FLD(CACHE_CIR_CLR_ALL, 1);         /* clean and inv all Cache */
+#endif
+}
+
+
+/**
+  \brief   D-Cache Invalidate by address
+  \details Invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_invalid_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_INV_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= _VAL2FLD(CACHE_CIR_INV_ONE, 1);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean by address
+  \details Cleans D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_INV_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= _VAL2FLD(CACHE_CIR_CLR_ONE, 1);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean and Invalidate by address
+  \details Cleans and invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_invalid_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_INV_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= _VAL2FLD(CACHE_CIR_CLR_ONE, 1) | _VAL2FLD(CACHE_CIR_INV_ONE, 1);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
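+
+/*
+  Maintenance sketch around a device/DMA transfer (illustrative; `buf` and its
+  size are assumptions): clean before the device reads the buffer, invalidate
+  before the CPU reads data the device has written.
+
+      csi_dcache_clean_range((uint32_t *)buf, (int32_t)sizeof(buf));     // CPU -> device
+      // ... device writes into buf ...
+      csi_dcache_invalid_range((uint32_t *)buf, (int32_t)sizeof(buf));   // device -> CPU
+*/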
+
+/**
+  \brief   Set up a cacheable address range
+  \details Configures one cache range control register (CRCR) with base address, size and enable.
+  */
+__STATIC_INLINE void csi_cache_set_range (uint32_t index, uint32_t baseAddr, uint32_t size, uint32_t enable)
+{
+    CACHE->CRCR[index] =  ((baseAddr & CACHE_CRCR_BASE_ADDR_Msk) |
+                           (_VAL2FLD(CACHE_CRCR_SIZE, size)) |
+                           (_VAL2FLD(CACHE_CRCR_EN, enable)));
+}
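+
+/*
+  Configuration sketch (illustrative; the base address is an assumption): mark an
+  8 MB window as cacheable using range register 0 and the CRCR size encodings above.
+
+      csi_cache_set_range(0U, 0x18000000U, CACHE_CRCR_8M, 1U);
+*/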
+
+/**
+  \brief   Enable cache profile
+  \details Turns on Cache profile
+  */
+__STATIC_INLINE void csi_cache_enable_profile (void)
+{
+    CACHE->CPFCR |=  (uint32_t)CACHE_CPFCR_PFEN_Msk;
+}
+
+/**
+  \brief   Disable cache profile
+  \details Turns off Cache profile
+  */
+__STATIC_INLINE void csi_cache_disable_profile (void)
+{
+    CACHE->CPFCR &=  ~(uint32_t)CACHE_CPFCR_PFEN_Msk;
+}
+
+/**
+  \brief   Reset cache profile
+  \details Reset Cache profile
+  */
+__STATIC_INLINE void csi_cache_reset_profile (void)
+{
+    CACHE->CPFCR |=  (uint32_t)CACHE_CPFCR_PFRST_Msk;
+}
+
+/**
+  \brief   cache access times
+  \details Cache access counter
+  \note    The counter increments once for every 256 accesses.
+  \return          cache access count; multiply by 256 for the actual number of accesses
+  */
+__STATIC_INLINE uint32_t csi_cache_get_access_time (void)
+{
+    return CACHE->CPFATR;
+}
+
+/**
+  \brief   cache miss times
+  \details Cache miss counter
+  \note    The counter increments once for every 256 misses.
+  \return          cache miss count; multiply by 256 for the actual number of misses
+  */
+__STATIC_INLINE uint32_t csi_cache_get_miss_time (void)
+{
+    return CACHE->CPFMTR;
+}
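+
+/*
+  Profiling sketch (illustrative only): both counters advance once per 256 events,
+  so their ratio can be used directly as an approximate miss rate.
+
+      csi_cache_reset_profile();
+      csi_cache_enable_profile();
+      // ... run the workload of interest ...
+      uint32_t accesses = csi_cache_get_access_time();
+      uint32_t misses   = csi_cache_get_miss_time();
+      // miss rate ~= (float)misses / (float)accesses, provided accesses != 0
+*/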
+
+/*@} end of CSI_Core_CacheFunctions */
+
+/* ##########################  MPU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_MPUFunctions MPU Functions
+  \brief    Functions that configure MPU.
+  @{
+ */
+
+typedef enum {
+    REGION_SIZE_4KB      = 0xB,
+    REGION_SIZE_8KB      = 0xC,
+    REGION_SIZE_16KB     = 0xD,
+    REGION_SIZE_32KB     = 0xE,
+    REGION_SIZE_64KB     = 0xF,
+    REGION_SIZE_128KB    = 0x10,
+    REGION_SIZE_256KB    = 0x11,
+    REGION_SIZE_512KB    = 0x12,
+    REGION_SIZE_1MB      = 0x13,
+    REGION_SIZE_2MB      = 0x14,
+    REGION_SIZE_4MB      = 0x15,
+    REGION_SIZE_8MB      = 0x16,
+    REGION_SIZE_16MB     = 0x17,
+    REGION_SIZE_32MB     = 0x18,
+    REGION_SIZE_64MB     = 0x19,
+    REGION_SIZE_128MB    = 0x1A,
+    REGION_SIZE_256MB    = 0x1B,
+    REGION_SIZE_512MB    = 0x1C,
+    REGION_SIZE_1GB      = 0x1D,
+    REGION_SIZE_2GB      = 0x1E,
+    REGION_SIZE_4GB      = 0x1F
+} region_size_e;
+
+typedef enum {
+    AP_BOTH_INACCESSIBLE = 0,
+    AP_SUPER_RW_USER_INACCESSIBLE,
+    AP_SUPER_RW_USER_RDONLY,
+    AP_BOTH_RW
+} access_permission_e;
+
+typedef struct {
+    uint32_t nx: 1;               /* non-executable: disable instruction fetch/execution */
+    access_permission_e ap: 2;    /* access permission for supervisor and user mode */
+    uint32_t s: 1;                /* security attribute */
+} mpu_region_attr_t;
+
+/**
+  \brief  enable mpu.
+  \details
+  */
+__STATIC_INLINE void csi_mpu_enable(void)
+{
+    __set_CCR(__get_CCR() | CCR_MP_Msk);
+}
+
+/**
+  \brief  disable mpu.
+  \details
+  */
+__STATIC_INLINE void csi_mpu_disable(void)
+{
+    __set_CCR(__get_CCR() & (~CCR_MP_Msk));
+}
+
+/**
+  \brief  configure memory protected region.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  \param [in]  base_addr  base address must be aligned with page size.
+  \param [in]  size       \ref region_size_e. memory protected region size.
+  \param [in]  attr       \ref mpu_region_attr_t. memory protected region attribute.
+  \param [in]  enable     enable or disable memory protected region.
+  */
+__STATIC_INLINE void csi_mpu_config_region(uint32_t idx, uint32_t base_addr, region_size_e size,
+                                           mpu_region_attr_t attr, uint32_t enable)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    CAPR_Type capr;
+    PACR_Type pacr;
+    PRSR_Type prsr;
+
+    capr.w = __get_CAPR();
+    pacr.w = __get_PACR();
+    prsr.w = __get_PRSR();
+
+    pacr.b.base_addr = (base_addr >> PACR_BASE_ADDR_Pos) & (0xFFFFF);
+
+    prsr.b.RID = idx;
+    __set_PRSR(prsr.w);
+
+    if (size != REGION_SIZE_4KB) {
+        pacr.w &= ~(((1u << (size -11)) - 1) << 12);
+    }
+
+    pacr.b.size = size;
+
+    capr.w &= ~((0x1 << idx) | (0x3 << (idx * 2 + 8)) | (0x1 << (idx + 24)));
+    capr.w = (capr.w | (attr.nx << idx) | (attr.ap << (idx * 2 + 8)) | (attr.s << (idx + 24)));
+    __set_CAPR(capr.w);
+
+    pacr.b.E = enable;
+    __set_PACR(pacr.w);
+}
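+
+/*
+  Configuration sketch (illustrative; the base address is an assumption): make
+  region 0 a 64 KB window that supervisor code may read/write, user code may only
+  read, and that is not executable, then turn the MPU on.
+
+      mpu_region_attr_t attr;
+      attr.nx = 1U;                          // no instruction fetch
+      attr.ap = AP_SUPER_RW_USER_RDONLY;
+      attr.s  = 0U;
+
+      csi_mpu_config_region(0U, 0x20000000U, REGION_SIZE_64KB, attr, 1U);
+      csi_mpu_enable();
+*/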
+
+/**
+  \brief  enable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  */
+__STATIC_INLINE void csi_mpu_enable_region(uint32_t idx)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | (idx << PRSR_RID_Pos));
+    __set_PACR(__get_PACR() | PACR_E_Msk);
+}
+
+/**
+  \brief  disable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  */
+__STATIC_INLINE void csi_mpu_disable_region(uint32_t idx)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | (idx << PRSR_RID_Pos));
+    __set_PACR(__get_PACR() & (~PACR_E_Msk));
+}
+
+/*@} end of CSI_Core_MPUFunctions */
+
+
+/* ##################################    IRQ Functions  ############################################ */
+
+/**
+  \brief   Save the Irq context
+  \details Save the PSR value, then disable interrupts.
+  \return           PSR value before interrupts were disabled.
+ */
+__STATIC_INLINE uint32_t csi_irq_save(void)
+{
+    uint32_t result;
+    result = __get_PSR();
+    __disable_irq();
+    return(result);
+}
+
+/**
+  \brief   Restore the Irq context
+  \details Restore the previously saved PSR state.
+  \param [in]      irq_state  psr irq state.
+ */
+__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
+{
+    __set_PSR(irq_state);
+}
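+
+/*
+  Critical-section sketch (illustrative only): disable interrupts around a short
+  update and restore the previous PSR state afterwards.
+
+      uint32_t state = csi_irq_save();
+      // ... touch data shared with interrupt handlers ...
+      csi_irq_restore(state);
+*/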
+
+/*@} end of IRQ Functions */
+
+/**
+  \brief   System Reset
+  \details Initiates a system reset request to reset the MCU.
+ */
+__STATIC_INLINE void csi_system_reset(void)
+{
+    CHR_Type chr;
+
+    chr.w = __get_CHR();
+#ifdef __RESET_CONST
+    chr.b.SRST_VAL = __RESET_CONST;
+#else
+    chr.b.SRST_VAL = 0xABCD;
+#endif
+
+    __DSB();                                                          /* Ensure all outstanding memory accesses, including
+                                                                         buffered writes, are completed before reset */
+    __set_CHR(chr.w);
+
+    __DSB();                                                          /* Ensure completion of memory access */
+
+    for(;;)                                                           /* wait until reset */
+    {
+        __NOP();
+    }
+}
+
+/* ##################################    Old Interfaces  ############################################ */
+
+/* These interfaces are deprecated */
+#define NVIC_EnableIRQ(IRQn)                               csi_vic_enable_irq(IRQn)
+#define NVIC_DisableIRQ(IRQn)                              csi_vic_disable_irq(IRQn)
+#define NVIC_GetPendingIRQ(IRQn)                           csi_vic_get_pending_irq(IRQn)
+#define NVIC_SetPendingIRQ(IRQn)                           csi_vic_set_pending_irq(IRQn)
+#define NVIC_ClearPendingIRQ(IRQn)                         csi_vic_clear_pending_irq(IRQn)
+#define NVIC_GetWakeupIRQ(IRQn)                            csi_vic_get_wakeup_irq(IRQn)
+#define NVIC_SetWakeupIRQ(IRQn)                            csi_vic_set_wakeup_irq(IRQn)
+#define NVIC_ClearWakeupIRQ(IRQn)                          csi_vic_clear_wakeup_irq(IRQn)
+#define NVIC_GetActive(IRQn)                               csi_vic_get_active(IRQn)
+#define NVIC_SetThreshold(VectThreshold, PrioThreshold)    csi_vic_set_threshold(VectThreshold, PrioThreshold)
+#define NVIC_SetPriority(IRQn, priority)                   csi_vic_set_prio(IRQn, priority)
+#define NVIC_GetPriority(IRQn)                             csi_vic_get_prio(IRQn)
+#define NVIC_SystemReset()                                 csi_system_reset()
+
+#define SysTick_Config(ticks)                              csi_coret_config(ticks, CORET_IRQn)
+#define CORET_Config(ticks)                                csi_coret_config(ticks, CORET_IRQn)
+
+#define SCB_EnableICache()                                 csi_icache_enable()
+#define SCB_DisableICache()                                csi_icache_disable()
+#define SCB_InvalidateICache()                             csi_icache_invalid()
+#define SCB_EnableDCache()                                 csi_dcache_enable()
+#define SCB_DisableDCache()                                csi_dcache_disable()
+#define SCB_InvalidateDCache()                             csi_dcache_invalid()
+#define SCB_CleanDCache()                                  csi_dcache_clean()
+#define SCB_CleanInvalidateDCache()                        csi_dcache_clean_invalid()
+#define SCB_InvalidateDCache_by_Addr(addr, dsize)          csi_dcache_invalid_range(addr, dsize)
+#define SCB_CleanDCache_by_Addr(addr, dsize)               csi_dcache_clean_range(addr, dsize)
+#define SCB_CleanInvalidateDCache_by_Addr(addr, dsize)     csi_dcache_clean_invalid_range(addr, dsize)
+#define SCB_Cacheable_Range(index, baseAddr, size, enable) csi_cache_set_range(index, baseAddr, size, enable)
+#define SCB_EnableCacheProfile()                           csi_cache_enable_profile()
+#define SCB_DisableCacheProfile()                          csi_cache_disable_profile()
+#define SCB_ResetCacheProfile()                            csi_cache_reset_profile()
+#define SCB_CacheAccessTime()                              csi_cache_get_access_time()
+#define SCB_CacheMissTime()                                csi_cache_get_miss_time()
+#define SCB_EnableCache()                                  csi_icache_enable();csi_dcache_enable()
+#define SCB_DisableCache()                                 csi_icache_disable();csi_dcache_disable()
+#define SCB_InvalidateCache()                              csi_icache_invalid();csi_dcache_invalid()
+#define SCB_CleanCache()                                   csi_dcache_clean()
+#define SCB_CleanInvalidateCache()                         csi_icache_invalid();csi_dcache_clean();csi_dcache_invalid()
+#define SCB_InvalidateCache_by_Addr(addr, dsize)           csi_dcache_invalid_range(addr, dsize);csi_icache_invalid()
+#define SCB_CleanCache_by_Addr(addr, dsize)                csi_dcache_clean_range(addr, dsize)
+#define SCB_CleanInvalidateCache_by_Addr(addr, dsize)      csi_dcache_clean_invalid_range(addr, dsize)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_803_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 1596 - 0
lib/sec_library/include/core/core_804.h

@@ -0,0 +1,1596 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_804.h
+ * @brief    CSI 804 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     02. June 2017
+ ******************************************************************************/
+
+#ifndef __CORE_804_H_GENERIC
+#define __CORE_804_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup Ck804
+  @{
+ */
+
+/*  CSI CK804 definitions */
+#define __CK804_CSI_VERSION_MAIN  (0x04U)                                      /*!< [31:16] CSI HAL main version */
+#define __CK804_CSI_VERSION_SUB   (0x1EU)                                      /*!< [15:0]  CSI HAL sub version */
+#define __CK804_CSI_VERSION       ((__CK804_CSI_VERSION_MAIN << 16U) | \
+                                   __CK804_CSI_VERSION_SUB           )         /*!< CSI HAL version number */
+
+#ifndef __CK80X
+#define __CK80X                (0x03U)                                         /*!< CK80X Core */
+#endif
+
+/* __FPU_USED indicates whether an FPU is used or not. */
+#define __FPU_USED       1U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CK804_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_CK804_H_DEPENDANT
+#define __CORE_CK804_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __CK804_REV
+#define __CK804_REV               0x0000U
+#endif
+
+#ifndef __VIC_PRIO_BITS
+#define __VIC_PRIO_BITS           2U
+#endif
+
+#ifndef __Vendor_SysTickConfig
+#define __Vendor_SysTickConfig    1U
+#endif
+
+#ifndef __GSR_GCR_PRESENT
+#define __GSR_GCR_PRESENT         0U
+#endif
+
+#ifndef __MPU_PRESENT
+#define __MPU_PRESENT             1U
+#endif
+
+#ifndef __ICACHE_PRESENT
+#define __ICACHE_PRESENT          1U
+#endif
+
+#ifndef __DCACHE_PRESENT
+#define __DCACHE_PRESENT          1U
+#endif
+
+#include <core/csi_gcc.h>
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group CK804 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+  - Core VIC Register
+  - Core Cache Register
+  - Core CoreTIM Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK80X processor based devices.
+*/
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \brief  Access Processor Status Register (PSR) struct definition.
+ */
+typedef union {
+    struct {
+        uint32_t C: 1;                       /*!< bit:      0  Conditional code/Carry flag */
+        uint32_t _reserved0: 5;              /*!< bit:  1.. 5  Reserved */
+        uint32_t IE: 1;                      /*!< bit:      6  Interrupt effective control bit */
+        uint32_t IC: 1;                      /*!< bit:      7  Interrupt control bit */
+        uint32_t EE: 1;                      /*!< bit:      8  Exception enable control bit */
+        uint32_t MM: 1;                      /*!< bit:      9  Unsymmetrical masking bit */
+        uint32_t _reserved1: 6;              /*!< bit: 10..15  Reserved */
+        uint32_t VEC: 8;                     /*!< bit: 16..23  Abnormal event vector value */
+        uint32_t _reserved2: 1;              /*!< bit:     24  Reserved */
+        uint32_t SV: 1;                      /*!< bit:     25  Stacked valid */
+        uint32_t SD: 1;                      /*!< bit:     26  Stacked dirty */
+        uint32_t SC: 1;                      /*!< bit:     27  Secure call bit */
+        uint32_t HS: 1;                      /*!< bit:     28  Hardware stacked bit */
+        uint32_t SP: 1;                      /*!< bit:     29  Secure pending bit */
+        uint32_t T: 1;                       /*!< bit:     30  TEE mode bit */
+        uint32_t S: 1;                       /*!< bit:     31  Superuser mode set bit */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PSR_Type;
+
+/* PSR Register Definitions */
+#define PSR_S_Pos                          31U                                            /*!< PSR: S Position */
+#define PSR_S_Msk                          (1UL << PSR_S_Pos)                             /*!< PSR: S Mask */
+
+#define PSR_VEC_Pos                        16U                                            /*!< PSR: VEC Position */
+#define PSR_VEC_Msk                        (0x7FUL << PSR_VEC_Pos)                        /*!< PSR: VEC Mask */
+
+#define PSR_T_Pos                          30U                                            /*!< PSR: T Position */
+#define PSR_T_Msk                          (1UL << PSR_T_Pos)                             /*!< PSR: T Mask */
+
+#define PSR_MM_Pos                         9U                                             /*!< PSR: MM Position */
+#define PSR_MM_Msk                         (1UL << PSR_MM_Pos)                            /*!< PSR: MM Mask */
+
+#define PSR_EE_Pos                         8U                                             /*!< PSR: EE Position */
+#define PSR_EE_Msk                         (1UL << PSR_EE_Pos)                            /*!< PSR: EE Mask */
+
+#define PSR_IC_Pos                         7U                                             /*!< PSR: IC Position */
+#define PSR_IC_Msk                         (1UL << PSR_IC_Pos)                            /*!< PSR: IC Mask */
+
+#define PSR_IE_Pos                         6U                                             /*!< PSR: IE Position */
+#define PSR_IE_Msk                         (1UL << PSR_IE_Pos)                            /*!< PSR: IE Mask */
+
+#define PSR_C_Pos                          0U                                             /*!< PSR: C Position */
+#define PSR_C_Msk                          (1UL << PSR_C_Pos)                             /*!< PSR: C Mask */
+
+/**
+  \brief Consortium definition for accessing Cache Configuration Registers(CCR, CR<18, 0>).
+ */
+typedef union {
+    struct {
+        uint32_t MP: 2;                      /*!< bit:  0.. 1  memory protection settings */
+        uint32_t _reserved0: 5;              /*!< bit:  2.. 6  Reserved */
+        uint32_t BE: 1;                      /*!< bit:      7  Endian mode */
+        uint32_t SCK: 3;                     /*!< bit:  8..10  the clock ratio of the system and the processor */
+        uint32_t _reserved1: 2;              /*!< bit: 11..12  Reserved */
+        uint32_t BE_V2: 1;                   /*!< bit:     13  V2 Endian mode */
+        uint32_t _reserved2: 18;             /*!< bit: 14..31  Reserved */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CCR_Type;
+
+/* CCR Register Definitions */
+#define CCR_BE_V2_Pos                     13U                                            /*!< CCR: BE_V2 Position */
+#define CCR_BE_V2_Msk                     (0x1UL << CCR_BE_V2_Pos)                       /*!< CCR: BE_V2 Mask */
+
+#define CCR_SCK_Pos                       8U                                             /*!< CCR: SCK Position */
+#define CCR_SCK_Msk                       (0x3UL << CCR_SCK_Pos)                         /*!< CCR: SCK Mask */
+
+#define CCR_BE_Pos                        7U                                             /*!< CCR: BE Position */
+#define CCR_BE_Msk                        (0x1UL << CCR_BE_Pos)                          /*!< CCR: BE Mask */
+
+#define CCR_MP_Pos                        0U                                             /*!< CCR: MP Position */
+#define CCR_MP_Msk                        (0x3UL << CCR_MP_Pos)                          /*!< CCR: MP Mask */
+
+/**
+  \brief  Consortium definition for accessing the access permission configuration register (CAPR, CR<19,0>)
+ */
+typedef union {
+    struct {
+        uint32_t X0: 1;                      /*!< bit:      0  Non executable attribute setting */
+        uint32_t X1: 1;                      /*!< bit:      1  Non executable attribute setting */
+        uint32_t X2: 1;                      /*!< bit:      2  Non executable attribute setting */
+        uint32_t X3: 1;                      /*!< bit:      3  Non executable attribute setting */
+        uint32_t X4: 1;                      /*!< bit:      4  Non executable attribute setting */
+        uint32_t X5: 1;                      /*!< bit:      5  Non executable attribute setting */
+        uint32_t X6: 1;                      /*!< bit:      6  Non executable attribute setting */
+        uint32_t X7: 1;                      /*!< bit:      7  Non executable attribute setting */
+        uint32_t AP0: 2;                     /*!< bit:  8.. 9  access permissions settings bit */
+        uint32_t AP1: 2;                     /*!< bit: 10..11  access permissions settings bit */
+        uint32_t AP2: 2;                     /*!< bit: 12..13  access permissions settings bit */
+        uint32_t AP3: 2;                     /*!< bit: 14..15  access permissions settings bit */
+        uint32_t AP4: 2;                     /*!< bit: 16..17  access permissions settings bit */
+        uint32_t AP5: 2;                     /*!< bit: 18..19  access permissions settings bit */
+        uint32_t AP6: 2;                     /*!< bit: 20..21  access permissions settings bit */
+        uint32_t AP7: 2;                     /*!< bit: 22..23  access permissions settings bit */
+        uint32_t S0: 1;                      /*!< bit:     24  Security property settings */
+        uint32_t S1: 1;                      /*!< bit:     25  Security property settings */
+        uint32_t S2: 1;                      /*!< bit:     26  Security property settings */
+        uint32_t S3: 1;                      /*!< bit:     27  Security property settings */
+        uint32_t S4: 1;                      /*!< bit:     28  Security property settings */
+        uint32_t S5: 1;                      /*!< bit:     29  Security property settings */
+        uint32_t S6: 1;                      /*!< bit:     30  Security property settings */
+        uint32_t S7: 1;                      /*!< bit:     31  Security property settings */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CAPR_Type;
+
+/* CAPR Register Definitions */
+#define CAPR_S7_Pos                        31U                                            /*!< CAPR: S7 Position */
+#define CAPR_S7_Msk                        (1UL << CAPR_S7_Pos)                           /*!< CAPR: S7 Mask */
+
+#define CAPR_S6_Pos                        30U                                            /*!< CAPR: S6 Position */
+#define CAPR_S6_Msk                        (1UL << CAPR_S6_Pos)                           /*!< CAPR: S6 Mask */
+
+#define CAPR_S5_Pos                        29U                                            /*!< CAPR: S5 Position */
+#define CAPR_S5_Msk                        (1UL << CAPR_S5_Pos)                           /*!< CAPR: S5 Mask */
+
+#define CAPR_S4_Pos                        28U                                            /*!< CAPR: S4 Position */
+#define CAPR_S4_Msk                        (1UL << CAPR_S4_Pos)                           /*!< CAPR: S4 Mask */
+
+#define CAPR_S3_Pos                        27U                                            /*!< CAPR: S3 Position */
+#define CAPR_S3_Msk                        (1UL << CAPR_S3_Pos)                           /*!< CAPR: S3 Mask */
+
+#define CAPR_S2_Pos                        26U                                            /*!< CAPR: S2 Position */
+#define CAPR_S2_Msk                        (1UL << CAPR_S2_Pos)                           /*!< CAPR: S2 Mask */
+
+#define CAPR_S1_Pos                        25U                                            /*!< CAPR: S1 Position */
+#define CAPR_S1_Msk                        (1UL << CAPR_S1_Pos)                           /*!< CAPR: S1 Mask */
+
+#define CAPR_S0_Pos                        24U                                            /*!< CAPR: S0 Position */
+#define CAPR_S0_Msk                        (1UL << CAPR_S0_Pos)                           /*!< CAPR: S0 Mask */
+
+#define CAPR_AP7_Pos                       22U                                            /*!< CAPR: AP7 Position */
+#define CAPR_AP7_Msk                       (0x3UL << CAPR_AP7_Pos)                        /*!< CAPR: AP7 Mask */
+
+#define CAPR_AP6_Pos                       20U                                            /*!< CAPR: AP6 Position */
+#define CAPR_AP6_Msk                       (0x3UL << CAPR_AP6_Pos)                        /*!< CAPR: AP6 Mask */
+
+#define CAPR_AP5_Pos                       18U                                            /*!< CAPR: AP5 Position */
+#define CAPR_AP5_Msk                       (0x3UL << CAPR_AP5_Pos)                        /*!< CAPR: AP5 Mask */
+
+#define CAPR_AP4_Pos                       16U                                            /*!< CAPR: AP4 Position */
+#define CAPR_AP4_Msk                       (0x3UL << CAPR_AP4_Pos)                        /*!< CAPR: AP4 Mask */
+
+#define CAPR_AP3_Pos                       14U                                            /*!< CAPR: AP3 Position */
+#define CAPR_AP3_Msk                       (0x3UL << CAPR_AP3_Pos)                        /*!< CAPR: AP3 Mask */
+
+#define CAPR_AP2_Pos                       12U                                            /*!< CAPR: AP2 Position */
+#define CAPR_AP2_Msk                       (0x3UL << CAPR_AP2_Pos)                        /*!< CAPR: AP2 Mask */
+
+#define CAPR_AP1_Pos                       10U                                            /*!< CAPR: AP1 Position */
+#define CAPR_AP1_Msk                       (0x3UL << CAPR_AP1_Pos)                        /*!< CAPR: AP1 Mask */
+
+#define CAPR_AP0_Pos                       8U                                             /*!< CAPR: AP0 Position */
+#define CAPR_AP0_Msk                       (0x3UL << CAPR_AP0_Pos)                        /*!< CAPR: AP0 Mask */
+
+#define CAPR_X7_Pos                        7U                                             /*!< CAPR: X7 Position */
+#define CAPR_X7_Msk                        (0x1UL << CAPR_X7_Pos)                         /*!< CAPR: X7 Mask */
+
+#define CAPR_X6_Pos                        6U                                             /*!< CAPR: X6 Position */
+#define CAPR_X6_Msk                        (0x1UL << CAPR_X6_Pos)                         /*!< CAPR: X6 Mask */
+
+#define CAPR_X5_Pos                        5U                                             /*!< CAPR: X5 Position */
+#define CAPR_X5_Msk                        (0x1UL << CAPR_X5_Pos)                         /*!< CAPR: X5 Mask */
+
+#define CAPR_X4_Pos                        4U                                             /*!< CAPR: X4 Position */
+#define CAPR_X4_Msk                        (0x1UL << CAPR_X4_Pos)                         /*!< CAPR: X4 Mask */
+
+#define CAPR_X3_Pos                        3U                                             /*!< CAPR: X3 Position */
+#define CAPR_X3_Msk                        (0x1UL << CAPR_X3_Pos)                         /*!< CAPR: X3 Mask */
+
+#define CAPR_X2_Pos                        2U                                             /*!< CAPR: X2 Position */
+#define CAPR_X2_Msk                        (0x1UL << CAPR_X2_Pos)                         /*!< CAPR: X2 Mask */
+
+#define CAPR_X1_Pos                        1U                                             /*!< CAPR: X1 Position */
+#define CAPR_X1_Msk                        (0x1UL << CAPR_X1_Pos)                         /*!< CAPR: X1 Mask */
+
+#define CAPR_X0_Pos                        0U                                             /*!< CAPR: X0 Position */
+#define CAPR_X0_Msk                        (0x1UL << CAPR_X0_Pos)                         /*!< CAPR: X0 Mask */
+
+/**
+  \brief  Consortium definition for accessing the protection area control register (PACR, CR<20,0>).
+ */
+typedef union {
+    struct {
+        uint32_t E: 1;                       /*!< bit:      0  Effective setting of protected area */
+        uint32_t size: 5;                    /*!< bit:  1.. 5  Size of protected area */
+        uint32_t _reserved0: 6;              /*!< bit:  6.. 11 Reserved */
+        uint32_t base_addr: 20;              /*!< bit: 12..31  High-order bits of the protected area base address */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PACR_Type;
+
+/* PACR Register Definitions */
+#define PACR_BASE_ADDR_Pos                 12U                                            /*!< PACR: base_addr Position */
+#define PACR_BASE_ADDR_Msk                 (0xFFFFFUL << PACR_BASE_ADDR_Pos)              /*!< PACR: base_addr Mask */
+
+#define PACR_SIZE_Pos                      1U                                             /*!< PACR: Size Position */
+#define PACR_SIZE_Msk                      (0x1FUL << PACR_SIZE_Pos)                      /*!< PACR: Size Mask */
+
+#define PACR_E_Pos                         0U                                             /*!< PACR: E Position */
+#define PACR_E_Msk                         (0x1UL << PACR_E_Pos)                          /*!< PACR: E Mask */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register (PRSR, CR<21,0>).
+ */
+typedef union {
+    struct {
+        uint32_t RID: 3;                     /*!< bit:  0.. 2  Protected area index value */
+        uint32_t _reserved0: 29;             /*!< bit:  3..31  Reserved */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} PRSR_Type;
+
+/* PRSR Register Definitions */
+#define PRSR_RID_Pos                       0U                                            /*!< PRSR: RID Position */
+#define PRSR_RID_Msk                       (0x7UL << PRSR_RID_Pos)                       /*!< PRSR: RID Mask */
+
+/**
+  \brief  Consortium definition for CPU Hint Register (CHR, CR<31,0>).
+ */
+typedef union {
+    struct {
+        uint32_t _reserved0: 1;              /*!< bit:  0      Reserved */
+        uint32_t BE: 1;                      /*!< bit:  1      System bus burst transfer support */
+        uint32_t IPE: 1;                     /*!< bit:  2      Instruction prefetch function enable */
+        uint32_t RPE: 1;                     /*!< bit:  3      Speculative execution enable for the function return instruction RTS */
+        uint32_t IAE: 1;                     /*!< bit:  4      Interrupt response acceleration enable */
+        uint32_t _reserved1: 9;              /*!< bit:  5..13  Reserved */
+        uint32_t ISE: 1;                     /*!< bit: 14      Interrupt SP enable */
+        uint32_t HS_EXP: 1;                  /*!< bit: 15      Exception bit for TEE world switch */
+        uint32_t SRST_VAL: 16;               /*!< bit: 16..31  Software reset decision value */
+    } b;
+    uint32_t w;
+} CHR_Type;
+
+/* CHR Register Definitions */
+#define CHR_BE_Pos                         1U                                            /*!< CHR: BE Position */
+#define CHR_BE_Msk                         (1UL << CHR_BE_Pos)                           /*!< CHR: BE Mask */
+#define CHR_IPE_Pos                        2U                                            /*!< CHR: IPE Position */
+#define CHR_IPE_Msk                        (1UL << CHR_IPE_Pos)                          /*!< CHR: IPE Mask */
+#define CHR_RPE_Pos                        3U                                            /*!< CHR: RPE Position */
+#define CHR_RPE_Msk                        (1UL << CHR_RPE_Pos)                          /*!< CHR: RPE Mask */
+#define CHR_IAE_Pos                        4U                                            /*!< CHR: IAE Position */
+#define CHR_IAE_Msk                        (0x1UL << CHR_IAE_Pos)                        /*!< CHR: IAE Mask */
+#define CHR_ISE_Pos                        14U                                           /*!< CHR: ISE Position */
+#define CHR_ISE_Msk                        (0x1UL << CHR_ISE_Pos)                        /*!< CHR: ISE Mask */
+#define CHR_HS_EXP_Pos                     15U                                           /*!< CHR: HS_EXP Position */
+#define CHR_HS_EXP_Msk                     (0x1UL << CHR_HS_EXP_Pos)                     /*!< CHR: HS_EXP Mask */
+#define CHR_SRST_VAL_Pos                   16U                                           /*!< CHR: SRST_VAL Position */
+#define CHR_SRST_VAL_Mask                  (0xFFFFUL << CHR_SRST_VAL_Pos)                /*!< CHR: SRST_VAL Mask */
+
+/*@} end of group CSI_CORE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_VIC Vectored Interrupt Controller (VIC)
+  \brief      Type definitions for the VIC Registers
+  @{
+ */
+
+/**
+  \brief Access to the structure of a vector interrupt controller.
+ */
+typedef struct {
+    __IOM uint32_t ISER[4U];               /*!< Offset: 0x000 (R/W)  Interrupt set enable register */
+    uint32_t RESERVED0[12U];
+    __IOM uint32_t IWER[4U];               /*!< Offset: 0x040 (R/W)  Interrupt wake-up set register */
+    uint32_t RESERVED1[12U];
+    __IOM uint32_t ICER[4U];               /*!< Offset: 0x080 (R/W)  Interrupt clear enable register */
+    uint32_t RESERVED2[12U];
+    __IOM uint32_t IWDR[4U];               /*!< Offset: 0x0c0 (R/W)  Interrupt wake-up clear register */
+    uint32_t RESERVED3[12U];
+    __IOM uint32_t ISPR[4U];               /*!< Offset: 0x100 (R/W)  Interrupt set pend register */
+    uint32_t RESERVED4[12U];
+    __IOM uint32_t ISSR[4U];               /*!< Offset: 0x140 (R/W)  Security interrupt set register */
+    uint32_t RESERVED5[12U];
+    __IOM uint32_t ICPR[4U];               /*!< Offset: 0x180 (R/W)  Interrupt clear pend register */
+    uint32_t RESERVED6[12U];
+    __IOM uint32_t ICSR[4U];               /*!< Offset: 0x1c0 (R/W)  Security interrupt clear register */
+    uint32_t RESERVED7[12U];
+    __IOM uint32_t IABR[4U];               /*!< Offset: 0x200 (R/W)  Interrupt answer state register */
+    uint32_t RESERVED8[60U];
+    __IOM uint32_t IPR[32U];               /*!< Offset: 0x300 (R/W)  Interrupt priority register */
+    uint32_t RESERVED9[480U];
+    __IM  uint32_t ISR;                    /*!< Offset: 0xB00 (R/ )  Interrupt state register */
+    __IOM uint32_t IPTR;                   /*!< Offset: 0xB04 (R/W)  Interrupt priority threshold register */
+    __IOM uint32_t TSPEND;                 /*!< Offset: 0xB08 (R/W)  Task pending register */
+    __IOM uint32_t TSABR;                  /*!< Offset: 0xB0c (R/W)  Tspend acknowledge register */
+    __IOM uint32_t TSPR;                   /*!< Offset: 0xB10 (R/W)  Tspend priority register */
+} VIC_Type;
+
+/*@} end of group CSI_VIC */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+
+/**
+  \brief On chip cache structure.
+ */
+typedef struct
+{
+  __IOM uint32_t CER;                    /*!< Offset: 0x000 (R/W)  Cache enable register */
+  __IOM uint32_t CIR;                    /*!< Offset: 0x004 (R/W)  Cache invalid register */
+  __IOM uint32_t CRCR[4U];               /*!< Offset: 0x008 (R/W)  Cache Configuration register */
+        uint32_t RSERVED0[1015U];
+  __IOM uint32_t CPFCR;                  /*!< Offset: 0xFF4 (R/W)  Cache performance analysis control register */
+  __IOM uint32_t CPFATR;                 /*!< Offset: 0xFF8 (R/W)  Cache access times register */
+  __IOM uint32_t CPFMTR;                 /*!< Offset: 0xFFC (R/W)  Cache missing times register */
+} CACHE_Type;
+
+/* CACHE Register Definitions */
+#define CACHE_CER_EN_Pos                       0U                                            /*!< CACHE CER: EN Position */
+#define CACHE_CER_EN_Msk                       (0x1UL << CACHE_CER_EN_Pos)                   /*!< CACHE CER: EN Mask */
+
+#define CACHE_CER_CFIG_Pos                     1U                                            /*!< CACHE CER: CFIG Position */
+#define CACHE_CER_CFIG_Msk                     (0x1UL << CACHE_CER_CFIG_Pos)                 /*!< CACHE CER: CFIG Mask */
+
+#define CACHE_CER_WB_Pos                       2U                                            /*!< CACHE CER: WB Position */
+#define CACHE_CER_WB_Msk                       (0x1UL << CACHE_CER_WB_Pos)                   /*!< CACHE CER: WB Mask */
+
+#define CACHE_CER_DCW_Pos                      4U                                            /*!< CACHE CER: DCW Position */
+#define CACHE_CER_DCW_Msk                      (0x1UL << CACHE_CER_DCW_Pos)                  /*!< CACHE CER: DCW Mask */
+
+#define CACHE_CER_WA_Pos                       5U                                            /*!< CACHE CER: WA Position */
+#define CACHE_CER_WA_Msk                       (0x1UL << CACHE_CER_WA_Pos)                   /*!< CACHE CER: WA Mask */
+
+#define CACHE_CIR_INV_ALL_Pos                  0U                                            /*!< CACHE CIR: INV_ALL Position */
+#define CACHE_CIR_INV_ALL_Msk                  (0x1UL << CACHE_CIR_INV_ALL_Pos)              /*!< CACHE CIR: INV_ALL Mask */
+
+#define CACHE_CIR_INV_ONE_Pos                  1U                                            /*!< CACHE CIR: INV_ONE Position */
+#define CACHE_CIR_INV_ONE_Msk                  (0x1UL << CACHE_CIR_INV_ONE_Pos)              /*!< CACHE CIR: INV_ONE Mask */
+
+#define CACHE_CIR_CLR_ALL_Pos                  2U                                            /*!< CACHE CIR: CLR_ALL Position */
+#define CACHE_CIR_CLR_ALL_Msk                  (0x1UL << CACHE_CIR_CLR_ALL_Pos)              /*!< CACHE CIR: CLR_ALL Mask */
+
+#define CACHE_CIR_CLR_ONE_Pos                  3U                                            /*!< CACHE CIR: CLR_ONE Position */
+#define CACHE_CIR_CLR_ONE_Msk                  (0x1UL << CACHE_CIR_CLR_ONE_Pos)              /*!< CACHE CIR: CLR_ONE Mask */
+
+#define CACHE_CIR_INV_ADDR_Pos                 4U                                            /*!< CACHE CIR: INV_ADDR Position */
+#define CACHE_CIR_INV_ADDR_Msk                 (0xFFFFFFFUL << CACHE_CIR_INV_ADDR_Pos)       /*!< CACHE CIR: INV_ADDR Mask */
+
+#define CACHE_CRCR_EN_Pos                      0U                                            /*!< CACHE CRCR: EN Position */
+#define CACHE_CRCR_EN_Msk                      (0x1UL << CACHE_CRCR_EN_Pos)                  /*!< CACHE CRCR: EN Mask */
+
+#define CACHE_CRCR_SIZE_Pos                    1U                                            /*!< CACHE CRCR: Size Position */
+#define CACHE_CRCR_SIZE_Msk                    (0x1FUL << CACHE_CRCR_SIZE_Pos)               /*!< CACHE CRCR: Size Mask */
+
+#define CACHE_CRCR_BASE_ADDR_Pos               10U                                           /*!< CACHE CRCR: base addr Position */
+#define CACHE_CRCR_BASE_ADDR_Msk               (0x3FFFFFUL << CACHE_CRCR_BASE_ADDR_Pos)      /*!< CACHE CRCR: base addr Mask */
+
+#define CACHE_CPFCR_PFEN_Pos                   0U                                            /*!< CACHE CPFCR: PFEN Position */
+#define CACHE_CPFCR_PFEN_Msk                   (0x1UL << CACHE_CPFCR_PFEN_Pos)               /*!< CACHE CPFCR: PFEN Mask */
+
+#define CACHE_CPFCR_PFRST_Pos                  1U                                            /*!< CACHE CPFCR: PFRST Position */
+#define CACHE_CPFCR_PFRST_Msk                  (0x1UL << CACHE_CPFCR_PFRST_Pos)              /*!< CACHE CPFCR: PFRST Mask */
+
+#define CACHE_CRCR_4K                          0xB                                           /* 01011 */
+#define CACHE_CRCR_8K                          0xC                                           /* 01100 */
+#define CACHE_CRCR_16K                         0xD                                           /* 01101 */
+#define CACHE_CRCR_32K                         0xE                                           /* 01110 */
+#define CACHE_CRCR_64K                         0xF                                           /* 01111 */
+#define CACHE_CRCR_128K                        0x10                                          /* 10000 */
+#define CACHE_CRCR_256K                        0x11                                          /* 10001 */
+#define CACHE_CRCR_512K                        0x12                                          /* 10010 */
+#define CACHE_CRCR_1M                          0x13                                          /* 10011 */
+#define CACHE_CRCR_2M                          0x14                                          /* 10100 */
+#define CACHE_CRCR_4M                          0x15                                          /* 10101 */
+#define CACHE_CRCR_8M                          0x16                                          /* 10110 */
+#define CACHE_CRCR_16M                         0x17                                          /* 10111 */
+#define CACHE_CRCR_32M                         0x18                                          /* 11000 */
+#define CACHE_CRCR_64M                         0x19                                          /* 11001 */
+#define CACHE_CRCR_128M                        0x1A                                          /* 11010 */
+#define CACHE_CRCR_256M                        0x1B                                          /* 11011 */
+#define CACHE_CRCR_512M                        0x1C                                          /* 11100 */
+#define CACHE_CRCR_1G                          0x1D                                          /* 11101 */
+#define CACHE_CRCR_2G                          0x1E                                          /* 11110 */
+#define CACHE_CRCR_4G                          0x1F                                          /* 11111 */
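+
+/*
+  Editorial note (not part of the original header): the CRCR size encodings
+  above follow  value = log2(size in bytes) - 1.  For example, a 4 KB region
+  is 2^12 bytes and encodes as 12 - 1 = 11 = 0xB (CACHE_CRCR_4K), while a
+  2 MB region is 2^21 bytes and encodes as 21 - 1 = 20 = 0x14 (CACHE_CRCR_2M).
+ */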
+
+/*@} end of group CSI_CACHE */
+
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_SysTick     System Tick Timer (CORET)
+  \brief    Type definitions for the System Timer Registers.
+  @{
+ */
+
+/**
+  \brief  The data structure of the access system timer.
+ */
+typedef struct {
+    __IOM uint32_t CTRL;                   /*!< Offset: 0x000 (R/W)  Control register */
+    __IOM uint32_t LOAD;                   /*!< Offset: 0x004 (R/W)  Reload value register */
+    __IOM uint32_t VAL;                    /*!< Offset: 0x008 (R/W)  Current register */
+    __IM  uint32_t CALIB;                  /*!< Offset: 0x00C (R/ )  Calibration register */
+} CORET_Type;
+
+/* CORET Control / Status Register Definitions */
+#define CORET_CTRL_COUNTFLAG_Pos           16U                                            /*!< CORET CTRL: COUNTFLAG Position */
+#define CORET_CTRL_COUNTFLAG_Msk           (1UL << CORET_CTRL_COUNTFLAG_Pos)              /*!< CORET CTRL: COUNTFLAG Mask */
+
+#define CORET_CTRL_CLKSOURCE_Pos           2U                                             /*!< CORET CTRL: CLKSOURCE Position */
+#define CORET_CTRL_CLKSOURCE_Msk           (1UL << CORET_CTRL_CLKSOURCE_Pos)              /*!< CORET CTRL: CLKSOURCE Mask */
+
+#define CORET_CTRL_TICKINT_Pos             1U                                             /*!< CORET CTRL: TICKINT Position */
+#define CORET_CTRL_TICKINT_Msk             (1UL << CORET_CTRL_TICKINT_Pos)                /*!< CORET CTRL: TICKINT Mask */
+
+#define CORET_CTRL_ENABLE_Pos              0U                                             /*!< CORET CTRL: ENABLE Position */
+#define CORET_CTRL_ENABLE_Msk              (1UL /*<< CORET_CTRL_ENABLE_Pos*/)             /*!< CORET CTRL: ENABLE Mask */
+
+    /* CORET Reload Register Definitions */
+#define CORET_LOAD_RELOAD_Pos              0U                                             /*!< CORET LOAD: RELOAD Position */
+#define CORET_LOAD_RELOAD_Msk              (0xFFFFFFUL /*<< CORET_LOAD_RELOAD_Pos*/)      /*!< CORET LOAD: RELOAD Mask */
+
+    /* CORET Current Register Definitions */
+#define CORET_VAL_CURRENT_Pos              0U                                             /*!< CORET VAL: CURRENT Position */
+#define CORET_VAL_CURRENT_Msk              (0xFFFFFFUL /*<< CORET_VAL_CURRENT_Pos*/)      /*!< CORET VAL: CURRENT Mask */
+
+    /* CORET Calibration Register Definitions */
+#define CORET_CALIB_NOREF_Pos              31U                                            /*!< CORET CALIB: NOREF Position */
+#define CORET_CALIB_NOREF_Msk              (1UL << CORET_CALIB_NOREF_Pos)                 /*!< CORET CALIB: NOREF Mask */
+
+#define CORET_CALIB_SKEW_Pos               30U                                            /*!< CORET CALIB: SKEW Position */
+#define CORET_CALIB_SKEW_Msk               (1UL << CORET_CALIB_SKEW_Pos)                  /*!< CORET CALIB: SKEW Mask */
+
+#define CORET_CALIB_TENMS_Pos              0U                                             /*!< CORET CALIB: TENMS Position */
+#define CORET_CALIB_TENMS_Msk              (0xFFFFFFUL /*<< CORET_CALIB_TENMS_Pos*/)      /*!< CORET CALIB: TENMS Mask */
+
+/*@} end of group CSI_SysTick */
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_DCC
+  \brief    Type definitions for the DCC.
+  @{
+ */
+
+/**
+  \brief  Access to the data structure of DCC.
+ */
+typedef struct {
+    uint32_t RESERVED0[13U];
+    __IOM uint32_t HCR;                    /*!< Offset: 0x034 (R/W) */
+    __IM uint32_t EHSR;                    /*!< Offset: 0x03C (R/ ) */
+    uint32_t RESERVED1[6U];
+    union {
+        __IM uint32_t DERJW;               /*!< Offset: 0x058 (R/ )  Data exchange register, CPU read */
+        __OM uint32_t DERJR;               /*!< Offset: 0x058 ( /W)  Data exchange register, CPU write */
+    };
+
+} DCC_Type;
+
+#define DCC_HCR_JW_Pos                   18U                                            /*!< DCC HCR: jw_int_en Position */
+#define DCC_HCR_JW_Msk                   (1UL << DCC_HCR_JW_Pos)                        /*!< DCC HCR: jw_int_en Mask */
+
+#define DCC_HCR_JR_Pos                   19U                                            /*!< DCC HCR: jr_int_en Position */
+#define DCC_HCR_JR_Msk                   (1UL << DCC_HCR_JR_Pos)                        /*!< DCC HCR: jr_int_en Mask */
+
+#define DCC_EHSR_JW_Pos                  1U                                             /*!< DCC EHSR: jw_vld Position */
+#define DCC_EHSR_JW_Msk                  (1UL << DCC_EHSR_JW_Pos)                       /*!< DCC EHSR: jw_vld Mask */
+
+#define DCC_EHSR_JR_Pos                  2U                                             /*!< DCC EHSR: jr_vld Position */
+#define DCC_EHSR_JR_Msk                  (1UL << DCC_EHSR_JR_Pos)                       /*!< DCC EHSR: jr_vld Mask */
+
+/*@} end of group CSI_DCC */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)
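+
+/*
+  Editorial usage sketch (not part of the original header): reading and
+  updating a bit field with _FLD2VAL/_VAL2FLD. __get_CCR()/__set_CCR() are
+  the CCR access intrinsics already used elsewhere in this file.
+
+      uint32_t ccr = __get_CCR();
+      uint32_t sck = _FLD2VAL(CCR_SCK, ccr);              // extract the SCK clock-ratio field
+      ccr = (ccr & ~CCR_BE_Msk) | _VAL2FLD(CCR_BE, 0U);   // program the BE field, keep the rest
+      __set_CCR(ccr);
+ */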
+
+/*@} end of group CSI_core_bitfield */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_base     Core Definitions
+  \brief      Definitions for base addresses, unions, and structures.
+  @{
+ */
+
+/* Memory mapping of CK804 Hardware */
+#define TCIP_BASE           (0xE000E000UL)                            /*!< Tightly Coupled IP Base Address */
+#define CORET_BASE          (TCIP_BASE +  0x0010UL)                   /*!< CORET Base Address */
+#define VIC_BASE            (TCIP_BASE +  0x0100UL)                   /*!< VIC Base Address */
+#define DCC_BASE            (0xE0011000UL)                            /*!< DCC Base Address */
+#define CACHE_BASE          (TCIP_BASE +  0x1000UL)                   /*!< CACHE Base Address */
+
+#define CORET               ((CORET_Type   *)     CORET_BASE  )       /*!< SysTick configuration struct */
+#define VIC                 ((VIC_Type    *)     VIC_BASE   )         /*!< VIC configuration struct */
+#define DCC                 ((DCC_Type     *)     DCC_BASE    )       /*!< DCC configuration struct */
+#define CACHE               ((CACHE_Type   *)     CACHE_BASE  )       /*!< cache configuration struct */
+
+/*@} */
+
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+/**
+  \defgroup CSI_Core_FunctionInterface Functions and Instructions Reference
+*/
+
+/* ##########################   VIC functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_VICFunctions VIC Functions
+  \brief    Functions that manage interrupts and exceptions via the VIC.
+  @{
+ */
+
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _IR_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    5UL)      )
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
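+/* Editorial note (not part of the original header): _IR_IDX selects the
+   32-bit word IRQn / 32 in the ISER/ICER/ISPR/... banks, while _IP_IDX and
+   _BIT_SHIFT address the priority array: each 32-bit IPR register packs four
+   8-bit priority fields, so IPR[IRQn / 4] holds the field at byte lane
+   (IRQn % 4) * 8. */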
+
+/**
+  \brief   Enable External Interrupt
+  \details Enable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ISER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Disable External Interrupt
+  \details Disable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ICER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Enable External Secure Interrupt
+  \details Enable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_sirq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ISER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+    VIC->ISSR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Disable External Secure Interrupt
+  \details Disable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_sirq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ICER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+    VIC->ICSR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Check Interrupt is Enabled or not
+  \details Read the enable register in the VIC and return the enable bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not enabled.
+  \return             1  Interrupt status is enabled.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_enabled_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->ISER[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Check Interrupt is Pending or not
+  \details Read the pending register in the VIC and return the pending bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not pending.
+  \return             1  Interrupt status is pending.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_pending_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->ISPR[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Pending Interrupt
+  \details Set the pending bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_pending_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ISPR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Clear Pending Interrupt
+  \details Clear the pending bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_pending_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ICPR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Check Interrupt is Wakeup or not
+  \details Read the wake-up register in the VIC and return the wake-up bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt is not set as wake up interrupt.
+  \return             1  Interrupt is set as wake up interrupt.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_wakeup_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->IWER[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Wake up Interrupt
+  \details Set the wake up bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_wakeup_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->IWER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Clear Wake up Interrupt
+  \details Clear the wake up bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_wakeup_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->IWDR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Get Interrupt is Active or not
+  \details Read the active register in the VIC and return the active bit for the device specific interrupt.
+  \param [in]      IRQn  Device specific interrupt number.
+  \return             0  Interrupt status is not active.
+  \return             1  Interrupt status is active.
+  \note    IRQn must not be negative.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_active(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->IABR[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Threshold register
+  \details set the threshold register in the VIC.
+  \param [in]      VectThreshold  specific vector threshold.
+  \param [in]      PrioThreshold  specific priority threshold.
+ */
+__STATIC_INLINE void csi_vic_set_threshold(uint32_t VectThreshold, uint32_t PrioThreshold)
+{
+    VectThreshold &= 0x7FUL;
+
+    if (VectThreshold <= 31) {
+        VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0x3) << 6);
+    }
+
+    if (VectThreshold > 31 && VectThreshold < 96) {
+        VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0x7) << 5);
+    }
+
+    if (VectThreshold > 95) {
+        VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0xF) << 4);
+    }
+}
+
+/**
+  \brief   Set Interrupt Priority
+  \details Set the priority of an interrupt.
+  \note    The priority cannot be set for every core interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]  priority  Priority to set.
+ */
+__STATIC_INLINE void csi_vic_set_prio(int32_t IRQn, uint32_t priority)
+{
+    VIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(VIC->IPR[_IP_IDX(IRQn)]  & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
+                                 (((priority << (8U - __VIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
+}
+
+/**
+  \brief   Get Interrupt Priority
+  \details Read the priority of an interrupt.
+           The interrupt number can be positive to specify an external (device specific) interrupt,
+           or negative to specify an internal (core) interrupt.
+  \param [in]   IRQn  Interrupt number.
+  \return             Interrupt Priority.
+                      Value is aligned automatically to the implemented priority bits of the microcontroller.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_prio(int32_t IRQn)
+{
+    return ((uint32_t)(((VIC->IPR[_IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn)) & (uint32_t)0xFFUL) >> (8U - __VIC_PRIO_BITS)));
+}
+
+/**
+  \brief   Set interrupt handler
+  \details Set the interrupt handler for the given interrupt number; the handler address is written into the IRQ vector table.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]   handler  Interrupt handler.
+ */
+__STATIC_INLINE void csi_vic_set_vector(int32_t IRQn, uint32_t handler)
+{
+    if (IRQn >= 0 && IRQn < 128) {
+        uint32_t *vectors = (uint32_t *)__get_VBR();
+        vectors[32 + IRQn] = handler;
+    }
+}
+
+/**
+  \brief   Get interrupt handler
+  \details Get the address of interrupt handler function.
+  \param [in]      IRQn  Interrupt number.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_vector(int32_t IRQn)
+{
+    if (IRQn >= 0 && IRQn < 128) {
+        uint32_t *vectors = (uint32_t *)__get_VBR();
+        return (uint32_t)vectors[32 + IRQn];
+    }
+
+    return 0;
+}
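+
+/*
+  Editorial usage sketch (not part of the original header): installing and
+  enabling a device interrupt. UART0_IRQn and uart0_irq_handler are
+  hypothetical names; real numbers and handlers come from the device code.
+
+      extern void uart0_irq_handler(void);
+      #define UART0_IRQn  16
+
+      csi_vic_set_vector(UART0_IRQn, (uint32_t)uart0_irq_handler);
+      csi_vic_set_prio(UART0_IRQn, 1U);
+      csi_vic_enable_irq(UART0_IRQn);
+ */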
+
+/*@} end of CSI_Core_VICFunctions */
+
+/* ##################################    SysTick function  ############################################ */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_SysTickFunctions SysTick Functions
+  \brief    Functions that configure the system tick timer.
+  @{
+ */
+
+
+/**
+  \brief   CORE timer Configuration
+  \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
+           Counter is in free running mode to generate periodic interrupts.
+  \param [in]  ticks  Number of ticks between two interrupts.
+  \param [in]  IRQn   core timer Interrupt number.
+  \return          0  Function succeeded.
+  \return          1  Function failed.
+  \note    When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
+           function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
+           must contain a vendor-specific implementation of this function.
+ */
+__STATIC_INLINE uint32_t csi_coret_config(uint32_t ticks, int32_t IRQn)
+{
+    if ((ticks - 1UL) > CORET_LOAD_RELOAD_Msk) {
+        return (1UL);                                                   /* Reload value impossible */
+    }
+
+    CORET->LOAD = (uint32_t)(ticks - 1UL);                              /* set reload register */
+    CORET->VAL  = 0UL;                                                  /* Load the CORET Counter Value */
+    CORET->CTRL = CORET_CTRL_CLKSOURCE_Msk |
+                   CORET_CTRL_TICKINT_Msk |
+                   CORET_CTRL_ENABLE_Msk;                               /* Enable CORET IRQ and CORET Timer */
+    return (0UL);                                                       /* Function successful */
+}
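+
+/*
+  Editorial usage sketch (not part of the original header): a 1 ms periodic
+  tick. SYSTEM_CLOCK is an assumed core clock frequency in Hz; CORET_IRQn is
+  the core timer interrupt number from the device header.
+
+      if (csi_coret_config(SYSTEM_CLOCK / 1000U, CORET_IRQn) != 0U) {
+          // requested period does not fit the 24-bit reload field
+      }
+      csi_vic_enable_irq(CORET_IRQn);
+ */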
+
+/**
+  \brief   get CORE timer reload value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_load(void)
+{
+    return CORET->LOAD;
+}
+
+/**
+  \brief   get CORE timer counter value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_value(void)
+{
+    return CORET->VAL;
+}
+
+/**
+  \brief   clean CORE timer interrupt flag
+ */
+__STATIC_INLINE void csi_coret_clear_irq(void)
+{
+    (void)CORET->CTRL;                                                  /* reading CTRL clears the COUNTFLAG bit */
+}
+
+/*@} end of CSI_Core_SysTickFunctions */
+
+/* ##################################### DCC function ########################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_core_DebugFunctions HAD Functions
+  \brief    Functions that access the HAD debug interface.
+  @{
+ */
+
+/**
+  \brief   HAD Send Character
+  \details Transmits a character via the HAD channel 0, and
+           \li Just returns when no debugger is connected that has booked the output.
+           \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted.
+  \param [in]     ch  Character to transmit.
+  \returns            Character to transmit.
+ */
+__STATIC_INLINE uint32_t csi_had_send_char(uint32_t ch)
+{
+    DCC->DERJR = (uint8_t)ch;
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Receive Character
+  \details Reads a character from the HAD data exchange register.
+  \return             Received character.
+  \return         -1  No character pending.
+ */
+__STATIC_INLINE int32_t csi_had_receive_char(void)
+{
+    int32_t ch = -1;                           /* no character available */
+
+    if (_FLD2VAL(DCC_EHSR_JW, DCC->EHSR)) {
+        ch = DCC->DERJW;
+    }
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Check Character
+  \details Check whether a character is pending in the HAD data exchange register.
+  \return          0  No character available.
+  \return          1  Character available.
+ */
+__STATIC_INLINE int32_t csi_had_check_char(void)
+{
+    return _FLD2VAL(DCC_EHSR_JW, DCC->EHSR);                              /* 1 if a character is available, 0 otherwise */
+}
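+
+/*
+  Editorial usage sketch (not part of the original header): polling the HAD
+  channel and echoing back any character received from the debugger.
+
+      if (csi_had_check_char()) {
+          int32_t c = csi_had_receive_char();
+          (void)csi_had_send_char((uint32_t)c);
+      }
+ */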
+
+/*@} end of CSI_core_DebugFunctions */
+
+/* ##########################  Cache functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_CacheFunctions Cache Functions
+  \brief    Functions that configure Instruction and Data cache.
+  @{
+ */
+
+/**
+  \brief   Enable I-Cache
+  \details Turns on I-Cache
+  */
+__STATIC_INLINE void csi_icache_enable (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+    CACHE->CER |=  (uint32_t)(CACHE_CER_EN_Msk | CACHE_CER_CFIG_Msk);  /* enable all Cache */
+#endif
+}
+
+
+/**
+  \brief   Disable I-Cache
+  \details Turns off I-Cache
+  */
+__STATIC_INLINE void csi_icache_disable (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    CACHE->CER &=  ~(uint32_t)(CACHE_CER_EN_Msk | CACHE_CER_CFIG_Msk);  /* disable all Cache */
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;          /* invalidate all Cache */
+#endif
+}
+
+
+/**
+  \brief   Invalidate I-Cache
+  \details Invalidates I-Cache
+  */
+__STATIC_INLINE void csi_icache_invalid (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+#endif
+}
+
+
+/**
+  \brief   Enable D-Cache
+  \details Turns on D-Cache
+  \note    I-Cache also turns on.
+  */
+__STATIC_INLINE void csi_dcache_enable (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+    CACHE->CER =  (uint32_t)(CACHE_CER_EN_Msk | CACHE_CER_WB_Msk | CACHE_CER_DCW_Msk) & (~CACHE_CER_CFIG_Msk);  /* enable all Cache */
+#endif
+}
+
+
+/**
+  \brief   Disable D-Cache
+  \details Turns off D-Cache
+  \note    I-Cache also turns off.
+  */
+__STATIC_INLINE void csi_dcache_disable (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CER &=  ~(uint32_t)CACHE_CER_EN_Msk;  /* disable all Cache */
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;          /* invalidate all Cache */
+#endif
+}
+
+
+/**
+  \brief   Invalidate D-Cache
+  \details Invalidates D-Cache
+  \note    I-Cache is also invalidated.
+  */
+__STATIC_INLINE void csi_dcache_invalid (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+#endif
+}
+
+
+/**
+  \brief   Clean D-Cache
+  \details Cleans D-Cache
+  \note    I-Cache is also cleaned.
+  */
+__STATIC_INLINE void csi_dcache_clean (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = _VAL2FLD(CACHE_CIR_CLR_ALL, 1);         /* clean all Cache */
+#endif
+}
+
+
+/**
+  \brief   Clean & Invalidate D-Cache
+  \details Cleans and Invalidates D-Cache
+  \note    I-Cache is also flushed.
+  */
+__STATIC_INLINE void csi_dcache_clean_invalid (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = _VAL2FLD(CACHE_CIR_INV_ALL, 1) | _VAL2FLD(CACHE_CIR_CLR_ALL, 1);         /* clean and inv all Cache */
+#endif
+}
+
+
+/**
+  \brief   D-Cache Invalidate by address
+  \details Invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+
+__STATIC_INLINE void csi_dcache_invalid_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_INV_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= _VAL2FLD(CACHE_CIR_INV_ONE, 1);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean by address
+  \details Cleans D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_INV_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= _VAL2FLD(CACHE_CIR_CLR_ONE, 1);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean and Invalidate by address
+  \details Cleans and invalidates D_Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_invalid_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_INV_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= _VAL2FLD(CACHE_CIR_CLR_ONE, 1) | _VAL2FLD(CACHE_CIR_INV_ONE, 1);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
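+
+/*
+  Editorial usage sketch (not part of the original header): typical cache
+  maintenance around a DMA transfer. dma_buf is a hypothetical buffer that
+  is 16-byte aligned and a multiple of 16 bytes in size.
+
+      csi_dcache_clean_range((uint32_t *)dma_buf, sizeof(dma_buf));    // flush CPU writes before the device reads
+      // ... device reads dma_buf, then writes its response into it ...
+      csi_dcache_invalid_range((uint32_t *)dma_buf, sizeof(dma_buf));  // drop stale lines before the CPU reads
+ */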
+
+/**
+  \brief   setup cacheable range Cache
+  \details setup Cache range
+  */
+__STATIC_INLINE void csi_cache_set_range (uint32_t index, uint32_t baseAddr, uint32_t size, uint32_t enable)
+{
+    CACHE->CRCR[index] =  ((baseAddr & CACHE_CRCR_BASE_ADDR_Msk) |
+                           (_VAL2FLD(CACHE_CRCR_SIZE, size)) |
+                           (_VAL2FLD(CACHE_CRCR_EN, enable)));
+}
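+
+/*
+  Editorial usage sketch (not part of the original header): mark a
+  hypothetical 1 MB SRAM window at 0x20000000 as cacheable using
+  configuration slot 0; the base address must be aligned to the region size.
+
+      csi_cache_set_range(0U, 0x20000000UL, CACHE_CRCR_1M, 1U);
+      csi_icache_enable();
+      csi_dcache_enable();
+ */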
+
+/**
+  \brief   Enable cache profile
+  \details Turns on Cache profile
+  */
+__STATIC_INLINE void csi_cache_enable_profile (void)
+{
+    CACHE->CPFCR |=  (uint32_t)CACHE_CPFCR_PFEN_Msk;
+}
+
+/**
+  \brief   Disable cache profile
+  \details Turns off Cache profile
+  */
+__STATIC_INLINE void csi_cache_disable_profile (void)
+{
+    CACHE->CPFCR &=  ~(uint32_t)CACHE_CPFCR_PFEN_Msk;
+}
+
+/**
+  \brief   Reset cache profile
+  \details Reset Cache profile
+  */
+__STATIC_INLINE void csi_cache_reset_profile (void)
+{
+    CACHE->CPFCR |=  (uint32_t)CACHE_CPFCR_PFRST_Msk;
+}
+
+/**
+  \brief   cache access times
+  \details Cache access times
+  \note    The counter increments once every 256 accesses.
+  \return          cache access times, actual times should be multiplied by 256
+  */
+__STATIC_INLINE uint32_t csi_cache_get_access_time (void)
+{
+    return CACHE->CPFATR;
+}
+
+/**
+  \brief   cache miss times
+  \details Cache miss times
+  \note    The counter increments once every 256 misses.
+  \return          cache miss times, actual times should be multiplied by 256
+  */
+__STATIC_INLINE uint32_t csi_cache_get_miss_time (void)
+{
+    return CACHE->CPFMTR;
+}
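+
+/*
+  Editorial usage sketch (not part of the original header): estimating the
+  cache miss ratio with the profiling counters. Both counters tick once per
+  256 events, so the common factor cancels out of the ratio.
+
+      csi_cache_reset_profile();
+      csi_cache_enable_profile();
+      // ... run the workload of interest ...
+      uint32_t access = csi_cache_get_access_time();
+      uint32_t miss   = csi_cache_get_miss_time();
+      uint32_t miss_permille = (access != 0U) ? (miss * 1000U) / access : 0U;
+ */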
+
+/*@} end of CSI_Core_CacheFunctions */
+
+/* ##########################  MPU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_MPUFunctions MPU Functions
+  \brief    Functions that configure MPU.
+  @{
+ */
+
+typedef enum {
+    REGION_SIZE_4KB      = 0xB,
+    REGION_SIZE_8KB      = 0xC,
+    REGION_SIZE_16KB     = 0xD,
+    REGION_SIZE_32KB     = 0xE,
+    REGION_SIZE_64KB     = 0xF,
+    REGION_SIZE_128KB    = 0x10,
+    REGION_SIZE_256KB    = 0x11,
+    REGION_SIZE_512KB    = 0x12,
+    REGION_SIZE_1MB      = 0x13,
+    REGION_SIZE_2MB      = 0x14,
+    REGION_SIZE_4MB      = 0x15,
+    REGION_SIZE_8MB      = 0x16,
+    REGION_SIZE_16MB     = 0x17,
+    REGION_SIZE_32MB     = 0x18,
+    REGION_SIZE_64MB     = 0x19,
+    REGION_SIZE_128MB    = 0x1A,
+    REGION_SIZE_256MB    = 0x1B,
+    REGION_SIZE_512MB    = 0x1C,
+    REGION_SIZE_1GB      = 0x1D,
+    REGION_SIZE_2GB      = 0x1E,
+    REGION_SIZE_4GB      = 0x1F
+} region_size_e;
+
+typedef enum {
+    AP_BOTH_INACCESSIBLE = 0,
+    AP_SUPER_RW_USER_INACCESSIBLE,
+    AP_SUPER_RW_USER_RDONLY,
+    AP_BOTH_RW
+} access_permission_e;
+
+typedef struct {
+    uint32_t nx: 1;               /* instruction fetch/execution disable */
+    access_permission_e ap: 2;    /* super user and normal user access.*/
+    uint32_t s: 1;                /* security */
+} mpu_region_attr_t;
+
+/**
+  \brief  enable mpu.
+  \details
+  */
+__STATIC_INLINE void csi_mpu_enable(void)
+{
+    __set_CCR(__get_CCR() | CCR_MP_Msk);
+}
+
+/**
+  \brief  disable mpu.
+  \details
+  */
+__STATIC_INLINE void csi_mpu_disable(void)
+{
+    __set_CCR(__get_CCR() & (~CCR_MP_Msk));
+}
+
+/**
+  \brief  configure memory protected region.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  \param [in]  base_addr  base address must be aligned with page size.
+  \param [in]  size       \ref region_size_e. memory protected region size.
+  \param [in]  attr       \ref mpu_region_attr_t. memory protected region attribute.
+  \param [in]  enable     enable or disable memory protected region.
+  */
+__STATIC_INLINE void csi_mpu_config_region(uint32_t idx, uint32_t base_addr, region_size_e size,
+                                           mpu_region_attr_t attr, uint32_t enable)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    CAPR_Type capr;
+    PACR_Type pacr;
+    PRSR_Type prsr;
+
+    capr.w = __get_CAPR();
+    pacr.w = __get_PACR();
+    prsr.w = __get_PRSR();
+
+    pacr.b.base_addr = (base_addr >> PACR_BASE_ADDR_Pos) & (0xFFFFF);
+
+    prsr.b.RID = idx;
+    __set_PRSR(prsr.w);
+
+    if (size != REGION_SIZE_4KB) {
+        pacr.w &= ~(((1u << (size -11)) - 1) << 12);
+    }
+
+    pacr.b.size = size;
+
+    capr.w &= ~((0x1 << idx) | (0x3 << (idx * 2 + 8)) | (0x1 << (idx + 24)));
+    capr.w = (capr.w | (attr.nx << idx) | (attr.ap << (idx * 2 + 8)) | (attr.s << (idx + 24)));
+    __set_CAPR(capr.w);
+
+    pacr.b.E = enable;
+    __set_PACR(pacr.w);
+}
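+
+/*
+  Editorial usage sketch (not part of the original header): protect a
+  hypothetical 64 KB data region at 0x20000000 as supervisor read/write,
+  user no-access and non-executable, using MPU region 0.
+
+      mpu_region_attr_t attr;
+      attr.nx = 1U;                               // no instruction fetch
+      attr.ap = AP_SUPER_RW_USER_INACCESSIBLE;    // supervisor RW, user blocked
+      attr.s  = 0U;                               // non-secure
+
+      csi_mpu_config_region(0U, 0x20000000UL, REGION_SIZE_64KB, attr, 1U);
+      csi_mpu_enable();
+ */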
+
+/**
+  \brief  enable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  */
+__STATIC_INLINE void csi_mpu_enable_region(uint32_t idx)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | (idx << PRSR_RID_Pos));
+    __set_PACR(__get_PACR() | PACR_E_Msk);
+}
+
+/**
+  \brief  disable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  */
+__STATIC_INLINE void csi_mpu_disable_region(uint32_t idx)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | (idx << PRSR_RID_Pos));
+    __set_PACR(__get_PACR() & (~PACR_E_Msk));
+}
+
+/*@} end of CSI_Core_MPUFunctions */
+
+
+/* ##################################    IRQ Functions  ############################################ */
+
+/**
+  \brief   Save the IRQ context
+  \details Save the PSR value, then disable interrupts.
+  \return  PSR value before interrupts were disabled.
+ */
+__STATIC_INLINE uint32_t csi_irq_save(void)
+{
+    uint32_t result;
+    result = __get_PSR();
+    __disable_irq();
+    return(result);
+}
+
+/**
+  \brief   Restore the Irq context
+  \details Restore the saved PSR state.
+  \param [in]      irq_state  psr irq state.
+ */
+__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
+{
+    __set_PSR(irq_state);
+}
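+
+/*
+  Editorial usage sketch (not part of the original header): a short critical
+  section that is safe regardless of whether interrupts were enabled before.
+
+      uint32_t irq_state = csi_irq_save();
+      // ... code that must not be preempted by interrupts ...
+      csi_irq_restore(irq_state);
+ */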
+
+/*@} end of IRQ Functions */
+
+/**
+  \brief   System Reset
+  \details Initiates a system reset request to reset the MCU.
+ */
+__STATIC_INLINE void csi_system_reset(void)
+{
+    CHR_Type chr;
+
+    chr.w = __get_CHR();
+#ifdef __RESET_CONST
+    chr.b.SRST_VAL = __RESET_CONST;
+#else
+    chr.b.SRST_VAL = 0xABCD;
+#endif
+
+    __DSB();                                                          /* Ensure all outstanding memory accesses, including
+                                                                         buffered writes, are completed before the reset */
+    __set_CHR(chr.w);
+
+    __DSB();                                                          /* Ensure completion of memory access */
+
+    for(;;)                                                           /* wait until reset */
+    {
+        __NOP();
+    }
+}
+
+/* ##################################    Old Interfaces  ############################################ */
+
+/* These interfaces are deprecated */
+#define NVIC_EnableIRQ(IRQn)                               csi_vic_enable_irq(IRQn)
+#define NVIC_DisableIRQ(IRQn)                              csi_vic_disable_irq(IRQn)
+#define NVIC_GetPendingIRQ(IRQn)                           csi_vic_get_pending_irq(IRQn)
+#define NVIC_SetPendingIRQ(IRQn)                           csi_vic_set_pending_irq(IRQn)
+#define NVIC_ClearPendingIRQ(IRQn)                         csi_vic_clear_pending_irq(IRQn)
+#define NVIC_GetWakeupIRQ(IRQn)                            csi_vic_get_wakeup_irq(IRQn)
+#define NVIC_SetWakeupIRQ(IRQn)                            csi_vic_set_wakeup_irq(IRQn)
+#define NVIC_ClearWakeupIRQ(IRQn)                          csi_vic_clear_wakeup_irq(IRQn)
+#define NVIC_GetActive(IRQn)                               csi_vic_get_active(IRQn)
+#define NVIC_SetThreshold(VectThreshold, PrioThreshold)    csi_vic_set_threshold(VectThreshold, PrioThreshold)
+#define NVIC_SetPriority(IRQn, priority)                   csi_vic_set_prio(IRQn, priority)
+#define NVIC_GetPriority(IRQn)                             csi_vic_get_prio(IRQn)
+#define NVIC_SystemReset()                                 csi_system_reset()
+
+#define SysTick_Config(ticks)                              csi_coret_config(ticks, CORET_IRQn)
+#define CORET_Config(ticks)                                csi_coret_config(ticks, CORET_IRQn)
+
+#define SCB_EnableICache()                                 csi_icache_enable()
+#define SCB_DisableICache()                                csi_icache_disable()
+#define SCB_InvalidateICache()                             csi_icache_invalid()
+#define SCB_EnableDCache()                                 csi_dcache_enable()
+#define SCB_DisableDCache()                                csi_dcache_disable()
+#define SCB_InvalidateDCache()                             csi_dcache_invalid()
+#define SCB_CleanDCache()                                  csi_dcache_clean()
+#define SCB_CleanInvalidateDCache()                        csi_dcache_clean_invalid()
+#define SCB_InvalidateDCache_by_Addr(addr, dsize)          csi_dcache_invalid_range(addr, dsize)
+#define SCB_CleanDCache_by_Addr(addr, dsize)               csi_dcache_clean_range(addr, dsize)
+#define SCB_CleanInvalidateDCache_by_Addr(addr, dsize)     csi_dcache_clean_invalid_range(addr, dsize)
+#define SCB_Cacheable_Range(index, baseAddr, size, enable) csi_cache_set_range(index, baseAddr, size, enable)
+#define SCB_EnableCacheProfile()                           csi_cache_enable_profile()
+#define SCB_DisableCacheProfile()                          csi_cache_disable_profile()
+#define SCB_ResetCacheProfile()                            csi_cache_reset_profile()
+#define SCB_CacheAccessTime()                              csi_cache_get_access_time()
+#define SCB_CacheMissTime()                                csi_cache_get_miss_time()
+#define SCB_EnableCache()                                  csi_icache_enable();csi_dcache_enable()
+#define SCB_DisableCache()                                 csi_icache_disable();csi_dcache_disable()
+#define SCB_InvalidateCache()                              csi_icache_invalid();csi_dcache_invalid()
+#define SCB_CleanCache()                                   csi_dcache_clean()
+#define SCB_CleanInvalidateCache()                         csi_icache_invalid();csi_dcache_clean();csi_dcache_invalid()
+#define SCB_InvalidateCache_by_Addr(addr, dsize)           csi_dcache_invalid_range(addr, dsize);csi_icache_invalid()
+#define SCB_CleanCache_by_Addr(addr, dsize)                csi_dcache_clean_range(addr, dsize)
+#define SCB_CleanInvalidateCache_by_Addr(addr, dsize)      csi_dcache_clean_invalid_range(addr, dsize)
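+
+/* Note (illustrative): some of the compatibility macros above expand to more
+ * than one statement, e.g. SCB_EnableCache() becomes
+ * "csi_icache_enable();csi_dcache_enable()", so without braces only the first
+ * statement is conditional:
+ *
+ *     if (cache_wanted)              // 'cache_wanted' is a hypothetical flag
+ *         SCB_EnableCache();         // D-cache would be enabled unconditionally
+ *
+ *     if (cache_wanted) { SCB_EnableCache(); }   // braces keep both together
+ */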
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_804_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 1591 - 0
lib/sec_library/include/core/core_805.h

@@ -0,0 +1,1591 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_805.h
+ * @brief    CSI 805 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     02. June 2017
+ ******************************************************************************/
+
+#ifndef __CORE_805_H_GENERIC
+#define __CORE_805_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup Ck805
+  @{
+ */
+
+/*  CSI CK805 definitions */
+#define __CK805_CSI_VERSION_MAIN  (0x04U)                                      /*!< [31:16] CSI HAL main version */
+#define __CK805_CSI_VERSION_SUB   (0x1EU)                                      /*!< [15:0]  CSI HAL sub version */
+#define __CK805_CSI_VERSION       ((__CK805_CSI_VERSION_MAIN << 16U) | \
+                                   __CK805_CSI_VERSION_SUB           )         /*!< CSI HAL version number */
+
+#ifndef __CK80X
+#define __CK80X                (0x03U)                                         /*!< CK80X Core */
+#endif
+
+/* __FPU_USED indicates whether an FPU is used or not. */
+#define __FPU_USED       1U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_805_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_CK805_H_DEPENDANT
+#define __CORE_CK805_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __CK805_REV
+#define __CK805_REV               0x0000U
+#endif
+
+#ifndef __VIC_PRIO_BITS
+#define __VIC_PRIO_BITS           2U
+#endif
+
+#ifndef __Vendor_SysTickConfig
+#define __Vendor_SysTickConfig    1U
+#endif
+
+#ifndef __GSR_GCR_PRESENT
+#define __GSR_GCR_PRESENT         0U
+#endif
+
+#ifndef __MPU_PRESENT
+#define __MPU_PRESENT             1U
+#endif
+
+#ifndef __ICACHE_PRESENT
+#define __ICACHE_PRESENT          1U
+#endif
+
+#ifndef __DCACHE_PRESENT
+#define __DCACHE_PRESENT          1U
+#endif
+
+#include <core/csi_gcc.h>
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group CK805 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+  - Core VIC Register
+  - Core Cache Register
+  - Core CoreTIM Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK80X processor based devices.
+*/
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \brief  Access Processor Status Register(PSR)struct definition.
+ */
+typedef union {
+    struct {
+        uint32_t C: 1;                       /*!< bit:      0  Conditional code/Carry flag */
+        uint32_t _reserved0: 5;              /*!< bit:  1.. 5  Reserved */
+        uint32_t IE: 1;                      /*!< bit:      6  Interrupt effective control bit */
+        uint32_t IC: 1;                      /*!< bit:      7  Interrupt control bit */
+        uint32_t EE: 1;                      /*!< bit:      8  Abnormally effective control bit */
+        uint32_t MM: 1;                      /*!< bit:      9  Unsymmetrical masking bit */
+        uint32_t _reserved1: 6;              /*!< bit: 10..15  Reserved */
+        uint32_t VEC: 8;                     /*!< bit: 16..23  Abnormal event vector value */
+        uint32_t _reserved2: 1;              /*!< bit:     24  Reserved */
+        uint32_t SV: 1;                      /*!< bit:     25  Stacked valid */
+        uint32_t SD: 1;                      /*!< bit:     26  Stacked dirty */
+        uint32_t SC: 1;                      /*!< bit:     27  Secure call bit */
+        uint32_t HS: 1;                      /*!< bit:     28  Hardware stacked bit */
+        uint32_t SP: 1;                      /*!< bit:     29  Secure pending bit */
+        uint32_t T: 1;                       /*!< bit:     30  TEE mode bit */
+        uint32_t S: 1;                       /*!< bit:     31  Superuser mode set bit */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PSR_Type;
+
+/* PSR Register Definitions */
+#define PSR_S_Pos                          31U                                            /*!< PSR: S Position */
+#define PSR_S_Msk                          (1UL << PSR_S_Pos)                             /*!< PSR: S Mask */
+
+#define PSR_VEC_Pos                        16U                                            /*!< PSR: VEC Position */
+#define PSR_VEC_Msk                        (0x7FUL << PSR_VEC_Pos)                        /*!< PSR: VEC Mask */
+
+#define PSR_MM_Pos                         9U                                             /*!< PSR: MM Position */
+#define PSR_MM_Msk                         (1UL << PSR_MM_Pos)                            /*!< PSR: MM Mask */
+
+#define PSR_EE_Pos                         8U                                             /*!< PSR: EE Position */
+#define PSR_EE_Msk                         (1UL << PSR_EE_Pos)                            /*!< PSR: EE Mask */
+
+#define PSR_IC_Pos                         7U                                             /*!< PSR: IC Position */
+#define PSR_IC_Msk                         (1UL << PSR_IC_Pos)                            /*!< PSR: IC Mask */
+
+#define PSR_IE_Pos                         6U                                             /*!< PSR: IE Position */
+#define PSR_IE_Msk                         (1UL << PSR_IE_Pos)                            /*!< PSR: IE Mask */
+
+#define PSR_C_Pos                          0U                                             /*!< PSR: C Position */
+#define PSR_C_Msk                          (1UL << PSR_C_Pos)                             /*!< PSR: C Mask */
+
+/**
+  \brief Consortium definition for accessing Cache Configuration Registers(CCR, CR<18, 0>).
+ */
+typedef union {
+    struct {
+        uint32_t MP: 1;                      /*!< bit:  0.. 1  memory protection settings */
+        uint32_t _reserved0: 6;              /*!< bit:  2.. 6  Reserved */
+        uint32_t BE: 1;                      /*!< bit:      7  Endian mode */
+        uint32_t SCK: 3;                     /*!< bit:  8..10  the clock ratio of the system and the processor */
+        uint32_t _reserved1: 2;              /*!< bit: 11..12  Reserved */
+        uint32_t BE_V2: 1;                   /*!< bit:     13  V2 Endian mode */
+        uint32_t _reserved2: 18;             /*!< bit: 14..31  Reserved */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CCR_Type;
+
+/* CCR Register Definitions */
+#define CCR_BE_V2_Pos                     13U                                            /*!< CCR: BE_V2 Position */
+#define CCR_BE_V2_Msk                     (0x1UL << CCR_BE_V2_Pos)                       /*!< CCR: BE_V2 Mask */
+
+#define CCR_SCK_Pos                       8U                                             /*!< CCR: SCK Position */
+#define CCR_SCK_Msk                       (0x3UL << CCR_SCK_Pos)                         /*!< CCR: SCK Mask */
+
+#define CCR_BE_Pos                        7U                                             /*!< CCR: BE Position */
+#define CCR_BE_Msk                        (0x1UL << CCR_BE_Pos)                          /*!< CCR: BE Mask */
+
+#define CCR_MP_Pos                        0U                                             /*!< CCR: MP Position */
+#define CCR_MP_Msk                        (0x3UL << CCR_MP_Pos)                          /*!< CCR: MP Mask */
+
+/**
+  \brief  Consortium definition for accessing high ease access permission configuration registers (CAPR, CR<19,0>)
+ */
+typedef union {
+    struct {
+        uint32_t X0: 1;                      /*!< bit:      0  Non executable attribute setting */
+        uint32_t X1: 1;                      /*!< bit:      1  Non executable attribute setting */
+        uint32_t X2: 1;                      /*!< bit:      2  Non executable attribute setting */
+        uint32_t X3: 1;                      /*!< bit:      3  Non executable attribute setting */
+        uint32_t X4: 1;                      /*!< bit:      4  Non executable attribute setting */
+        uint32_t X5: 1;                      /*!< bit:      5  Non executable attribute setting */
+        uint32_t X6: 1;                      /*!< bit:      6  Non executable attribute setting */
+        uint32_t X7: 1;                      /*!< bit:      7  Non executable attribute setting */
+        uint32_t AP0: 2;                     /*!< bit:  8.. 9  access permissions settings bit */
+        uint32_t AP1: 2;                     /*!< bit: 10..11  access permissions settings bit */
+        uint32_t AP2: 2;                     /*!< bit: 12..13  access permissions settings bit */
+        uint32_t AP3: 2;                     /*!< bit: 14..15  access permissions settings bit */
+        uint32_t AP4: 2;                     /*!< bit: 16..17  access permissions settings bit */
+        uint32_t AP5: 2;                     /*!< bit: 18..19  access permissions settings bit */
+        uint32_t AP6: 2;                     /*!< bit: 20..21  access permissions settings bit */
+        uint32_t AP7: 2;                     /*!< bit: 22..23  access permissions settings bit */
+        uint32_t S0: 1;                      /*!< bit:     24  Security property settings */
+        uint32_t S1: 1;                      /*!< bit:     25  Security property settings */
+        uint32_t S2: 1;                      /*!< bit:     26  Security property settings */
+        uint32_t S3: 1;                      /*!< bit:     27  Security property settings */
+        uint32_t S4: 1;                      /*!< bit:     28  Security property settings */
+        uint32_t S5: 1;                      /*!< bit:     29  Security property settings */
+        uint32_t S6: 1;                      /*!< bit:     30  Security property settings */
+        uint32_t S7: 1;                      /*!< bit:     31  Security property settings */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CAPR_Type;
+
+/* CAPR Register Definitions */
+#define CAPR_S7_Pos                        31U                                            /*!< CAPR: S7 Position */
+#define CAPR_S7_Msk                        (1UL << CAPR_S7_Pos)                           /*!< CAPR: S7 Mask */
+
+#define CAPR_S6_Pos                        30U                                            /*!< CAPR: S6 Position */
+#define CAPR_S6_Msk                        (1UL << CAPR_S6_Pos)                           /*!< CAPR: S6 Mask */
+
+#define CAPR_S5_Pos                        29U                                            /*!< CAPR: S5 Position */
+#define CAPR_S5_Msk                        (1UL << CAPR_S5_Pos)                           /*!< CAPR: S5 Mask */
+
+#define CAPR_S4_Pos                        28U                                            /*!< CAPR: S4 Position */
+#define CAPR_S4_Msk                        (1UL << CAPR_S4_Pos)                           /*!< CAPR: S4 Mask */
+
+#define CAPR_S3_Pos                        27U                                            /*!< CAPR: S3 Position */
+#define CAPR_S3_Msk                        (1UL << CAPR_S3_Pos)                           /*!< CAPR: S3 Mask */
+
+#define CAPR_S2_Pos                        26U                                            /*!< CAPR: S2 Position */
+#define CAPR_S2_Msk                        (1UL << CAPR_S2_Pos)                           /*!< CAPR: S2 Mask */
+
+#define CAPR_S1_Pos                        25U                                            /*!< CAPR: S1 Position */
+#define CAPR_S1_Msk                        (1UL << CAPR_S1_Pos)                           /*!< CAPR: S1 Mask */
+
+#define CAPR_S0_Pos                        24U                                            /*!< CAPR: S0 Position */
+#define CAPR_S0_Msk                        (1UL << CAPR_S0_Pos)                           /*!< CAPR: S0 Mask */
+
+#define CAPR_AP7_Pos                       22U                                            /*!< CAPR: AP7 Position */
+#define CAPR_AP7_Msk                       (0x3UL << CAPR_AP7_Pos)                        /*!< CAPR: AP7 Mask */
+
+#define CAPR_AP6_Pos                       20U                                            /*!< CAPR: AP6 Position */
+#define CAPR_AP6_Msk                       (0x3UL << CAPR_AP6_Pos)                        /*!< CAPR: AP6 Mask */
+
+#define CAPR_AP5_Pos                       18U                                            /*!< CAPR: AP5 Position */
+#define CAPR_AP5_Msk                       (0x3UL << CAPR_AP5_Pos)                        /*!< CAPR: AP5 Mask */
+
+#define CAPR_AP4_Pos                       16U                                            /*!< CAPR: AP4 Position */
+#define CAPR_AP4_Msk                       (0x3UL << CAPR_AP4_Pos)                        /*!< CAPR: AP4 Mask */
+
+#define CAPR_AP3_Pos                       14U                                            /*!< CAPR: AP3 Position */
+#define CAPR_AP3_Msk                       (0x3UL << CAPR_AP3_Pos)                        /*!< CAPR: AP3 Mask */
+
+#define CAPR_AP2_Pos                       12U                                            /*!< CAPR: AP2 Position */
+#define CAPR_AP2_Msk                       (0x3UL << CAPR_AP2_Pos)                        /*!< CAPR: AP2 Mask */
+
+#define CAPR_AP1_Pos                       10U                                            /*!< CAPR: AP1 Position */
+#define CAPR_AP1_Msk                       (0x3UL << CAPR_AP1_Pos)                        /*!< CAPR: AP1 Mask */
+
+#define CAPR_AP0_Pos                       8U                                             /*!< CAPR: AP0 Position */
+#define CAPR_AP0_Msk                       (0x3UL << CAPR_AP0_Pos)                        /*!< CAPR: AP0 Mask */
+
+#define CAPR_X7_Pos                        7U                                             /*!< CAPR: X7 Position */
+#define CAPR_X7_Msk                        (0x1UL << CAPR_X7_Pos)                         /*!< CAPR: X7 Mask */
+
+#define CAPR_X6_Pos                        6U                                             /*!< CAPR: X6 Position */
+#define CAPR_X6_Msk                        (0x1UL << CAPR_X6_Pos)                         /*!< CAPR: X6 Mask */
+
+#define CAPR_X5_Pos                        5U                                             /*!< CAPR: X5 Position */
+#define CAPR_X5_Msk                        (0x1UL << CAPR_X5_Pos)                         /*!< CAPR: X5 Mask */
+
+#define CAPR_X4_Pos                        4U                                             /*!< CAPR: X4 Position */
+#define CAPR_X4_Msk                        (0x1UL << CAPR_X4_Pos)                         /*!< CAPR: X4 Mask */
+
+#define CAPR_X3_Pos                        3U                                             /*!< CAPR: X3 Position */
+#define CAPR_X3_Msk                        (0x1UL << CAPR_X3_Pos)                         /*!< CAPR: X3 Mask */
+
+#define CAPR_X2_Pos                        2U                                             /*!< CAPR: X2 Position */
+#define CAPR_X2_Msk                        (0x1UL << CAPR_X2_Pos)                         /*!< CAPR: X2 Mask */
+
+#define CAPR_X1_Pos                        1U                                             /*!< CAPR: X1 Position */
+#define CAPR_X1_Msk                        (0x1UL << CAPR_X1_Pos)                         /*!< CAPR: X1 Mask */
+
+#define CAPR_X0_Pos                        0U                                             /*!< CAPR: X0 Position */
+#define CAPR_X0_Msk                        (0x1UL << CAPR_X0_Pos)                         /*!< CAPR: X0 Mask */
+
+/**
+  \brief  Consortium definition for accessing control register(PACR, CR<20,0>).
+ */
+typedef union {
+    struct {
+        uint32_t E: 1;                       /*!< bit:      0  Effective setting of protected area */
+        uint32_t size: 5;                    /*!< bit:  1.. 5  Size of protected area */
+        uint32_t _reserved0: 6;              /*!< bit:  6.. 11 Reserved */
+        uint32_t base_addr: 20;              /*!< bit: 12..31  The high position of the address of a protected area */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PACR_Type;
+
+/* PACR Register Definitions */
+#define PACR_BASE_ADDR_Pos                 12U                                            /*!< PACR: base_addr Position */
+#define PACR_BASE_ADDR_Msk                 (0xFFFFFUL << PACR_BASE_ADDR_Pos)              /*!< PACR: base_addr Mask */
+
+#define PACR_SIZE_Pos                      1U                                             /*!< PACR: Size Position */
+#define PACR_SIZE_Msk                      (0x1FUL << PACR_SIZE_Pos)                      /*!< PACR: Size Mask */
+
+#define PACR_E_Pos                         0U                                             /*!< PACR: E Position */
+#define PACR_E_Msk                         (0x1UL << PACR_E_Pos)                          /*!< PACR: E Mask */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register(PRSR,CR<21,0>).
+ */
+typedef union {
+    struct {
+        uint32_t RID: 3;                     /*!< bit:  0.. 2  Protected area index value */
+        uint32_t _reserved0: 29;             /*!< bit:  3..31  Reserved */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} PRSR_Type;
+
+/* PRSR Register Definitions */
+#define PRSR_RID_Pos                       0U                                            /*!< PRSR: RID Position */
+#define PRSR_RID_Msk                       (0x7UL << PRSR_RID_Pos)                       /*!< PRSR: RID Mask */
+
+/**
+  \brief  Consortium definition for CPU Hint Register(CHR, CR<31,0>).
+ */
+typedef union {
+    struct {
+        uint32_t _reserved0: 1;              /*!< bit:  0      Reserved */
+        uint32_t BE: 1;                      /*!< bit:  1      System bus support burst transfer */
+        uint32_t IPE: 1;                     /*!< bit:  2      Instruction prefetch function enable */
+        uint32_t RPE: 1;                     /*!< bit:  3      Function return instruction RTS will speculate execution */
+        uint32_t IAE: 1;                     /*!< bit:  4      Interrupt response acceleration enable */
+        uint32_t _reserved1: 9;              /*!< bit:  5..13  Reserved */
+        uint32_t ISE: 1;                     /*!< bit: 14      Interrupt SP enable */
+        uint32_t HS_EXP: 1;                  /*!< bit: 15      Exception bit for TEE world switch */
+        uint32_t SRST_VAL: 16;               /*!< bit: 16..31  Software reset decision value */
+    } b;
+    uint32_t w;
+} CHR_Type;
+
+/* CHR Register Definitions */
+#define CHR_BE_Pos                         1U                                            /*!< CHR: BE Position */
+#define CHR_BE_Msk                         (1UL << CHR_BE_Pos)                           /*!< CHR: BE Mask */
+#define CHR_IPE_Pos                        2U                                            /*!< CHR: IPE Position */
+#define CHR_IPE_Msk                        (1UL << CHR_IPE_Pos)                          /*!< CHR: IPE Mask */
+#define CHR_RPE_Pos                        3U                                            /*!< CHR: RPE Position */
+#define CHR_RPE_Msk                        (1UL << CHR_RPE_Pos)                          /*!< CHR: RPE Mask */
+#define CHR_IAE_Pos                        4U                                            /*!< CHR: IAE Position */
+#define CHR_IAE_Msk                        (0x1UL << CHR_IAE_Pos)                        /*!< CHR: IAE Mask */
+#define CHR_ISE_Pos                        14U                                           /*!< CHR: ISE Position */
+#define CHR_ISE_Msk                        (0x1UL << CHR_ISE_Pos)                        /*!< CHR: ISE Mask */
+#define CHR_HS_EXP_Pos                     15U                                           /*!< CHR: HS_EXP Position */
+#define CHR_HS_EXP_Msk                     (0x1UL << CHR_HS_EXP_Pos)                     /*!< CHR: HS_EXP Mask */
+#define CHR_SRST_VAL_Pos                   16U                                           /*!< CHR: SRST_VAL Position */
+#define CHR_SRST_VAL_Mask                  (0xFFFFUL << CHR_SRST_VAL_Pos)                /*!< CHR: SRST_VAL Mask */
+
+/*@} end of group CSI_CORE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_VIC Vectored Interrupt Controller (VIC)
+  \brief      Type definitions for the VIC Registers
+  @{
+ */
+
+/**
+  \brief Access to the structure of a vector interrupt controller.
+ */
+typedef struct {
+    __IOM uint32_t ISER[4U];               /*!< Offset: 0x000 (R/W)  Interrupt set enable register */
+    uint32_t RESERVED0[12U];
+    __IOM uint32_t IWER[4U];               /*!< Offset: 0x040 (R/W)  Interrupt wake-up set register */
+    uint32_t RESERVED1[12U];
+    __IOM uint32_t ICER[4U];               /*!< Offset: 0x080 (R/W)  Interrupt clear enable register */
+    uint32_t RESERVED2[12U];
+    __IOM uint32_t IWDR[4U];               /*!< Offset: 0x0c0 (R/W)  Interrupt wake-up clear register */
+    uint32_t RESERVED3[12U];
+    __IOM uint32_t ISPR[4U];               /*!< Offset: 0x100 (R/W)  Interrupt set pend register */
+    uint32_t RESERVED4[12U];
+    __IOM uint32_t ISSR[4U];               /*!< Offset: 0x140 (R/W)  Security interrupt set register */
+    uint32_t RESERVED5[12U];
+    __IOM uint32_t ICPR[4U];               /*!< Offset: 0x180 (R/W)  Interrupt clear pend register */
+    uint32_t RESERVED6[12U];
+    __IOM uint32_t ICSR[4U];               /*!< Offset: 0x1c0 (R/W)  Security interrupt clear register */
+    uint32_t RESERVED7[12U];
+    __IOM uint32_t IABR[4U];               /*!< Offset: 0x200 (R/W)  Interrupt answer state register */
+    uint32_t RESERVED8[60U];
+    __IOM uint32_t IPR[32U];               /*!< Offset: 0x300 (R/W)  Interrupt priority register */
+    uint32_t RESERVED9[480U];
+    __IM  uint32_t ISR;                    /*!< Offset: 0xB00 (R/ )  Interrupt state register */
+    __IOM uint32_t IPTR;                   /*!< Offset: 0xB04 (R/W)  Interrupt priority threshold register */
+    __IOM uint32_t TSPEND;                 /*!< Offset: 0xB08 (R/W)  Task pending register */
+    __IOM uint32_t TSABR;                  /*!< Offset: 0xB0c (R/W)  Tspend acknowledge register */
+    __IOM uint32_t TSPR;                   /*!< Offset: 0xB10 (R/W)  Tspend priority register */
+} VIC_Type;
+
+/*@} end of group CSI_VIC */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+
+/**
+  \brief On chip cache structure.
+ */
+typedef struct
+{
+  __IOM uint32_t CER;                    /*!< Offset: 0x000 (R/W)  Cache enable register */
+  __IOM uint32_t CIR;                    /*!< Offset: 0x004 (R/W)  Cache invalid register */
+  __IOM uint32_t CRCR[4U];               /*!< Offset: 0x008 (R/W)  Cache Configuration register */
+        uint32_t RSERVED0[1015U];
+  __IOM uint32_t CPFCR;                  /*!< Offset: 0xFF4 (R/W)  Cache performance analysis control register */
+  __IOM uint32_t CPFATR;                 /*!< Offset: 0xFF8 (R/W)  Cache access times register */
+  __IOM uint32_t CPFMTR;                 /*!< Offset: 0xFFC (R/W)  Cache missing times register */
+} CACHE_Type;
+
+/* CACHE Register Definitions */
+#define CACHE_CER_DCEN_Pos                     0U                                            /*!< CACHE CER: DCEN Position */
+#define CACHE_CER_DCEN_Msk                     (0x1UL << CACHE_CER_DCEN_Pos)                 /*!< CACHE CER: DCEN Mask */
+
+#define CACHE_CER_ICEN_Pos                     1U                                            /*!< CACHE CER: ICEN Position */
+#define CACHE_CER_ICEN_Msk                     (0x1UL << CACHE_CER_ICEN_Pos)                 /*!< CACHE CER: ICEN Mask */
+
+#define CACHE_CER_WB_Pos                       2U                                            /*!< CACHE CER: WB Position */
+#define CACHE_CER_WB_Msk                       (0x1UL << CACHE_CER_WB_Pos)                   /*!< CACHE CER: WB Mask */
+
+#define CACHE_CER_DCW_Pos                      4U                                            /*!< CACHE CER: DCW Position */
+#define CACHE_CER_DCW_Msk                      (0x1UL << CACHE_CER_DCW_Pos)                  /*!< CACHE CER: DCW Mask */
+
+#define CACHE_CER_WA_Pos                       5U                                            /*!< CACHE CER: WA Position */
+#define CACHE_CER_WA_Msk                       (0x1UL << CACHE_CER_WA_Pos)                   /*!< CACHE CER: WA Mask */
+
+#define CACHE_CIR_D_INV_Pos                    0U                                            /*!< CACHE CIR: D_INV Position */
+#define CACHE_CIR_D_INV_Msk                    (0x1UL << CACHE_CIR_D_INV_Pos)                /*!< CACHE CIR: D_INV Mask */
+
+#define CACHE_CIR_I_INV_Pos                    1U                                            /*!< CACHE CIR: I_INV Position */
+#define CACHE_CIR_I_INV_Msk                    (0x1UL << CACHE_CIR_I_INV_Pos)                /*!< CACHE CIR: I_INV Mask */
+
+#define CACHE_CIR_D_CLR_Pos                    2U                                            /*!< CACHE CIR: D_CLR Position */
+#define CACHE_CIR_D_CLR_Msk                    (0x1UL << CACHE_CIR_D_CLR_Pos)                /*!< CACHE CIR: D_CLR Mask */
+
+#define CACHE_CIR_LINE_MODE_Pos                3U                                            /*!< CACHE CIR: LINE_MODE Position */
+#define CACHE_CIR_LINE_MODE_Msk                (0x1UL << CACHE_CIR_LINE_MODE_Pos)            /*!< CACHE CIR: LINE_MODE Mask */
+
+#define CACHE_CIR_OP_ADDR_Pos                  4U                                            /*!< CACHE CIR: OP_ADDR Position */
+#define CACHE_CIR_OP_ADDR_Msk                  (0xFFFFFFFUL << CACHE_CIR_OP_ADDR_Pos)        /*!< CACHE CIR: OP_ADDR Mask */
+
+#define CACHE_CRCR_EN_Pos                      0U                                            /*!< CACHE CRCR: EN Position */
+#define CACHE_CRCR_EN_Msk                      (0x1UL << CACHE_CRCR_EN_Pos)                  /*!< CACHE CRCR: EN Mask */
+
+#define CACHE_CRCR_SIZE_Pos                    1U                                            /*!< CACHE CRCR: Size Position */
+#define CACHE_CRCR_SIZE_Msk                    (0x1FUL << CACHE_CRCR_SIZE_Pos)               /*!< CACHE CRCR: Size Mask */
+
+#define CACHE_CRCR_BASE_ADDR_Pos               10U                                           /*!< CACHE CRCR: base addr Position */
+#define CACHE_CRCR_BASE_ADDR_Msk               (0x3FFFFFUL << CACHE_CRCR_BASE_ADDR_Pos)      /*!< CACHE CRCR: base addr Mask */
+
+#define CACHE_CPFCR_PFEN_Pos                   0U                                            /*!< CACHE CPFCR: PFEN Position */
+#define CACHE_CPFCR_PFEN_Msk                   (0x1UL << CACHE_CPFCR_PFEN_Pos)               /*!< CACHE CPFCR: PFEN Mask */
+
+#define CACHE_CPFCR_PFRST_Pos                  1U                                            /*!< CACHE CPFCR: PFRST Position */
+#define CACHE_CPFCR_PFRST_Msk                  (0x1UL << CACHE_CPFCR_PFRST_Pos)              /*!< CACHE CPFCR: PFRST Mask */
+
+#define CACHE_CRCR_4K                          0xB                                           /* 01011 */
+#define CACHE_CRCR_8K                          0xC                                           /* 01100 */
+#define CACHE_CRCR_16K                         0xD                                           /* 01101 */
+#define CACHE_CRCR_32K                         0xE                                           /* 01110 */
+#define CACHE_CRCR_64K                         0xF                                           /* 01111 */
+#define CACHE_CRCR_128K                        0x10                                          /* 10000 */
+#define CACHE_CRCR_256K                        0x11                                          /* 10001 */
+#define CACHE_CRCR_512K                        0x12                                          /* 10010 */
+#define CACHE_CRCR_1M                          0x13                                          /* 10011 */
+#define CACHE_CRCR_2M                          0x14                                          /* 10100 */
+#define CACHE_CRCR_4M                          0x15                                          /* 10101 */
+#define CACHE_CRCR_8M                          0x16                                          /* 10110 */
+#define CACHE_CRCR_16M                         0x17                                          /* 10111 */
+#define CACHE_CRCR_32M                         0x18                                          /* 11000 */
+#define CACHE_CRCR_64M                         0x19                                          /* 11001 */
+#define CACHE_CRCR_128M                        0x1A                                          /* 11010 */
+#define CACHE_CRCR_256M                        0x1B                                          /* 11011 */
+#define CACHE_CRCR_512M                        0x1C                                          /* 11100 */
+#define CACHE_CRCR_1G                          0x1D                                          /* 11101 */
+#define CACHE_CRCR_2G                          0x1E                                          /* 11110 */
+#define CACHE_CRCR_4G                          0x1F                                          /* 11111 */
+
+/*@} end of group CSI_CACHE */
+
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_SysTick     System Tick Timer (CORET)
+  \brief    Type definitions for the System Timer Registers.
+  @{
+ */
+
+/**
+  \brief  The data structure of the access system timer.
+ */
+typedef struct {
+    __IOM uint32_t CTRL;                   /*!< Offset: 0x000 (R/W)  Control register */
+    __IOM uint32_t LOAD;                   /*!< Offset: 0x004 (R/W)  Backfill register */
+    __IOM uint32_t VAL;                    /*!< Offset: 0x008 (R/W)  Current register */
+    __IM  uint32_t CALIB;                  /*!< Offset: 0x00C (R/ )  Calibration register */
+} CORET_Type;
+
+/* CORET Control / Status Register Definitions */
+#define CORET_CTRL_COUNTFLAG_Pos           16U                                            /*!< CORET CTRL: COUNTFLAG Position */
+#define CORET_CTRL_COUNTFLAG_Msk           (1UL << CORET_CTRL_COUNTFLAG_Pos)              /*!< CORET CTRL: COUNTFLAG Mask */
+
+#define CORET_CTRL_CLKSOURCE_Pos           2U                                             /*!< CORET CTRL: CLKSOURCE Position */
+#define CORET_CTRL_CLKSOURCE_Msk           (1UL << CORET_CTRL_CLKSOURCE_Pos)              /*!< CORET CTRL: CLKSOURCE Mask */
+
+#define CORET_CTRL_TICKINT_Pos             1U                                             /*!< CORET CTRL: TICKINT Position */
+#define CORET_CTRL_TICKINT_Msk             (1UL << CORET_CTRL_TICKINT_Pos)                /*!< CORET CTRL: TICKINT Mask */
+
+#define CORET_CTRL_ENABLE_Pos              0U                                             /*!< CORET CTRL: ENABLE Position */
+#define CORET_CTRL_ENABLE_Msk              (1UL /*<< CORET_CTRL_ENABLE_Pos*/)             /*!< CORET CTRL: ENABLE Mask */
+
+    /* CORET Reload Register Definitions */
+#define CORET_LOAD_RELOAD_Pos              0U                                             /*!< CORET LOAD: RELOAD Position */
+#define CORET_LOAD_RELOAD_Msk              (0xFFFFFFUL /*<< CORET_LOAD_RELOAD_Pos*/)      /*!< CORET LOAD: RELOAD Mask */
+
+    /* CORET Current Register Definitions */
+#define CORET_VAL_CURRENT_Pos              0U                                             /*!< CORET VAL: CURRENT Position */
+#define CORET_VAL_CURRENT_Msk              (0xFFFFFFUL /*<< CORET_VAL_CURRENT_Pos*/)      /*!< CORET VAL: CURRENT Mask */
+
+    /* CORET Calibration Register Definitions */
+#define CORET_CALIB_NOREF_Pos              31U                                            /*!< CORET CALIB: NOREF Position */
+#define CORET_CALIB_NOREF_Msk              (1UL << CORET_CALIB_NOREF_Pos)                 /*!< CORET CALIB: NOREF Mask */
+
+#define CORET_CALIB_SKEW_Pos               30U                                            /*!< CORET CALIB: SKEW Position */
+#define CORET_CALIB_SKEW_Msk               (1UL << CORET_CALIB_SKEW_Pos)                  /*!< CORET CALIB: SKEW Mask */
+
+#define CORET_CALIB_TENMS_Pos              0U                                             /*!< CORET CALIB: TENMS Position */
+#define CORET_CALIB_TENMS_Msk              (0xFFFFFFUL /*<< CORET_CALIB_TENMS_Pos*/)      /*!< CORET CALIB: TENMS Mask */
+
+/*@} end of group CSI_SysTick */
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_DCC
+  \brief    Type definitions for the DCC.
+  @{
+ */
+
+/**
+  \brief  Access to the data structure of DCC.
+ */
+typedef struct {
+    uint32_t RESERVED0[13U];
+    __IOM uint32_t HCR;                    /*!< Offset: 0x034 (R/W) */
+    __IM uint32_t EHSR;                    /*!< Offset: 0x03C (R/ ) */
+    uint32_t RESERVED1[6U];
+    union {
+        __IM uint32_t DERJW;               /*!< Offset: 0x058 (R/ )  Data exchange register, CPU read */
+        __OM uint32_t DERJR;               /*!< Offset: 0x058 ( /W)  Data exchange register, CPU write */
+    };
+
+} DCC_Type;
+
+#define DCC_HCR_JW_Pos                   18U                                            /*!< DCC HCR: jw_int_en Position */
+#define DCC_HCR_JW_Msk                   (1UL << DCC_HCR_JW_Pos)                        /*!< DCC HCR: jw_int_en Mask */
+
+#define DCC_HCR_JR_Pos                   19U                                            /*!< DCC HCR: jr_int_en Position */
+#define DCC_HCR_JR_Msk                   (1UL << DCC_HCR_JR_Pos)                        /*!< DCC HCR: jr_int_en Mask */
+
+#define DCC_EHSR_JW_Pos                  1U                                             /*!< DCC EHSR: jw_vld Position */
+#define DCC_EHSR_JW_Msk                  (1UL << DCC_EHSR_JW_Pos)                       /*!< DCC EHSR: jw_vld Mask */
+
+#define DCC_EHSR_JR_Pos                  2U                                             /*!< DCC EHSR: jr_vld Position */
+#define DCC_EHSR_JR_Msk                  (1UL << DCC_EHSR_JR_Pos)                       /*!< DCC EHSR: jr_vld Mask */
+
+/*@} end of group CSI_DCC */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)
+
+/*@} end of group CSI_core_bitfield */
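+
+/* Usage sketch (illustrative only): read and write register fields with the
+ * generic helpers instead of open-coded shifts and masks.
+ *
+ *     uint32_t ie  = _FLD2VAL(PSR_IE, __get_PSR());           // 1 if interrupts are enabled
+ *     uint32_t chr = __get_CHR();
+ *     chr = (chr & ~CHR_IAE_Msk) | _VAL2FLD(CHR_IAE, 1U);     // enable interrupt response acceleration
+ *     __set_CHR(chr);
+ */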
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_base     Core Definitions
+  \brief      Definitions for base addresses, unions, and structures.
+  @{
+ */
+
+/* Memory mapping of CK805 Hardware */
+#define TCIP_BASE           (0xE000E000UL)                            /*!< Tightly Coupled IP Base Address */
+#define CORET_BASE          (TCIP_BASE +  0x0010UL)                   /*!< CORET Base Address */
+#define VIC_BASE            (TCIP_BASE +  0x0100UL)                   /*!< VIC Base Address */
+#define DCC_BASE            (0xE0011000UL)                            /*!< DCC Base Address */
+#define CACHE_BASE          (TCIP_BASE +  0x1000UL)                   /*!< CACHE Base Address */
+
+#define CORET               ((CORET_Type   *)     CORET_BASE  )       /*!< SysTick configuration struct */
+#define VIC                 ((VIC_Type    *)     VIC_BASE   )         /*!< VIC configuration struct */
+#define DCC                 ((DCC_Type     *)     DCC_BASE    )       /*!< DCC configuration struct */
+#define CACHE               ((CACHE_Type   *)     CACHE_BASE  )       /*!< cache configuration struct */
+
+/*@} */
+
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+/**
+  \defgroup CSI_Core_FunctionInterface Functions and Instructions Reference
+*/
+
+/* ##########################   VIC functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_VICFunctions VIC Functions
+  \brief    Functions that manage interrupts and exceptions via the VIC.
+  @{
+ */
+
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _IR_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    5UL)      )
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
+
+/**
+  \brief   Enable External Interrupt
+  \details Enable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ISER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Disable External Interrupt
+  \details Disable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ICER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Enable External Secure Interrupt
+  \details Enable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_sirq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ISER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+    VIC->ISSR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Disable External Secure Interrupt
+  \details Disable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_sirq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ICER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+    VIC->ICSR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Check Interrupt is Enabled or not
+  \details Read the enabled register in the VIC and returns the pending bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not enabled.
+  \return             1  Interrupt status is enabled.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_enabled_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->ISER[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Check Interrupt is Pending or not
+  \details Read the pending register in the VIC and returns the pending bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not pending.
+  \return             1  Interrupt status is pending.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_pending_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->ISPR[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Pending Interrupt
+  \details Set the pending bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_pending_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ISPR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Clear Pending Interrupt
+  \details Clear the pending bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_pending_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->ICPR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Check Interrupt is Wakeup or not
+  \details Read the wake up register in the VIC and returns the pending bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt is not set as wake up interrupt.
+  \return             1  Interrupt is set as wake up interrupt.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_wakeup_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->IWER[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Wake up Interrupt
+  \details Set the wake up bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_wakeup_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->IWER[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Clear Wake up Interrupt
+  \details Clear the wake up bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_wakeup_irq(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    VIC->IWDR[_IR_IDX(IRQn)] = (uint32_t)(1UL << ((uint32_t)(int32_t)IRQn % 32));
+}
+
+/**
+  \brief   Get Interrupt is Active or not
+  \details Read the active register in the VIC and returns the active bit for the device specific interrupt.
+  \param [in]      IRQn  Device specific interrupt number.
+  \return             0  Interrupt status is not active.
+  \return             1  Interrupt status is active.
+  \note    IRQn must not be negative.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_active(int32_t IRQn)
+{
+    IRQn &= 0x7FUL;
+
+    return ((uint32_t)(((VIC->IABR[_IR_IDX(IRQn)] & (1UL << (((uint32_t)(int32_t)IRQn % 32) & 0x7FUL))) != 0UL) ? 1UL : 0UL));
+}
+
+/**
+  \brief   Set Threshold register
+  \details set the threshold register in the VIC.
+  \param [in]      VectThreshold  specific vector threshold.
+  \param [in]      PrioThreshold  specific priority threshold.
+ */
+__STATIC_INLINE void csi_vic_set_threshold(uint32_t VectThreshold, uint32_t PrioThreshold)
+{
+    VectThreshold &= 0x7FUL;
+
+    if (VectThreshold <= 31) {
+        VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0x3) << 6);
+    }
+
+    if (VectThreshold > 31 && VectThreshold < 96) {
+        VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0x7) << 5);
+    }
+
+    if (VectThreshold > 95) {
+        VIC->IPTR = 0x80000000 | (((VectThreshold + 32) & 0xFF) << 8) | ((PrioThreshold & 0xF) << 4);
+    }
+}
+
+/**
+  \brief   Set Interrupt Priority
+  \details Set the priority of an interrupt.
+  \note    The priority cannot be set for every core interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]  priority  Priority to set.
+ */
+__STATIC_INLINE void csi_vic_set_prio(int32_t IRQn, uint32_t priority)
+{
+    VIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(VIC->IPR[_IP_IDX(IRQn)]  & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
+                                 (((priority << (8U - __VIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
+}
+
+/**
+  \brief   Get Interrupt Priority
+  \details Read the priority of an interrupt.
+           The interrupt number can be positive to specify an external (device specific) interrupt,
+           or negative to specify an internal (core) interrupt.
+  \param [in]   IRQn  Interrupt number.
+  \return             Interrupt Priority.
+                      Value is aligned automatically to the implemented priority bits of the microcontroller.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_prio(int32_t IRQn)
+{
+    return ((uint32_t)(((VIC->IPR[_IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn)) & (uint32_t)0xFFUL) >> (8U - __VIC_PRIO_BITS)));
+}
+
+/**
+  \brief   Set interrupt handler
+  \details Set the interrupt handler according to the interrupt number; the handler is written into the IRQ vector table.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]   handler  Interrupt handler.
+ */
+__STATIC_INLINE void csi_vic_set_vector(int32_t IRQn, uint32_t handler)
+{
+    if (IRQn >= 0 && IRQn < 128) {
+        uint32_t *vectors = (uint32_t *)__get_VBR();
+        vectors[32 + IRQn] = handler;
+    }
+}
+
+/**
+  \brief   Get interrupt handler
+  \details Get the address of interrupt handler function.
+  \param [in]      IRQn  Interrupt number.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_vector(int32_t IRQn)
+{
+    if (IRQn >= 0 && IRQn < 128) {
+        uint32_t *vectors = (uint32_t *)__get_VBR();
+        return (uint32_t)vectors[32 + IRQn];
+    }
+
+    return 0;
+}
+
+/*@} end of CSI_Core_VICFunctions */
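+
+/* Usage sketch (illustrative only): install and enable a device interrupt
+ * ('UART_IRQn' and 'uart_irq_handler' are hypothetical, device-defined names).
+ *
+ *     csi_vic_set_vector(UART_IRQn, (uint32_t)uart_irq_handler);
+ *     csi_vic_set_prio(UART_IRQn, 1U);
+ *     csi_vic_enable_irq(UART_IRQn);
+ */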
+
+/* ##################################    SysTick function  ############################################ */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_SysTickFunctions SysTick Functions
+  \brief    Functions that configure the System.
+  @{
+ */
+
+
+/**
+  \brief   CORE timer Configuration
+  \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
+           Counter is in free running mode to generate periodic interrupts.
+  \param [in]  ticks  Number of ticks between two interrupts.
+  \param [in]  IRQn   core timer Interrupt number.
+  \return          0  Function succeeded.
+  \return          1  Function failed.
+  \note    When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
+           function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
+           must contain a vendor-specific implementation of this function.
+ */
+__STATIC_INLINE uint32_t csi_coret_config(uint32_t ticks, int32_t IRQn)
+{
+    if ((ticks - 1UL) > CORET_LOAD_RELOAD_Msk) {
+        return (1UL);                                                   /* Reload value impossible */
+    }
+
+    CORET->LOAD = (uint32_t)(ticks - 1UL);                              /* set reload register */
+    CORET->VAL  = 0UL;                                                  /* Load the CORET Counter Value */
+    CORET->CTRL = CORET_CTRL_CLKSOURCE_Msk |
+                   CORET_CTRL_TICKINT_Msk |
+                   CORET_CTRL_ENABLE_Msk;                               /* Enable CORET IRQ and CORET Timer */
+    return (0UL);                                                       /* Function successful */
+}
+
+/**
+  \brief   get CORE timer reload value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_load(void)
+{
+    return CORET->LOAD;
+}
+
+/**
+  \brief   get CORE timer counter value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_value(void)
+{
+    return CORET->VAL;
+}
+
+/**
+  \brief   clean CORE timer interrupt flag
+ */
+__STATIC_INLINE void csi_coret_clear_irq(void)
+{
+    (void)CORET->CTRL;                     /* read CTRL to clear the pending tick flag */
+}
+
+/*@} end of CSI_Core_SysTickFunctions */
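+
+/* Usage sketch (illustrative only): a 1 ms periodic tick, assuming a
+ * hypothetical SYSTEM_CLOCK value in Hz and the device-defined CORET_IRQn.
+ *
+ *     if (csi_coret_config(SYSTEM_CLOCK / 1000U, CORET_IRQn) != 0U) {
+ *         // reload value does not fit into the 24-bit LOAD register
+ *     }
+ */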
+
+/* ##################################### DCC function ########################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_core_DebugFunctions HAD Functions
+  \brief    Functions that access the HAD debug interface.
+  @{
+ */
+
+/**
+  \brief   HAD Send Character
+  \details Transmits a character via the HAD channel 0, and
+           \li Just returns when no debugger is connected that has booked the output.
+           \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted.
+  \param [in]     ch  Character to transmit.
+  \returns            Character to transmit.
+ */
+__STATIC_INLINE uint32_t csi_had_send_char(uint32_t ch)
+{
+    DCC->DERJR = (uint8_t)ch;
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Receive Character
+  \details Receives a character via the HAD debug channel.
+  \return             Received character.
+  \return         -1  No character pending.
+ */
+__STATIC_INLINE int32_t csi_had_receive_char(void)
+{
+    int32_t ch = -1;                           /* no character available */
+
+    if (_FLD2VAL(DCC_EHSR_JW, DCC->EHSR)) {
+        ch = DCC->DERJW;
+    }
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Check Character
+  \details Check whether a character is pending for reading on the HAD debug channel.
+  \return          0  No character available.
+  \return          1  Character available.
+ */
+__STATIC_INLINE int32_t csi_had_check_char(void)
+{
+    return _FLD2VAL(DCC_EHSR_JW, DCC->EHSR);                              /* 1 if a character is pending */
+}
+
+/*@} end of CSI_core_DebugFunctions */
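+
+/* Usage sketch (illustrative only): poll the HAD channel and echo back any
+ * character received from the debugger.
+ *
+ *     if (csi_had_check_char()) {
+ *         int32_t c = csi_had_receive_char();
+ *         if (c >= 0) {
+ *             csi_had_send_char((uint32_t)c);
+ *         }
+ *     }
+ */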
+
+/* ##########################  Cache functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_CacheFunctions Cache Functions
+  \brief    Functions that configure Instruction and Data cache.
+  @{
+ */
+
+/**
+  \brief   Enable I-Cache
+  \details Turns on I-Cache
+  */
+__STATIC_INLINE void csi_icache_enable (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_I_INV_Msk;         /* invalidate all Cache */
+    CACHE->CER |=  (uint32_t)(CACHE_CER_ICEN_Msk);  /* enable icache Cache */
+#endif
+}
+
+
+/**
+  \brief   Disable I-Cache
+  \details Turns off I-Cache
+  */
+__STATIC_INLINE void csi_icache_disable (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    CACHE->CER &=  ~(uint32_t)(CACHE_CER_ICEN_Msk);  /* disable icache */
+    CACHE->CIR = CACHE_CIR_I_INV_Msk;          /* invalidate icache */
+#endif
+}
+
+
+/**
+  \brief   Invalidate I-Cache
+  \details Invalidates I-Cache
+  */
+__STATIC_INLINE void csi_icache_invalid (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_I_INV_Msk;         /* invalidate icache */
+#endif
+}
+
+
+/**
+  \brief   Enable D-Cache
+  \details Turns on D-Cache
+  */
+__STATIC_INLINE void csi_dcache_enable (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_D_INV_Msk;         /* invalidate dcache */
+    CACHE->CER |= (uint32_t)(CACHE_CER_DCEN_Msk | CACHE_CER_WB_Msk | CACHE_CER_DCW_Msk);  /* enable dcache */
+#endif
+}
+
+
+/**
+  \brief   Disable D-Cache
+  \details Turns off D-Cache
+  \note    I-Cache also turns off.
+  */
+__STATIC_INLINE void csi_dcache_disable (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CER &=  ~(uint32_t)CACHE_CER_DCEN_Msk;  /* disable dcache */
+    CACHE->CIR = CACHE_CIR_D_INV_Msk;          /* invalidate dcache */
+#endif
+}
+
+
+/**
+  \brief   Invalidate D-Cache
+  \details Invalidates D-Cache
+  \note    I-Cache also invalid
+  */
+__STATIC_INLINE void csi_dcache_invalid (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_D_INV_Msk;         /* invalidate dcache */
+#endif
+}
+
+
+/**
+  \brief   Clean D-Cache
+  \details Cleans D-Cache
+  \note    I-Cache also cleans
+  */
+__STATIC_INLINE void csi_dcache_clean (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_D_CLR_Msk;         /* clean dcache */
+#endif
+}
+
+
+/**
+  \brief   Clean & Invalidate D-Cache
+  \details Cleans and Invalidates D-Cache
+  \note    I-Cache also flush.
+  */
+__STATIC_INLINE void csi_dcache_clean_invalid (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    CACHE->CIR = CACHE_CIR_D_CLR_Msk | CACHE_CIR_D_INV_Msk;         /* clean and inv all Cache */
+#endif
+}
+
+
+/**
+  \brief   D-Cache Invalidate by address
+  \details Invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_invalid_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_OP_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= (CACHE_CIR_D_INV_Msk | CACHE_CIR_LINE_MODE_Msk);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean by address
+  \details Cleans D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_OP_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= (CACHE_CIR_D_CLR_Msk | CACHE_CIR_LINE_MODE_Msk);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean and Invalidate by address
+  \details Cleans and invalidates D_Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_invalid_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_OP_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= (CACHE_CIR_D_CLR_Msk | CACHE_CIR_D_INV_Msk | CACHE_CIR_LINE_MODE_Msk);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
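+
+/*
+  Illustrative usage sketch (not part of the vendor API): D-Cache maintenance
+  around a DMA transfer on a write-back cache. Buffer name and size are
+  hypothetical; address and size must be 16-byte aligned as documented above.
+
+      static uint32_t dma_buf[64] __attribute__((aligned(16)));
+
+      csi_dcache_clean_range(dma_buf, sizeof(dma_buf));    // flush CPU writes to memory before the DMA engine reads it
+      // ... start a DMA transfer that writes into dma_buf, wait for completion ...
+      csi_dcache_invalid_range(dma_buf, sizeof(dma_buf));  // drop stale cache lines before the CPU reads the DMA data
+ */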
+
+/**
+  \brief   Set up a cacheable address range
+  \details Configures the cacheable range register CRCR[index] with base address, size and enable bit.
+  */
+__STATIC_INLINE void csi_cache_set_range (uint32_t index, uint32_t baseAddr, uint32_t size, uint32_t enable)
+{
+    CACHE->CRCR[index] =  ((baseAddr & CACHE_CRCR_BASE_ADDR_Msk) |
+                           (_VAL2FLD(CACHE_CRCR_SIZE, size)) |
+                           (_VAL2FLD(CACHE_CRCR_EN, enable)));
+}
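+
+/*
+  Illustrative usage sketch (not part of the vendor API): mark a hypothetical
+  address window starting at 0x20000000 as cacheable through CRCR[0]. The size
+  encoding is device specific (CACHE_CRCR_SIZE field); `size_code` below is a
+  placeholder for the value given in the SoC manual.
+
+      csi_cache_set_range(0U, 0x20000000UL, size_code, 1U);   // index, base address, size code, enable
+ */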
+
+/**
+  \brief   Enable cache profile
+  \details Turns on Cache profile
+  */
+__STATIC_INLINE void csi_cache_enable_profile (void)
+{
+    CACHE->CPFCR |=  (uint32_t)CACHE_CPFCR_PFEN_Msk;
+}
+
+/**
+  \brief   Disable cache profile
+  \details Turns off Cache profile
+  */
+__STATIC_INLINE void csi_cache_disable_profile (void)
+{
+    CACHE->CPFCR &=  ~(uint32_t)CACHE_CPFCR_PFEN_Msk;
+}
+
+/**
+  \brief   Reset cache profile
+  \details Reset Cache profile
+  */
+__STATIC_INLINE void csi_cache_reset_profile (void)
+{
+    CACHE->CPFCR |=  (uint32_t)CACHE_CPFCR_PFRST_Msk;
+}
+
+/**
+  \brief   Cache access count
+  \details Reads the cache profile access counter.
+  \note    The counter increments once every 256 accesses.
+  \return          Cache access count; multiply by 256 for the actual number of accesses.
+  */
+__STATIC_INLINE uint32_t csi_cache_get_access_time (void)
+{
+    return CACHE->CPFATR;
+}
+
+/**
+  \brief   Cache miss count
+  \details Reads the cache profile miss counter.
+  \note    The counter increments once every 256 misses.
+  \return          Cache miss count; multiply by 256 for the actual number of misses.
+  */
+__STATIC_INLINE uint32_t csi_cache_get_miss_time (void)
+{
+    return CACHE->CPFMTR;
+}
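+
+/*
+  Illustrative usage sketch (not part of the vendor API): measure a rough cache
+  miss ratio over a code section with the profile counters above. Both counters
+  tick once per 256 events, so the scaling factor cancels in the ratio.
+
+      csi_cache_enable_profile();
+      csi_cache_reset_profile();
+      // ... run the workload of interest ...
+      uint32_t acc  = csi_cache_get_access_time();
+      uint32_t miss = csi_cache_get_miss_time();
+      // miss ratio ~= (float)miss / (float)acc   (both counters are in units of 256 events)
+      csi_cache_disable_profile();
+ */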
+
+/*@} end of CSI_Core_CacheFunctions */
+
+/* ##########################  MPU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_MPUFunctions MPU Functions
+  \brief    Functions that configure MPU.
+  @{
+ */
+
+typedef enum {
+    REGION_SIZE_4KB      = 0xB,
+    REGION_SIZE_8KB      = 0xC,
+    REGION_SIZE_16KB     = 0xD,
+    REGION_SIZE_32KB     = 0xE,
+    REGION_SIZE_64KB     = 0xF,
+    REGION_SIZE_128KB    = 0x10,
+    REGION_SIZE_256KB    = 0x11,
+    REGION_SIZE_512KB    = 0x12,
+    REGION_SIZE_1MB      = 0x13,
+    REGION_SIZE_2MB      = 0x14,
+    REGION_SIZE_4MB      = 0x15,
+    REGION_SIZE_8MB      = 0x16,
+    REGION_SIZE_16MB     = 0x17,
+    REGION_SIZE_32MB     = 0x18,
+    REGION_SIZE_64MB     = 0x19,
+    REGION_SIZE_128MB    = 0x1A,
+    REGION_SIZE_256MB    = 0x1B,
+    REGION_SIZE_512MB    = 0x1C,
+    REGION_SIZE_1GB      = 0x1D,
+    REGION_SIZE_2GB      = 0x1E,
+    REGION_SIZE_4GB      = 0x1F
+} region_size_e;
+
+typedef enum {
+    AP_BOTH_INACCESSIBLE = 0,
+    AP_SUPER_RW_USER_INACCESSIBLE,
+    AP_SUPER_RW_USER_RDONLY,
+    AP_BOTH_RW
+} access_permission_e;
+
+typedef struct {
+    uint32_t nx: 1;               /* non-executable: disallow instruction fetch/execution */
+    access_permission_e ap: 2;    /* super user and normal user access.*/
+    uint32_t s: 1;                /* security */
+} mpu_region_attr_t;
+
+/**
+  \brief  enable mpu.
+  \details
+  */
+__STATIC_INLINE void csi_mpu_enable(void)
+{
+    __set_CCR(__get_CCR() | CCR_MP_Msk);
+}
+
+/**
+  \brief  disable mpu.
+  \details
+  */
+__STATIC_INLINE void csi_mpu_disable(void)
+{
+    __set_CCR(__get_CCR() & (~CCR_MP_Msk));
+}
+
+/**
+  \brief  configure memory protected region.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  \param [in]  base_addr  base address must be aligned with page size.
+  \param [in]  size       \ref region_size_e. memory protected region size.
+  \param [in]  attr       \ref mpu_region_attr_t. memory protected region attribute.
+  \param [in]  enable     enable or disable memory protected region.
+  */
+__STATIC_INLINE void csi_mpu_config_region(uint32_t idx, uint32_t base_addr, region_size_e size,
+                                           mpu_region_attr_t attr, uint32_t enable)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    CAPR_Type capr;
+    PACR_Type pacr;
+    PRSR_Type prsr;
+
+    capr.w = __get_CAPR();
+    pacr.w = __get_PACR();
+    prsr.w = __get_PRSR();
+
+    pacr.b.base_addr = (base_addr >> PACR_BASE_ADDR_Pos) & (0xFFFFF);
+
+    prsr.b.RID = idx;
+    __set_PRSR(prsr.w);
+
+    if (size != REGION_SIZE_4KB) {
+        pacr.w &= ~(((1u << (size -11)) - 1) << 12);
+    }
+
+    pacr.b.size = size;
+
+    capr.w &= ~((0x1 << idx) | (0x3 << (idx * 2 + 8)) | (0x1 << (idx + 24)));
+    capr.w = (capr.w | (attr.nx << idx) | (attr.ap << (idx * 2 + 8)) | (attr.s << (idx + 24)));
+    __set_CAPR(capr.w);
+
+    pacr.b.E = enable;
+    __set_PACR(pacr.w);
+}
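+
+/*
+  Illustrative usage sketch (not part of the vendor API): protect a hypothetical
+  4KB page at 0x40000000 as supervisor read/write, user no-access, non-executable,
+  then enable the MPU. The base address must be aligned to the region size.
+
+      mpu_region_attr_t attr;
+      attr.nx = 1;                              // no instruction fetch from this region
+      attr.ap = AP_SUPER_RW_USER_INACCESSIBLE;  // supervisor RW, user no access
+      attr.s  = 0;
+
+      csi_mpu_config_region(0U, 0x40000000UL, REGION_SIZE_4KB, attr, 1U);
+      csi_mpu_enable();
+ */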
+
+/**
+  \brief  enable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  */
+__STATIC_INLINE void csi_mpu_enable_region(uint32_t idx)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | (idx << PRSR_RID_Pos));
+    __set_PACR(__get_PACR() | PACR_E_Msk);
+}
+
+/**
+  \brief  disable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 7).
+  */
+__STATIC_INLINE void csi_mpu_disable_region(uint32_t idx)
+{
+    if (idx > 7) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | (idx << PRSR_RID_Pos));
+    __set_PACR(__get_PACR() & (~PACR_E_Msk));
+}
+
+/*@} end of CSI_Core_MPUFunctions */
+
+
+/* ##################################    IRQ Functions  ############################################ */
+
+/**
+  \brief   Save the Irq context
+  \details Save the PSR, then disable interrupts.
+  \return          Saved PSR value.
+ */
+__STATIC_INLINE uint32_t csi_irq_save(void)
+{
+    uint32_t result;
+    result = __get_PSR();
+    __disable_irq();
+    return(result);
+}
+
+/**
+  \brief   Restore the Irq context
+  \details Restore the previously saved PSR interrupt state.
+  \param [in]      irq_state  psr irq state.
+ */
+__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
+{
+    __set_PSR(irq_state);
+}
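+
+/*
+  Illustrative usage sketch (not part of the vendor API): a short critical
+  section protected by saving and restoring the PSR interrupt state.
+
+      uint32_t state = csi_irq_save();   // PSR saved, interrupts disabled
+      // ... touch data shared with interrupt handlers ...
+      csi_irq_restore(state);            // previous interrupt state restored
+ */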
+
+/*@} end of IRQ Functions */
+
+/**
+  \brief   System Reset
+  \details Initiates a system reset request to reset the MCU.
+ */
+__STATIC_INLINE void csi_system_reset(void)
+{
+    CHR_Type chr;
+
+    chr.w = __get_CHR();
+#ifdef __RESET_CONST
+    chr.b.SRST_VAL = __RESET_CONST;
+#else
+    chr.b.SRST_VAL = 0xABCD;
+#endif
+
+    __DSB();                                                          /* Ensure all outstanding memory accesses, including
+                                                                         buffered writes, are completed before reset */
+    __set_CHR(chr.w);
+
+    __DSB();                                                          /* Ensure completion of memory access */
+
+    for(;;)                                                           /* wait until reset */
+    {
+        __NOP();
+    }
+}
+
+/* ##################################    Old Interfaces  ############################################ */
+
+/* These interfaces are deprecated */
+#define NVIC_EnableIRQ(IRQn)                               csi_vic_enable_irq(IRQn)
+#define NVIC_DisableIRQ(IRQn)                              csi_vic_disable_irq(IRQn)
+#define NVIC_GetPendingIRQ(IRQn)                           csi_vic_get_pending_irq(IRQn)
+#define NVIC_SetPendingIRQ(IRQn)                           csi_vic_set_pending_irq(IRQn)
+#define NVIC_ClearPendingIRQ(IRQn)                         csi_vic_clear_pending_irq(IRQn)
+#define NVIC_GetWakeupIRQ(IRQn)                            csi_vic_get_wakeup_irq(IRQn)
+#define NVIC_SetWakeupIRQ(IRQn)                            csi_vic_set_wakeup_irq(IRQn)
+#define NVIC_ClearWakeupIRQ(IRQn)                          csi_vic_clear_wakeup_irq(IRQn)
+#define NVIC_GetActive(IRQn)                               csi_vic_get_active(IRQn)
+#define NVIC_SetThreshold(VectThreshold, PrioThreshold)    csi_vic_set_threshold(VectThreshold, PrioThreshold)
+#define NVIC_SetPriority(IRQn, priority)                   csi_vic_set_prio(IRQn, priority)
+#define NVIC_GetPriority(IRQn)                             csi_vic_get_prio(IRQn)
+#define NVIC_SystemReset()                                 csi_system_reset()
+
+#define SysTick_Config(ticks)                              csi_coret_config(ticks, CORET_IRQn)
+#define CORET_Config(ticks)                                csi_coret_config(ticks, CORET_IRQn)
+
+#define SCB_EnableICache()                                 csi_icache_enable()
+#define SCB_DisableICache()                                csi_icache_disable()
+#define SCB_InvalidateICache()                             csi_icache_invalid()
+#define SCB_EnableDCache()                                 csi_dcache_enable()
+#define SCB_DisableDCache()                                csi_dcache_disable()
+#define SCB_InvalidateDCache()                             csi_dcache_invalid()
+#define SCB_CleanDCache()                                  csi_dcache_clean()
+#define SCB_CleanInvalidateDCache()                        csi_dcache_clean_invalid()
+#define SCB_InvalidateDCache_by_Addr(addr, dsize)          csi_dcache_invalid_range(addr, dsize)
+#define SCB_CleanDCache_by_Addr(addr, dsize)               csi_dcache_clean_range(addr, dsize)
+#define SCB_CleanInvalidateDCache_by_Addr(addr, dsize)     csi_dcache_clean_invalid_range(addr, dsize)
+#define SCB_Cacheable_Range(index, baseAddr, size, enable) csi_cache_set_range(index, baseAddr, size, enable)
+#define SCB_EnableCacheProfile()                           csi_cache_enable_profile()
+#define SCB_DisableCacheProfile()                          csi_cache_disable_profile()
+#define SCB_ResetCacheProfile()                            csi_cache_reset_profile()
+#define SCB_CacheAccessTime()                              csi_cache_get_access_time()
+#define SCB_CacheMissTime()                                csi_cache_get_miss_time()
+#define SCB_EnableCache()                                  csi_icache_enable();csi_dcache_enable()
+#define SCB_DisableCache()                                 csi_icache_disable();csi_dcache_disable()
+#define SCB_InvalidateCache()                              csi_icache_invalid();csi_dcache_invalid()
+#define SCB_CleanCache()                                   csi_dcache_clean()
+#define SCB_CleanInvalidateCache()                         csi_icache_invalid();csi_dcache_clean();csi_dcache_invalid()
+#define SCB_InvalidateCache_by_Addr(addr, dsize)           csi_dcache_invalid_range(addr, dsize);csi_icache_invalid()
+#define SCB_CleanCache_by_Addr(addr, dsize)                csi_dcache_clean_range(addr, dsize)
+#define SCB_CleanInvalidateCache_by_Addr(addr, dsize)      csi_dcache_clean_invalid_range(addr, dsize)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_805_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 1963 - 0
lib/sec_library/include/core/core_807.h

@@ -0,0 +1,1963 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_ck807.h
+ * @brief    CSI CK807 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     26. Jan 2018
+ ******************************************************************************/
+
+#ifndef __CORE_CK807_H_GENERIC
+#define __CORE_CK807_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup CK807
+  @{
+ */
+
+/*  CSI CK807 definitions */
+#define __CK807_CSI_VERSION_MAIN  (0x04U)                                      /*!< [31:16] CSI HAL main version */
+#define __CK807_CSI_VERSION_SUB   (0x1EU)                                      /*!< [15:0]  CSI HAL sub version */
+#define __CK807_CSI_VERSION       ((__CK807_CSI_VERSION_MAIN << 16U) | \
+                                   __CK807_CSI_VERSION_SUB           )         /*!< CSI HAL version number */
+
+#ifndef __CK807
+#define __CK807                   (0x07U)                                      /*!< CK807 Core */
+#endif
+
+/** __FPU_USED indicates whether an FPU is used or not.
+*/
+#define __FPU_USED       1U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CK807_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_CK807_H_DEPENDANT
+#define __CORE_CK807_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __CK807_REV
+#define __CK807_REV               0x0000U
+#endif
+
+#ifndef __GSR_GCR_PRESENT
+#define __GSR_GCR_PRESENT         0U
+#endif
+
+#ifndef __ICACHE_PRESENT
+#define __ICACHE_PRESENT          1U
+#endif
+
+#ifndef __DCACHE_PRESENT
+#define __DCACHE_PRESENT          1U
+#endif
+
+#include <core/csi_gcc.h>
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group CK807 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK807 processor based devices.
+*/
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \brief  Access Processor Status Register(PSR)struct definition.
+ */
+typedef union {
+    struct {
+        uint32_t C: 1;                       /*!< bit:      0  Conditional code/Carry flag */
+        uint32_t AF: 1;                      /*!< bit:      1  Alternate register valid control bit */
+        uint32_t _reserved0: 2;              /*!< bit:  2.. 3  Reserved */
+        uint32_t FE: 1;                      /*!< bit:      4  Fast interrupt enable control bit */
+        uint32_t _reserved1: 1;              /*!< bit:      5  Reserved */
+        uint32_t IE: 1;                      /*!< bit:      6  Interrupt effective control bit */
+        uint32_t IC: 1;                      /*!< bit:      7  Interrupt control bit */
+        uint32_t EE: 1;                      /*!< bit:      8  Abnormally effective control bit */
+        uint32_t MM: 1;                      /*!< bit:      9  Unsymmetrical masking bit */
+        uint32_t _reserved2: 2;              /*!< bit: 10..11  Reserved */
+        uint32_t TE: 1;                      /*!< bit:     12  Trace transmission control bit */
+        uint32_t TP: 1;                      /*!< bit:     13  Pending trace exception set bit */
+        uint32_t TM: 2;                      /*!< bit: 14..15  Tracing mode bit */
+        uint32_t VEC: 8;                     /*!< bit: 16..23  Abnormal event vector value */
+        uint32_t _reserved3: 7;              /*!< bit: 24..30  Reserved */
+        uint32_t S: 1;                       /*!< bit:     31  Superuser mode set bit */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} PSR_Type;
+
+/* PSR Register Definitions */
+#define PSR_S_Pos                          31U                                            /*!< PSR: S Position */
+#define PSR_S_Msk                          (0x1UL << PSR_S_Pos)                           /*!< PSR: S Mask */
+
+#define PSR_VEC_Pos                        16U                                            /*!< PSR: VEC Position */
+#define PSR_VEC_Msk                        (0xFFUL << PSR_VEC_Pos)                        /*!< PSR: VEC Mask */
+
+#define PSR_TM_Pos                         14U                                            /*!< PSR: TM Position */
+#define PSR_TM_Msk                         (0x3UL << PSR_TM_Pos)                          /*!< PSR: TM Mask */
+
+#define PSR_TP_Pos                         13U                                            /*!< PSR: TP Position */
+#define PSR_TP_Msk                         (0x1UL << PSR_TP_Pos)                          /*!< PSR: TP Mask */
+
+#define PSR_TE_Pos                         12U                                            /*!< PSR: TE Position */
+#define PSR_TE_Msk                         (0x1UL << PSR_TE_Pos)                          /*!< PSR: TE Mask */
+
+#define PSR_MM_Pos                         9U                                             /*!< PSR: MM Position */
+#define PSR_MM_Msk                         (0x1UL << PSR_MM_Pos)                          /*!< PSR: MM Mask */
+
+#define PSR_EE_Pos                         8U                                             /*!< PSR: EE Position */
+#define PSR_EE_Msk                         (0x1UL << PSR_EE_Pos)                          /*!< PSR: EE Mask */
+
+#define PSR_IC_Pos                         7U                                             /*!< PSR: IC Position */
+#define PSR_IC_Msk                         (0x1UL << PSR_IC_Pos)                          /*!< PSR: IC Mask */
+
+#define PSR_IE_Pos                         6U                                             /*!< PSR: IE Position */
+#define PSR_IE_Msk                         (0x1UL << PSR_IE_Pos)                          /*!< PSR: IE Mask */
+
+#define PSR_FE_Pos                         4U                                             /*!< PSR: FE Position */
+#define PSR_FE_Msk                         (0x1UL << PSR_FE_Pos)                          /*!< PSR: FE Mask */
+
+#define PSR_AF_Pos                         1U                                             /*!< PSR: AF Position */
+#define PSR_AF_Msk                         (0x1UL << PSR_AF_Pos)                          /*!< PSR: AF Mask */
+
+#define PSR_C_Pos                          0U                                             /*!< PSR: C Position */
+#define PSR_C_Msk                          (0x1UL << PSR_C_Pos)                           /*!< PSR: C Mask */
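+
+/*
+  Illustrative usage sketch (not part of the vendor header): decode the current
+  PSR through the union above. __get_PSR() is provided by <core/csi_gcc.h>,
+  which this header includes.
+
+      PSR_Type psr;
+      psr.w = __get_PSR();
+      if (psr.b.IE) {
+          // interrupts are currently enabled
+      }
+ */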
+
+/**
+  \brief Consortium definition for accessing Cache Configuration Registers(CCR, CR<18, 0>).
+ */
+typedef union {
+    struct {
+        uint32_t MP: 2;                      /*!< bit:  0.. 1  memory protection settings */
+        uint32_t IE: 1;                      /*!< bit:      2  Instruction cache enable */
+        uint32_t DE: 1;                      /*!< bit:      3  Data cache enable */
+        uint32_t WB: 1;                      /*!< bit:      4  Cache write back */
+        uint32_t RS: 1;                      /*!< bit:      5  Address return stack settings */
+        uint32_t Z: 1;                       /*!< bit:      6  Allow predictive jump bit */
+        uint32_t BE: 1;                      /*!< bit:      7  Endian mode */
+        uint32_t SCK: 3;                     /*!< bit:  8..10  the clock ratio of the system and the processor */
+        uint32_t _reserved0: 1;              /*!< bit:     11  Reserved */
+        uint32_t WA: 1;                      /*!< bit:     12  Write allocate enable */
+        uint32_t E_V2: 1;                    /*!< bit:     13  V2 Endian mode */
+        uint32_t BSTE: 1;                    /*!< bit:     14  Burst transmit enable */
+        uint32_t IPE: 1;                     /*!< bit:     15  Indirect predict enable */
+        uint32_t _reserved1: 16;             /*!< bit: 16..31  Reserved */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CCR_Type;
+
+/* CCR Register Definitions */
+#define CCR_IPE_Pos                       15u                                            /*!< CCR: IPE Position */
+#define CCR_IPE_Msk                       (0x1UL << CCR_IPE_Pos)                         /*!< CCR: IPE Mask */
+
+#define CCR_BSTE_Pos                      14u                                            /*!< CCR: BSTE Position */
+#define CCR_BSTE_Msk                      (0x1UL << CCR_BSTE_Pos)                        /*!< CCR: BSTE Mask */
+
+#define CCR_E_V2_Pos                      13U                                            /*!< CCR: E_V2 Position */
+#define CCR_E_V2_Msk                      (0x1UL << CCR_E_V2_Pos)                        /*!< CCR: E_V2 Mask */
+
+#define CCR_WA_Pos                        12u                                            /*!< CCR: WA Position */
+#define CCR_WA_Msk                        (0x1UL << CCR_WA_Pos)                          /*!< CCR: WA Mask */
+
+#define CCR_SCK_Pos                       8U                                             /*!< CCR: SCK Position */
+#define CCR_SCK_Msk                       (0x3UL << CCR_SCK_Pos)                         /*!< CCR: SCK Mask */
+
+#define CCR_BE_Pos                        7U                                             /*!< CCR: BE Position */
+#define CCR_BE_Msk                        (0x1UL << CCR_BE_Pos)                          /*!< CCR: BE Mask */
+
+#define CCR_Z_Pos                         6U                                             /*!< CCR: Z Position */
+#define CCR_Z_Msk                         (0x1UL << CCR_Z_Pos)                           /*!< CCR: Z Mask */
+
+#define CCR_RS_Pos                        5U                                             /*!< CCR: RS Position */
+#define CCR_RS_Msk                        (0x1UL << CCR_RS_Pos)                          /*!< CCR: RS Mask */
+
+#define CCR_WB_Pos                        4U                                             /*!< CCR: WB Position */
+#define CCR_WB_Msk                        (0x1UL << CCR_WB_Pos)                          /*!< CCR: WB Mask */
+
+#define CCR_DE_Pos                        3U                                             /*!< CCR: DE Position */
+#define CCR_DE_Msk                        (0x1UL << CCR_DE_Pos)                          /*!< CCR: DE Mask */
+
+#define CCR_IE_Pos                        2U                                             /*!< CCR: IE Position */
+#define CCR_IE_Msk                        (0x1UL << CCR_IE_Pos)                          /*!< CCR: IE Mask */
+
+#define CCR_MP_Pos                        0U                                             /*!< CCR: MP Position */
+#define CCR_MP_Msk                        (0x3UL << CCR_MP_Pos)                          /*!< CCR: MP Mask */
+
+/**
+  \brief  Consortium definition for accessing high ease access permission configuration registers (CAPR, CR<19,0>)
+ */
+typedef union {
+    struct {
+        uint32_t NX0: 1;                     /*!< bit:      0  Non executable attribute setting */
+        uint32_t NX1: 1;                     /*!< bit:      1  Non executable attribute setting */
+        uint32_t NX2: 1;                     /*!< bit:      2  Non executable attribute setting */
+        uint32_t NX3: 1;                     /*!< bit:      3  Non executable attribute setting */
+        uint32_t NX4: 1;                     /*!< bit:      4  Non executable attribute setting */
+        uint32_t NX5: 1;                     /*!< bit:      5  Non executable attribute setting */
+        uint32_t NX6: 1;                     /*!< bit:      6  Non executable attribute setting */
+        uint32_t NX7: 1;                     /*!< bit:      7  Non executable attribute setting */
+        uint32_t AP0: 2;                     /*!< bit:  8.. 9  access permissions settings bit */
+        uint32_t AP1: 2;                     /*!< bit: 10..11  access permissions settings bit */
+        uint32_t AP2: 2;                     /*!< bit: 12..13  access permissions settings bit */
+        uint32_t AP3: 2;                     /*!< bit: 14..15  access permissions settings bit */
+        uint32_t AP4: 2;                     /*!< bit: 16..17  access permissions settings bit */
+        uint32_t AP5: 2;                     /*!< bit: 18..19  access permissions settings bit */
+        uint32_t AP6: 2;                     /*!< bit: 20..21  access permissions settings bit */
+        uint32_t AP7: 2;                     /*!< bit: 22..23  access permissions settings bit */
+        uint32_t S0: 1;                      /*!< bit:     24  Security property settings */
+        uint32_t S1: 1;                      /*!< bit:     25  Security property settings */
+        uint32_t S2: 1;                      /*!< bit:     26  Security property settings */
+        uint32_t S3: 1;                      /*!< bit:     27  Security property settings */
+        uint32_t S4: 1;                      /*!< bit:     28  Security property settings */
+        uint32_t S5: 1;                      /*!< bit:     29  Security property settings */
+        uint32_t S6: 1;                      /*!< bit:     30  Security property settings */
+        uint32_t S7: 1;                      /*!< bit:     31  Security property settings */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CAPR_Type;
+
+/* CAPR Register Definitions */
+#define CAPR_S7_Pos                        31U                                            /*!< CAPR: S7 Position */
+#define CAPR_S7_Msk                        (1UL << CAPR_S7_Pos)                           /*!< CAPR: S7 Mask */
+
+#define CAPR_S6_Pos                        30U                                            /*!< CAPR: S6 Position */
+#define CAPR_S6_Msk                        (1UL << CAPR_S6_Pos)                           /*!< CAPR: S6 Mask */
+
+#define CAPR_S5_Pos                        29U                                            /*!< CAPR: S5 Position */
+#define CAPR_S5_Msk                        (1UL << CAPR_S5_Pos)                           /*!< CAPR: S5 Mask */
+
+#define CAPR_S4_Pos                        28U                                            /*!< CAPR: S4 Position */
+#define CAPR_S4_Msk                        (1UL << CAPR_S4_Pos)                           /*!< CAPR: S4 Mask */
+
+#define CAPR_S3_Pos                        27U                                            /*!< CAPR: S3 Position */
+#define CAPR_S3_Msk                        (1UL << CAPR_S3_Pos)                           /*!< CAPR: S3 Mask */
+
+#define CAPR_S2_Pos                        26U                                            /*!< CAPR: S2 Position */
+#define CAPR_S2_Msk                        (1UL << CAPR_S2_Pos)                           /*!< CAPR: S2 Mask */
+
+#define CAPR_S1_Pos                        25U                                            /*!< CAPR: S1 Position */
+#define CAPR_S1_Msk                        (1UL << CAPR_S1_Pos)                           /*!< CAPR: S1 Mask */
+
+#define CAPR_S0_Pos                        24U                                            /*!< CAPR: S0 Position */
+#define CAPR_S0_Msk                        (1UL << CAPR_S0_Pos)                           /*!< CAPR: S0 Mask */
+
+#define CAPR_AP7_Pos                       22U                                            /*!< CAPR: AP7 Position */
+#define CAPR_AP7_Msk                       (0x3UL << CAPR_AP7_Pos)                        /*!< CAPR: AP7 Mask */
+
+#define CAPR_AP6_Pos                       20U                                            /*!< CAPR: AP6 Position */
+#define CAPR_AP6_Msk                       (0x3UL << CAPR_AP6_Pos)                        /*!< CAPR: AP6 Mask */
+
+#define CAPR_AP5_Pos                       18U                                            /*!< CAPR: AP5 Position */
+#define CAPR_AP5_Msk                       (0x3UL << CAPR_AP5_Pos)                        /*!< CAPR: AP5 Mask */
+
+#define CAPR_AP4_Pos                       16U                                            /*!< CAPR: AP4 Position */
+#define CAPR_AP4_Msk                       (0x3UL << CAPR_AP4_Pos)                        /*!< CAPR: AP4 Mask */
+
+#define CAPR_AP3_Pos                       14U                                            /*!< CAPR: AP3 Position */
+#define CAPR_AP3_Msk                       (0x3UL << CAPR_AP3_Pos)                        /*!< CAPR: AP3 Mask */
+
+#define CAPR_AP2_Pos                       12U                                            /*!< CAPR: AP2 Position */
+#define CAPR_AP2_Msk                       (0x3UL << CAPR_AP2_Pos)                        /*!< CAPR: AP2 Mask */
+
+#define CAPR_AP1_Pos                       10U                                            /*!< CAPR: AP1 Position */
+#define CAPR_AP1_Msk                       (0x3UL << CAPR_AP1_Pos)                        /*!< CAPR: AP1 Mask */
+
+#define CAPR_AP0_Pos                       8U                                             /*!< CAPR: AP0 Position */
+#define CAPR_AP0_Msk                       (0x3UL << CAPR_AP0_Pos)                        /*!< CAPR: AP0 Mask */
+
+#define CAPR_NX7_Pos                       7U                                             /*!< CAPR: NX7 Position */
+#define CAPR_NX7_Msk                       (0x1UL << CAPR_NX7_Pos)                        /*!< CAPR: NX7 Mask */
+
+#define CAPR_NX6_Pos                       6U                                             /*!< CAPR: NX6 Position */
+#define CAPR_NX6_Msk                       (0x1UL << CAPR_NX6_Pos)                        /*!< CAPR: NX6 Mask */
+
+#define CAPR_NX5_Pos                       5U                                             /*!< CAPR: NX5 Position */
+#define CAPR_NX5_Msk                       (0x1UL << CAPR_NX5_Pos)                        /*!< CAPR: NX5 Mask */
+
+#define CAPR_NX4_Pos                       4U                                             /*!< CAPR: NX4 Position */
+#define CAPR_NX4_Msk                       (0x1UL << CAPR_NX4_Pos)                        /*!< CAPR: NX4 Mask */
+
+#define CAPR_NX3_Pos                       3U                                             /*!< CAPR: NX3 Position */
+#define CAPR_NX3_Msk                       (0x1UL << CAPR_NX3_Pos)                        /*!< CAPR: NX3 Mask */
+
+#define CAPR_NX2_Pos                       2U                                             /*!< CAPR: NX2 Position */
+#define CAPR_NX2_Msk                       (0x1UL << CAPR_NX2_Pos)                        /*!< CAPR: NX2 Mask */
+
+#define CAPR_NX1_Pos                       1U                                             /*!< CAPR: NX1 Position */
+#define CAPR_NX1_Msk                       (0x1UL << CAPR_NX1_Pos)                        /*!< CAPR: NX1 Mask */
+
+#define CAPR_NX0_Pos                       0U                                             /*!< CAPR: NX0 Position */
+#define CAPR_NX0_Msk                       (0x1UL << CAPR_NX0_Pos)                        /*!< CAPR: NX0 Mask */
+
+/**
+  \brief  Consortium definition for accessing high ease access permission configuration registers (CAPR1, CR<16,0>)
+ */
+typedef union {
+    struct {
+        uint32_t NX8: 1;                      /*!< bit:      0  Non executable attribute setting */
+        uint32_t NX9: 1;                      /*!< bit:      1  Non executable attribute setting */
+        uint32_t NX10: 1;                     /*!< bit:      2  Non executable attribute setting */
+        uint32_t NX11: 1;                     /*!< bit:      3  Non executable attribute setting */
+        uint32_t NX12: 1;                     /*!< bit:      4  Non executable attribute setting */
+        uint32_t NX13: 1;                     /*!< bit:      5  Non executable attribute setting */
+        uint32_t NX14: 1;                     /*!< bit:      6  Non executable attribute setting */
+        uint32_t NX15: 1;                     /*!< bit:      7  Non executable attribute setting */
+        uint32_t AP8: 2;                      /*!< bit:  8.. 9  access permissions settings bit */
+        uint32_t AP9: 2;                      /*!< bit: 10..11  access permissions settings bit */
+        uint32_t AP10: 2;                     /*!< bit: 12..13  access permissions settings bit */
+        uint32_t AP11: 2;                     /*!< bit: 14..15  access permissions settings bit */
+        uint32_t AP12: 2;                     /*!< bit: 16..17  access permissions settings bit */
+        uint32_t AP13: 2;                     /*!< bit: 18..19  access permissions settings bit */
+        uint32_t AP14: 2;                     /*!< bit: 20..21  access permissions settings bit */
+        uint32_t AP15: 2;                     /*!< bit: 22..23  access permissions settings bit */
+        uint32_t S8: 1;                       /*!< bit:     24  Security property settings */
+        uint32_t S9: 1;                       /*!< bit:     25  Security property settings */
+        uint32_t S10: 1;                      /*!< bit:     26  Security property settings */
+        uint32_t S11: 1;                      /*!< bit:     27  Security property settings */
+        uint32_t S12: 1;                      /*!< bit:     28  Security property settings */
+        uint32_t S13: 1;                      /*!< bit:     29  Security property settings */
+        uint32_t S14: 1;                      /*!< bit:     30  Security property settings */
+        uint32_t S15: 1;                      /*!< bit:     31  Security property settings */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CAPR1_Type;
+
+/* CAPR1 Register Definitions */
+#define CAPR1_S15_Pos                        31U                                            /*!< CAPR1: S15 Position */
+#define CAPR1_S15_Msk                        (1UL << CAPR1_S15_Pos)                         /*!< CAPR1: S15 Mask */
+
+#define CAPR1_S14_Pos                        30U                                            /*!< CAPR1: S14 Position */
+#define CAPR1_S14_Msk                        (1UL << CAPR1_S14_Pos)                         /*!< CAPR1: S14 Mask */
+
+#define CAPR1_S13_Pos                        29U                                            /*!< CAPR1: S13 Position */
+#define CAPR1_S13_Msk                        (1UL << CAPR1_S13_Pos)                         /*!< CAPR1: S13 Mask */
+
+#define CAPR1_S12_Pos                        28U                                            /*!< CAPR1: S12 Position */
+#define CAPR1_S12_Msk                        (1UL << CAPR1_S12_Pos)                         /*!< CAPR1: S12 Mask */
+
+#define CAPR1_S11_Pos                        27U                                            /*!< CAPR1: S11 Position */
+#define CAPR1_S11_Msk                        (1UL << CAPR1_S11_Pos)                         /*!< CAPR1: S11 Mask */
+
+#define CAPR1_S10_Pos                        26U                                            /*!< CAPR1: S10 Position */
+#define CAPR1_S10_Msk                        (1UL << CAPR1_S10_Pos)                         /*!< CAPR1: S10 Mask */
+
+#define CAPR1_S9_Pos                         25U                                            /*!< CAPR1: S9 Position */
+#define CAPR1_S9_Msk                         (1UL << CAPR1_S9_Pos)                          /*!< CAPR1: S9 Mask */
+
+#define CAPR1_S8_Pos                         24U                                            /*!< CAPR1: S8 Position */
+#define CAPR1_S8_Msk                         (1UL << CAPR1_S8_Pos)                          /*!< CAPR1: S8 Mask */
+
+#define CAPR1_AP15_Pos                       22U                                            /*!< CAPR1: AP15 Position */
+#define CAPR1_AP15_Msk                       (0x3UL << CAPR1_AP15_Pos)                      /*!< CAPR1: AP15 Mask */
+
+#define CAPR1_AP14_Pos                       20U                                            /*!< CAPR1: AP14 Position */
+#define CAPR1_AP14_Msk                       (0x3UL << CAPR1_AP14_Pos)                      /*!< CAPR1: AP14 Mask */
+
+#define CAPR1_AP13_Pos                       18U                                            /*!< CAPR1: AP13 Position */
+#define CAPR1_AP13_Msk                       (0x3UL << CAPR1_AP13_Pos)                      /*!< CAPR1: AP13 Mask */
+
+#define CAPR1_AP12_Pos                       16U                                            /*!< CAPR1: AP12 Position */
+#define CAPR1_AP12_Msk                       (0x3UL << CAPR1_AP12_Pos)                      /*!< CAPR1: AP12 Mask */
+
+#define CAPR1_AP11_Pos                       14U                                            /*!< CAPR1: AP11 Position */
+#define CAPR1_AP11_Msk                       (0x3UL << CAPR1_AP11_Pos)                      /*!< CAPR1: AP11 Mask */
+
+#define CAPR1_AP10_Pos                       12U                                            /*!< CAPR1: AP10 Position */
+#define CAPR1_AP10_Msk                       (0x3UL << CAPR1_AP10_Pos)                      /*!< CAPR1: AP10 Mask */
+
+#define CAPR1_AP9_Pos                        10U                                            /*!< CAPR1: AP9 Position */
+#define CAPR1_AP9_Msk                        (0x3UL << CAPR1_AP9_Pos)                       /*!< CAPR1: AP9 Mask */
+
+#define CAPR1_AP8_Pos                        8U                                             /*!< CAPR1: AP8 Position */
+#define CAPR1_AP8_Msk                        (0x3UL << CAPR1_AP8_Pos)                       /*!< CAPR1: AP8 Mask */
+
+#define CAPR1_NX15_Pos                       7U                                             /*!< CAPR1: NX15 Position */
+#define CAPR1_NX15_Msk                       (0x1UL << CAPR1_NX15_Pos)                      /*!< CAPR1: NX15 Mask */
+
+#define CAPR1_NX14_Pos                       6U                                             /*!< CAPR1: NX14 Position */
+#define CAPR1_NX14_Msk                       (0x1UL << CAPR1_NX14_Pos)                      /*!< CAPR1: NX14 Mask */
+
+#define CAPR1_NX13_Pos                       5U                                             /*!< CAPR1: NX13 Position */
+#define CAPR1_NX13_Msk                       (0x1UL << CAPR1_NX13_Pos)                      /*!< CAPR1: NX13 Mask */
+
+#define CAPR1_NX12_Pos                       4U                                             /*!< CAPR1: NX12 Position */
+#define CAPR1_NX12_Msk                       (0x1UL << CAPR1_NX12_Pos)                      /*!< CAPR1: NX12 Mask */
+
+#define CAPR1_NX11_Pos                       3U                                             /*!< CAPR1: NX11 Position */
+#define CAPR1_NX11_Msk                       (0x1UL << CAPR1_NX11_Pos)                      /*!< CAPR1: NX11 Mask */
+
+#define CAPR1_NX10_Pos                       2U                                             /*!< CAPR1: NX10 Position */
+#define CAPR1_NX10_Msk                       (0x1UL << CAPR1_NX10_Pos)                      /*!< CAPR1: NX10 Mask */
+
+#define CAPR1_NX9_Pos                        1U                                             /*!< CAPR1: NX9 Position */
+#define CAPR1_NX9_Msk                        (0x1UL << CAPR1_NX9_Pos)                       /*!< CAPR1: NX9 Mask */
+
+#define CAPR1_NX8_Pos                        0U                                             /*!< CAPR1: NX8 Position */
+#define CAPR1_NX8_Msk                        (0x1UL << CAPR1_NX8_Pos)                       /*!< CAPR1: NX8 Mask */
+
+/**
+  \brief  Consortium definition for accessing control register(PACR, CR<20,0>).
+ */
+typedef union {
+    struct {
+        uint32_t E: 1;                       /*!< bit:      0  Protected area enable bit */
+        uint32_t size: 5;                    /*!< bit:  1.. 5  Size of protected area */
+        uint32_t base_addr: 26;              /*!< bit:  6..31  High-order bits of the protected area base address */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} PACR_Type;
+
+/* PACR Register Definitions */
+#define PACR_BASE_ADDR_Pos                 6U                                            /*!< PACR: base_addr Position */
+#define PACR_BASE_ADDR_Msk                 (0x3FFFFFFUL << PACR_BASE_ADDR_Pos)              /*!< PACR: base_addr Mask */
+
+#define PACR_SIZE_Pos                      1U                                             /*!< PACR: Size Position */
+#define PACR_SIZE_Msk                      (0x1FUL << PACR_SIZE_Pos)                      /*!< PACR: Size Mask */
+
+#define PACR_E_Pos                         0U                                             /*!< PACR: E Position */
+#define PACR_E_Msk                         (0x1UL << PACR_E_Pos)                          /*!< PACR: E Mask */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register(PRSR,CR<21,0>).
+ */
+typedef union {
+    struct {
+        uint32_t RID: 4;                     /*!< bit:  0.. 3  Protected area index value */
+        uint32_t _reserved0: 28;             /*!< bit:  4..31  Reserved */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} PRSR_Type;
+
+/* PRSR Register Definitions */
+#define PRSR_RID_Pos                       0U                                            /*!< PRSR: RID Position */
+#define PRSR_RID_Msk                       (0xFUL << PRSR_RID_Pos)                       /*!< PRSR: RID Mask */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register(ATTR0,CR<26,0>).
+ */
+typedef union {
+    struct {
+        uint32_t Attr0_reserved: 1;                      /*!< bit:      0  Reserved */
+        uint32_t Attr0_B: 1;                             /*!< bit:      1  Bufferable attribute setting */
+        uint32_t Attr0_SO: 1;                            /*!< bit:      2  Strong-Order attribute setting */
+        uint32_t Attr0_C: 1;                             /*!< bit:      3  Cacheable attribute setting */
+        uint32_t Attr1_reserved: 1;                      /*!< bit:      4  Reserved */
+        uint32_t Attr1_B: 1;                             /*!< bit:      5  Bufferable attribute setting */
+        uint32_t Attr1_SO: 1;                            /*!< bit:      6  Strong-Order attribute setting */
+        uint32_t Attr1_C: 1;                             /*!< bit:      7  Cacheable attribute setting */
+        uint32_t Attr2_reserved: 1;                      /*!< bit:      8  Reserved */
+        uint32_t Attr2_B: 1;                             /*!< bit:      9  Bufferable attribute setting */
+        uint32_t Attr2_SO: 1;                            /*!< bit:      10  Strong-Order attribute setting */
+        uint32_t Attr2_C: 1;                             /*!< bit:      11  Cacheable attribute setting */
+        uint32_t Attr3_reserved: 1;                      /*!< bit:      12  Reserved */
+        uint32_t Attr3_B: 1;                             /*!< bit:      13  Bufferable attribute setting */
+        uint32_t Attr3_SO: 1;                            /*!< bit:      14  Strong-Order attribute setting */
+        uint32_t Attr3_C: 1;                             /*!< bit:      15  Cacheable attribute setting */
+        uint32_t Attr4_reserved: 1;                      /*!< bit:      16  Reserved */
+        uint32_t Attr4_B: 1;                             /*!< bit:      17  Bufferable attribute setting */
+        uint32_t Attr4_SO: 1;                            /*!< bit:      18  Strong-Order attribute setting */
+        uint32_t Attr4_C: 1;                             /*!< bit:      19  Cacheable attribute setting */
+        uint32_t Attr5_reserved: 1;                      /*!< bit:      20  Reserved */
+        uint32_t Attr5_B: 1;                             /*!< bit:      21  Bufferable attribute setting */
+        uint32_t Attr5_SO: 1;                            /*!< bit:      22  Strong-Order attribute setting */
+        uint32_t Attr5_C: 1;                             /*!< bit:      23  Cacheable attribute setting */
+        uint32_t Attr6_reserved: 1;                      /*!< bit:      24  Reserved */
+        uint32_t Attr6_B: 1;                             /*!< bit:      25  Bufferable attribute setting */
+        uint32_t Attr6_SO: 1;                            /*!< bit:      26  Strong-Order attribute setting */
+        uint32_t Attr6_C: 1;                             /*!< bit:      27  Cacheable attribute setting */
+        uint32_t Attr7_reserved: 1;                      /*!< bit:      28  Reserved */
+        uint32_t Attr7_B: 1;                             /*!< bit:      29  Bufferable attribute setting */
+        uint32_t Attr7_SO: 1;                            /*!< bit:      30  Strong-Order attribute setting */
+        uint32_t Attr7_C: 1;                             /*!< bit:      31  Cacheable attribute setting */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} ATTR0_Type;
+
+/* ATTR0 Register Definitions */
+#define ATTR0_RESERVED_Pos                       0U                                                  /*!< ATTR0: RESERVED Position */
+#define ATTR0_RESERVED_Msk                       (0x1UL << ATTR0_RESERVED_Pos)                       /*!< ATTR0: RESERVED Mask */
+
+#define ATTR0_B_Pos                              1U                                                  /*!< ATTR0: BUFFERABLE Position */
+#define ATTR0_B_Msk                              (0x1UL << ATTR0_B_Pos)                              /*!< ATTR0: BUFFERABLE Mask */
+
+#define ATTR0_SO_Pos                             2U                                                  /*!< ATTR0: STRONG-ORDER Position */
+#define ATTR0_SO_Msk                             (0x1UL << ATTR0_SO_Pos)                             /*!< ATTR0: STRONG-ORDER Mask */
+
+#define ATTR0_C_Pos                              3U                                                  /*!< ATTR0: CACHEABLE Position */
+#define ATTR0_C_Msk                              (0x1UL << ATTR0_C_Pos)                              /*!< ATTR0: CACHEABLE Mask */
+
+#define ATTR1_RESERVED_Pos                       4U                                                  /*!< ATTR1: RESERVED Position */
+#define ATTR1_RESERVED_Msk                       (0x1UL << ATTR1_RESERVED_Pos)                       /*!< ATTR1: RESERVED Mask */
+
+#define ATTR1_B_Pos                              5U                                                  /*!< ATTR1: BUFFERABLE Position */
+#define ATTR1_B_Msk                              (0x1UL << ATTR1_B_Pos)                              /*!< ATTR1: BUFFERABLE Mask */
+
+#define ATTR1_SO_Pos                             6U                                                  /*!< ATTR1: STRONG-ORDER Position */
+#define ATTR1_SO_Msk                             (0x1UL << ATTR1_SO_Pos)                             /*!< ATTR1: STRONG-ORDER Mask */
+
+#define ATTR1_C_Pos                              7U                                                  /*!< ATTR1: CACHEABLE Position */
+#define ATTR1_C_Msk                              (0x1UL << ATTR1_C_Pos)                              /*!< ATTR1: CACHEABLE Mask */
+
+#define ATTR2_RESERVED_Pos                       8U                                                  /*!< ATTR2: RESERVED Position */
+#define ATTR2_RESERVED_Msk                       (0x1UL << ATTR2_RESERVED_Pos)                       /*!< ATTR2: RESERVED Mask */
+
+#define ATTR2_B_Pos                              9U                                                  /*!< ATTR2: BUFFERABLE Position */
+#define ATTR2_B_Msk                              (0x1UL << ATTR2_B_Pos)                              /*!< ATTR2: BUFFERABLE Mask */
+
+#define ATTR2_SO_Pos                             10U                                                 /*!< ATTR2: STRONG-ORDER Position */
+#define ATTR2_SO_Msk                             (0x1UL << ATTR2_SO_Pos)                             /*!< ATTR2: STRONG-ORDER Mask */
+
+#define ATTR2_C_Pos                              11U                                                 /*!< ATTR2: CACHEABLE Position */
+#define ATTR2_C_Msk                              (0x1UL << ATTR2_C_Pos)                              /*!< ATTR2: CACHEABLE Mask */
+
+#define ATTR3_RESERVED_Pos                       12U                                                 /*!< ATTR3: RESERVED Position */
+#define ATTR3_RESERVED_Msk                       (0x1UL << ATTR3_RESERVED_Pos)                       /*!< ATTR3: RESERVED Mask */
+
+#define ATTR3_B_Pos                              13U                                                 /*!< ATTR3: BUFFERABLE Position */
+#define ATTR3_B_Msk                              (0x1UL << ATTR3_B_Pos)                              /*!< ATTR3: BUFFERABLE Mask */
+
+#define ATTR3_SO_Pos                             14U                                                 /*!< ATTR3: STRONG-ORDER Position */
+#define ATTR3_SO_Msk                             (0x1UL << ATTR3_SO_Pos)                             /*!< ATTR3: STRONG-ORDER Mask */
+
+#define ATTR3_C_Pos                              15U                                                 /*!< ATTR3: CACHEABLE Position */
+#define ATTR3_C_Msk                              (0x1UL << ATTR3_C_Pos)                              /*!< ATTR3: CACHEABLE Mask */
+
+#define ATTR4_RESERVED_Pos                       16U                                                 /*!< ATTR4: RESERVED Position */
+#define ATTR4_RESERVED_Msk                       (0x1UL << ATTR4_RESERVED_Pos)                       /*!< ATTR4: RESERVED Mask */
+
+#define ATTR4_B_Pos                              17U                                                 /*!< ATTR4: BUFFERABLE Position */
+#define ATTR4_B_Msk                              (0x1UL << ATTR4_B_Pos)                              /*!< ATTR4: BUFFERABLE Mask */
+
+#define ATTR4_SO_Pos                             18U                                                 /*!< ATTR4: STRONG-ORDER Position */
+#define ATTR4_SO_Msk                             (0x1UL << ATTR4_SO_Pos)                             /*!< ATTR4: STRONG-ORDER Mask */
+
+#define ATTR4_C_Pos                              19U                                                 /*!< ATTR4: CACHEABLE Position */
+#define ATTR4_C_Msk                              (0x1UL << ATTR4_C_Pos)                              /*!< ATTR4: CACHEABLE Mask */
+
+#define ATTR5_RESERVED_Pos                       20U                                                 /*!< ATTR5: RESERVED Position */
+#define ATTR5_RESERVED_Msk                       (0x1UL << ATTR5_RESERVED_Pos)                       /*!< ATTR5: RESERVED Mask */
+
+#define ATTR5_B_Pos                              21U                                                 /*!< ATTR5: BUFFERABLE Position */
+#define ATTR5_B_Msk                              (0x1UL << ATTR5_B_Pos)                              /*!< ATTR5: BUFFERABLE Mask */
+
+#define ATTR5_SO_Pos                             22U                                                 /*!< ATTR5: STRONG-ORDER Position */
+#define ATTR5_SO_Msk                             (0x1UL << ATTR5_SO_Pos)                             /*!< ATTR5: STRONG-ORDER Mask */
+
+#define ATTR5_C_Pos                              23U                                                 /*!< ATTR5: CACHEABLE Position */
+#define ATTR5_C_Msk                              (0x1UL << ATTR5_C_Pos)                              /*!< ATTR5: CACHEABLE Mask */
+
+#define ATTR6_RESERVED_Pos                       24U                                                 /*!< ATTR6: RESERVED Position */
+#define ATTR6_RESERVED_Msk                       (0x1UL << ATTR6_RESERVED_Pos)                       /*!< ATTR6: RESERVED Mask */
+
+#define ATTR6_B_Pos                              25U                                                 /*!< ATTR6: BUFFERABLE Position */
+#define ATTR6_B_Msk                              (0x1UL << ATTR6_B_Pos)                              /*!< ATTR6: BUFFERABLE Mask */
+
+#define ATTR6_SO_Pos                             26U                                                 /*!< ATTR6: STRONG-ORDER Position */
+#define ATTR6_SO_Msk                             (0x1UL << ATTR6_SO_Pos)                             /*!< ATTR6: STRONG-ORDER Mask */
+
+#define ATTR6_C_Pos                              27U                                                 /*!< ATTR6: CACHEABLE Position */
+#define ATTR6_C_Msk                              (0x1UL << ATTR6_C_Pos)                              /*!< ATTR6: CACHEABLE Mask */
+
+#define ATTR7_RESERVED_Pos                       28U                                                 /*!< ATTR7: RESERVED Position */
+#define ATTR7_RESERVED_Msk                       (0x1UL << ATTR7_RESERVED_Pos)                       /*!< ATTR7: RESERVED Mask */
+
+#define ATTR7_B_Pos                              29U                                                 /*!< ATTR7: BUFFERABLE Position */
+#define ATTR7_B_Msk                              (0x1UL << ATTR7_B_Pos)                              /*!< ATTR7: BUFFERABLE Mask */
+
+#define ATTR7_SO_Pos                             30U                                                 /*!< ATTR7: STRONG-ORDER Position */
+#define ATTR7_SO_Msk                             (0x1UL << ATTR7_SO_Pos)                             /*!< ATTR7: STRONG-ORDER Mask */
+
+#define ATTR7_C_Pos                              31U                                                 /*!< ATTR7: CACHEABLE Position */
+#define ATTR7_C_Msk                              (0x1UL << ATTR7_C_Pos)                              /*!< ATTR7: CACHEABLE Mask */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register(ATTR1,CR<27,0>).
+ */
+typedef union {
+    struct {
+        uint32_t Attr8_reserved: 1;                      /*!< bit:      0  Reserved */
+        uint32_t Attr8_B: 1;                             /*!< bit:      1  Bufferable attribute setting */
+        uint32_t Attr8_SO: 1;                            /*!< bit:      2  Strong-Order attribute setting */
+        uint32_t Attr8_C: 1;                             /*!< bit:      3  Cacheable attribute setting */
+        uint32_t Attr9_reserved: 1;                      /*!< bit:      4  Reserved */
+        uint32_t Attr9_B: 1;                             /*!< bit:      5  Bufferable attribute setting */
+        uint32_t Attr9_SO: 1;                            /*!< bit:      6  Strong-Order attribute setting */
+        uint32_t Attr9_C: 1;                             /*!< bit:      7  Cacheable attribute setting */
+        uint32_t Attr10_reserved: 1;                     /*!< bit:      8  Reserved */
+        uint32_t Attr10_B: 1;                            /*!< bit:      9  Bufferable attribute setting */
+        uint32_t Attr10_SO: 1;                           /*!< bit:      10  Strong-Order attribute setting */
+        uint32_t Attr10_C: 1;                            /*!< bit:      11  Cacheable attribute setting */
+        uint32_t Attr11_reserved: 1;                     /*!< bit:      12  Reserved */
+        uint32_t Attr11_B: 1;                            /*!< bit:      13  Bufferable attribute setting */
+        uint32_t Attr11_SO: 1;                           /*!< bit:      14  Strong-Order attribute setting */
+        uint32_t Attr11_C: 1;                            /*!< bit:      15  Cacheable attribute setting */
+        uint32_t Attr12_reserved: 1;                     /*!< bit:      16  Reserved */
+        uint32_t Attr12_B: 1;                            /*!< bit:      17  Bufferable attribute setting */
+        uint32_t Attr12_SO: 1;                           /*!< bit:      18  Strong-Order attribute setting */
+        uint32_t Attr12_C: 1;                            /*!< bit:      19  Cacheable attribute setting */
+        uint32_t Attr13_reserved: 1;                     /*!< bit:      20  Reserved */
+        uint32_t Attr13_B: 1;                            /*!< bit:      21  Bufferable attribute setting */
+        uint32_t Attr13_SO: 1;                           /*!< bit:      22  Strong-Order attribute setting */
+        uint32_t Attr13_C: 1;                            /*!< bit:      23  Cacheable attribute setting */
+        uint32_t Attr14_reserved: 1;                     /*!< bit:      24  Reserved */
+        uint32_t Attr14_B: 1;                            /*!< bit:      25  Bufferable attribute setting */
+        uint32_t Attr14_SO: 1;                           /*!< bit:      26  Strong-Order attribute setting */
+        uint32_t Attr14_C: 1;                            /*!< bit:      27  Cacheable attribute setting */
+        uint32_t Attr15_reserved: 1;                     /*!< bit:      28  Reserved */
+        uint32_t Attr15_B: 1;                            /*!< bit:      29  Bufferable attribute setting */
+        uint32_t Attr15_SO: 1;                           /*!< bit:      30  Strong-Order attribute setting */
+        uint32_t Attr15_C: 1;                            /*!< bit:      31  Cacheable attribute setting */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} ATTR1_Type;
+
+/* ATTR1 Register Definitions */
+#define ATTR8_RESERVED_Pos                       0U                                                  /*!< ATTR8: RESERVED Position */
+#define ATTR8_RESERVED_Msk                       (0x1UL << ATTR8_RESERVED_Pos)                       /*!< ATTR8: RESERVED Mask */
+
+#define ATTR8_B_Pos                              1U                                                  /*!< ATTR8: BUFFERABLE Position */
+#define ATTR8_B_Msk                              (0x1UL << ATTR8_B_Pos)                              /*!< ATTR8: BUFFERABLE Mask */
+
+#define ATTR8_SO_Pos                             2U                                                  /*!< ATTR8: STRONG-ORDER Position */
+#define ATTR8_SO_Msk                             (0x1UL << ATTR8_SO_Pos)                             /*!< ATTR8: STRONG-ORDER Mask */
+
+#define ATTR8_C_Pos                              3U                                                  /*!< ATTR8: CACHEABLE Position */
+#define ATTR8_C_Msk                              (0x1UL << ATTR8_C_Pos)                              /*!< ATTR8: CACHEABLE Mask */
+
+#define ATTR9_RESERVED_Pos                       4U                                                  /*!< ATTR9: RESERVED Position */
+#define ATTR9_RESERVED_Msk                       (0x1UL << ATTR9_RESERVED_Pos)                       /*!< ATTR9: RESERVED Mask */
+
+#define ATTR9_B_Pos                              5U                                                  /*!< ATTR9: BUFFERABLE Position */
+#define ATTR9_B_Msk                              (0x1UL << ATTR9_B_Pos)                              /*!< ATTR9: BUFFERABLE Mask */
+
+#define ATTR9_SO_Pos                             6U                                                  /*!< ATTR9: STRONG-ORDER Position */
+#define ATTR9_SO_Msk                             (0x1UL << ATTR9_SO_Pos)                             /*!< ATTR9: STRONG-ORDER Mask */
+
+#define ATTR9_C_Pos                              7U                                                  /*!< ATTR9: CACHEABLE Position */
+#define ATTR9_C_Msk                              (0x1UL << ATTR9_C_Pos)                              /*!< ATTR9: CACHEABLE Mask */
+
+
+#define ATTR10_RESERVED_Pos                      8U                                                  /*!< ATTR10: RESERVED Position */
+#define ATTR10_RESERVED_Msk                      (0x1UL << ATTR10_RESERVED_Pos)                      /*!< ATTR10: RESERVED Mask */
+
+#define ATTR10_B_Pos                             9U                                                  /*!< ATTR10: BUFFERABLE Position */
+#define ATTR10_B_Msk                             (0x1UL << ATTR10_B_Pos)                             /*!< ATTR10: BUFFERABLE Mask */
+
+#define ATTR10_SO_Pos                            10U                                                 /*!< ATTR10: STRONG-ORDER Position */
+#define ATTR10_SO_Msk                            (0x1UL << ATTR10_SO_Pos)                            /*!< ATTR10: STRONG-ORDER Mask */
+
+#define ATTR10_C_Pos                             11U                                                 /*!< ATTR10: CACHEABLE Position */
+#define ATTR10_C_Msk                             (0x1UL << ATTR10_C_Pos)                             /*!< ATTR10: CACHEABLE Mask */
+
+#define ATTR11_RESERVED_Pos                      12U                                                 /*!< ATTR11: RESERVED Position */
+#define ATTR11_RESERVED_Msk                      (0x1UL << ATTR11_RESERVED_Pos)                      /*!< ATTR11: RESERVED Mask */
+
+#define ATTR11_B_Pos                             13U                                                 /*!< ATTR11: BUFFERABLE Position */
+#define ATTR11_B_Msk                             (0x1UL << ATTR11_B_Pos)                             /*!< ATTR11: BUFFERABLE Mask */
+
+#define ATTR11_SO_Pos                            14U                                                 /*!< ATTR11: STRONG-ORDER Position */
+#define ATTR11_SO_Msk                            (0x1UL << ATTR11_SO_Pos)                            /*!< ATTR11: STRONG-ORDER Mask */
+
+#define ATTR11_C_Pos                             15U                                                 /*!< ATTR11: CACHEABLE Position */
+#define ATTR11_C_Msk                             (0x1UL << ATTR11_C_Pos)                             /*!< ATTR11: CACHEABLE Mask */
+
+
+#define ATTR12_RESERVED_Pos                      16U                                                 /*!< ATTR12: RESERVED Position */
+#define ATTR12_RESERVED_Msk                      (0x1UL << ATTR12_RESERVED_Pos)                      /*!< ATTR12: RESERVED Mask */
+
+#define ATTR12_B_Pos                             17U                                                 /*!< ATTR12: BUFFERABLE Position */
+#define ATTR12_B_Msk                             (0x1UL << ATTR12_B_Pos)                             /*!< ATTR12: BUFFERABLE Mask */
+
+#define ATTR12_SO_Pos                            18U                                                 /*!< ATTR12: STRONG-ORDER Position */
+#define ATTR12_SO_Msk                            (0x1UL << ATTR12_SO_Pos)                            /*!< ATTR12: STRONG-ORDER Mask */
+
+#define ATTR12_C_Pos                             19U                                                 /*!< ATTR12: CACHEABLE Position */
+#define ATTR12_C_Msk                             (0x1UL << ATTR12_C_Pos)                             /*!< ATTR12: CACHEABLE Mask */
+
+#define ATTR13_RESERVED_Pos                      20U                                                 /*!< ATTR13: RESERVED Position */
+#define ATTR13_RESERVED_Msk                      (0x1UL << ATTR13_RESERVED_Pos)                      /*!< ATTR13: RESERVED Mask */
+
+#define ATTR13_B_Pos                             21U                                                 /*!< ATTR13: BUFFERABLE Position */
+#define ATTR13_B_Msk                             (0x1UL << ATTR13_B_Pos)                             /*!< ATTR13: BUFFERABLE Mask */
+
+#define ATTR13_SO_Pos                            22U                                                 /*!< ATTR13: STRONG-ORDER Position */
+#define ATTR13_SO_Msk                            (0x1UL << ATTR13_SO_Pos)                            /*!< ATTR13: STRONG-ORDER Mask */
+
+#define ATTR13_C_Pos                             23U                                                 /*!< ATTR13: CACHEABLE Position */
+#define ATTR13_C_Msk                             (0x1UL << ATTR13_C_Pos)                             /*!< ATTR13: CACHEABLE Mask */
+
+#define ATTR14_RESERVED_Pos                      24U                                                 /*!< ATTR14: RESERVED Position */
+#define ATTR14_RESERVED_Msk                      (0x1UL << ATTR14_RESERVED_Pos)                      /*!< ATTR14: RESERVED Mask */
+
+#define ATTR14_B_Pos                             25U                                                 /*!< ATTR14: BUFFERABLE Position */
+#define ATTR14_B_Msk                             (0x1UL << ATTR14_B_Pos)                             /*!< ATTR14: BUFFERABLE Mask */
+
+#define ATTR14_SO_Pos                            26U                                                 /*!< ATTR14: STRONG-ORDER Position */
+#define ATTR14_SO_Msk                            (0x1UL << ATTR14_SO_Pos)                            /*!< ATTR14: STRONG-ORDER Mask */
+
+#define ATTR14_C_Pos                             27U                                                 /*!< ATTR14: CACHEABLE Position */
+#define ATTR14_C_Msk                             (0x1UL << ATTR14_C_Pos)                             /*!< ATTR14: CACHEABLE Mask */
+
+#define ATTR15_RESERVED_Pos                      28U                                                 /*!< ATTR15: RESERVED Position */
+#define ATTR15_RESERVED_Msk                      (0x1UL << ATTR15_RESERVED_Pos)                      /*!< ATTR15: RESERVED Mask */
+
+#define ATTR15_B_Pos                             29U                                                 /*!< ATTR15: BUFFERABLE Position */
+#define ATTR15_B_Msk                             (0x1UL << ATTR15_B_Pos)                             /*!< ATTR15: BUFFERABLE Mask */
+
+#define ATTR15_SO_Pos                            30U                                                 /*!< ATTR15: STRONG-ORDER Position */
+#define ATTR15_SO_Msk                            (0x1UL << ATTR15_SO_Pos)                            /*!< ATTR15: STRONG-ORDER Mask */
+
+#define ATTR15_C_Pos                             31U                                                 /*!< ATTR15: CACHEABLE Position */
+#define ATTR15_C_Msk                             (0x1UL << ATTR15_C_Pos)                             /*!< ATTR15: CACHEABLE Mask */
+
+/* CCR2 Register Definitions */
+#define CCR2_CRECC_Pos                           7U                                                  /*!< CCR2: CRECC Position */
+#define CCR2_CRECC_Msk                           (0x1UL << CCR2_CRECC_Pos)                           /*!< CCR2: CRECC Mask */
+
+#define CCR2_ECC_Pos                             6U                                                  /*!< CCR2: ECC Position */
+#define CCR2_ECC_Msk                             (0x1UL << CCR2_ECC_Pos)                             /*!< CCR2: ECC Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu index register(MIR,CR<0,15>).
+ */
+typedef union {
+    struct {
+        uint32_t Index: 10;                  /*!< bit:   0.. 9   TLB index */
+        uint32_t _reserved: 20;              /*!< bit:  10.. 29  Reserved */
+        uint32_t TF: 1;                      /*!< bit:       30  TLB fatal error */
+        uint32_t P: 1;                       /*!< bit:       31  TLBP instruction */
+    } b;
+    uint32_t w;
+} MIR_Type;
+
+/* MIR Register Definitions */
+#define MIR_P_Pos                          31                                            /*!< MIR: P(TLBP instruction) Position */
+#define MIR_P_Msk                          (0x1UL << MIR_P_Pos)                          /*!< MIR: P(TLBP instruction) Mask */
+
+#define MIR_TF_Pos                         30                                            /*!< MIR: TF(TLB fatal error) Position */
+#define MIR_TF_Msk                         (0x1UL << MIR_TF_Pos)                         /*!< MIR: TF(TLB fatal error) Mask */
+
+#define MIR_Index_Pos                      0                                             /*!< MIR: Index Position */
+#define MIR_Index_Msk                      (0x3ffUL << MIR_Index_Pos)                    /*!< MIR: Index Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of low physical address register(MEL, CR<2,15> and CR<3,15>).
+ */
+typedef union {
+    struct {
+        uint32_t G: 1;                        /*!< bit:       0   Global enable bit */
+        uint32_t V: 1;                        /*!< bit:       1   TLB mapping valid bit */
+        uint32_t D: 1;                        /*!< bit:       2   TLB Page dirty bit */
+        uint32_t C: 1;                        /*!< bit:       3   TLB Page cacheable bit */
+        uint32_t SEC: 1;                      /*!< bit:       4   TLB Page security bit */
+        uint32_t SO: 1;                       /*!< bit:       5   Strong order enable bit */
+        uint32_t B: 1;                        /*!< bit:       6   TLB Page bufferable bit */
+        uint32_t _reserved: 5;                /*!< bit:   7.. 11  Reserved */
+        uint32_t PFN: 20;                     /*!< bit:  12.. 31  Physical frame number */
+    } b;
+    uint32_t w;
+} MEL_Type;
+
+/* MEL Register Definitions */
+#define MEL_PFN_Pos                        12                                            /*!< MEL: PFN Position */
+#define MEL_PFN_Msk                        (0xFFFFFUL << MEL_PFN_Pos)                    /*!< MEL: PFN Mask */
+
+#define MEL_B_Pos                          6                                             /*!< MEL: B Position */
+#define MEL_B_Msk                          (0x1UL << MEL_B_Pos)                          /*!< MEL: B Mask */
+
+#define MEL_SO_Pos                         5                                             /*!< MEL: SO Position */
+#define MEL_SO_Msk                         (0x1UL << MEL_SO_Pos)                         /*!< MEL: SO Mask */
+
+#define MEL_SEC_Pos                        4                                             /*!< MEL: SEC Position */
+#define MEL_SEC_Msk                        (0x1UL << MEL_SEC_Pos)                        /*!< MEL: SEC Mask */
+
+#define MEL_C_Pos                          3                                             /*!< MEL: C Position */
+#define MEL_C_Msk                          (0x1UL << MEL_C_Pos)                          /*!< MEL: C Mask */
+
+#define MEL_D_Pos                          2                                             /*!< MEL: D Position */
+#define MEL_D_Msk                          (0x1UL << MEL_D_Pos)                          /*!< MEL: D Mask */
+
+#define MEL_V_Pos                          1                                             /*!< MEL: V Position */
+#define MEL_V_Msk                          (0x1UL << MEL_V_Pos)                          /*!< MEL: V Mask */
+
+#define MEL_G_Pos                          0                                             /*!< MEL: G Position */
+#define MEL_G_Msk                          (0x1UL << MEL_G_Pos)                          /*!< MEL: G Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(MEH, CR<4,15>).
+ */
+typedef union {
+    struct {
+        uint32_t ASID :8;                     /*!< bit:   0.. 7   ASID */
+        uint32_t _reserved :4;                /*!< bit:   8..11  Reserved */
+        uint32_t VPN :20;                     /*!< bit:  12..31  Virtual page number */
+    } b;
+    uint32_t w;
+} MEH_Type;
+
+/* MEH Register Definitions */
+#define MEH_VPN_Pos                        12                                             /*!< MEH: VPN Position */
+#define MEH_VPN_Msk                        (0xFFFFFUL << MEH_VPN_Pos)                     /*!< MEH: VPN Mask */
+
+#define MEH_ASID_Pos                       0                                              /*!< MEH: ASID Position */
+#define MEH_ASID_Msk                       (0xFFUL << MEH_ASID_Pos)                       /*!< MEH: ASID Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu page mask register(MPR, CR<6,15>).
+ */
+
+typedef union {
+    struct {
+        uint32_t _reserved0: 13;              /*!< bit:   0.. 12   Reserved */
+        uint32_t page_mask: 12;               /*!< bit:  13.. 24   Page mask */
+        uint32_t _reserved1: 7;               /*!< bit:  25.. 31   Reserved */
+    } b;
+    uint32_t w;
+} MPR_Type;
+
+/* MPR Register Definitions */
+#define MPR_PAGE_MASK_Pos                        13                                             /*!< MPR: PAGE_MASK Position */
+#define MPR_PAGE_MASK_Msk                        (0xFFFUL << MPR_PAGE_MASK_Pos)                 /*!< MPR: PAGE_MASK Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu control instruction register(MCIR, CR<8,15>).
+ */
+typedef union {
+    struct {
+        uint32_t ASID: 8;                     /*!< bit:   0.. 7   ASID */
+        uint32_t _reserved: 17;               /*!< bit:   8.. 24  Reserved */
+        uint32_t TLBINV_INDEX: 1;             /*!< bit:       25  TLBINV_INDEX */
+        uint32_t TLBINV_ALL: 1;               /*!< bit:       26  TLBINV_ALL */
+        uint32_t TLBINV: 1;                   /*!< bit:       27  TLBINV */
+        uint32_t TLBWR: 1;                    /*!< bit:       28  TLBWR */
+        uint32_t TLBWI: 1;                    /*!< bit:       29  TLBWI */
+        uint32_t TLBR: 1;                     /*!< bit:       30  TLBR */
+        uint32_t TLBP: 1;                     /*!< bit:       31  TLBP */
+    } b;
+    uint32_t w;
+} MCIR_Type;
+
+/* MCIR Register Definitions */
+#define MCIR_TLBP_Pos                        31                                               /*!< MCIR: TLBP Position */
+#define MCIR_TLBP_Msk                        (0x1UL << MCIR_TLBP_Pos)                         /*!< MCIR: TLBP Mask */
+
+#define MCIR_TLBR_Pos                        30                                               /*!< MCIR: TLBR Position */
+#define MCIR_TLBR_Msk                        (0x1UL << MCIR_TLBR_Pos)                         /*!< MCIR: TLBR Mask */
+
+#define MCIR_TLBWI_Pos                       29                                               /*!< MCIR: TLBWI Position */
+#define MCIR_TLBWI_Msk                       (0x1UL << MCIR_TLBWI_Pos)                        /*!< MCIR: TLBWI Mask */
+
+#define MCIR_TLBWR_Pos                       28                                               /*!< MCIR: TLBWR Position */
+#define MCIR_TLBWR_Msk                       (0x1UL << MCIR_TLBWR_Pos)                        /*!< MCIR: TLBWR Mask */
+
+#define MCIR_TLBINV_Pos                      27                                               /*!< MCIR: TLBINV Position */
+#define MCIR_TLBINV_Msk                      (0x1UL << MCIR_TLBINV_Pos)                       /*!< MCIR: TLBINV Mask */
+
+#define MCIR_TLBINV_ALL_Pos                  26                                               /*!< MCIR: TLBINV_ALL Position */
+#define MCIR_TLBINV_ALL_Msk                  (0x1UL << MCIR_TLBINV_ALL_Pos)                   /*!< MCIR: TLBINV_ALL Mask */
+
+#define MCIR_TLBINV_INDEX_Pos                25                                               /*!< MCIR: TLBINV_INDEX Position */
+#define MCIR_TLBINV_INDEX_Msk                (0x1UL << MCIR_TLBINV_INDEX_Pos)                 /*!< MCIR: TLBINV_INDEX Mask */
+
+#define MCIR_ASID_Pos                        0                                                /*!< MCIR: ASID Position */
+#define MCIR_ASID_Msk                        (0xFFUL << MCIR_ASID_Pos)                        /*!< MCIR: ASID Mask */
+
+
+/*@} end of group CSI_CORE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register(CFR,CR<17,0>).
+ */
+typedef union {
+    struct {
+        uint32_t CACHE_SEL: 2;                      /*!< bit:  0..1  Instruction and data cache selection */
+        uint32_t _reserved0: 2;                     /*!< bit:  2..3  Reserved */
+        uint32_t INV: 1;                            /*!< bit:     4  Invalid data in cache */
+        uint32_t CLR: 1;                            /*!< bit:     5  Clear (write back) dirty data in cache */
+        uint32_t OMS: 1;                            /*!< bit:     6  Cache invalid and clear operation mode (one line or all lines)*/
+        uint32_t ITS: 1;                            /*!< bit:     7  Cache invalid and clear operation mode (CIR used as virtual index or SET/WAY/LEVEL index)*/
+        uint32_t _reserved1: 8;                     /*!< bit: 8..15  Reserved */
+        uint32_t BHT_INV: 1;                        /*!< bit:    16  Invalid data in branch history table */
+        uint32_t _reserved2: 14;                    /*!< bit: 17..30 Reserved */
+        uint32_t LICF: 1;                           /*!< bit:     31 Failure of clearing or invalid cache line */
+    } b;                                            /*!< Structure    Access by bit */
+    uint32_t w;                                     /*!< Type         Access by whole register */
+} CFR_Type;
+
+#define CFR_LICF_Pos                     31U                                             /*!< CFR: LICF Position */
+#define CFR_LICF_Msk                     (0x1UL << CFR_LICF_Pos)                         /*!< CFR: LICF Mask */
+
+#define CFR_BHT_INV_Pos                  16U                                             /*!< CFR: BHT Position */
+#define CFR_BHT_INV_Msk                  (0x1UL << CFR_BHT_INV_Pos)                      /*!< CFR: BHT Mask */
+
+#define CFR_ITS_Pos                      7U                                              /*!< CFR: ITS Position */
+#define CFR_ITS_Msk                      (0x1UL << CFR_ITS_Pos)                          /*!< CFR: ITS Mask */
+
+#define CFR_OMS_Pos                      6U                                              /*!< CFR: OMS Position */
+#define CFR_OMS_Msk                      (0x1UL << CFR_OMS_Pos)                          /*!< CFR: OMS Mask */
+
+#define CFR_CLR_Pos                      5U                                              /*!< CFR: CLR Position */
+#define CFR_CLR_Msk                      (0x1UL << CFR_CLR_Pos)                          /*!< CFR: CLR Mask */
+
+#define CFR_INV_Pos                      4U                                              /*!< CFR: INV Position */
+#define CFR_INV_Msk                      (0x1UL << CFR_INV_Pos)                          /*!< CFR: INV Mask */
+
+#define CFR_CACHE_SEL_Pos                0                                               /*!< CFR: CACHE_SEL Position */
+#define CFR_CACHE_SEL_Msk                (0x3UL << CFR_CACHE_SEL_Pos)                    /*!< CFR: CACHE_SEL Mask */
+
+/* CFR Register Definitions */
+/*@} end of group CSI_CACHE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+#define SSEG0_BASE_ADDR         0x80000000
+#define CACHE_RANGE_MAX_SIZE    0x80000
+
+#define INS_CACHE               (1 << 0)
+#define DATA_CACHE              (1 << 1)
+#define CACHE_INV               (1 << 4)
+#define CACHE_CLR               (1 << 5)
+#define CACHE_OMS               (1 << 6)
+#define CACHE_ITS               (1 << 7)
+#define CACHE_LICF              (1 << 31)
+
+#define    L1_CACHE_SHIFT       4      /* 16 Bytes */
+#define    L1_CACHE_BYTES       (1 << L1_CACHE_SHIFT)
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)
+
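+/*
+  Usage sketch (editor's illustration, not part of the vendor header): packing and
+  unpacking a bit field with _VAL2FLD/_FLD2VAL. The MEH ASID field defined above is
+  used here purely as an example.
+
+    uint32_t meh  = _VAL2FLD(MEH_ASID, 5U);       // place ASID value 5 into bits [7:0]
+    uint32_t asid = _FLD2VAL(MEH_ASID, meh);      // extract it again -> 5
+*/
+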
+/*@} end of group CSI_core_bitfield */
+
+
+/**
+  \ingroup    CSI_tcm_register
+  \defgroup   CSI_TCM
+  \brief      Type definitions for the tcm Registers
+  @{
+ */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register(ITCMCR,CR<22,1>).
+ */
+typedef union {
+    struct {
+        uint32_t EN: 1;                             /*!< bit:     0  Instruction Tightly-Coupled Memory enable */
+        uint32_t _reserved0: 1;                     /*!< bit:     1  Reserved */
+        uint32_t SIF: 1;                            /*!< bit:     2  Slave interface access ITCM */
+        uint32_t _reserved1: 1;                     /*!< bit:     3  Reserved */
+        uint32_t Size: 4;                           /*!< bit:  4..7  Size of ITCM */
+        uint32_t _reserved2: 1;                     /*!< bit:     8  Reserved */
+        uint32_t Delay: 1;                          /*!< bit:     9  Access latency from ITCM request to data return */
+        uint32_t _reserved3: 2;                     /*!< bit: 10..11 Reserved */
+        uint32_t Base_Address: 20;                  /*!< bit: 12..31 Base address of ITCM */
+    } b;                                            /*!< Structure    Access by bit */
+    uint32_t w;                                     /*!< Type         Access by whole register */
+} ITCMCR_Type;
+
+#define ITCMCR_Base_Address_Pos             12U                                              /*!< ITCMCR: Base_Address Position */
+#define ITCMCR_Base_Address_Msk             (0xfffffUL << ITCMCR_Base_Address_Pos)           /*!< ITCMCR: Base_Address Mask */
+
+#define ITCMCR_Delay_Pos                    9U                                               /*!< ITCMCR: Delay Position */
+#define ITCMCR_Delay_Msk                    (0x1UL << ITCMCR_Delay_Pos)                      /*!< ITCMCR: Delay Mask */
+
+#define ITCMCR_Size_Pos                     4U                                               /*!< ITCMCR: Size Position */
+#define ITCMCR_Size_Msk                     (0xfUL << ITCMCR_Size_Pos)                       /*!< ITCMCR: Size Mask */
+
+#define ITCMCR_SIF_Pos                      2U                                               /*!< ITCMCR: SIF Position */
+#define ITCMCR_SIF_Msk                      (0x1UL << ITCMCR_SIF_Pos)                        /*!< ITCMCR: SIF Mask */
+
+#define ITCMCR_EN_Pos                       0U                                               /*!< ITCMCR: EN Position */
+#define ITCMCR_EN_Msk                       (0x1UL << ITCMCR_EN_Pos)                         /*!< ITCMCR: EN Mask */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register(DTCMCR,CR<23,1>).
+ */
+typedef union {
+    struct {
+        uint32_t EN: 1;                             /*!< bit:     0  Data Tightly-Coupled Memory enable */
+        uint32_t _reserved0: 1;                     /*!< bit:     1  Reserved */
+        uint32_t SIF: 1;                            /*!< bit:     2  Slave interface access DTCM */
+        uint32_t _reserved1: 1;                     /*!< bit:     3  Reserved */
+        uint32_t Size: 4;                           /*!< bit:  4..7  Size of DTCM */
+        uint32_t _reserved2: 1;                     /*!< bit:     8  Reserved */
+        uint32_t Delay: 1;                          /*!< bit:     9  Access latency from DTCM request to data return */
+        uint32_t _reserved3: 2;                     /*!< bit: 10..11 Reserved */
+        uint32_t Base_Address: 20;                  /*!< bit: 12..31 Base address of DTCM */
+    } b;                                            /*!< Structure    Access by bit */
+    uint32_t w;                                     /*!< Type         Access by whole register */
+} DTCMCR_Type;
+
+#define DTCMCR_Base_Address_Pos             12U                                              /*!< DTCMCR: Base_Address Position */
+#define DTCMCR_Base_Address_Msk             (0xfffffUL << DTCMCR_Base_Address_Pos)           /*!< DTCMCR: Base_Address Mask */
+
+#define DTCMCR_Delay_Pos                    9U                                               /*!< DTCMCR: Delay Position */
+#define DTCMCR_Delay_Msk                    (0x1UL << DTCMCR_Delay_Pos)                      /*!< DTCMCR: Delay Mask */
+
+#define DTCMCR_Size_Pos                     4U                                               /*!< DTCMCR: Size Position */
+#define DTCMCR_Size_Msk                     (0xfUL << DTCMCR_Size_Pos)                       /*!< DTCMCR: Size Mask */
+
+#define DTCMCR_SIF_Pos                      2U                                               /*!< DTCMCR: SIF Position */
+#define DTCMCR_SIF_Msk                      (0x1UL << DTCMCR_SIF_Pos)                        /*!< DTCMCR: SIF Mask */
+
+#define DTCMCR_EN_Pos                       0U                                               /*!< DTCMCR: EN Position */
+#define DTCMCR_EN_Msk                       (0x1UL << DTCMCR_EN_Pos)                         /*!< DTCMCR: EN Mask */
+
+/*@} end of group CSI_TCM_bitfield */
+
+/**
+  \ingroup    CSI_ECC_register
+  \defgroup   CSI_ECC
+  \brief      Type definitions for the ECC Registers
+  @{
+ */
+
+typedef union {
+    struct {
+        uint32_t Index: 17;                         /*!< bit:  0..16 RAM index */
+        uint32_t _reserved0: 1;                     /*!< bit:     17 Reserved */
+        uint32_t Way: 2;                            /*!< bit: 18..19 ICACHE/DCACHE WAY */
+        uint32_t _reserved1: 2;                     /*!< bit: 20..21 Reserved */
+        uint32_t RAMID: 8;                          /*!< bit: 22..29 ECC RAM ID */
+        uint32_t ERR_TYPE: 2;                       /*!< bit: 30..31 ECC ERR TYPE */
+    } b;
+    uint32_t w;
+} ERRLC_Type;
+
+#define ERRLC_ERR_TYPE_Pos                 30U                                               /*!< ERRLC: ERR_TYPE Position */
+#define ERRLC_ERR_TYPE_Msk                 (0x3UL << ERRLC_ERR_TYPE_Pos)                     /*!< ERRLC: ERR_TYPE Mask */
+
+#define ERRLC_RAMID_Pos                    22U                                               /*!< ERRLC: RAMID Position */
+#define ERRLC_RAMID_Msk                    (0xFFUL << ERRLC_RAMID_Pos)                       /*!< ERRLC: RAMID Mask */
+
+#define ERRLC_Way_Pos                      18U                                               /*!< ERRLC: Way Position */
+#define ERRLC_Way_Msk                      (0x3UL << ERRLC_Way_Pos)                          /*!< ERRLC: Way Mask */
+
+#define ERRLC_Index_Pos                    0U                                                /*!< ERRLC: Index Position */
+#define ERRLC_Index_Msk                    (0x1FFFFUL << ERRLC_Index_Pos)                    /*!< ERRLC: Index Mask */
+
+typedef union {
+    struct {
+        uint32_t INJ_EN: 1;                         /*!< bit:     0  ECC inject enable */
+        uint32_t ERR_TYPE: 1;                       /*!< bit:     1  ECC error type */
+        uint32_t _reserved0: 22;                    /*!< bit:  2..23 Reserved */
+        uint32_t RAMID: 8;                          /*!< bit: 24..31 ECC RAMID */
+    } b;                                            /*!< Structure    Access by bit */
+    uint32_t w;                                     /*!< Type         Access by whole register */
+} ERRINJCR_Type;
+
+#define ERRINJCR_RAMID_Pos                 24U                                               /*!< ERRINJCR: RAMID Position */
+#define ERRINJCR_RAMID_Msk                 (0xFFUL << ERRINJCR_RAMID_Pos)                    /*!< ERRINJCR: RAMID Mask */
+
+#define ERRINJCR_ERR_TYPE_Pos              1U                                                /*!< ERRINJCR: ERR_TYPE Position */
+#define ERRINJCR_ERR_TYPE_Msk              (0x1UL << ERRINJCR_ERR_TYPE_Pos)                  /*!< ERRINJCR: ERR_TYPE Mask */
+
+#define ERRINJCR_INJ_EN_Pos                0U                                                /*!< ERRINJCR: INJ_EN Position */
+#define ERRINJCR_INJ_EN_Msk                (0x1UL << ERRINJCR_INJ_EN_Pos)                    /*!< ERRINJCR: INJ_EN Mask */
+
+/*@} end of group CSI_ECC_bitfield */
+
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
+
+
+/* ##########################  Cache functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_CacheFunctions Cache Functions
+  \brief    Functions that configure Instruction and Data cache.
+  @{
+ */
+
+/**
+  \brief   Enable I-Cache
+  \details Turns on I-Cache
+  */
+__STATIC_INLINE void csi_icache_enable (void)
+{
+   __set_CCR(__get_CCR() | 0x00000004);
+}
+
+
+/**
+  \brief   Disable I-Cache
+  \details Turns off I-Cache
+  */
+__STATIC_INLINE void csi_icache_disable (void)
+{
+   __set_CCR(__get_CCR() & 0xFFFFFFFB);
+}
+
+
+/**
+  \brief   Invalidate I-Cache
+  \details Invalidates I-Cache
+  */
+__STATIC_INLINE void csi_icache_invalid (void)
+{
+    __set_CFR(0x11);
+    __set_CFR(INS_CACHE | CACHE_INV);
+}
+
+
+/**
+  \brief   Enable D-Cache
+  \details Turns on D-Cache
+  \note    I-Cache also turns on.
+  */
+__STATIC_INLINE void csi_dcache_enable (void)
+{
+   __set_CCR(__get_CCR() | 0x00000008);
+}
+
+
+/**
+  \brief   Disable D-Cache
+  \details Turns off D-Cache
+  \note    I-Cache also turns off.
+  */
+__STATIC_INLINE void csi_dcache_disable (void)
+{
+   __set_CCR(__get_CCR() & 0xFFFFFFF7);
+}
+
+
+/**
+  \brief   Invalidate D-Cache
+  \details Invalidates D-Cache
+  \note    I-Cache is also invalidated
+  */
+__STATIC_INLINE void csi_dcache_invalid (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_INV);
+}
+
+
+/**
+  \brief   Clean D-Cache
+  \details Cleans D-Cache
+  \note    I-Cache is also cleaned
+  */
+__STATIC_INLINE void csi_dcache_clean (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_CLR);
+}
+
+/**
+  \brief   Clean & Invalidate D-Cache
+  \details Cleans and Invalidates D-Cache
+  \note    I-Cache is also flushed.
+  */
+__STATIC_INLINE void csi_dcache_clean_invalid (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_CLR | CACHE_INV);
+}
+
+__STATIC_INLINE  void set_cache_range (uint32_t start, uint32_t end, uint32_t value)
+{
+    if (!(start & SSEG0_BASE_ADDR) || (end - start) &~(CACHE_RANGE_MAX_SIZE - 1)) {
+        __set_CFR(value);
+    }
+
+    if (value & INS_CACHE) {
+        csi_icache_disable();
+    }
+    uint32_t i;
+    for (i = start; i < end; i += L1_CACHE_BYTES) {
+        __set_CIR(i);
+        __set_CFR(CACHE_OMS | value);
+    }
+
+    if (end & (L1_CACHE_BYTES-1)) {
+        __set_CIR(end);
+        __set_CFR(CACHE_OMS | value);
+    }
+
+    if (value & INS_CACHE) {
+        csi_icache_enable();
+    }
+
+}
+
+/**
+  \brief   D-Cache Invalidate by address
+  \details Invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_invalid_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_INV));
+}
+
+
+/**
+  \brief   D-Cache Clean by address
+  \details Cleans D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_CLR));
+}
+
+
+/**
+  \brief   D-Cache Clean and Invalidate by address
+  \details Cleans and invalidates D_Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_invalid_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_CLR | CACHE_INV));
+}
+
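+/*
+  Usage sketch (editor's illustration, assuming a DMA-capable peripheral): clean the
+  D-Cache before a device reads a buffer, and invalidate it before the CPU reads data
+  the device has written. "dma_buf" is a hypothetical, cache-line-aligned buffer.
+
+    static uint8_t dma_buf[256] __attribute__((aligned(L1_CACHE_BYTES)));
+
+    csi_dcache_clean_range((uint32_t *)dma_buf, sizeof(dma_buf));    // CPU -> device
+    // ... start DMA transfer that reads from dma_buf ...
+    csi_dcache_invalid_range((uint32_t *)dma_buf, sizeof(dma_buf));  // device -> CPU
+    // ... CPU now reads fresh data written by the device ...
+*/
+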
+
+/*@} end of CSI_Core_CacheFunctions */
+
+
+/* ##########################  MMU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_MMUFunctions MMU Functions
+  \brief    Functions that configure MMU.
+  @{
+ */
+
+typedef struct {
+    uint32_t global: 1;         /* tlb page global access. */
+    uint32_t valid: 1;          /* tlb page valid */
+    uint32_t writeable: 1;      /* tlb page writeable */
+    uint32_t cacheable: 1;      /* tlb page cacheable*/
+    uint32_t is_secure: 1;      /* tlb page security access */
+    uint32_t strong_order: 1;   /* tlb page strong-order: accesses are issued in program order */
+    uint32_t bufferable: 1;     /* tlb page bufferable */
+} page_attr_t;
+
+typedef enum {
+    PAGE_SIZE_4KB   = 0x000,
+    PAGE_SIZE_16KB  = 0x003,
+    PAGE_SIZE_64KB  = 0x00F,
+    PAGE_SIZE_256KB = 0x03F,
+    PAGE_SIZE_1MB   = 0x0FF,
+    PAGE_SIZE_4MB   = 0x3FF,
+    PAGE_SIZE_16MB  = 0xFFF
+} page_size_e;
+
+
+/**
+  \brief  enable mmu
+  \details
+  */
+__STATIC_INLINE void csi_mmu_enable(void)
+{
+    __set_CCR(__get_CCR() | (1u << CCR_MP_Pos));
+}
+
+/**
+  \brief  disable mmu
+  \details
+  */
+__STATIC_INLINE void csi_mmu_disable(void)
+{
+    __set_CCR(__get_CCR() & (~(1u << CCR_MP_Pos)));
+}
+
+/**
+  \brief  create page with feature.
+  \details
+  \param [in]  vaddr     virtual address.
+  \param [in]  paddr     physical address.
+  \param [in]  asid      address space id (default: 0).
+  \param [in]  attr      \ref page_attr_t. tlb page attribute.
+  */
+__STATIC_INLINE void csi_mmu_set_tlb(uint32_t vaddr, uint32_t paddr, uint32_t asid, page_attr_t attr)
+{
+     MPR_Type pgmask;
+     MEH_Type meh;
+     MEL_Type mel;
+     uint32_t vaddr_bit;
+     uint32_t page_feature = 0;
+
+     page_feature |= attr.global << MEL_G_Pos | attr.valid << MEL_V_Pos |
+                     attr.writeable << MEL_D_Pos | attr.cacheable << MEL_C_Pos |
+                     attr.is_secure << MEL_SEC_Pos | attr.strong_order << MEL_SO_Pos |
+                     attr.bufferable << MEL_B_Pos;
+
+     pgmask.w = __get_MPR();
+     vaddr_bit = 44 - __FF0(~((uint32_t)pgmask.b.page_mask));
+
+     meh.b.ASID = (uint8_t)asid;
+     meh.b.VPN  = (vaddr & ((~pgmask.w | 0xFE000000) & 0xFFFFE000)) >> MEH_VPN_Pos;
+     __set_MEH(meh.w);
+
+     __set_MCIR(1u << MCIR_TLBP_Pos);
+
+     mel.w = ((paddr & ~(pgmask.b.page_mask << 12)) | page_feature);
+     if (vaddr & (1 << vaddr_bit)) {
+         __set_MEL1(mel.w);
+     }
+     else {
+         __set_MEL0(mel.w);
+     }
+
+     if (__get_MIR() & (1 << MIR_P_Pos)) {
+        __set_MCIR(1u << MCIR_TLBWR_Pos);
+     } else {
+        __set_MCIR(1u << MCIR_TLBWI_Pos);
+     }
+}
+
+
+/**
+  \brief  set mmu tlb page size
+  \details
+  \param [in]  size  tlb page size.
+  */
+__STATIC_INLINE void csi_mmu_set_pagesize(page_size_e size)
+{
+    MPR_Type pgmask;
+    pgmask.b.page_mask = size;
+    __set_MPR(pgmask.w);
+}
+
+
+/**
+  \brief  read MEH, MEL0, MEL1 by tlb index.
+  \details
+  \param [in]    index  tlb index(0, 1, 2, ...)
+  \param [out]   meh    pointer to variable for retrieving MEH.
+  \param [out]   mel0   pointer to variable for retrieving MEL0.
+  \param [out]   mel1   pointer to variable for retrieving MEL1.
+  */
+__STATIC_INLINE void csi_mmu_read_by_index(uint32_t index, uint32_t *meh, uint32_t *mel0, uint32_t *mel1)
+{
+    MIR_Type mir;
+
+    if (meh == NULL || mel0 == NULL || mel1 == NULL) {
+        return;
+    }
+
+    mir.b.Index = index;
+    __set_MIR(mir.w);
+    __set_MCIR(1u << MCIR_TLBR_Pos);
+
+    *meh = __get_MEH();
+    *mel0 = __get_MEL0();
+    *mel1 = __get_MEL1();
+}
+
+
+/**
+  \brief  flush all mmu tlb.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_all(void)
+{
+    __set_MCIR(1u << MCIR_TLBINV_ALL_Pos);
+}
+
+/**
+  \brief  flush mmu tlb by index.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_by_index(uint32_t index)
+{
+    MIR_Type mir;
+
+    mir.b.Index = index;
+    __set_MIR(mir.w);
+    __set_MCIR(1u << MCIR_TLBINV_INDEX_Pos);
+}
+
+
+/**
+  \brief  flush mmu tlb by virtual address.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_by_vaddr(uint32_t vaddr, uint32_t asid)
+{
+    __set_MEH(vaddr | (asid & MEH_ASID_Msk));
+    __set_MCIR(1u << MCIR_TLBP_Pos);
+
+    if (__get_MIR() & (1 << MIR_P_Pos)) {
+        return;
+    } else {
+        __set_MCIR(1u << MCIR_TLBINV_INDEX_Pos);
+    }
+}
+
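+/*
+  Usage sketch (editor's illustration): identity-map one 4 KB page as cacheable,
+  writeable and bufferable, then turn the MMU on. The address is hypothetical.
+
+    page_attr_t attr = {
+        .global = 1, .valid = 1, .writeable = 1,
+        .cacheable = 1, .is_secure = 0, .strong_order = 0, .bufferable = 1,
+    };
+
+    csi_mmu_set_pagesize(PAGE_SIZE_4KB);
+    csi_mmu_set_tlb(0x80100000U, 0x80100000U, 0U, attr);   // vaddr == paddr, ASID 0
+    csi_mmu_enable();
+*/
+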
+/*@} end of CSI_Core_MMUFunctions */
+
+/* ##########################  MPU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_MPUFunctions MPU Functions
+  \brief    Functions that configure MPU.
+  @{
+ */
+
+typedef enum {
+    REGION_SIZE_128B     = 0x6,
+    REGION_SIZE_256B     = 0x7,
+    REGION_SIZE_512B     = 0x8,
+    REGION_SIZE_1KB      = 0x9,
+    REGION_SIZE_2KB      = 0xA,
+    REGION_SIZE_4KB      = 0xB,
+    REGION_SIZE_8KB      = 0xC,
+    REGION_SIZE_16KB     = 0xD,
+    REGION_SIZE_32KB     = 0xE,
+    REGION_SIZE_64KB     = 0xF,
+    REGION_SIZE_128KB    = 0x10,
+    REGION_SIZE_256KB    = 0x11,
+    REGION_SIZE_512KB    = 0x12,
+    REGION_SIZE_1MB      = 0x13,
+    REGION_SIZE_2MB      = 0x14,
+    REGION_SIZE_4MB      = 0x15,
+    REGION_SIZE_8MB      = 0x16,
+    REGION_SIZE_16MB     = 0x17,
+    REGION_SIZE_32MB     = 0x18,
+    REGION_SIZE_64MB     = 0x19,
+    REGION_SIZE_128MB    = 0x1A,
+    REGION_SIZE_256MB    = 0x1B,
+    REGION_SIZE_512MB    = 0x1C,
+    REGION_SIZE_1GB      = 0x1D,
+    REGION_SIZE_2GB      = 0x1E,
+    REGION_SIZE_4GB      = 0x1F
+} region_size_e;
+
+typedef enum {
+    AP_BOTH_INACCESSIBLE = 0,
+    AP_SUPER_RW_USER_INACCESSIBLE,
+    AP_SUPER_RW_USER_RDONLY,
+    AP_BOTH_RW
+} access_permission_e;
+
+typedef struct {
+    uint32_t nx: 1;
+    access_permission_e ap: 2;    /* super user and normal user access.*/
+    uint32_t s: 1;
+    uint32_t c: 1;                /* cacheable */
+    uint32_t so: 1;
+    uint32_t b: 1;
+} mpu_region_attr_t;
+
+/**
+  \brief  enable mpu
+  \details
+  */
+__STATIC_INLINE void csi_mpu_enable(void)
+{
+    __set_CCR(__get_CCR() | CCR_MP_Msk);
+}
+
+/**
+  \brief  disable mpu
+  \details
+  */
+__STATIC_INLINE void csi_mpu_disable(void)
+{
+    __set_CCR(__get_CCR() & (~CCR_MP_Msk));
+}
+
+/**
+  \brief  configure memory protected region.
+  \details
+  \param [in]  idx        memory protected region index (0 ~ 15).
+  \param [in]  base_addr  base address must be aligned with the region size.
+  \param [in]  size       \ref region_size_e. memory protected region size.
+  \param [in]  attr       \ref mpu_region_attr_t. memory protected region attribute.
+  \param [in]  enable     enable or disable memory protected region.
+  */
+__STATIC_INLINE void csi_mpu_config_region(uint32_t idx, uint32_t base_addr, region_size_e size,
+                                           mpu_region_attr_t attr, uint32_t enable)
+{
+    uint32_t op_idx;
+
+    if (idx > 15) {
+        return;
+    }
+
+    CAPR_Type capr;
+    PACR_Type pacr;
+    PRSR_Type prsr;
+    ATTR0_Type reg_attr;
+
+    if (idx <= 7) {
+        capr.w = __get_CAPR();
+        reg_attr.w = __get_ATTR0();
+        op_idx = idx;
+    } else {
+        capr.w = __get_CAPR1();
+        reg_attr.w = __get_ATTR1();
+        op_idx = idx - 8;
+    }
+
+    pacr.w = __get_PACR();
+    prsr.w = __get_PRSR();
+
+    pacr.w = (base_addr & PACR_BASE_ADDR_Msk) | (size << PACR_SIZE_Pos);
+    pacr.w &= ~(((1u << (size - 6)) - 1) << 7);
+    pacr.b.E = enable;
+
+    prsr.b.RID = idx;
+    __set_PRSR(prsr.w);
+
+    capr.w &= ~((0x1 << op_idx) | (0x3 << (op_idx * 2 + 8)) | (0x1 << (op_idx + 24)));
+    capr.w = (capr.w | (attr.nx << op_idx) | (attr.ap << (op_idx * 2 + 8)) | (attr.s << (op_idx + 24)));
+
+    reg_attr.w &= ~(0x7 << (op_idx << 2));
+    reg_attr.w |= ((attr.c | (attr.so << 1) | (attr.b << 2)) << (op_idx << 2));
+
+    if (idx <= 7) {
+        __set_CAPR(capr.w);
+        __set_ATTR0(reg_attr.w);
+    } else {
+        __set_CAPR1(capr.w);
+        __set_ATTR1(reg_attr.w);
+    }
+
+    __set_PACR(pacr.w);
+}
+
+/**
+  \brief  enable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region index (0 ~ 15).
+  */
+__STATIC_INLINE void csi_mpu_enable_region(uint32_t idx)
+{
+    if (idx > 15) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | idx);
+    __set_PACR(__get_PACR() | PACR_E_Msk);
+}
+
+/**
+  \brief  disable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region index (0 ~ 15).
+  */
+__STATIC_INLINE void csi_mpu_disable_region(uint32_t idx)
+{
+    if (idx > 15) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | idx);
+    __set_PACR(__get_PACR() & (~PACR_E_Msk));
+}
+
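+/*
+  Usage sketch (editor's illustration): protect a hypothetical 4 KB region at
+  0x20000000 so that supervisor mode has read/write access and user mode has none,
+  mark it cacheable and bufferable, then enable the MPU.
+
+    mpu_region_attr_t attr = {
+        .nx = 0,                                  // 0: instruction fetch permitted
+        .ap = AP_SUPER_RW_USER_INACCESSIBLE,
+        .s  = 0,
+        .c  = 1, .so = 0, .b = 1,
+    };
+
+    csi_mpu_config_region(0U, 0x20000000U, REGION_SIZE_4KB, attr, 1U);
+    csi_mpu_enable();
+*/
+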
+/*@} end of CSI_Core_MPUFunctions */
+
+/* ##########################  TCM functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_TCMFunctions TCM Functions
+  \brief    Functions that configure TCM.
+  @{
+ */
+
+/**
+  \brief   Enable ITCM
+  \details Turns on ITCM
+  */
+__STATIC_INLINE void csi_itcm_enable (void)
+{
+    __set_ITCMCR(__get_ITCMCR() | ITCMCR_EN_Msk);
+}
+
+/**
+  \brief   Enable DTCM
+  \details Turns on DTCM
+  */
+__STATIC_INLINE void csi_dtcm_enable (void)
+{
+    __set_DTCMCR(__get_DTCMCR() | DTCMCR_EN_Msk);
+}
+
+/**
+  \brief   Disable ITCM
+  \details Turns off ITCM
+  */
+__STATIC_INLINE void csi_itcm_disable (void)
+{
+    __set_ITCMCR(__get_ITCMCR() & (~ITCMCR_EN_Msk));
+}
+
+/**
+  \brief   Disable DTCM
+  \details Turns off DTCM
+  */
+__STATIC_INLINE void csi_dtcm_disable (void)
+{
+    __set_DTCMCR(__get_DTCMCR() & (~DTCMCR_EN_Msk));
+}
+
+/**
+  \brief   Enable ITCM slave interface access
+  \details Enable ITCM slave interface access
+  */
+__STATIC_INLINE void csi_itcm_enable_slave_access(void)
+{
+    __set_ITCMCR(__get_ITCMCR() | ITCMCR_SIF_Msk);
+}
+
+/**
+  \brief   Disable ITCM slave interface access
+  \details Disable ITCM slave interface access
+  */
+__STATIC_INLINE void csi_itcm_disable_slave_access(void)
+{
+    __set_ITCMCR(__get_ITCMCR() & (~ITCMCR_SIF_Msk));
+}
+
+/**
+  \brief   Enable DTCM slave interface access
+  \details Enable DTCM slave interface access
+  */
+__STATIC_INLINE void csi_dtcm_enable_slave_access(void)
+{
+    __set_DTCMCR(__get_DTCMCR() | DTCMCR_SIF_Msk);
+}
+
+/**
+  \brief   Disable DTCM slave interface access
+  \details Disable DTCM slave interface access
+  */
+__STATIC_INLINE void csi_dtcm_disable_slave_access(void)
+{
+    __set_DTCMCR(__get_DTCMCR() & (~DTCMCR_SIF_Msk));
+}
+
+/**
+  \brief   Get ITCM Size
+  \details Get ITCM Size
+  \return         ITCM size (bytes).
+  */
+__STATIC_INLINE uint32_t csi_itcm_get_size(void)
+{
+    ITCMCR_Type sizemask;
+    uint32_t ret;
+
+    sizemask.w = __get_ITCMCR();
+    ret = sizemask.b.Size;
+
+    return (1 << ret) << 10;
+}
+
+/**
+  \brief   Get DTCM Size
+  \details Get DTCM Size
+  \return         DTCM size (bytes).
+  */
+__STATIC_INLINE uint32_t csi_dtcm_get_size(void)
+{
+    DTCMCR_Type sizemask;
+    uint32_t ret;
+
+    sizemask.w = __get_DTCMCR();
+    ret = sizemask.b.Size;
+
+    return (1 << ret) << 10;
+}
+
+/**
+  \brief   Get ITCM Delay
+  \details Get ITCM Delay
+  \return         delay time.
+  */
+__STATIC_INLINE uint32_t csi_itcm_get_delay(void)
+{
+    ITCMCR_Type delaymask;
+    uint32_t ret;
+
+    delaymask.w = __get_ITCMCR();
+    ret = delaymask.b.Delay;
+
+    return ret;
+}
+
+/**
+  \brief   Get DTCM Delay
+  \details Get DTCM Delay
+  \return         delay time.
+  */
+__STATIC_INLINE uint32_t csi_dtcm_get_delay(void)
+{
+    DTCMCR_Type delaymask;
+    uint32_t ret;
+
+    delaymask.w = __get_DTCMCR();
+    ret = delaymask.b.Delay;
+
+    return ret;
+}
+
+/**
+  \brief   Set ITCM Base Address
+  \details Set ITCM Base Address
+  \param [in]  base_addr  itcm base address.
+  */
+__STATIC_INLINE void csi_itcm_set_base_addr(uint32_t base_addr)
+{
+    __set_ITCMCR((__get_ITCMCR() & (~ITCMCR_Base_Address_Msk)) | (base_addr & ITCMCR_Base_Address_Msk));
+}
+
+/**
+  \brief   Set DTCM Base Address
+  \details Set DTCM Base Address
+  \param [in]  base_addr  dtcm base address.
+  */
+__STATIC_INLINE void csi_dtcm_set_base_addr(uint32_t base_addr)
+{
+    __set_DTCMCR((__get_DTCMCR() & (~DTCMCR_Base_Address_Msk)) | (base_addr & DTCMCR_Base_Address_Msk));
+}
+
+
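+/*
+  Usage sketch (editor's illustration): place the DTCM at a hypothetical base
+  address, enable it, and query its size. The base address must be size-aligned.
+
+    csi_dtcm_set_base_addr(0x20000000U);
+    csi_dtcm_enable();
+    uint32_t dtcm_bytes = csi_dtcm_get_size();
+*/
+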
+/*@} end of CSI_Core_TCMFunctions */
+
+/* ##########################  ECC functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_ECCFunctions ECC Functions
+  \brief    Functions that configure ECC.
+  @{
+ */
+
+typedef enum {
+    ECC_ERROR_CORRECTABLE = 0,
+    ECC_ERROR_FATAL = 1
+} ecc_error_type_e;
+
+typedef enum {
+    ECC_DTCM_RAM = 0,
+    ECC_ITCM_RAM = 1,
+    ECC_DCACHE_DATA_RAM = 4,
+    ECC_DCACHE_TAG_RAM = 5,
+    ECC_ICACHE_DATA_RAM = 6,
+    ECC_ICACHE_TAG_RAM = 7
+} ecc_ramid_e;
+
+typedef struct {
+    uint32_t erraddr;
+    uint32_t index;
+    uint8_t way;
+    ecc_ramid_e ramid: 8;
+    ecc_error_type_e err_type: 8;
+} ecc_error_info_t;
+
+/**
+  \brief   Enable ECC
+  \details Turns on ECC
+  */
+__STATIC_INLINE void csi_ecc_enable (void)
+{
+    __set_CCR2(__get_CCR2() | CCR2_ECC_Msk);
+}
+
+/**
+  \brief   Disable ECC
+  \details Turns off ECC
+  */
+__STATIC_INLINE void csi_ecc_disable (void)
+{
+    __set_CCR2(__get_CCR2() & ~CCR2_ECC_Msk);
+}
+
+/**
+  \brief   Enable ECC error fix function
+  \details Turns on ECC error fix function
+  */
+__STATIC_INLINE void csi_ecc_enable_error_fix (void)
+{
+    __set_CCR2(__get_CCR2() | CCR2_CRECC_Msk);
+}
+
+/**
+  \brief   Disable ECC error fix function
+  \details Turns off ECC error fix function
+  */
+__STATIC_INLINE void csi_ecc_disable_error_fix (void)
+{
+    __set_CCR2(__get_CCR2() & ~CCR2_CRECC_Msk);
+}
+
+/**
+  \brief   Inject ECC error
+  \details Inject ECC error
+  \param [in]      type ECC error type.
+  \param [in]      ramid ECC ram id.
+*/
+__STATIC_INLINE void csi_ecc_inject_error(ecc_error_type_e type, ecc_ramid_e ramid)
+{
+    ERRINJCR_Type errinjcr;
+
+    errinjcr.b.ERR_TYPE = type;
+    errinjcr.b.RAMID = (1U << ramid);
+    errinjcr.b.INJ_EN = 1U;
+    errinjcr.b._reserved0 = 0;
+
+    __set_ERRINJCR(errinjcr.w);
+}
+
+/**
+  \brief   Get ECC error info
+  \details Get ECC error info
+  \param [out]      info ECC error info
+*/
+__STATIC_INLINE void csi_ecc_get_error_info(ecc_error_info_t *info)
+{
+    ERRLC_Type errlc;
+
+    if (info != NULL) {
+        errlc.w = __get_ERRLC();
+        info->erraddr = __get_ERRADDR();
+        info->index = errlc.b.Index;
+        info->way = errlc.b.Way;
+        info->ramid = 31 - __FF1(errlc.b.RAMID);
+        info->err_type = errlc.b.ERR_TYPE;
+    }
+}
+
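+/*
+  Usage sketch (editor's illustration): enable ECC with automatic error fixing,
+  inject a correctable error into the DTCM RAM for testing, and read back the error
+  location afterwards (for example from the ECC error handler).
+
+    ecc_error_info_t info;
+
+    csi_ecc_enable();
+    csi_ecc_enable_error_fix();
+    csi_ecc_inject_error(ECC_ERROR_CORRECTABLE, ECC_DTCM_RAM);
+    // ... access the DTCM so the injected error is detected ...
+    csi_ecc_get_error_info(&info);
+*/
+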
+/*@} end of CSI_Core_ECCFunctions */
+
+/* ##################################    IRQ Functions  ############################################ */
+
+/**
+  \brief   Save the Irq context
+  \details Save the PSR value before disabling interrupts.
+  \return           PSR value to pass to csi_irq_restore().
+ */
+__STATIC_INLINE uint32_t csi_irq_save(void)
+{
+    uint32_t result;
+    result = __get_PSR();
+    __disable_irq();
+    return(result);
+}
+
+/**
+  \brief   Restore the Irq context
+  \details restore the saved PSR state.
+  \param [in]      irq_state  psr irq state.
+ */
+__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
+{
+    __set_PSR(irq_state);
+}
+
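+/*
+  Usage sketch (editor's illustration): a critical section built from
+  csi_irq_save()/csi_irq_restore().
+
+    uint32_t flags = csi_irq_save();   // PSR saved, interrupts disabled
+    // ... code that must not be preempted by interrupts ...
+    csi_irq_restore(flags);            // previous PSR (interrupt enable state) restored
+*/
+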
+/*@} end of IRQ Functions */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CK807_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 873 - 0
lib/sec_library/include/core/core_810.h

@@ -0,0 +1,873 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_ck810.h
+ * @brief    CSI CK810 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     26. Jan 2018
+ ******************************************************************************/
+
+#ifndef __CORE_CK810_H_GENERIC
+#define __CORE_CK810_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup CK810
+  @{
+ */
+
+/*  CSI CK810 definitions */
+#define __CK810_CSI_VERSION_MAIN  (0x04U)                                      /*!< [31:16] CSI HAL main version */
+#define __CK810_CSI_VERSION_SUB   (0x1EU)                                      /*!< [15:0]  CSI HAL sub version */
+#define __CK810_CSI_VERSION       ((__CK810_CSI_VERSION_MAIN << 16U) | \
+                                   __CK810_CSI_VERSION_SUB           )         /*!< CSI HAL version number */
+
+#ifndef __CK810
+#define __CK810                (0x0aU)                                         /*!< CK810 Core */
+#endif
+
+/** __FPU_USED indicates whether an FPU is used or not.
+*/
+#define __FPU_USED      1U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CK810_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_CK810_H_DEPENDANT
+#define __CORE_CK810_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __CK810_REV
+#define __CK810_REV               0x0000U
+#endif
+
+#ifndef __GSR_GCR_PRESENT
+#define __GSR_GCR_PRESENT         0U
+#endif
+
+#ifndef __ICACHE_PRESENT
+#define __ICACHE_PRESENT          1U
+#endif
+
+#ifndef __DCACHE_PRESENT
+#define __DCACHE_PRESENT          1U
+#endif
+
+#include <core/csi_gcc.h>
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group CK810 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK810 processor based devices.
+*/
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \brief  Access Processor Status Register(PSR)struct definition.
+ */
+typedef union {
+    struct {
+        uint32_t C: 1;                       /*!< bit:      0  Conditional code/Carry flag */
+        uint32_t AF: 1;                      /*!< bit:      1  Alternate register valid control bit */
+        uint32_t _reserved0: 2;              /*!< bit:  2.. 3  Reserved */
+        uint32_t FE: 1;                      /*!< bit:      4  Fast interrupt enable control bit */
+        uint32_t _reserved1: 1;              /*!< bit:      5  Reserved */
+        uint32_t IE: 1;                      /*!< bit:      6  Interrupt effective control bit */
+        uint32_t IC: 1;                      /*!< bit:      7  Interrupt control bit */
+        uint32_t EE: 1;                      /*!< bit:      8  Abnormally effective control bit */
+        uint32_t MM: 1;                      /*!< bit:      9  Unsymmetrical masking bit */
+        uint32_t _reserved2: 2;              /*!< bit: 10..11  Reserved */
+        uint32_t TE: 1;                      /*!< bit:     12  Trace transmission control bit */
+        uint32_t TP: 1;                      /*!< bit:     13  Pending trace exception set bit */
+        uint32_t TM: 2;                      /*!< bit: 14..15  Tracing mode bit */
+        uint32_t VEC: 8;                     /*!< bit: 16..23  Abnormal event vector value */
+        uint32_t _reserved3: 7;              /*!< bit: 24..30  Reserved */
+        uint32_t S: 1;                       /*!< bit:     31  Superuser mode set bit */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} PSR_Type;
+
+/* PSR Register Definitions */
+#define PSR_S_Pos                          31U                                            /*!< PSR: S Position */
+#define PSR_S_Msk                          (0x1UL << PSR_S_Pos)                           /*!< PSR: S Mask */
+
+#define PSR_VEC_Pos                        16U                                            /*!< PSR: VEC Position */
+#define PSR_VEC_Msk                        (0xFFUL << PSR_VEC_Pos)                        /*!< PSR: VEC Mask */
+
+#define PSR_TM_Pos                         14U                                            /*!< PSR: TM Position */
+#define PSR_TM_Msk                         (0x3UL << PSR_TM_Pos)                          /*!< PSR: TM Mask */
+
+#define PSR_TP_Pos                         13U                                            /*!< PSR: TP Position */
+#define PSR_TP_Msk                         (0x1UL << PSR_TP_Pos)                          /*!< PSR: TP Mask */
+
+#define PSR_TE_Pos                         12U                                            /*!< PSR: TE Position */
+#define PSR_TE_Msk                         (0x1UL << PSR_TE_Pos)                          /*!< PSR: TE Mask */
+
+#define PSR_MM_Pos                         9U                                             /*!< PSR: MM Position */
+#define PSR_MM_Msk                         (0x1UL << PSR_MM_Pos)                          /*!< PSR: MM Mask */
+
+#define PSR_EE_Pos                         8U                                             /*!< PSR: EE Position */
+#define PSR_EE_Msk                         (0x1UL << PSR_EE_Pos)                          /*!< PSR: EE Mask */
+
+#define PSR_IC_Pos                         7U                                             /*!< PSR: IC Position */
+#define PSR_IC_Msk                         (0x1UL << PSR_IC_Pos)                          /*!< PSR: IC Mask */
+
+#define PSR_IE_Pos                         6U                                             /*!< PSR: IE Position */
+#define PSR_IE_Msk                         (0x1UL << PSR_IE_Pos)                          /*!< PSR: IE Mask */
+
+#define PSR_FE_Pos                         4U                                             /*!< PSR: FE Position */
+#define PSR_FE_Msk                         (0x1UL << PSR_FE_Pos)                          /*!< PSR: FE Mask */
+
+#define PSR_AF_Pos                         1U                                             /*!< PSR: AF Position */
+#define PSR_AF_Msk                         (0x1UL << PSR_AF_Pos)                          /*!< PSR: AF Mask */
+
+#define PSR_C_Pos                          0U                                             /*!< PSR: C Position */
+#define PSR_C_Msk                          (0x1UL << PSR_C_Pos)                           /*!< PSR: C Mask */
+
+/**
+  \brief Consortium definition for accessing Cache Configuration Registers(CCR, CR<18, 0>).
+ */
+typedef union {
+    struct {
+        uint32_t MP: 2;                      /*!< bit:  0.. 1  memory protection settings */
+        uint32_t IE: 1;                      /*!< bit:      2  Instruction cache enable */
+        uint32_t DE: 1;                      /*!< bit:      3  Data cache enable */
+        uint32_t WB: 1;                      /*!< bit:      4  Cache write back */
+        uint32_t RS: 1;                      /*!< bit:      5  Address return stack settings */
+        uint32_t Z: 1;                       /*!< bit:      6  Allow predictive jump bit */
+        uint32_t BE: 1;                      /*!< bit:      7  Endian mode */
+        uint32_t SCK: 3;                     /*!< bit:  8..10  the clock ratio of the system and the processor */
+        uint32_t _reserved0: 1;              /*!< bit:     11  Reserved */
+        uint32_t WA: 1;                      /*!< bit:     12  Write allocate enable */
+        uint32_t E_V2: 1;                    /*!< bit:     13  V2 Endian mode */
+        uint32_t BSTE: 1;                    /*!< bit:     14  Burst transmit enable */
+        uint32_t IPE: 1;                     /*!< bit:     15  Indirect predict enable */
+        uint32_t _reserved1: 16;             /*!< bit: 16..31  Reserved */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CCR_Type;
+
+/* CCR Register Definitions */
+#define CCR_IPE_Pos                       15u                                            /*!< CCR: IPE Position */
+#define CCR_IPE_Msk                       (0x1UL << CCR_IPE_Pos)                         /*!< CCR: IPE Mask */
+
+#define CCR_BSTE_Pos                      14u                                            /*!< CCR: BSTE Position */
+#define CCR_BSTE_Msk                      (0x1UL << CCR_BSTE_Pos)                        /*!< CCR: BSTE Mask */
+
+#define CCR_E_V2_Pos                      13U                                            /*!< CCR: E_V2 Position */
+#define CCR_E_V2_Msk                      (0x1UL << CCR_E_V2_Pos)                        /*!< CCR: E_V2 Mask */
+
+#define CCR_WA_Pos                        12u                                            /*!< CCR: WA Position */
+#define CCR_WA_Msk                        (0x1UL << CCR_WA_Pos)                          /*!< CCR: WA Mask */
+
+#define CCR_SCK_Pos                       8U                                             /*!< CCR: SCK Position */
+#define CCR_SCK_Msk                       (0x7UL << CCR_SCK_Pos)                         /*!< CCR: SCK Mask */
+
+#define CCR_BE_Pos                        7U                                             /*!< CCR: BE Position */
+#define CCR_BE_Msk                        (0x1UL << CCR_BE_Pos)                          /*!< CCR: BE Mask */
+
+#define CCR_Z_Pos                         6U                                             /*!< CCR: Z Position */
+#define CCR_Z_Msk                         (0x1UL << CCR_Z_Pos)                           /*!< CCR: Z Mask */
+
+#define CCR_RS_Pos                        5U                                             /*!< CCR: RS Position */
+#define CCR_RS_Msk                        (0x1UL << CCR_RS_Pos)                          /*!< CCR: RS Mask */
+
+#define CCR_WB_Pos                        4U                                             /*!< CCR: WB Position */
+#define CCR_WB_Msk                        (0x1UL << CCR_WB_Pos)                          /*!< CCR: WB Mask */
+
+#define CCR_DE_Pos                        3U                                             /*!< CCR: DE Position */
+#define CCR_DE_Msk                        (0x1UL << CCR_DE_Pos)                          /*!< CCR: DE Mask */
+
+#define CCR_IE_Pos                        2U                                             /*!< CCR: IE Position */
+#define CCR_IE_Msk                        (0x1UL << CCR_IE_Pos)                          /*!< CCR: IE Mask */
+
+#define CCR_MP_Pos                        0U                                             /*!< CCR: MP Position */
+#define CCR_MP_Msk                        (0x3UL << CCR_MP_Pos)                          /*!< CCR: MP Mask */
+
+/**
+  \brief  Consortium definition for accessing mmu index register(MIR,CR<0,15>).
+ */
+typedef union {
+    struct {
+        uint32_t Index: 10;                  /*!< bit:   0.. 9   TLB index */
+        uint32_t _reserved: 20;              /*!< bit:  10.. 29  Reserved */
+        uint32_t TF: 1;                      /*!< bit:       30  TLB fatal error */
+        uint32_t P: 1;                       /*!< bit:       31  TLBP instruction */
+    } b;
+    uint32_t w;
+} MIR_Type;
+
+/* MIR Register Definitions */
+#define MIR_P_Pos                          31                                            /*!< PRSR: P(TLBP instruction) Position */
+#define MIR_P_Msk                          (0x1UL << MIR_P_Pos)                          /*!< PRSR: P(TLBP instruction) Mask */
+
+#define MIR_TF_Pos                         30                                            /*!< PRSR: Tfatal Position */
+#define MIR_TF_Msk                         (0x1UL << MIR_TF_Pos)                         /*!< PRSR: Tfatal Mask */
+
+#define MIR_Index_Pos                      0                                             /*!< PRSR: Index Position */
+#define MIR_Index_Msk                      (0x3ffUL << MIR_Index_Pos)                    /*!< PRSR: Index Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(MEL, CR<2,15> and CR<3,15>).
+ */
+typedef union {
+    struct {
+        uint32_t G: 1;                        /*!< bit:       0   Global enable bit */
+        uint32_t V: 1;                        /*!< bit:       1   TLB mapping valid bit */
+        uint32_t D: 1;                        /*!< bit:       2   TLB Page dirty bit */
+        uint32_t C: 1;                        /*!< bit:       3   TLB Page cacheable bit */
+        uint32_t SEC: 1;                      /*!< bit:       4   TLB Page security bit */
+        uint32_t SO: 1;                       /*!< bit:       5   Strong order enable bit */
+        uint32_t B: 1;                        /*!< bit:       6   TLB Page bufferable bit */
+        uint32_t _reserved: 5;                /*!< bit:   7.. 11  Reserved */
+        uint32_t PFN: 20;                     /*!< bit:  12.. 31  Physical frame number */
+    } b;
+    uint32_t w;
+} MEL_Type;
+
+/* MEL Register Definitions */
+#define MEL_PFN_Pos                        12                                            /*!< MEL: PFN Position */
+#define MEL_PFN_Msk                        (0xFFFFFUL << MEL_PFN_Pos)                    /*!< MEL: PFN Mask */
+
+#define MEL_B_Pos                          6                                             /*!< MEL: B Position */
+#define MEL_B_Msk                          (0x1UL << MEL_B_Pos)                          /*!< MEL: B Mask */
+
+#define MEL_SO_Pos                         5                                             /*!< MEL: SO Position */
+#define MEL_SO_Msk                         (0x1UL << MEL_SO_Pos)                         /*!< MEL: SO Mask */
+
+#define MEL_SEC_Pos                        4                                             /*!< MEL: SEC Position */
+#define MEL_SEC_Msk                        (0x1UL << MEL_SEC_Pos)                        /*!< MEL: SEC Mask */
+
+#define MEL_C_Pos                          3                                             /*!< MEL: C Position */
+#define MEL_C_Msk                          (0x1UL << MEL_C_Pos)                          /*!< MEL: C Mask */
+
+#define MEL_D_Pos                          2                                             /*!< MEL: D Position */
+#define MEL_D_Msk                          (0x1UL << MEL_D_Pos)                          /*!< MEL: D Mask */
+
+#define MEL_V_Pos                          1                                             /*!< MEL: V Position */
+#define MEL_V_Msk                          (0x1UL << MEL_V_Pos)                          /*!< MEL: V Mask */
+
+#define MEL_G_Pos                          0                                             /*!< MEL: G Position */
+#define MEL_G_Msk                          (0x1UL << MEL_G_Pos)                          /*!< MEL: G Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(MEH, CR<4,15>).
+ */
+typedef union {
+    struct {
+        uint32_t ASID :8;                     /*!< bit:   0.. 7   ASID */
+        uint32_t _reserved :4;                /*!< bit:   8.. 11  Reserved */
+        uint32_t VPN :20;                     /*!< bit:  12.. 31  Virtual page number */
+    } b;
+    uint32_t w;
+} MEH_Type;
+
+/* MEH Register Definitions */
+#define MEH_VPN_Pos                        12                                             /*!< MEH: VPN Position */
+#define MEH_VPN_Msk                        (0xFFFFFUL << MEH_VPN_Pos)                     /*!< MEH: VPN Mask */
+
+#define MEH_ASID_Pos                       0                                              /*!< MEH: ASID Position */
+#define MEH_ASID_Msk                       (0xFFUL << MEH_ASID_Pos)                       /*!< MEH: ASID Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(MPR, CR<6,15>).
+ */
+typedef union {
+    struct {
+        uint32_t _reserved0: 13;              /*!< bit:   0.. 12   Reserved */
+        uint32_t page_mask: 12;               /*!< bit:  13.. 24   Page mask */
+        uint32_t _reserved1: 7;               /*!< bit:  25.. 31   Reserved */
+    } b;
+    uint32_t w;
+} MPR_Type;
+
+/* MPR Register Definitions */
+#define MPR_PAGE_MASK_Pos                        13                                             /*!< MPR: PAGE_MASK Position */
+#define MPR_PAGE_MASK_Msk                        (0xFFFUL << MPR_PAGE_MASK_Pos)                 /*!< MPR: PAGE_MASK Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(CR<8,15>).
+ */
+typedef union {
+    struct {
+        uint32_t ASID: 8;                     /*!< bit:   0.. 7   ASID */
+        uint32_t _reserved: 17;               /*!< bit:   8.. 24  Reserved */
+        uint32_t TLBINV_INDEX: 1;             /*!< bit:       25  TLBINV_INDEX */
+        uint32_t TLBINV_ALL: 1;               /*!< bit:       26  TLBINV_ALL */
+        uint32_t TLBINV: 1;                   /*!< bit:       27  TLBINV */
+        uint32_t TLBWR: 1;                    /*!< bit:       28  TLBWR */
+        uint32_t TLBWI: 1;                    /*!< bit:       29  TLBWI */
+        uint32_t TLBR: 1;                     /*!< bit:       30  TLBR */
+        uint32_t TLBP: 1;                     /*!< bit:       31  TLBP */
+    } b;
+    uint32_t w;
+} MCIR_Type;
+
+/* MCIR Register Definitions */
+#define MCIR_TLBP_Pos                        31                                               /*!< MCIR: TLBP Position */
+#define MCIR_TLBP_Msk                        (0x1UL << MCIR_TLBP_Pos)                         /*!< MCIR: TLBP Mask */
+
+#define MCIR_TLBR_Pos                        30                                               /*!< MCIR: TLBR Position */
+#define MCIR_TLBR_Msk                        (0x1UL << MCIR_TLBR_Pos)                         /*!< MCIR: TLBR Mask */
+
+#define MCIR_TLBWI_Pos                       29                                               /*!< MCIR: TLBWI Position */
+#define MCIR_TLBWI_Msk                       (0x1UL << MCIR_TLBWI_Pos)                        /*!< MCIR: TLBWI Mask */
+
+#define MCIR_TLBWR_Pos                       28                                               /*!< MCIR: TLBWR Position */
+#define MCIR_TLBWR_Msk                       (0x1UL << MCIR_TLBWR_Pos)                        /*!< MCIR: TLBWR Mask */
+
+#define MCIR_TLBINV_Pos                      27                                               /*!< MCIR: TLBINV Position */
+#define MCIR_TLBINV_Msk                      (0x1UL << MCIR_TLBINV_Pos)                       /*!< MCIR: TLBINV Mask */
+
+#define MCIR_TLBINV_ALL_Pos                  26                                               /*!< MCIR: TLBINV_ALL Position */
+#define MCIR_TLBINV_ALL_Msk                  (0x1UL << MCIR_TLBINV_ALL_Pos)                   /*!< MCIR: TLBINV_ALL Mask */
+
+#define MCIR_TLBINV_INDEX_Pos                25                                               /*!< MCIR: TLBINV_INDEX Position */
+#define MCIR_TLBINV_INDEX_Msk                (0x1UL << MCIR_TLBINV_INDEX_Pos)                 /*!< MCIR: TLBINV_INDEX Mask */
+
+#define MCIR_ASID_Pos                        0                                                /*!< MCIR: ASID Position */
+#define MCIR_ASID_Msk                        (0xFFUL << MCIR_ASID_Pos)                        /*!< MCIR: ASID Mask */
+
+
+/*@} end of group CSI_CORE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register(CFR,CR<17,0>).
+ */
+typedef union {
+    struct {
+        uint32_t CACHE_SEL: 2;                      /*!< bit:  0..1  Instruction and data cache selection */
+        uint32_t _reserved0: 2;                     /*!< bit:  2..3  Reserved */
+        uint32_t INV: 1;                            /*!< bit:     4  Invalid data in cache */
+        uint32_t CLR: 1;                            /*!< bit:     5  Clear the dirty tlb table */
+        uint32_t OMS: 1;                            /*!< bit:     6  Cache invalid and clear operation mode (one line or all line)*/
+        uint32_t ITS: 1;                            /*!< bit:     7  Cache invalid and clear operation mode (CIR used as virtual index or SET/WAY/LEVE index)*/
+        uint32_t UNLOCK: 1;                         /*!< bit:     8  Unlock data cache line. */
+        uint32_t _reserved1: 7;                     /*!< bit: 9..15  Reserved */
+        uint32_t BHT_INV: 1;                        /*!< bit:    16  Invalid data in branch history table */
+        uint32_t BTB_INV: 1;                        /*!< bit:    17  Invalid data in branch table buffer */
+        uint32_t _reserved2: 13;                    /*!< bit: 18..30 Reserved */
+        uint32_t LICF: 1;                           /*!< bit:     31 Failure of clearing or invalid cache line */
+    } b;                                            /*!< Structure    Access by bit */
+    uint32_t w;                                     /*!< Type         Access by whole register */
+} CFR_Type;
+
+#define CFR_LICF_Pos                     31U                                             /*!< CFR: LICF Position */
+#define CFR_LICF_Msk                     (0x1UL << CFR_LICF_Pos)                         /*!< CFR: LICF Mask */
+
+#define CFR_BTB_INV_Pos                  17U                                             /*!< CFR: BTB Position */
+#define CFR_BTB_INV_Msk                  (0x1UL << CFR_BTB_INV_Pos)                      /*!< CFR: BTB Mask */
+
+#define CFR_BHT_INV_Pos                  16U                                             /*!< CFR: BHT Position */
+#define CFR_BHT_INV_Msk                  (0x1UL << CFR_BHT_INV_Pos)                      /*!< CFR: BHT Mask */
+
+#define CFR_UNLOCK_Pos                   8U                                              /*!< CFR: UNLOCK Position */
+#define CFR_UNLOCK_Msk                   (0x1UL << CFR_UNLOCK_Pos)                       /*!< CFR: UNLOCK Mask */
+
+#define CFR_ITS_Pos                      7U                                              /*!< CFR: ITS Position */
+#define CFR_ITS_Msk                      (0x1UL << CFR_ITS_Pos)                          /*!< CFR: ITS Mask */
+
+#define CFR_OMS_Pos                      6U                                              /*!< CFR: OMS Position */
+#define CFR_OMS_Msk                      (0x1UL << CFR_OMS_Pos)                          /*!< CFR: OMS Mask */
+
+#define CFR_CLR_Pos                      5U                                              /*!< CFR: CLR Position */
+#define CFR_CLR_Msk                      (0x1UL << CFR_CLR_Pos)                          /*!< CFR: CLR Mask */
+
+#define CFR_INV_Pos                      4U                                              /*!< CFR: INV Position */
+#define CFR_INV_Msk                      (0x1UL << CFR_INV_Pos)                          /*!< CFR: INV Mask */
+
+#define CFR_CACHE_SEL_Pos                0                                               /*!< CFR: CACHE_SEL Position */
+#define CFR_CACHE_SEL_Msk                (0x3UL << CFR_CACHE_SEL_Pos)                    /*!< CFR: CACHE_SEL Mask */
+
+/* CFR Register Definitions */
+/*@} end of group CSI_CACHE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+#define SSEG0_BASE_ADDR         0x80000000
+#define CACHE_RANGE_MAX_SIZE    0x80000
+
+#define INS_CACHE               (1 << 0)
+#define DATA_CACHE              (1 << 1)
+#define CACHE_INV               (1 << 4)
+#define CACHE_CLR               (1 << 5)
+#define CACHE_OMS               (1 << 6)
+#define CACHE_ITS               (1 << 7)
+#define CACHE_LICF              (1 << 31)
+
+#define    L1_CACHE_SHIFT       4      /* 16 Bytes */
+#define    L1_CACHE_BYTES       (1 << L1_CACHE_SHIFT)
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit filed value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)
+
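+/*
+  Usage sketch (illustrative, not part of the vendor API description): the field macros
+  combine a value with the *_Pos/*_Msk pairs defined above, for example when composing
+  or decoding the PSR VEC field.
+
+      uint32_t psr = _VAL2FLD(PSR_VEC, 0x22U) | PSR_IE_Msk;   // build a PSR image
+      uint32_t vec = _FLD2VAL(PSR_VEC, psr);                  // vec == 0x22
+*/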
+/*@} end of group CSI_core_bitfield */
+
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
+
+
+/* ##########################  Cache functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_CacheFunctions Cache Functions
+  \brief    Functions that configure Instruction and Data cache.
+  @{
+ */
+
+/**
+  \brief   Enable I-Cache
+  \details Turns on I-Cache
+  */
+__STATIC_INLINE void csi_icache_enable (void)
+{
+   __set_CCR(__get_CCR() | 0x00000004);
+}
+
+
+/**
+  \brief   Disable I-Cache
+  \details Turns off I-Cache
+  */
+__STATIC_INLINE void csi_icache_disable (void)
+{
+   __set_CCR(__get_CCR() & 0xFFFFFFFB);
+}
+
+
+/**
+  \brief   Invalidate I-Cache
+  \details Invalidates I-Cache
+  */
+__STATIC_INLINE void csi_icache_invalid (void)
+{
+    __set_CFR(0x11);                     /* 0x11 == INS_CACHE | CACHE_INV */
+    __set_CFR(INS_CACHE | CACHE_INV);
+}
+
+
+/**
+  \brief   Enable D-Cache
+  \details Turns on D-Cache
+  \note    I-Cache also turns on.
+  */
+__STATIC_INLINE void csi_dcache_enable (void)
+{
+   __set_CCR(__get_CCR() | 0x00000008);
+}
+
+
+/**
+  \brief   Disable D-Cache
+  \details Turns off D-Cache
+  \note    I-Cache also turns off.
+  */
+__STATIC_INLINE void csi_dcache_disable (void)
+{
+   __set_CCR(__get_CCR() & 0xFFFFFFF7);
+}
+
+
+/**
+  \brief   Invalidate D-Cache
+  \details Invalidates D-Cache
+  \note    I-Cache also invalid
+  */
+__STATIC_INLINE void csi_dcache_invalid (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_INV);
+}
+
+
+/**
+  \brief   Clean D-Cache
+  \details Cleans D-Cache
+  \note    I-Cache also cleans
+  */
+__STATIC_INLINE void csi_dcache_clean (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_CLR);
+}
+
+/**
+  \brief   Clean & Invalidate D-Cache
+  \details Cleans and Invalidates D-Cache
+  \note    I-Cache also flush.
+  */
+__STATIC_INLINE void csi_dcache_clean_invalid (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_CLR | CACHE_INV);
+}
+
+__STATIC_INLINE  void set_cache_range (uint32_t start, uint32_t end, uint32_t value)
+{
+    /* if the range is not in SSEG0 or exceeds the per-range limit,
+       fall back to a whole-cache operation first */
+    if (!(start & SSEG0_BASE_ADDR) || ((end - start) & ~(CACHE_RANGE_MAX_SIZE - 1))) {
+        __set_CFR(value);
+    }
+
+    if (value & INS_CACHE) {
+        csi_icache_disable();
+    }
+
+    /* apply the requested operation one cache line (L1_CACHE_BYTES) at a time */
+    uint32_t i;
+    for (i = start; i < end; i += L1_CACHE_BYTES) {
+        __set_CIR(i);                     /* select the line by address */
+        __set_CFR(CACHE_OMS | value);     /* operate on that single line */
+    }
+
+    /* cover a partial trailing line, if any */
+    if (end & (L1_CACHE_BYTES - 1)) {
+        __set_CIR(end);
+        __set_CFR(CACHE_OMS | value);
+    }
+
+    if (value & INS_CACHE) {
+        csi_icache_enable();
+    }
+}
+
+/**
+  \brief   D-Cache Invalidate by address
+  \details Invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_invalid_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_INV));
+}
+
+
+/**
+  \brief   D-Cache Clean by address
+  \details Cleans D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_CLR));
+}
+
+
+/**
+  \brief   D-Cache Clean and Invalidate by address
+  \details Cleans and invalidates D_Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_invalid_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_CLR | CACHE_INV));
+}
+
+
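+/*
+  Usage sketch (illustrative, not part of the vendor API description): around a DMA
+  transfer, dirty lines are written back before the device reads the buffer and stale
+  lines are dropped before the CPU reads data the device wrote. The buffer names are
+  placeholders; addresses and sizes are assumed to be L1_CACHE_BYTES (16-byte) aligned.
+
+      csi_dcache_clean_range((uint32_t *)tx_buf, tx_len);     // write back before DMA reads
+      // ... start the DMA transfer and wait for completion ...
+      csi_dcache_invalid_range((uint32_t *)rx_buf, rx_len);   // discard before CPU reads
+*/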
+/*@} end of CSI_Core_CacheFunctions */
+
+/* ##########################  MMU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_MMUFunctions MMU Functions
+  \brief    Functions that configure MMU.
+  @{
+ */
+
+typedef struct {
+    uint32_t global: 1;         /* tlb page global access. */
+    uint32_t valid: 1;          /* tlb page valid */
+    uint32_t writeable: 1;      /* tlb page writeable */
+    uint32_t cacheable: 1;      /* tlb page cacheable*/
+    uint32_t is_secure: 1;      /* tlb page security access */
+    uint32_t strong_order: 1;   /* the sequence of accessing data on tlb page is corresponding to the program flow? */
+    uint32_t bufferable: 1;     /* tlb page bufferable */
+} page_attr_t;
+
+typedef enum {
+    PAGE_SIZE_4KB   = 0x000,
+    PAGE_SIZE_16KB  = 0x003,
+    PAGE_SIZE_64KB  = 0x00F,
+    PAGE_SIZE_256KB = 0x03F,
+    PAGE_SIZE_1MB   = 0x0FF,
+    PAGE_SIZE_4MB   = 0x3FF,
+    PAGE_SIZE_16MB  = 0xFFF
+} page_size_e;
+
+
+/**
+  \brief  enable mmu
+  \details
+  */
+__STATIC_INLINE void csi_mmu_enable(void)
+{
+    __set_CCR(__get_CCR() | (1u << CCR_MP_Pos));
+}
+
+/**
+  \brief  disable mmu
+  \details
+  */
+__STATIC_INLINE void csi_mmu_disable(void)
+{
+    __set_CCR(__get_CCR() & (~(1u << CCR_MP_Pos)));
+}
+
+/**
+  \brief  create a tlb page entry with the given attributes.
+  \details
+  \param [in]  vaddr     virtual address.
+  \param [in]  paddr     physical address.
+  \param [in]  asid      address space id (default: 0).
+  \param [in]  attr      \ref page_attr_t. tlb page attribute.
+  */
+__STATIC_INLINE void csi_mmu_set_tlb(uint32_t vaddr, uint32_t paddr, uint32_t asid, page_attr_t attr)
+{
+     MPR_Type pgmask;
+     MEH_Type meh;
+     MEL_Type mel;
+     uint32_t vaddr_bit;
+     uint32_t page_feature = 0;
+
+     page_feature |= attr.global << MEL_G_Pos | attr.valid << MEL_V_Pos |
+                     attr.writeable << MEL_D_Pos | attr.cacheable << MEL_C_Pos |
+                     attr.is_secure << MEL_SEC_Pos | attr.strong_order << MEL_SO_Pos |
+                     attr.bufferable << MEL_B_Pos;
+
+     pgmask.w = __get_MPR();
+     vaddr_bit = 44 - __FF0(~((uint32_t)pgmask.b.page_mask));
+
+     meh.b.ASID = (uint8_t)asid;
+     meh.b.VPN  = (vaddr & ((~pgmask.w | 0xFE000000) & 0xFFFFE000)) >> MEH_VPN_Pos;
+     __set_MEH(meh.w);
+
+     __set_MCIR(1u << MCIR_TLBP_Pos);
+
+     mel.w = ((paddr & ~(pgmask.b.page_mask << 12)) | page_feature);
+     if (vaddr & (1 << vaddr_bit)) {
+         __set_MEL1(mel.w);
+     }
+     else {
+         __set_MEL0(mel.w);
+     }
+
+     if (__get_MIR() & (1 << MIR_P_Pos)) {
+        __set_MCIR(1u << MCIR_TLBWR_Pos);
+     } else {
+        __set_MCIR(1u << MCIR_TLBWI_Pos);
+     }
+}
+
+
+/**
+  \brief  set tlb page size.
+  \details
+  \param [in]  size  tlb page size.
+  */
+__STATIC_INLINE void csi_mmu_set_pagesize(page_size_e size)
+{
+    MPR_Type pgmask;
+    pgmask.b.page_mask = size;
+    __set_MPR(pgmask.w);
+}
+
+
+/**
+  \brief  read MEH, MEL0, MEL1 by tlb index.
+  \details
+  \param [in]    index  tlb index(0, 1, 2, ...)
+  \param [out]   meh    pointer to variable for retrieving MEH.
+  \param [out]   mel0   pointer to variable for retrieving MEL0.
+  \param [out]   mel1   pointer to variable for retrieving MEL1.
+  */
+__STATIC_INLINE void csi_mmu_read_by_index(uint32_t index, uint32_t *meh, uint32_t *mel0, uint32_t *mel1)
+{
+    MIR_Type mir;
+
+    if (meh == NULL || mel0 == NULL || mel1 == NULL) {
+        return;
+    }
+
+    mir.b.Index = index;
+    __set_MIR(mir.w);
+    __set_MCIR(1u << MCIR_TLBR_Pos);
+
+    *meh = __get_MEH();
+    *mel0 = __get_MEL0();
+    *mel1 = __get_MEL1();
+}
+
+
+/**
+  \brief  flush all mmu tlb.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_all(void)
+{
+    __set_MCIR(1u << MCIR_TLBINV_ALL_Pos);
+}
+
+
+/**
+  \brief  flush mmu tlb by index.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_by_index(uint32_t index)
+{
+    MIR_Type mir;
+
+    mir.b.Index = index;
+    __set_MIR(mir.w);
+    __set_MCIR(1u << MCIR_TLBINV_INDEX_Pos);
+}
+
+
+/**
+  \brief  flush mmu tlb by virtual address.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_by_vaddr(uint32_t vaddr, uint32_t asid)
+{
+    __set_MEH(vaddr | (asid & MEH_ASID_Msk));
+    __set_MCIR(1u << MCIR_TLBP_Pos);
+
+    if (__get_MIR() & (1 << MIR_P_Pos)) {
+        return;
+    } else {
+        __set_MCIR(1u << MCIR_TLBINV_INDEX_Pos);
+    }
+}
+
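+/*
+  Usage sketch (illustrative, not part of the vendor API description): a minimal
+  identity mapping of one 4 KB page followed by enabling the MMU. The attribute values
+  below are example assumptions, not recommendations.
+
+      page_attr_t attr = {
+          .global = 1, .valid = 1, .writeable = 1, .cacheable = 1,
+          .is_secure = 0, .strong_order = 0, .bufferable = 1
+      };
+
+      csi_mmu_set_pagesize(PAGE_SIZE_4KB);
+      csi_mmu_set_tlb(0x80000000, 0x80000000, 0, attr);       // vaddr == paddr, ASID 0
+      csi_mmu_enable();
+*/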
+/*@} end of CSI_Core_MMUFunctions */
+
+
+/* ##################################    IRQ Functions  ############################################ */
+
+/**
+  \brief   Save the Irq context
+  \details Save the PSR value before disabling interrupts.
+  \return           PSR value before interrupts were disabled.
+ */
+__STATIC_INLINE uint32_t csi_irq_save(void)
+{
+    uint32_t result;
+    result = __get_PSR();
+    __disable_irq();
+    return(result);
+}
+
+/**
+  \brief   Restore the Irq context
+  \details Restore the saved PSR state.
+  \param [in]      irq_state  psr irq state.
+ */
+__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
+{
+    __set_PSR(irq_state);
+}
+
+/*@} end of IRQ Functions */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CK810_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 973 - 0
lib/sec_library/include/core/core_ck610.h

@@ -0,0 +1,973 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_ck610.h
+ * @brief    CSI CK610 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     02. June 2017
+ ******************************************************************************/
+
+#ifndef __CORE_CK610_H_GENERIC
+#define __CORE_CK610_H_GENERIC
+
+#include <stdint.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup Ck610
+  @{
+ */
+
+/*  CSI CK610 definitions */
+#define __CK610_CSI_VERSION_MAIN  (0x01U)                                      /*!< [31:16] CSI HAL main version */
+#define __CK610_CSI_VERSION_SUB   (0x1EU)                                      /*!< [15:0]  CSI HAL sub version */
+#define __CK610_CSI_VERSION       ((__CK610_CSI_VERSION_MAIN << 16U) | \
+                                   __CK610_CSI_VERSION_SUB           )         /*!< CSI HAL version number */
+
+#define __CK610                   (0x01U)                                      /*!< CK610 Core */
+
+/** __FPU_USED indicates whether an FPU is used or not.
+*/
+#define __FPU_USED       1U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CK610_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_CK610_H_DEPENDANT
+#define __CORE_CK610_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __CK610_REV
+#define __CK610_REV               0x0000U
+#endif
+
+#ifndef __GSR_GCR_PRESENT
+#define __GSR_GCR_PRESENT         0U
+#endif
+
+#ifndef __MPU_PRESENT
+#define __MPU_PRESENT             0U
+#endif
+
+#ifndef __ICACHE_PRESENT
+#define __ICACHE_PRESENT          1U
+#endif
+
+#ifndef __DCACHE_PRESENT
+#define __DCACHE_PRESENT          1U
+#endif
+
+#include <csi_gcc.h>
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group CK610 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+  - Core MGU Register
+  - Core MMU Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK610 processor based devices.
+*/
+
+/**
+  \brief  Access Processor Status Register(PSR)struct definition.
+ */
+typedef union {
+    struct {
+        uint32_t C: 1;                       /*!< bit:      0  Conditional code/Carry flag */
+        uint32_t AF: 1;                      /*!< bit:      1  Alternate register valid control bit */
+        uint32_t _reserved0: 2;              /*!< bit:  2.. 3  Reserved */
+        uint32_t FE: 1;                      /*!< bit:      4  Fast interrupt enable control bit */
+        uint32_t _reserved1: 1;              /*!< bit:      5  Reserved */
+        uint32_t IE: 1;                      /*!< bit:      6  Interrupt effective control bit */
+        uint32_t IC: 1;                      /*!< bit:      7  Interrupt control bit */
+        uint32_t EE: 1;                      /*!< bit:      8  Abnormally effective control bit */
+        uint32_t MM: 1;                      /*!< bit:      9  Unsymmetrical masking bit */
+        uint32_t _reserved2: 2;              /*!< bit: 10..11  Reserved */
+        uint32_t TE: 1;                      /*!< bit:     12  Trace transmission control bit */
+        uint32_t TP: 1;                      /*!< bit:     13  Pending trace exception set bit */
+        uint32_t TM: 2;                      /*!< bit: 14..15  Tracing mode bit */
+        uint32_t VEC: 8;                     /*!< bit: 16..23  Abnormal event vector value */
+        uint32_t CPID: 4;                    /*!< bit: 24..27  Number of processor currently running */
+        uint32_t _reserved3: 3;              /*!< bit: 28..30  Reserved */
+        uint32_t S: 1;                       /*!< bit:     31  Superuser mode set bit */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} PSR_Type;
+
+/* PSR Register Definitions */
+#define PSR_S_Pos                          31U                                            /*!< PSR: S Position */
+#define PSR_S_Msk                          (0x1UL << PSR_S_Pos)                           /*!< PSR: S Mask */
+
+#define PSR_CPID_Pos                       24U                                            /*!< PSR: CPID Position */
+#define PSR_CPID_Msk                       (0xFUL << PSR_CPID_Pos)                        /*!< PSR: CPID Mask */
+
+#define PSR_VEC_Pos                        16U                                            /*!< PSR: VEC Position */
+#define PSR_VEC_Msk                        (0xFFUL << PSR_VEC_Pos)                        /*!< PSR: VEC Mask */
+
+#define PSR_TM_Pos                         14U                                            /*!< PSR: TM Position */
+#define PSR_TM_Msk                         (0x3UL << PSR_TM_Pos)                          /*!< PSR: TM Mask */
+
+#define PSR_TP_Pos                         13U                                            /*!< PSR: TP Position */
+#define PSR_TP_Msk                         (0x1UL << PSR_TM_Pos)                          /*!< PSR: TP Mask */
+
+#define PSR_TE_Pos                         12U                                            /*!< PSR: TE Position */
+#define PSR_TE_Msk                         (0x1UL << PSR_TE_Pos)                          /*!< PSR: TE Mask */
+
+#define PSR_MM_Pos                         9U                                             /*!< PSR: MM Position */
+#define PSR_MM_Msk                         (0x1UL << PSR_MM_Pos)                          /*!< PSR: MM Mask */
+
+#define PSR_EE_Pos                         8U                                             /*!< PSR: EE Position */
+#define PSR_EE_Msk                         (0x1UL << PSR_EE_Pos)                          /*!< PSR: EE Mask */
+
+#define PSR_IC_Pos                         7U                                             /*!< PSR: IC Position */
+#define PSR_IC_Msk                         (0x1UL << PSR_IC_Pos)                          /*!< PSR: IC Mask */
+
+#define PSR_IE_Pos                         6U                                             /*!< PSR: IE Position */
+#define PSR_IE_Msk                         (0x1UL << PSR_IE_Pos)                          /*!< PSR: IE Mask */
+
+#define PSR_FE_Pos                         4U                                             /*!< PSR: FE Position */
+#define PSR_FE_Msk                         (0x1UL << PSR_FE_Pos)                          /*!< PSR: FE Mask */
+
+#define PSR_AF_Pos                         1U                                             /*!< PSR: AF Position */
+#define PSR_AF_Msk                         (0x1UL << PSR_AF_Pos)                          /*!< PSR: AF Mask */
+
+#define PSR_C_Pos                          0U                                             /*!< PSR: C Position */
+#define PSR_C_Msk                          (0x1UL << PSR_C_Pos)                           /*!< PSR: C Mask */
+
+/**
+  \brief Consortium definition for accessing Cache Configuration Registers(CCR, CR<18, 0>).
+ */
+typedef union {
+    struct {
+        uint32_t MP: 2;                      /*!< bit:  0..1   Memory protection settings */
+        uint32_t IE: 1;                      /*!< bit:      2  Instruction cache enable */
+        uint32_t DE: 1;                      /*!< bit:      3  Data cache enable */
+        uint32_t WB: 1;                      /*!< bit:      4  Cache write back */
+        uint32_t RS: 1;                      /*!< bit:      5  Address return stack settings */
+        uint32_t Z:  1;                      /*!< bit:      6  Allow predictive jump bit */
+        uint32_t BE: 1;                      /*!< bit:      7  Endian mode */
+        uint32_t SCK: 3;                     /*!< bit:  8..10  The clock ratio of the system and the processor */
+        uint32_t _reserved0: 21;             /*!< bit:  11..31 Reserved */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} CCR_Type;
+
+/* CCR Register Definitions */
+#define CCR_SCK_Pos                        8U                                               /*!< CCR: SCK Position */
+#define CCR_SCK_Msk                        (0x7UL << CCR_SCK_Pos)                           /*!< CCR: SCK Mask */
+
+#define CCR_BE_Pos                         7U                                               /*!< CCR: BE Position */
+#define CCR_BE_Msk                         (0x1UL << CCR_BE_Pos)                            /*!< CCR: BE Mask */
+
+#define CCR_Z_Pos                          6U                                               /*!< CCR: Z Position */
+#define CCR_Z_Msk                          (0x1UL << CCR_Z_Pos)                             /*!< CCR: Z Mask */
+
+#define CCR_RS_Pos                         5U                                               /*!< CCR: RS Position */
+#define CCR_RS_Msk                         (0x1UL << CCR_RS_Pos)                            /*!< CCR: RS Mask */
+
+#define CCR_WB_Pos                         4U                                               /*!< CCR: WB Position */
+#define CCR_WB_Msk                         (0x1UL << CCR_WB_Pos)                            /*!< CCR: WB Mask */
+
+#define CCR_DE_Pos                         3U                                               /*!< CCR: DE Position */
+#define CCR_DE_Msk                         (0x1UL << CCR_DE_Pos)                            /*!< CCR: DE Mask */
+
+#define CCR_IE_Pos                         2U                                               /*!< CCR: IE Position */
+#define CCR_IE_Msk                         (0x1UL << CCR_IE_Pos)                            /*!< CCR: IE Mask */
+
+#define CCR_MP_Pos                         0U                                               /*!< CCR: MP Position */
+#define CCR_MP_Msk                         (0x3UL << CCR_MP_Pos)                            /*!< CCR: MP Mask */
+
+
+/**
+  \brief  Consortium definition for accessing cacheable and access permission configuration registers(CAPR, CR<19,0>)
+ */
+typedef union {
+    struct {
+        uint32_t C0: 1;                      /*!< bit:      0  Cacheable setting */
+        uint32_t C1: 1;                      /*!< bit:      1  Cacheable setting */
+        uint32_t C2: 1;                      /*!< bit:      2  Cacheable setting */
+        uint32_t C3: 1;                      /*!< bit:      3  Cacheable setting */
+        uint32_t _reserved0: 4;              /*!< bit:  4.. 7  Reserved */
+        uint32_t AP0: 2;                     /*!< bit:  8.. 9  access permissions settings bit */
+        uint32_t AP1: 2;                     /*!< bit: 10..11  access permissions settings bit */
+        uint32_t AP2: 2;                     /*!< bit: 12..13  access permissions settings bit */
+        uint32_t AP3: 2;                     /*!< bit: 14..15  access permissions settings bit */
+        uint32_t _reserved1: 16;             /*!< bit: 16..31  Reserved */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CAPR_Type;
+
+/* CAPR Register Definitions */
+#define CAPR_AP3_Pos                       14U                                            /*!< CAPR: AP3 Position */
+#define CAPR_AP3_Msk                       (0x3UL << CAPR_AP3_Pos)                        /*!< CAPR: AP3 Mask */
+
+#define CAPR_AP2_Pos                       12U                                            /*!< CAPR: AP2 Position */
+#define CAPR_AP2_Msk                       (0x3UL << CAPR_AP2_Pos)                        /*!< CAPR: AP2 Mask */
+
+#define CAPR_AP1_Pos                       10U                                            /*!< CAPR: AP1 Position */
+#define CAPR_AP1_Msk                       (0x3UL << CAPR_AP1_Pos)                        /*!< CAPR: AP1 Mask */
+
+#define CAPR_AP0_Pos                       8U                                             /*!< CAPR: AP0 Position */
+#define CAPR_AP0_Msk                       (0x3UL << CAPR_AP0_Pos)                        /*!< CAPR: AP0 Mask */
+
+#define CAPR_X3_Pos                        3U                                             /*!< CAPR: X3 Position */
+#define CAPR_X3_Msk                        (0x1UL << CAPR_X3_Pos)                         /*!< CAPR: X3 Mask */
+
+#define CAPR_X2_Pos                        2U                                             /*!< CAPR: X2 Position */
+#define CAPR_X2_Msk                        (0x1UL << CAPR_X2_Pos)                         /*!< CAPR: X2 Mask */
+
+#define CAPR_X1_Pos                        1U                                             /*!< CAPR: X1 Position */
+#define CAPR_X1_Msk                        (0x1UL << CAPR_X1_Pos)                         /*!< CAPR: X1 Mask */
+
+#define CAPR_X0_Pos                        0U                                             /*!< CAPR: X0 Position */
+#define CAPR_X0_Msk                        (0x1UL << CAPR_X0_Pos)                         /*!< CAPR: X0 Mask */
+
+
+/**
+  \brief  Consortium definition for accessing control register(PACR, CR<20,0>).
+ */
+typedef union {
+    struct {
+        uint32_t E: 1;                       /*!< bit:      0  Effective setting of protected area */
+        uint32_t size: 5;                    /*!< bit:  1.. 5  Size of protected area */
+        uint32_t _reserved0: 6;              /*!< bit:  6.. 11 Reserved */
+        uint32_t base_addr: 20;              /*!< bit:  12..31 The high position of the address of a protected area */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} PACR_Type;
+
+    /* PACR Register Definitions */
+#define PACR_BASE_ADDR_Pos                 12U                                            /*!< PACR: base_addr Position */
+#define PACR_BASE_ADDR_Msk                 (0xFFFFFUL << PACR_BASE_ADDR_Pos)              /*!< PACR: base_addr Mask */
+
+#define PACR_SIZE_Pos                      1U                                             /*!< PACR: Size Position */
+#define PACR_SIZE_Msk                      (0x1FUL << PACR_SIZE_Pos)                      /*!< PACR: Size Mask */
+
+#define PACR_E_Pos                         0U                                             /*!< PACR: E Position */
+#define PACR_E_Msk                         (0x1UL << PACR_E_Pos)                          /*!< PACR: E Mask */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register(PRSR,CR<21,0>).
+ */
+typedef union {
+    struct {
+        uint32_t RID: 2;                     /*!< bit:  0.. 1  Protected area index value */
+        uint32_t _reserved0: 30;             /*!< bit:  2..31  Reserved */
+    } b;                                     /*!< Structure    Access by bit */
+    uint32_t w;                              /*!< Type         Access by whole register */
+} PRSR_Type;
+
+/* PRSR Register Definitions */
+#define PRSR_RID_Pos                       0U                                            /*!< PRSR: RID Position */
+#define PRSR_RID_Msk                       (0x3UL << PRSR_RID_Pos)                       /*!< PRSR: RID Mask */
+
+/**
+  \brief  Consortium definition for accessing mmu index register(MIR,CP15_CR0).
+ */
+typedef union {
+    struct {
+        uint32_t Index: 10;
+        uint32_t _reserved: 20;
+        uint32_t TF: 1;
+        uint32_t P: 1;
+    } b;
+    uint32_t w;
+} MIR_Type;
+
+/* MIR Register Definitions */
+#define MIR_P_Pos                          31                                            /*!< PRSR: P(TLBP instruction) Position */
+#define MIR_P_Msk                          (0x1UL << MIR_P_Pos)                          /*!< PRSR: P(TLBP instruction) Mask */
+
+#define MIR_TF_Pos                         30                                            /*!< PRSR: Tfatal Position */
+#define MIR_TF_Msk                         (0x1UL << MIR_TF_Pos)                         /*!< PRSR: Tfatal Mask */
+
+#define MIR_Index_Pos                      0                                             /*!< PRSR: Index Position */
+#define MIR_Index_Msk                      (0x3ffUL << MIR_Index_Pos)                    /*!< PRSR: Index Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(MEL, CP15_CR2 and CP15_CR3).
+ */
+typedef union {
+    struct {
+        uint32_t G: 1;
+        uint32_t V: 1;
+        uint32_t D: 1;
+        uint32_t C: 3;
+        uint32_t PFN: 20;
+        uint32_t _reserved: 6;
+    } b;
+    uint32_t w;
+} MEL_Type;
+
+/* MEL Register Definitions */
+#define MEL_PFN_Pos                        6                                             /*!< MEL: PFN Position */
+#define MEL_PFN_Msk                        (0xFFFFFUL << MEL_PFN_Pos)                    /*!< MEL: PFN Mask */
+
+#define MEL_C_Pos                          3                                             /*!< MEL: C Position */
+#define MEL_C_Msk                          (0x7UL << MEL_C_Pos)                          /*!< MEL: C Mask */
+
+#define MEL_D_Pos                          2                                             /*!< MEL: D Position */
+#define MEL_D_Msk                          (0x1UL << MEL_D_Pos)                          /*!< MEL: D Mask */
+
+#define MEL_V_Pos                          1                                             /*!< MEL: V Position */
+#define MEL_V_Msk                          (0x1UL << MEL_V_Pos)                          /*!< MEL: V Mask */
+
+#define MEL_G_Pos                          0                                             /*!< MEL: G Position */
+#define MEL_G_Msk                          (0x1UL << MEL_G_Pos)                          /*!< MEL: G Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(MEH, CP15_CR4).
+ */
+typedef union {
+    struct {
+        uint32_t ASID :8;
+        uint32_t _reserved :4;
+        uint32_t VPN :20;
+    } b;
+    uint32_t w;
+} MEH_Type;
+
+/* MEH Register Definitions */
+#define MEH_VPN_Pos                        12                                             /*!< MEH: VPN Position */
+#define MEH_VPN_Msk                        (0xFFFFFUL << MEH_VPN_Pos)                     /*!< MEH: VPN Mask */
+
+#define MEH_ASID_Pos                       0                                              /*!< MEH: ASID Position */
+#define MEH_ASID_Msk                       (0xFFUL << MEH_ASID_Pos)                       /*!< MEH: ASID Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(MPR, CP15_CR6).
+ */
+typedef union {
+    struct {
+        uint32_t _reserved0: 13;
+        uint32_t page_mask: 12;
+        uint32_t _reserved1: 7;
+    } b;
+    uint32_t w;
+} MPR_Type;
+
+/* MPR Register Definitions */
+#define MPR_PAGE_MASK_Pos                        13                                             /*!< MPR: PAGE_MASK Position */
+#define MPR_PAGE_MASK_Msk                        (0xFFFUL << MPR_PAGE_MASK_Pos)                 /*!< MPR: PAGE_MASK Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(MCIR, CP15_CR8).
+ */
+typedef union {
+    struct {
+        uint32_t ASID: 8;
+        uint32_t _reserved: 17;
+        uint32_t TLBINV_INDEX: 1;
+        uint32_t TLBINV_ALL: 1;
+        uint32_t TLBINV: 1;
+        uint32_t TLBWR: 1;
+        uint32_t TLBWI: 1;
+        uint32_t TLBR: 1;
+        uint32_t TLBP: 1;
+    } b;
+    uint32_t w;
+} MCIR_Type;
+
+/* MCIR Register Definitions */
+#define MCIR_TLBP_Pos                        31                                               /*!< MCIR: TLBP Position */
+#define MCIR_TLBP_Msk                        (0x1UL << MCIR_TLBP_Pos)                         /*!< MCIR: TLBP Mask */
+
+#define MCIR_TLBR_Pos                        30                                               /*!< MCIR: TLBR Position */
+#define MCIR_TLBR_Msk                        (0x1UL << MCIR_TLBR_Pos)                         /*!< MCIR: TLBR Mask */
+
+#define MCIR_TLBWI_Pos                       29                                               /*!< MCIR: TLBWI Position */
+#define MCIR_TLBWI_Msk                       (0x1UL << MCIR_TLBWI_Pos)                        /*!< MCIR: TLBWI Mask */
+
+#define MCIR_TLBWR_Pos                       28                                               /*!< MCIR: TLBWR Position */
+#define MCIR_TLBWR_Msk                       (0x1UL << MCIR_TLBWR_Pos)                        /*!< MCIR: TLBWR Mask */
+
+#define MCIR_TLBINV_Pos                      27                                               /*!< MCIR: TLBINV Position */
+#define MCIR_TLBINV_Msk                      (0x1UL << MCIR_TLBINV_Pos)                       /*!< MCIR: TLBINV Mask */
+
+#define MCIR_TLBINV_ALL_Pos                  26                                               /*!< MCIR: TLBINV_ALL Position */
+#define MCIR_TLBINV_ALL_Msk                  (0x1UL << MCIR_TLBINV_ALL_Pos)                   /*!< MCIR: TLBINV_ALL Mask */
+
+#define MCIR_TLBINV_INDEX_Pos                25                                               /*!< MCIR: TLBINV_INDEX Position */
+#define MCIR_TLBINV_INDEX_Msk                (0x1UL << MCIR_TLBINV_INDEX_Pos)                 /*!< MCIR: TLBINV_INDEX Mask */
+
+#define MCIR_ASID_Pos                        0                                                /*!< MCIR: ASID Position */
+#define MCIR_ASID_Msk                        (0xFFUL << MCIR_ASID_Pos)                        /*!< MCIR: ASID Mask */
+
+
+/*@} end of group CSI_CORE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+
+#define SSEG0_BASE_ADDR         0x80000000
+#define CACHE_RANGE_MAX_SIZE    0x80000
+
+#define INS_CACHE               (1 << 0)
+#define DATA_CACHE              (1 << 1)
+#define CACHE_INV               (1 << 4)
+#define CACHE_CLR               (1 << 5)
+#define CACHE_OMS               (1 << 6)
+#define CACHE_ITS               (1 << 7)
+#define CACHE_LICF              (1 << 31)
+
+#define    L1_CACHE_SHIFT       4      /* 16 Bytes */
+#define    L1_CACHE_BYTES       (1 << L1_CACHE_SHIFT)
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)
+
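+/**
+  \brief   Example: pack and unpack MEH fields with _VAL2FLD/_FLD2VAL.
+  \details Illustrative sketch only; the helper name is hypothetical and not
+           part of the original vendor API.
+*/
+__STATIC_INLINE uint32_t csi_example_pack_meh(uint32_t vpn, uint32_t asid)
+{
+    /* place each field value into its bit range of the register image */
+    uint32_t meh = _VAL2FLD(MEH_VPN, vpn) | _VAL2FLD(MEH_ASID, asid);
+
+    /* _FLD2VAL extracts a field back out of the packed register value */
+    uint32_t asid_back = _FLD2VAL(MEH_ASID, meh);
+    (void)asid_back;
+
+    return meh;
+}
+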
+/*@} end of group CSI_core_bitfield */
+
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+/* ##########################  Cache functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_CacheFunctions Cache Functions
+  \brief    Functions that configure Instruction and Data cache.
+  @{
+ */
+
+/**
+  \brief   Enable I-Cache
+  \details Turns on I-Cache
+  */
+__STATIC_INLINE void csi_icache_enable (void)
+{
+   __set_CCR(__get_CCR() | 0x00000004);
+}
+
+
+/**
+  \brief   Disable I-Cache
+  \details Turns off I-Cache
+  */
+__STATIC_INLINE void csi_icache_disable (void)
+{
+   __set_CCR(__get_CCR() & 0xFFFFFFFB);
+}
+
+
+/**
+  \brief   Invalidate I-Cache
+  \details Invalidates I-Cache
+  */
+__STATIC_INLINE void csi_icache_invalid (void)
+{
+    __set_CFR(0x11);
+    __set_CFR(INS_CACHE | CACHE_INV);
+}
+
+
+/**
+  \brief   Enable D-Cache
+  \details Turns on D-Cache
+  \note    I-Cache also turns on.
+  */
+__STATIC_INLINE void csi_dcache_enable (void)
+{
+   __set_CCR(__get_CCR() | 0x00000008);
+}
+
+
+/**
+  \brief   Disable D-Cache
+  \details Turns off D-Cache
+  \note    I-Cache also turns off.
+  */
+__STATIC_INLINE void csi_dcache_disable (void)
+{
+   __set_CCR(__get_CCR() & 0xFFFFFFF7);
+}
+
+
+/**
+  \brief   Invalidate D-Cache
+  \details Invalidates D-Cache
+  \note    I-Cache is also invalidated.
+  */
+__STATIC_INLINE void csi_dcache_invalid (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_INV);
+}
+
+
+/**
+  \brief   Clean D-Cache
+  \details Cleans D-Cache
+  \note    I-Cache is also cleaned.
+  */
+__STATIC_INLINE void csi_dcache_clean (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_CLR);
+}
+
+
+/**
+  \brief   Clean & Invalidate D-Cache
+  \details Cleans and Invalidates D-Cache
+  \note    I-Cache is also flushed.
+  */
+__STATIC_INLINE void csi_dcache_clean_invalid (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_CLR | CACHE_INV);
+}
+
+__STATIC_INLINE  void set_cache_range (uint32_t start, uint32_t end, uint32_t value)
+{
+    if (!(start & SSEG0_BASE_ADDR) || (end - start) &~(CACHE_RANGE_MAX_SIZE - 1)) {
+        __set_CFR(value);
+    }
+
+    if (value & INS_CACHE) {
+        csi_icache_disable();
+    }
+    uint32_t i;
+    for (i = start; i < end; i += L1_CACHE_BYTES) {
+        __set_CIR(i);
+        __set_CFR(CACHE_OMS | value);
+    }
+
+    if (end & (L1_CACHE_BYTES-1)) {
+        __set_CIR(end);
+        __set_CFR(CACHE_OMS | value);
+    }
+
+    if (value & INS_CACHE) {
+        csi_icache_enable();
+    }
+
+}
+
+/**
+  \brief   D-Cache Invalidate by address
+  \details Invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_invalid_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_INV));
+}
+
+
+/**
+  \brief   D-Cache Clean by address
+  \details Cleans D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_clean_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_CLR));
+}
+
+
+/**
+  \brief   D-Cache Clean and Invalidate by address
+  \details Cleans and invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_clean_invalid_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_CLR | CACHE_INV));
+}
+
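+/**
+  \brief   Example: D-Cache maintenance around a DMA transfer.
+  \details Illustrative sketch only; the helper name and the DMA step are
+           assumptions, not part of the original vendor API. The buffer is
+           assumed 16-byte aligned.
+*/
+__STATIC_INLINE void csi_example_dma_cache_maintenance(uint32_t *buf, int32_t len)
+{
+    /* write back dirty lines so the device sees the CPU's data */
+    csi_dcache_clean_range(buf, len);
+
+    /* ... start the DMA transfer into/out of buf and wait for completion ... */
+
+    /* discard stale lines so the CPU re-reads what the device wrote */
+    csi_dcache_invalid_range(buf, len);
+}
+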
+/*@} end of CSI_Core_CacheFunctions */
+
+
+/* ##########################  MMU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_MMUFunctions MMU Functions
+  \brief    Functions that configure MMU.
+  @{
+ */
+
+typedef struct {
+    uint32_t global: 1;         /* tlb page global access. */
+    uint32_t valid: 1;          /* tlb page valid */
+    uint32_t writeable: 1;      /* tlb page writeable */
+    uint32_t cacheable: 1;      /* tlb page cacheable*/
+} page_attr_t;
+
+typedef enum {
+    PAGE_SIZE_4KB   = 0x000,
+    PAGE_SIZE_16KB  = 0x003,
+    PAGE_SIZE_64KB  = 0x00F,
+    PAGE_SIZE_256KB = 0x03F,
+    PAGE_SIZE_1MB   = 0x0FF,
+    PAGE_SIZE_4MB   = 0x3FF,
+    PAGE_SIZE_16MB  = 0xFFF
+} page_size_e;
+
+/**
+  \brief  enable mmu
+  \details
+  */
+__STATIC_INLINE void csi_mmu_enable(void)
+{
+    __set_CCR(__get_CCR() | (1u << CCR_MP_Pos));
+}
+
+/**
+  \brief  disable mmu
+  \details
+  */
+__STATIC_INLINE void csi_mmu_disable(void)
+{
+    __set_CCR(__get_CCR() & (~(1u << CCR_MP_Pos)));
+}
+
+/**
+  \brief  create page with feature.
+  \details
+  \param [in]  vaddr     virtual address.
+  \param [in]  paddr     physical address.
+  \param [in]  asid      address space id (default: 0).
+  \param [in]  attr      \ref page_attr_t. tlb page attribute.
+  */
+__STATIC_INLINE void csi_mmu_set_tlb(uint32_t vaddr, uint32_t paddr, uint32_t asid, page_attr_t attr)
+{
+    MPR_Type pgmask;
+    MEL_Type mel;
+    MEH_Type meh;
+    uint32_t vaddr_bit = 0;
+    uint32_t page_feature = 0;
+
+    page_feature |= attr.global << MEL_G_Pos | attr.valid << MEL_V_Pos |
+                    attr.writeable << MEL_D_Pos | (attr.cacheable | 0x2) << MEL_C_Pos;
+
+    pgmask.w = __FF1(__get_MPR());
+    vaddr_bit = (pgmask.w == 32 ? 12 : (31 - pgmask.w));
+
+    meh.b.ASID = asid;
+    meh.b.VPN  = (vaddr & ((~pgmask.w | 0xFE000000) & 0xFFFFE000)) >> MEH_VPN_Pos;
+    __set_MEH(meh.w);
+
+    __set_MCIR(1u << MCIR_TLBP_Pos);
+
+    mel.w = (((paddr >> 6) & ~(pgmask.b.page_mask << 6)) | page_feature);
+
+    if (vaddr & (1 << vaddr_bit)) {
+        __set_MEL1(mel.w);
+    } else {
+        __set_MEL0(mel.w);
+    }
+
+    if (__get_MIR() & (1 << MIR_P_Pos)) {
+       __set_MCIR(1u << MCIR_TLBWR_Pos);
+    } else {
+       __set_MCIR(1u << MCIR_TLBWI_Pos);
+    }
+}
+
+
+/**
+  \brief  set mmu tlb page size
+  \details
+  \param [in]  size  tlb page size.
+  */
+__STATIC_INLINE void csi_mmu_set_pagesize(page_size_e size)
+{
+    MPR_Type mpr;
+    mpr.w = __get_MPR();
+    mpr.b.page_mask = size;
+    __set_MPR(mpr.w);
+}
+
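+/**
+  \brief   Example: identity-map one cacheable, writeable 4KB page for ASID 0.
+  \details Illustrative sketch only; the helper name is hypothetical and not
+           part of the original vendor API.
+*/
+__STATIC_INLINE void csi_example_map_page(uint32_t addr)
+{
+    page_attr_t attr;
+
+    attr.global    = 1U;    /* visible to all ASIDs */
+    attr.valid     = 1U;    /* mapping is valid */
+    attr.writeable = 1U;    /* allow stores */
+    attr.cacheable = 1U;    /* cacheable page */
+
+    csi_mmu_set_pagesize(PAGE_SIZE_4KB);
+    csi_mmu_set_tlb(addr, addr, 0U, attr);
+}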
+
+/**
+  \brief  read MEH, MEL0, MEL1 by tlb index.
+  \details
+  \param [in]    index  tlb index(0, 1, 2, ...)
+  \param [out]   meh    pointer to variable for retrieving MEH.
+  \param [out]   mel0   pointer to variable for retrieving MEL0.
+  \param [out]   mel1   pointer to variable for retrieving MEL1.
+  */
+__STATIC_INLINE void csi_mmu_read_by_index(uint32_t index, uint32_t *meh, uint32_t *mel0, uint32_t *mel1)
+{
+    MIR_Type mir;
+
+    if (meh == NULL || mel0 == NULL || mel1 == NULL) {
+        return;
+    }
+
+    mir.b.Index = index;
+    __set_MIR(mir.w);
+    __set_MCIR(1u << MCIR_TLBR_Pos);
+
+    *meh = __get_MEH();
+    *mel0 = __get_MEL0();
+    *mel1 = __get_MEL1();
+}
+
+
+/**
+  \brief  flush all mmu tlb.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_all(void)
+{
+    __set_MCIR(1u << MCIR_TLBINV_ALL_Pos);
+}
+
+
+/**
+  \brief  flush mmu tlb by index.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_by_index(uint32_t index)
+{
+    MIR_Type mir;
+
+    mir.b.Index = index;
+    __set_MIR(mir.w);
+    __set_MCIR(1u << MCIR_TLBINV_INDEX_Pos);
+}
+
+
+/**
+  \brief  flush mmu tlb by virtual address.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_by_vaddr(uint32_t vaddr, uint32_t asid)
+{
+    __set_MEH(vaddr | (asid & MEH_ASID_Msk));
+    __set_MCIR(__get_MCIR() | (1 << MCIR_TLBP_Pos));
+
+    if (__get_MIR() & (1 << MIR_P_Pos)) {
+        return;
+    } else {
+        __set_MCIR(__get_MCIR() | (1 << MCIR_TLBINV_INDEX_Pos));
+    }
+}
+
+/*@} end of CSI_Core_MMUFunctions */
+
+/* ##########################  MPU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_MPUFunctions MPU Functions
+  \brief    Functions that configure MPU.
+  @{
+ */
+
+typedef enum {
+    REGION_SIZE_4KB      = 0xB,
+    REGION_SIZE_8KB      = 0xC,
+    REGION_SIZE_16KB     = 0xD,
+    REGION_SIZE_32KB     = 0xE,
+    REGION_SIZE_64KB     = 0xF,
+    REGION_SIZE_128KB    = 0x10,
+    REGION_SIZE_256KB    = 0x11,
+    REGION_SIZE_512KB    = 0x12,
+    REGION_SIZE_1MB      = 0x13,
+    REGION_SIZE_2MB      = 0x14,
+    REGION_SIZE_4MB      = 0x15,
+    REGION_SIZE_8MB      = 0x16,
+    REGION_SIZE_16MB     = 0x17,
+    REGION_SIZE_32MB     = 0x18,
+    REGION_SIZE_64MB     = 0x19,
+    REGION_SIZE_128MB    = 0x1A,
+    REGION_SIZE_256MB    = 0x1B,
+    REGION_SIZE_512MB    = 0x1C,
+    REGION_SIZE_1GB      = 0x1D,
+    REGION_SIZE_2GB      = 0x1E,
+    REGION_SIZE_4GB      = 0x1F
+} region_size_e;
+
+typedef enum {
+    AP_BOTH_INACCESSIBLE = 0,
+    AP_SUPER_RW_USER_INACCESSIBLE,
+    AP_SUPER_RW_USER_RDONLY,
+    AP_BOTH_RW
+} access_permission_e;
+
+typedef struct {
+    access_permission_e ap: 2;    /* super user and normal user access.*/
+    uint32_t c: 1;                /* cacheable */
+} mpu_region_attr_t;
+/**
+  \brief  enable mpu
+  \details
+  */
+__STATIC_INLINE void csi_mpu_enable(void)
+{
+    __set_CCR(__get_CCR() | CCR_MP_Msk);
+}
+
+/**
+  \brief  disable mpu
+  \details
+  */
+__STATIC_INLINE void csi_mpu_disable(void)
+{
+    __set_CCR(__get_CCR() & (~CCR_MP_Msk));
+}
+
+/**
+  \brief  configure memory protected region.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, 3.).
+  \param [in]  base_addr  base address must be aligned with page size.
+  \param [in]  size       \ref region_size_e. memory protected region size.
+  \param [in]  attr       \ref mpu_region_attr_t. memory protected region attribute.
+  \param [in]  enable     enable or disable memory protected region.
+  */
+__STATIC_INLINE void csi_mpu_config_region(uint32_t idx, uint32_t base_addr, region_size_e size,
+                                           mpu_region_attr_t attr, uint32_t enable)
+{
+    if (idx > 3) {
+        return;
+    }
+
+    CAPR_Type capr;
+    PACR_Type pacr;
+    PRSR_Type prsr;
+
+    capr.w = __get_CAPR();
+    pacr.w = __get_PACR();
+    prsr.w = __get_PRSR();
+
+    pacr.b.base_addr = (base_addr >> PACR_BASE_ADDR_Pos) & (0xFFFFF);
+
+    prsr.b.RID = idx;
+    __set_PRSR(prsr.w);
+
+    if (size != REGION_SIZE_4KB) {
+        pacr.w &= ~(((1u << (size -11)) - 1) << 12);
+    }
+
+    pacr.b.size = size;
+
+    capr.w = (0xFFFFFFFE & capr.w) | (attr.c << idx);
+    capr.w = ((~((0x3) << (2*idx + 8))) & capr.w) | (attr.ap << (2*idx + 8));
+    __set_CAPR(capr.w);
+
+    pacr.b.E = enable;
+    __set_PACR(pacr.w);
+}
+
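+/**
+  \brief   Example: configure MPU region 0 as a 64KB supervisor-only region.
+  \details Illustrative sketch only; the helper name is hypothetical and not
+           part of the original vendor API. base_addr is assumed to be aligned
+           to the region size.
+*/
+__STATIC_INLINE void csi_example_protect_region(uint32_t base_addr)
+{
+    mpu_region_attr_t attr;
+
+    attr.ap = AP_SUPER_RW_USER_INACCESSIBLE;   /* supervisor RW, user no access */
+    attr.c  = 1U;                              /* cacheable region */
+
+    csi_mpu_config_region(0U, base_addr, REGION_SIZE_64KB, attr, 1U);
+    csi_mpu_enable();
+}
+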
+/**
+  \brief  enable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, 3.).
+  */
+__STATIC_INLINE void csi_mpu_enable_region(uint32_t idx)
+{
+    if (idx > 3) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | idx);
+    __set_PACR(__get_PACR() | PACR_E_Msk);
+}
+
+/**
+  \brief  disable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, 3.).
+  */
+__STATIC_INLINE void csi_mpu_disable_region(uint32_t idx)
+{
+    if (idx > 3) {
+        return;
+    }
+
+    __set_PRSR((__get_PRSR() & (~PRSR_RID_Msk)) | idx);
+    __set_PACR(__get_PACR() & (~PACR_E_Msk));
+}
+
+/*@} end of CSI_Core_MPUFunctions */
+
+/*@} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CK610_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 18 - 0
lib/sec_library/include/core/core_ck801.h

@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_ck801.h
+ * @brief    CSI CK801 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     02. June 2017
+ ******************************************************************************/
+
+#ifndef __CORE_CK801_H_GENERIC
+#define __CORE_CK801_H_GENERIC
+
+#include <core_801.h>
+
+#endif /* __CORE_CK801_H_GENERIC */

+ 18 - 0
lib/sec_library/include/core/core_ck802.h

@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_ck802.h
+ * @brief    CSI CK802 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     02. June 2017
+ ******************************************************************************/
+
+#ifndef __CORE_CK802_H_GENERIC
+#define __CORE_CK802_H_GENERIC
+
+#include <core_802.h>
+
+#endif /* __CORE_CK802_H_GENERIC */

+ 18 - 0
lib/sec_library/include/core/core_ck803.h

@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_ck803.h
+ * @brief    CSI CK803 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     02. June 2017
+ ******************************************************************************/
+
+#ifndef __CORE_CK803_H_GENERIC
+#define __CORE_CK803_H_GENERIC
+
+#include <core_803.h>
+
+#endif /* __CORE_CK803_H_GENERIC */

+ 847 - 0
lib/sec_library/include/core/core_ck807.h

@@ -0,0 +1,847 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_ck807.h
+ * @brief    CSI CK807 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     26. Jan 2018
+ ******************************************************************************/
+
+#ifndef __CORE_CK807_H_GENERIC
+#define __CORE_CK807_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup CK807
+  @{
+ */
+
+/*  CSI CK807 definitions */
+#define __CK807_CSI_VERSION_MAIN  (0x04U)                                      /*!< [31:16] CSI HAL main version */
+#define __CK807_CSI_VERSION_SUB   (0x1EU)                                      /*!< [15:0]  CSI HAL sub version */
+#define __CK807_CSI_VERSION       ((__CK807_CSI_VERSION_MAIN << 16U) | \
+                                   __CK807_CSI_VERSION_SUB           )         /*!< CSI HAL version number */
+
+#ifndef __CK807
+#define __CK807                   (0x07U)                                      /*!< CK807 Core */
+#endif
+
+/** __FPU_USED indicates whether an FPU is used or not.
+*/
+#define __FPU_USED       1U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CK807_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_CK807_H_DEPENDANT
+#define __CORE_CK807_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __CK807_REV
+#define __CK807_REV               0x0000U
+#endif
+
+#ifndef __GSR_GCR_PRESENT
+#define __GSR_GCR_PRESENT         0U
+#endif
+
+#ifndef __ICACHE_PRESENT
+#define __ICACHE_PRESENT          1U
+#endif
+
+#ifndef __DCACHE_PRESENT
+#define __DCACHE_PRESENT          1U
+#endif
+
+#include <csi_gcc.h>
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group CK807 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK807 processor based devices.
+*/
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \brief  Access Processor Status Register(PSR)struct definition.
+ */
+typedef union {
+    struct {
+        uint32_t C: 1;                       /*!< bit:      0  Conditional code/Carry flag */
+        uint32_t _reserved0: 5;              /*!< bit:  1.. 5  Reserved */
+        uint32_t IE: 1;                      /*!< bit:      6  Interrupt effective control bit */
+        uint32_t IC: 1;                      /*!< bit:      7  Interrupt control bit */
+        uint32_t EE: 1;                      /*!< bit:      8  Abnormally effective control bit */
+        uint32_t MM: 1;                      /*!< bit:      9  Unsymmetrical masking bit */
+        uint32_t _reserved1: 6;              /*!< bit: 10..15  Reserved */
+        uint32_t VEC: 8;                     /*!< bit: 16..23  Abnormal event vector value */
+        uint32_t _reserved2: 5;              /*!< bit: 24..28  Reserved */
+        uint32_t SP: 1;                      /*!< bit:     29  Secure pending bit */
+        uint32_t T: 1;                       /*!< bit:     30  TEE mode bit */
+        uint32_t S: 1;                       /*!< bit:     31  Superuser mode set bit */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PSR_Type;
+
+/* PSR Register Definitions */
+#define PSR_S_Pos                          31U                                            /*!< PSR: S Position */
+#define PSR_S_Msk                          (1UL << PSR_S_Pos)                             /*!< PSR: S Mask */
+
+#define PSR_VEC_Pos                        16U                                            /*!< PSR: VEC Position */
+#define PSR_VEC_Msk                        (0x7FUL << PSR_VEC_Pos)                        /*!< PSR: VEC Mask */
+
+#define PSR_MM_Pos                         9U                                             /*!< PSR: MM Position */
+#define PSR_MM_Msk                         (1UL << PSR_MM_Pos)                            /*!< PSR: MM Mask */
+
+#define PSR_EE_Pos                         8U                                             /*!< PSR: EE Position */
+#define PSR_EE_Msk                         (1UL << PSR_EE_Pos)                            /*!< PSR: EE Mask */
+
+#define PSR_IC_Pos                         7U                                             /*!< PSR: IC Position */
+#define PSR_IC_Msk                         (1UL << PSR_IC_Pos)                            /*!< PSR: IC Mask */
+
+#define PSR_IE_Pos                         6U                                             /*!< PSR: IE Position */
+#define PSR_IE_Msk                         (1UL << PSR_IE_Pos)                            /*!< PSR: IE Mask */
+
+#define PSR_C_Pos                          0U                                             /*!< PSR: C Position */
+#define PSR_C_Msk                          (1UL << PSR_C_Pos)                             /*!< PSR: C Mask */
+
+/**
+  \brief Consortium definition for accessing Cache Configuration Registers(CCR, CR<18, 0>).
+ */
+typedef union {
+    struct {
+        uint32_t MP: 2;                      /*!< bit:  0.. 1  memory protection settings */
+        uint32_t IE: 1;                      /*!< bit:      2  Instruction cache enable */
+        uint32_t DE: 1;                      /*!< bit:      3  Data cache enable */
+        uint32_t WB: 1;                      /*!< bit:      4  Cache write back */
+        uint32_t RS: 1;                      /*!< bit:      5  Address return stack settings */
+        uint32_t Z: 1;                       /*!< bit:      6  Allow predictive jump bit */
+        uint32_t BE: 1;                      /*!< bit:      7  Endian mode */
+        uint32_t SCK: 3;                     /*!< bit:  8..10  the clock ratio of the system and the processor */
+        uint32_t _reserved0: 1;              /*!< bit:     11  Reserved */
+        uint32_t WA: 1;                      /*!< bit:     12  Write allocate enable */
+        uint32_t E_V2: 1;                    /*!< bit:     13  V2 Endian mode */
+        uint32_t BSTE: 1;                    /*!< bit:     14  Burst transmit enable */
+        uint32_t IPE: 1;                     /*!< bit:     15  Indirect predict enable */
+        uint32_t _reserved1: 16;             /*!< bit: 16..31  Reserved */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CCR_Type;
+
+/* CCR Register Definitions */
+#define CCR_IPE_Pos                       15u                                            /*!< CCR: IPE Position */
+#define CCR_IPE_Msk                       (0x1UL << CCR_IPE_Pos)                         /*!< CCR: IPE Mask */
+
+#define CCR_BSTE_Pos                      14u                                            /*!< CCR: BSTE Position */
+#define CCR_BSTE_Msk                      (0x1UL << CCR_BSTE_Pos)                        /*!< CCR: BSTE Mask */
+
+#define CCR_E_V2_Pos                      13U                                            /*!< CCR: E_V2 Position */
+#define CCR_E_V2_Msk                      (0x1UL << CCR_E_V2_Pos)                        /*!< CCR: E_V2 Mask */
+
+#define CCR_WA_Pos                        12u                                            /*!< CCR: WA Position */
+#define CCR_WA_Msk                        (0x1UL << CCR_WA_Pos)                          /*!< CCR: WA Mask */
+
+#define CCR_SCK_Pos                       8U                                             /*!< CCR: SCK Position */
+#define CCR_SCK_Msk                       (0x3UL << CCR_SCK_Pos)                         /*!< CCR: SCK Mask */
+
+#define CCR_BE_Pos                        7U                                             /*!< CCR: BE Position */
+#define CCR_BE_Msk                        (0x1UL << CCR_BE_Pos)                          /*!< CCR: BE Mask */
+
+#define CCR_Z_Pos                         6U                                             /*!< CCR: Z Position */
+#define CCR_Z_Msk                         (0x1UL << CCR_Z_Pos)                           /*!< CCR: Z Mask */
+
+#define CCR_RS_Pos                        5U                                             /*!< CCR: RS Position */
+#define CCR_RS_Msk                        (0x1UL << CCR_RS_Pos)                          /*!< CCR: RS Mask */
+
+#define CCR_WB_Pos                        4U                                             /*!< CCR: WB Position */
+#define CCR_WB_Msk                        (0x1UL << CCR_WB_Pos)                          /*!< CCR: WB Mask */
+
+#define CCR_DE_Pos                        3U                                             /*!< CCR: DE Position */
+#define CCR_DE_Msk                        (0x1UL << CCR_DE_Pos)                          /*!< CCR: DE Mask */
+
+#define CCR_IE_Pos                        2U                                             /*!< CCR: IE Position */
+#define CCR_IE_Msk                        (0x1UL << CCR_IE_Pos)                          /*!< CCR: IE Mask */
+
+#define CCR_MP_Pos                        0U                                             /*!< CCR: MP Position */
+#define CCR_MP_Msk                        (0x3UL << CCR_MP_Pos)                          /*!< CCR: MP Mask */
+
+/**
+  \brief  Consortium definition for accessing mmu index register(MIR,CR<0,15>).
+ */
+typedef union {
+    struct {
+        uint32_t Index: 10;                  /*!< bit:   0.. 9   TLB index */
+        uint32_t _reserved: 20;              /*!< bit:  10.. 29  Reserved */
+        uint32_t TF: 1;                      /*!< bit:       30  TLB fatal error */
+        uint32_t P: 1;                       /*!< bit:       31  TLBP instruction */
+    } b;
+    uint32_t w;
+} MIR_Type;
+
+/* MIR Register Definitions */
+#define MIR_P_Pos                          31                                            /*!< MIR: P(TLBP instruction) Position */
+#define MIR_P_Msk                          (0x1UL << MIR_P_Pos)                          /*!< MIR: P(TLBP instruction) Mask */
+
+#define MIR_TF_Pos                         30                                            /*!< MIR: TF (Tfatal) Position */
+#define MIR_TF_Msk                         (0x1UL << MIR_TF_Pos)                         /*!< MIR: TF (Tfatal) Mask */
+
+#define MIR_Index_Pos                      0                                             /*!< MIR: Index Position */
+#define MIR_Index_Msk                      (0x3ffUL << MIR_Index_Pos)                    /*!< MIR: Index Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of low physical address register(MEL, CR<2,15> and CR<3,15>).
+ */
+typedef union {
+    struct {
+        uint32_t G: 1;                        /*!< bit:       0   Global enable bit */
+        uint32_t V: 1;                        /*!< bit:       1   TLB mapping valid bit */
+        uint32_t D: 1;                        /*!< bit:       2   TLB Page dirty bit */
+        uint32_t C: 1;                        /*!< bit:       3   TLB Page cacheable bit */
+        uint32_t SEC: 1;                      /*!< bit:       4   TLB Page security bit */
+        uint32_t SO: 1;                       /*!< bit:       5   Strong order enable bit */
+        uint32_t B: 1;                        /*!< bit:       6   TLB Page bufferable bit */
+        uint32_t _reserved: 5;                /*!< bit:   7.. 11  Reserved */
+        uint32_t PFN: 20;                     /*!< bit:  12.. 31  Physical frame number */
+    } b;
+    uint32_t w;
+} MEL_Type;
+
+/* MEL Register Definitions */
+#define MEL_PFN_Pos                        12                                            /*!< MEL: PFN Position */
+#define MEL_PFN_Msk                        (0xFFFFFUL << MEL_PFN_Pos)                    /*!< MEL: PFN Mask */
+
+#define MEL_B_Pos                          6                                             /*!< MEL: B Position */
+#define MEL_B_Msk                          (0x1UL << MEL_B_Pos)                          /*!< MEL: B Mask */
+
+#define MEL_SO_Pos                         5                                             /*!< MEL: SO Position */
+#define MEL_SO_Msk                         (0x1UL << MEL_SO_Pos)                         /*!< MEL: SO Mask */
+
+#define MEL_SEC_Pos                        4                                             /*!< MEL: SEC Position */
+#define MEL_SEC_Msk                        (0x1UL << MEL_SEC_Pos)                        /*!< MEL: SEC Mask */
+
+#define MEL_C_Pos                          3                                             /*!< MEL: C Position */
+#define MEL_C_Msk                          (0x1UL << MEL_C_Pos)                          /*!< MEL: C Mask */
+
+#define MEL_D_Pos                          2                                             /*!< MEL: D Position */
+#define MEL_D_Msk                          (0x1UL << MEL_D_Pos)                          /*!< MEL: D Mask */
+
+#define MEL_V_Pos                          1                                             /*!< MEL: V Position */
+#define MEL_V_Msk                          (0x1UL << MEL_V_Pos)                          /*!< MEL: V Mask */
+
+#define MEL_G_Pos                          0                                             /*!< MEL: G Position */
+#define MEL_G_Msk                          (0x1UL << MEL_G_Pos)                          /*!< MEL: G Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(MEH, CR<4,15>).
+ */
+typedef union {
+    struct {
+        uint32_t ASID :8;                     /*!< bit:   0.. 7   ASID */
+        uint32_t _reserved :4;                /*!< bit:   8.. 11  Reserved */
+        uint32_t VPN :20;                     /*!< bit:  12.. 31  Virtual page number */
+    } b;
+    uint32_t w;
+} MEH_Type;
+
+/* MEH Register Definitions */
+#define MEH_VPN_Pos                        12                                             /*!< MEH: VPN Position */
+#define MEH_VPN_Msk                        (0xFFFFFUL << MEH_VPN_Pos)                     /*!< MEH: VPN Mask */
+
+#define MEH_ASID_Pos                       0                                              /*!< MEH: ASID Position */
+#define MEH_ASID_Msk                       (0xFFUL << MEH_ASID_Pos)                       /*!< MEH: ASID Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu page mask register(MPR, CR<6,15>).
+ */
+typedef union {
+    struct {
+        uint32_t _reserved0: 13;              /*!< bit:   0.. 12   Reserved */
+        uint32_t page_mask: 12;               /*!< bit:  13.. 24   Page mask */
+        uint32_t _reserved1: 7;               /*!< bit:  25.. 31   Reserved */
+    } b;
+    uint32_t w;
+} MPR_Type;
+
+/* MPR Register Definitions */
+#define MPR_PAGE_MASK_Pos                        13                                             /*!< MPR: PAGE_MASK Position */
+#define MPR_PAGE_MASK_Msk                        (0xFFFUL << MPR_PAGE_MASK_Pos)                 /*!< MPR: PAGE_MASK Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu control instruction register(MCIR, CR<8,15>).
+ */
+typedef union {
+    struct {
+        uint32_t ASID: 8;                     /*!< bit:   0.. 7   ASID */
+        uint32_t _reserved: 17;               /*!< bit:   8.. 24  Reserved */
+        uint32_t TLBINV_INDEX: 1;             /*!< bit:       25  TLBINV_INDEX */
+        uint32_t TLBINV_ALL: 1;               /*!< bit:       26  TLBINV_ALL */
+        uint32_t TLBINV: 1;                   /*!< bit:       27  TLBINV */
+        uint32_t TLBWR: 1;                    /*!< bit:       28  TLBWR */
+        uint32_t TLBWI: 1;                    /*!< bit:       29  TLBWI */
+        uint32_t TLBR: 1;                     /*!< bit:       30  TLBR */
+        uint32_t TLBP: 1;                     /*!< bit:       31  TLBP */
+    } b;
+    uint32_t w;
+} MCIR_Type;
+
+/* MCIR Register Definitions */
+#define MCIR_TLBP_Pos                        31                                               /*!< MCIR: TLBP Position */
+#define MCIR_TLBP_Msk                        (0x1UL << MCIR_TLBP_Pos)                         /*!< MCIR: TLBP Mask */
+
+#define MCIR_TLBR_Pos                        30                                               /*!< MCIR: TLBR Position */
+#define MCIR_TLBR_Msk                        (0x1UL << MCIR_TLBR_Pos)                         /*!< MCIR: TLBR Mask */
+
+#define MCIR_TLBWI_Pos                       29                                               /*!< MCIR: TLBWI Position */
+#define MCIR_TLBWI_Msk                       (0x1UL << MCIR_TLBWI_Pos)                        /*!< MCIR: TLBWI Mask */
+
+#define MCIR_TLBWR_Pos                       28                                               /*!< MCIR: TLBWR Position */
+#define MCIR_TLBWR_Msk                       (0x1UL << MCIR_TLBWR_Pos)                        /*!< MCIR: TLBWR Mask */
+
+#define MCIR_TLBINV_Pos                      27                                               /*!< MCIR: TLBINV Position */
+#define MCIR_TLBINV_Msk                      (0x1UL << MCIR_TLBINV_Pos)                       /*!< MCIR: TLBINV Mask */
+
+#define MCIR_TLBINV_ALL_Pos                  26                                               /*!< MCIR: TLBINV_ALL Position */
+#define MCIR_TLBINV_ALL_Msk                  (0x1UL << MCIR_TLBINV_ALL_Pos)                   /*!< MCIR: TLBINV_ALL Mask */
+
+#define MCIR_TLBINV_INDEX_Pos                25                                               /*!< MCIR: TLBINV_INDEX Position */
+#define MCIR_TLBINV_INDEX_Msk                (0x1UL << MCIR_TLBINV_INDEX_Pos)                 /*!< MCIR: TLBINV_INDEX Mask */
+
+#define MCIR_ASID_Pos                        0                                                /*!< MCIR: ASID Position */
+#define MCIR_ASID_Msk                        (0xFFUL << MCIR_ASID_Pos)                        /*!< MCIR: ASID Mask */
+
+
+/*@} end of group CSI_CORE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+
+/**
+  \brief  Consortium definition for accessing cache operation function register(CFR, CR<17,0>).
+ */
+typedef union {
+    struct {
+        uint32_t CACHE_SEL: 2;                      /*!< bit:  0..1  Instruction and data cache selection */
+        uint32_t _reserved0: 2;                     /*!< bit:  2..3  Reserved */
+        uint32_t INV: 1;                            /*!< bit:     4  Invalid data in cache */
+        uint32_t CLR: 1;                            /*!< bit:     5  Clear the dirty tlb table */
+        uint32_t OMS: 1;                            /*!< bit:     6  Cache invalid and clear operation mode (one line or all lines)*/
+        uint32_t ITS: 1;                            /*!< bit:     7  Cache invalid and clear operation mode (CIR used as virtual index or SET/WAY/LEVEL index)*/
+        uint32_t _reserved1: 8;                     /*!< bit: 8..15  Reserved */
+        uint32_t BHT_INV: 1;                        /*!< bit:    16  Invalid data in branch history table */
+        uint32_t _reserved2: 14;                    /*!< bit: 17..30 Reserved */
+        uint32_t LICF: 1;                           /*!< bit:     31 Failure of clearing or invalid cache line */
+    } b;                                            /*!< Structure    Access by bit */
+    uint32_t w;                                     /*!< Type         Access by whole register */
+} CFR_Type;
+
+#define CFR_LICF_Pos                     31U                                             /*!< CFR: LICF Position */
+#define CFR_LICF_Msk                     (0x1UL << CFR_LICF_Pos)                         /*!< CFR: LICF Mask */
+
+#define CFR_BHT_INV_Pos                  16U                                             /*!< CFR: BHT Position */
+#define CFR_BHT_INV_Msk                  (0x1UL << CFR_BHT_INV_Pos)                      /*!< CFR: BHT Mask */
+
+#define CFR_ITS_Pos                      7U                                              /*!< CFR: ITS Position */
+#define CFR_ITS_Msk                      (0x1UL << CFR_ITS_Pos)                          /*!< CFR: ITS Mask */
+
+#define CFR_OMS_Pos                      6U                                              /*!< CFR: OMS Position */
+#define CFR_OMS_Msk                      (0x1UL << CFR_OMS_Pos)                          /*!< CFR: OMS Mask */
+
+#define CFR_CLR_Pos                      5U                                              /*!< CFR: CLR Position */
+#define CFR_CLR_Msk                      (0x1UL << CFR_CLR_Pos)                          /*!< CFR: CLR Mask */
+
+#define CFR_INV_Pos                      4U                                              /*!< CFR: INV Position */
+#define CFR_INV_Msk                      (0x1UL << CFR_INV_Pos)                          /*!< CFR: INV Mask */
+
+#define CFR_CACHE_SEL_Pos                0                                               /*!< CFR: CACHE_SEL Position */
+#define CFR_CACHE_SEL_Msk                (0x3UL << CFR_CACHE_SEL_Pos)                    /*!< CFR: CACHE_SEL Mask */
+
+/* CFR Register Definitions */
+/*@} end of group CSI_CACHE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+#define SSEG0_BASE_ADDR         0x80000000
+#define CACHE_RANGE_MAX_SIZE    0x80000
+
+#define INS_CACHE               (1 << 0)
+#define DATA_CACHE              (1 << 1)
+#define CACHE_INV               (1 << 4)
+#define CACHE_CLR               (1 << 5)
+#define CACHE_OMS               (1 << 6)
+#define CACHE_ITS               (1 << 7)
+#define CACHE_LICF              (1 << 31)
+
+#define    L1_CACHE_SHIFT       4      /* 16 Bytes */
+#define    L1_CACHE_BYTES       (1 << L1_CACHE_SHIFT)
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit filed value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)
+
+/*@} end of group CSI_core_bitfield */
+
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
+
+
+/* ##########################  Cache functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_CacheFunctions Cache Functions
+  \brief    Functions that configure Instruction and Data cache.
+  @{
+ */
+
+/**
+  \brief   Enable I-Cache
+  \details Turns on I-Cache
+  */
+__STATIC_INLINE void csi_icache_enable (void)
+{
+   __set_CCR(__get_CCR() | 0x00000004);
+}
+
+
+/**
+  \brief   Disable I-Cache
+  \details Turns off I-Cache
+  */
+__STATIC_INLINE void csi_icache_disable (void)
+{
+   __set_CCR(__get_CCR() & 0xFFFFFFFB);
+}
+
+
+/**
+  \brief   Invalidate I-Cache
+  \details Invalidates I-Cache
+  */
+__STATIC_INLINE void csi_icache_invalid (void)
+{
+    __set_CFR(0x11);
+    __set_CFR(INS_CACHE | CACHE_INV);
+}
+
+
+/**
+  \brief   Enable D-Cache
+  \details Turns on D-Cache
+  \note    I-Cache also turns on.
+  */
+__STATIC_INLINE void csi_dcache_enable (void)
+{
+   __set_CCR(__get_CCR() | 0x00000008);
+}
+
+
+/**
+  \brief   Disable D-Cache
+  \details Turns off D-Cache
+  \note    I-Cache also turns off.
+  */
+__STATIC_INLINE void csi_dcache_disable (void)
+{
+   __set_CCR(__get_CCR() & 0xFFFFFFF7);
+}
+
+
+/**
+  \brief   Invalidate D-Cache
+  \details Invalidates D-Cache
+  \note    I-Cache is also invalidated.
+  */
+__STATIC_INLINE void csi_dcache_invalid (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_INV);
+}
+
+
+/**
+  \brief   Clean D-Cache
+  \details Cleans D-Cache
+  \note    I-Cache is also cleaned.
+  */
+__STATIC_INLINE void csi_dcache_clean (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_CLR);
+}
+
+/**
+  \brief   Clean & Invalidate D-Cache
+  \details Cleans and Invalidates D-Cache
+  \note    I-Cache is also flushed.
+  */
+__STATIC_INLINE void csi_dcache_clean_invalid (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_CLR | CACHE_INV);
+}
+
+__STATIC_INLINE  void set_cache_range (uint32_t start, uint32_t end, uint32_t value)
+{
+    if (!(start & SSEG0_BASE_ADDR) || (end - start) &~(CACHE_RANGE_MAX_SIZE - 1)) {
+        __set_CFR(value);
+    }
+
+    if (value & INS_CACHE) {
+        csi_icache_disable();
+    }
+    uint32_t i;
+    for (i = start; i < end; i += L1_CACHE_BYTES) {
+        __set_CIR(i);
+        __set_CFR(CACHE_OMS | value);
+    }
+
+    if (end & (L1_CACHE_BYTES-1)) {
+        __set_CIR(end);
+        __set_CFR(CACHE_OMS | value);
+    }
+
+    if (value & INS_CACHE) {
+        csi_icache_enable();
+    }
+
+}
+
+/**
+  \brief   D-Cache Invalidate by address
+  \details Invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_invalid_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_INV));
+}
+
+
+/**
+  \brief   D-Cache Clean by address
+  \details Cleans D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_clean_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_CLR));
+}
+
+
+/**
+  \brief   D-Cache Clean and Invalidate by address
+  \details Cleans and invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_clean_invalid_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_CLR | CACHE_INV));
+}
+
+
+/*@} end of CSI_Core_CacheFunctions */
+
+
+/* ##########################  MMU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_MMUFunctions MMU Functions
+  \brief    Functions that configure MMU.
+  @{
+ */
+
+typedef struct {
+    uint32_t global: 1;         /* tlb page global access. */
+    uint32_t valid: 1;          /* tlb page valid */
+    uint32_t writeable: 1;      /* tlb page writeable */
+    uint32_t cacheable: 1;      /* tlb page cacheable*/
+    uint32_t is_secure: 1;      /* tlb page security access */
+    uint32_t strong_order: 1;   /* tlb page strong-ordered: accesses follow program order */
+    uint32_t bufferable: 1;     /* tlb page bufferable */
+} page_attr_t;
+
+typedef enum {
+    PAGE_SIZE_4KB   = 0x000,
+    PAGE_SIZE_16KB  = 0x003,
+    PAGE_SIZE_64KB  = 0x00F,
+    PAGE_SIZE_256KB = 0x03F,
+    PAGE_SIZE_1MB   = 0x0FF,
+    PAGE_SIZE_4MB   = 0x3FF,
+    PAGE_SIZE_16MB  = 0xFFF
+} page_size_e;
+
+
+/**
+  \brief  enable mmu
+  \details
+  */
+__STATIC_INLINE void csi_mmu_enable(void)
+{
+    __set_CCR(__get_CCR() | (1u << CCR_MP_Pos));
+}
+
+/**
+  \brief  disable mmu
+  \details
+  */
+__STATIC_INLINE void csi_mmu_disable(void)
+{
+    __set_CCR(__get_CCR() & (~(1u << CCR_MP_Pos)));
+}
+
+/**
+  \brief  create page with feature.
+  \details
+  \param [in]  vaddr     virtual address.
+  \param [in]  paddr     physical address.
+  \param [in]  asid      address space id (default: 0).
+  \param [in]  attr      \ref page_attr_t. tlb page attribute.
+  */
+__STATIC_INLINE void csi_mmu_set_tlb(uint32_t vaddr, uint32_t paddr, uint32_t asid, page_attr_t attr)
+{
+     MPR_Type pgmask;
+     MEH_Type meh;
+     MEL_Type mel;
+     uint32_t vaddr_bit;
+     uint32_t page_feature = 0;
+
+     page_feature |= attr.global << MEL_G_Pos | attr.valid << MEL_V_Pos |
+                     attr.writeable << MEL_D_Pos | attr.cacheable << MEL_C_Pos |
+                     attr.is_secure << MEL_SEC_Pos | attr.strong_order << MEL_SO_Pos |
+                     attr.bufferable << MEL_B_Pos;
+
+     pgmask.w = __get_MPR();
+     vaddr_bit = 44 - __FF0(~((uint32_t)pgmask.b.page_mask));
+
+     meh.b.ASID = (uint8_t)asid;
+     meh.b.VPN  = (vaddr & ((~pgmask.w | 0xFE000000) & 0xFFFFE000)) >> MEH_VPN_Pos;
+     __set_MEH(meh.w);
+
+     __set_MCIR(1u << MCIR_TLBP_Pos);
+
+     mel.w = ((paddr & ~(pgmask.b.page_mask << 12)) | page_feature);
+     if (vaddr & (1 << vaddr_bit)) {
+         __set_MEL1(mel.w);
+     }
+     else {
+         __set_MEL0(mel.w);
+     }
+
+     if (__get_MIR() & (1 << MIR_P_Pos)) {
+        __set_MCIR(1u << MCIR_TLBWR_Pos);
+     } else {
+        __set_MCIR(1u << MCIR_TLBWI_Pos);
+     }
+}
+
+
+/**
+  \brief  set mmu tlb page size
+  \details
+  \param [in]  size  tlb page size.
+  */
+__STATIC_INLINE void csi_mmu_set_pagesize(page_size_e size)
+{
+    MPR_Type pgmask;
+    pgmask.b.page_mask = size;
+    __set_MPR(pgmask.w);
+}
+
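+/**
+  \brief   Example: map one 4KB page with CK807 page attributes.
+  \details Illustrative sketch only; the helper name is hypothetical and not
+           part of the original vendor API.
+*/
+__STATIC_INLINE void csi_example_map_page(uint32_t vaddr, uint32_t paddr)
+{
+    page_attr_t attr;
+
+    attr.global       = 1U;    /* visible to all ASIDs */
+    attr.valid        = 1U;    /* mapping is valid */
+    attr.writeable    = 1U;    /* allow stores */
+    attr.cacheable    = 1U;    /* cacheable page */
+    attr.is_secure    = 0U;    /* non-secure page */
+    attr.strong_order = 0U;    /* normal memory ordering */
+    attr.bufferable   = 1U;    /* bufferable page */
+
+    csi_mmu_set_pagesize(PAGE_SIZE_4KB);
+    csi_mmu_set_tlb(vaddr, paddr, 0U, attr);
+}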
+
+/**
+  \brief  read MEH, MEL0, MEL1 by tlb index.
+  \details
+  \param [in]    index  tlb index(0, 1, 2, ...)
+  \param [out]   meh    pointer to variable for retrieving MEH.
+  \param [out]   mel0   pointer to variable for retrieving MEL0.
+  \param [out]   mel1   pointer to variable for retrieving MEL1.
+  */
+__STATIC_INLINE void csi_mmu_read_by_index(uint32_t index, uint32_t *meh, uint32_t *mel0, uint32_t *mel1)
+{
+    MIR_Type mir;
+
+    if (meh == NULL || mel0 == NULL || mel1 == NULL) {
+        return;
+    }
+
+    mir.b.Index = index;
+    __set_MIR(mir.w);
+    __set_MCIR(1u << MCIR_TLBR_Pos);
+
+    *meh = __get_MEH();
+    *mel0 = __get_MEL0();
+    *mel1 = __get_MEL1();
+}
+
+
+/**
+  \brief  flush all mmu tlb.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_all(void)
+{
+    __set_MCIR(1u << MCIR_TLBINV_ALL_Pos);
+}
+
+/**
+  \brief  flush mmu tlb by index.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_by_index(uint32_t index)
+{
+    MIR_Type mir;
+
+    mir.b.Index = index;
+    __set_MIR(mir.w);
+    __set_MCIR(1u << MCIR_TLBINV_INDEX_Pos);
+}
+
+
+/**
+  \brief  flush mmu tlb by virtual address.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_by_vaddr(uint32_t vaddr, uint32_t asid)
+{
+    __set_MEH(vaddr | (asid & MEH_ASID_Msk));
+    __set_MCIR(1u << MCIR_TLBP_Pos);
+
+    if (__get_MIR() & (1 << MIR_P_Pos)) {
+        return;
+    } else {
+        __set_MCIR(1u << MCIR_TLBINV_INDEX_Pos);
+    }
+}
+
+/*@} end of CSI_Core_MMUFunctions */
+
+
+/* ##################################    IRQ Functions  ############################################ */
+
+/**
+  \brief   Save the Irq context
+  \details save the psr result before disable irq.
+  \param [in]      irq_num  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE uint32_t csi_irq_save(void)
+{
+    uint32_t result;
+    result = __get_PSR();
+    __disable_irq();
+    return(result);
+}
+
+/**
+  \brief   Restore the Irq context
+  \details restore saved primask state.
+  \param [in]      irq_state  psr irq state.
+ */
+__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
+{
+    __set_PSR(irq_state);
+}
+
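+/**
+  \brief   Example: short critical section with csi_irq_save/csi_irq_restore.
+  \details Illustrative sketch only; the helper name is hypothetical and not
+           part of the original vendor API.
+*/
+__STATIC_INLINE void csi_example_critical_section(volatile uint32_t *counter)
+{
+    uint32_t state = csi_irq_save();    /* save PSR and mask interrupts */
+    (*counter)++;                       /* code that must not be interrupted */
+    csi_irq_restore(state);             /* restore the previous interrupt state */
+}
+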
+/*@} end of IRQ Functions */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CK807_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 854 - 0
lib/sec_library/include/core/core_ck810.h

@@ -0,0 +1,854 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_ck810.h
+ * @brief    CSI CK810 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     26. Jan 2018
+ ******************************************************************************/
+
+#ifndef __CORE_CK810_H_GENERIC
+#define __CORE_CK810_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup CK810
+  @{
+ */
+
+/*  CSI CK810 definitions */
+#define __CK810_CSI_VERSION_MAIN  (0x04U)                                      /*!< [31:16] CSI HAL main version */
+#define __CK810_CSI_VERSION_SUB   (0x1EU)                                      /*!< [15:0]  CSI HAL sub version */
+#define __CK810_CSI_VERSION       ((__CK810_CSI_VERSION_MAIN << 16U) | \
+                                   __CK810_CSI_VERSION_SUB           )         /*!< CSI HAL version number */
+
+#ifndef __CK810
+#define __CK810                (0x0aU)                                         /*!< CK810 Core */
+#endif
+
+/** __FPU_USED indicates whether an FPU is used or not.
+*/
+#define __FPU_USED      1U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CK810_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_CK810_H_DEPENDANT
+#define __CORE_CK810_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __CK810_REV
+#define __CK810_REV               0x0000U
+#endif
+
+#ifndef __GSR_GCR_PRESENT
+#define __GSR_GCR_PRESENT         0U
+#endif
+
+#ifndef __ICACHE_PRESENT
+#define __ICACHE_PRESENT          1U
+#endif
+
+#ifndef __DCACHE_PRESENT
+#define __DCACHE_PRESENT          1U
+#endif
+
+#include <csi_gcc.h>
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group CK810 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK810 processor based devices.
+*/
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \brief  Access Processor Status Register(PSR)struct definition.
+ */
+typedef union {
+    struct {
+        uint32_t C: 1;                       /*!< bit:      0  Conditional code/Carry flag */
+        uint32_t _reserved0: 5;              /*!< bit:  1.. 5  Reserved */
+        uint32_t IE: 1;                      /*!< bit:      6  Interrupt effective control bit */
+        uint32_t IC: 1;                      /*!< bit:      7  Interrupt control bit */
+        uint32_t EE: 1;                      /*!< bit:      8  Abnormally effective control bit */
+        uint32_t MM: 1;                      /*!< bit:      9  Unsymmetrical masking bit */
+        uint32_t _reserved1: 6;              /*!< bit: 10..15  Reserved */
+        uint32_t VEC: 8;                     /*!< bit: 16..23  Abnormal event vector value */
+        uint32_t _reserved2: 5;              /*!< bit: 24..28  Reserved */
+        uint32_t SP: 1;                      /*!< bit:     29  Secure pending bit */
+        uint32_t T: 1;                       /*!< bit:     30  TEE mode bit */
+        uint32_t S: 1;                       /*!< bit:     31  Superuser mode set bit */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} PSR_Type;
+
+/* PSR Register Definitions */
+#define PSR_S_Pos                          31U                                            /*!< PSR: S Position */
+#define PSR_S_Msk                          (1UL << PSR_S_Pos)                             /*!< PSR: S Mask */
+
+#define PSR_VEC_Pos                        16U                                            /*!< PSR: VEC Position */
+#define PSR_VEC_Msk                        (0x7FUL << PSR_VEC_Pos)                        /*!< PSR: VEC Mask */
+
+#define PSR_MM_Pos                         9U                                             /*!< PSR: MM Position */
+#define PSR_MM_Msk                         (1UL << PSR_MM_Pos)                            /*!< PSR: MM Mask */
+
+#define PSR_EE_Pos                         8U                                             /*!< PSR: EE Position */
+#define PSR_EE_Msk                         (1UL << PSR_EE_Pos)                            /*!< PSR: EE Mask */
+
+#define PSR_IC_Pos                         7U                                             /*!< PSR: IC Position */
+#define PSR_IC_Msk                         (1UL << PSR_IC_Pos)                            /*!< PSR: IC Mask */
+
+#define PSR_IE_Pos                         6U                                             /*!< PSR: IE Position */
+#define PSR_IE_Msk                         (1UL << PSR_IE_Pos)                            /*!< PSR: IE Mask */
+
+#define PSR_C_Pos                          0U                                             /*!< PSR: C Position */
+#define PSR_C_Msk                          (1UL << PSR_C_Pos)                             /*!< PSR: C Mask */
+
+/**
+  \brief Consortium definition for accessing Cache Configuration Registers(CCR, CR<18, 0>).
+ */
+typedef union {
+    struct {
+        uint32_t MP: 2;                      /*!< bit:  0.. 1  memory protection settings */
+        uint32_t IE: 1;                      /*!< bit:      2  Instruction cache enable */
+        uint32_t DE: 1;                      /*!< bit:      3  Data cache enable */
+        uint32_t WB: 1;                      /*!< bit:      4  Cache write back */
+        uint32_t RS: 1;                      /*!< bit:      5  Address return stack settings */
+        uint32_t Z: 1;                       /*!< bit:      6  Allow predictive jump bit */
+        uint32_t BE: 1;                      /*!< bit:      7  Endian mode */
+        uint32_t SCK: 3;                     /*!< bit:  8..10  the clock ratio of the system and the processor */
+        uint32_t _reserved0: 1;              /*!< bit:     11  Reserved */
+        uint32_t WA: 1;                      /*!< bit:     12  Write allocate enable */
+        uint32_t E_V2: 1;                    /*!< bit:     13  V2 Endian mode */
+        uint32_t BSTE: 1;                    /*!< bit:     14  Burst transmit enable */
+        uint32_t IPE: 1;                     /*!< bit:     15  Indirect predict enable */
+        uint32_t _reserved1: 16;             /*!< bit: 16..31  Reserved */
+    } b;                                   /*!< Structure    Access by bit */
+    uint32_t w;                            /*!< Type         Access by whole register */
+} CCR_Type;
+
+/* CCR Register Definitions */
+#define CCR_IPE_Pos                       15u                                            /*!< CCR: IPE Position */
+#define CCR_IPE_Msk                       (0x1UL << CCR_IPE_Pos)                         /*!< CCR: IPE Mask */
+
+#define CCR_BSTE_Pos                      14u                                            /*!< CCR: BSTE Position */
+#define CCR_BSTE_Msk                      (0x1UL << CCR_BSTE_Pos)                        /*!< CCR: BSTE Mask */
+
+#define CCR_E_V2_Pos                      13U                                            /*!< CCR: E_V2 Position */
+#define CCR_E_V2_Msk                      (0x1UL << CCR_E_V2_Pos)                        /*!< CCR: E_V2 Mask */
+
+#define CCR_WA_Pos                        12u                                            /*!< CCR: WA Position */
+#define CCR_WA_Msk                        (0x1UL << CCR_WA_Pos)                          /*!< CCR: WA Mask */
+
+#define CCR_SCK_Pos                       8U                                             /*!< CCR: SCK Position */
+#define CCR_SCK_Msk                       (0x3UL << CCR_SCK_Pos)                         /*!< CCR: SCK Mask */
+
+#define CCR_BE_Pos                        7U                                             /*!< CCR: BE Position */
+#define CCR_BE_Msk                        (0x1UL << CCR_BE_Pos)                          /*!< CCR: BE Mask */
+
+#define CCR_Z_Pos                         6U                                             /*!< CCR: Z Position */
+#define CCR_Z_Msk                         (0x1UL << CCR_Z_Pos)                           /*!< CCR: Z Mask */
+
+#define CCR_RS_Pos                        5U                                             /*!< CCR: RS Position */
+#define CCR_RS_Msk                        (0x1UL << CCR_RS_Pos)                          /*!< CCR: RS Mask */
+
+#define CCR_WB_Pos                        4U                                             /*!< CCR: WB Position */
+#define CCR_WB_Msk                        (0x1UL << CCR_WB_Pos)                          /*!< CCR: WB Mask */
+
+#define CCR_DE_Pos                        3U                                             /*!< CCR: DE Position */
+#define CCR_DE_Msk                        (0x1UL << CCR_DE_Pos)                          /*!< CCR: DE Mask */
+
+#define CCR_IE_Pos                        2U                                             /*!< CCR: IE Position */
+#define CCR_IE_Msk                        (0x1UL << CCR_IE_Pos)                          /*!< CCR: IE Mask */
+
+#define CCR_MP_Pos                        0U                                             /*!< CCR: MP Position */
+#define CCR_MP_Msk                        (0x3UL << CCR_MP_Pos)                          /*!< CCR: MP Mask */
+
+/**
+  \brief  Consortium definition for accessing mmu index register(MIR,CR<0,15>).
+ */
+typedef union {
+    struct {
+        uint32_t Index: 10;                  /*!< bit:   0.. 9   TLB index */
+        uint32_t _reserved: 20;              /*!< bit:  10.. 29  Reserved */
+        uint32_t TF: 1;                      /*!< bit:       30  TLB fatal error */
+        uint32_t P: 1;                       /*!< bit:       31  TLBP instruction */
+    } b;
+    uint32_t w;
+} MIR_Type;
+
+/* MIR Register Definitions */
+#define MIR_P_Pos                          31                                            /*!< PRSR: P(TLBP instruction) Position */
+#define MIR_P_Msk                          (0x1UL << MIR_P_Pos)                          /*!< PRSR: P(TLBP instruction) Mask */
+
+#define MIR_TF_Pos                         30                                            /*!< PRSR: Tfatal Position */
+#define MIR_TF_Msk                         (0x1UL << MIR_TF_Pos)                         /*!< PRSR: Tfatal Mask */
+
+#define MIR_Index_Pos                      0                                             /*!< PRSR: Index Position */
+#define MIR_Index_Msk                      (0x3ffUL << MIR_Index_Pos)                    /*!< PRSR: Index Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(MEL, CR<2,15> and CR<3,15>).
+ */
+typedef union {
+    struct {
+        uint32_t G: 1;                        /*!< bit:       0   Global enable bit */
+        uint32_t V: 1;                        /*!< bit:       1   TLB mapping valid bit */
+        uint32_t D: 1;                        /*!< bit:       2   TLB Page dirty bit */
+        uint32_t C: 1;                        /*!< bit:       3   TLB Page cacheable bit */
+        uint32_t SEC: 1;                      /*!< bit:       4   TLB Page security bit */
+        uint32_t SO: 1;                       /*!< bit:       5   Strong order enable bit */
+        uint32_t B: 1;                        /*!< bit:       6   TLB Page bufferable bit */
+        uint32_t _reserved: 5;                /*!< bit:   7.. 11  Reserved */
+        uint32_t PFN: 20;                     /*!< bit:  12.. 31  Physical frame number */
+    } b;
+    uint32_t w;
+} MEL_Type;
+
+/* MEL Register Definitions */
+#define MEL_PFN_Pos                        12                                            /*!< MEL: PFN Position */
+#define MEL_PFN_Msk                        (0xFFFFFUL << MEL_PFN_Pos)                    /*!< MEL: PFN Mask */
+
+#define MEL_B_Pos                          6                                             /*!< MEL: B Position */
+#define MEL_B_Msk                          (0x1UL << MEL_B_Pos)                          /*!< MEL: B Mask */
+
+#define MEL_SO_Pos                         5                                             /*!< MEL: SO Position */
+#define MEL_SO_Msk                         (0x1UL << MEL_SO_Pos)                         /*!< MEL: SO Mask */
+
+#define MEL_SEC_Pos                        4                                             /*!< MEL: SEC Position */
+#define MEL_SEC_Msk                        (0x1UL << MEL_SEC_Pos)                        /*!< MEL: SEC Mask */
+
+#define MEL_C_Pos                          3                                             /*!< MEL: C Position */
+#define MEL_C_Msk                          (0x1UL << MEL_C_Pos)                          /*!< MEL: C Mask */
+
+#define MEL_D_Pos                          2                                             /*!< MEL: D Position */
+#define MEL_D_Msk                          (0x1UL << MEL_D_Pos)                          /*!< MEL: D Mask */
+
+#define MEL_V_Pos                          1                                             /*!< MEL: V Position */
+#define MEL_V_Msk                          (0x1UL << MEL_V_Pos)                          /*!< MEL: V Mask */
+
+#define MEL_G_Pos                          0                                             /*!< MEL: G Position */
+#define MEL_G_Msk                          (0x1UL << MEL_G_Pos)                          /*!< MEL: G Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(MEH, CR<4,15>).
+ */
+typedef union {
+    struct {
+        uint32_t ASID :8;                     /*!< bit:   0.. 7   ASID */
+        uint32_t _reserved :4;                /*!< bit:   8.. 11  Reserved */
+        uint32_t VPN :20;                     /*!< bit:  12.. 31  Virtual page number */
+    } b;
+    uint32_t w;
+} MEH_Type;
+
+/* MEH Register Definitions */
+#define MEH_VPN_Pos                        12                                             /*!< MEH: VPN Position */
+#define MEH_VPN_Msk                        (0xFFFFFUL << MEH_VPN_Pos)                     /*!< MEH: VPN Mask */
+
+#define MEH_ASID_Pos                       0                                              /*!< MEH: ASID Position */
+#define MEH_ASID_Msk                       (0xFFUL << MEH_ASID_Pos)                       /*!< MEH: ASID Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(MPR, CR<6,15>).
+ */
+typedef union {
+    struct {
+        uint32_t _reserved0: 13;              /*!< bit:   0.. 12   Reserved */
+        uint32_t page_mask: 12;               /*!< bit:  13.. 24   Page mask */
+        uint32_t _reserved1: 7;               /*!< bit:  25.. 31   Reserved */
+    } b;
+    uint32_t w;
+} MPR_Type;
+
+/* MPR Register Definitions */
+#define MPR_PAGE_MASK_Pos                        13                                             /*!< MPR: PAGE_MASK Position */
+#define MPR_PAGE_MASK_Msk                        (0xFFFUL << MPR_PAGE_MASK_Pos)                 /*!< MPR: PAGE_MASK Mask */
+
+
+/**
+  \brief  Consortium definition for accessing mmu entry of high physical address register(CR<8,15>).
+ */
+typedef union {
+    struct {
+        uint32_t ASID: 8;                     /*!< bit:   0.. 7   ASID */
+        uint32_t _reserved: 17;               /*!< bit:   8.. 24  Reserved */
+        uint32_t TLBINV_INDEX: 1;             /*!< bit:       25  TLBINV_INDEX */
+        uint32_t TLBINV_ALL: 1;               /*!< bit:       26  TLBINV_ALL */
+        uint32_t TLBINV: 1;                   /*!< bit:       27  TLBINV */
+        uint32_t TLBWR: 1;                    /*!< bit:       28  TLBWR */
+        uint32_t TLBWI: 1;                    /*!< bit:       29  TLBWI */
+        uint32_t TLBR: 1;                     /*!< bit:       30  TLBR */
+        uint32_t TLBP: 1;                     /*!< bit:       31  TLBP */
+    } b;
+    uint32_t w;
+} MCIR_Type;
+
+/* MCIR Register Definitions */
+#define MCIR_TLBP_Pos                        31                                               /*!< MCIR: TLBP Position */
+#define MCIR_TLBP_Msk                        (0x1UL << MCIR_TLBP_Pos)                         /*!< MCIR: TLBP Mask */
+
+#define MCIR_TLBR_Pos                        30                                               /*!< MCIR: TLBR Position */
+#define MCIR_TLBR_Msk                        (0x1UL << MCIR_TLBR_Pos)                         /*!< MCIR: TLBR Mask */
+
+#define MCIR_TLBWI_Pos                       29                                               /*!< MCIR: TLBWI Position */
+#define MCIR_TLBWI_Msk                       (0x1UL << MCIR_TLBWI_Pos)                        /*!< MCIR: TLBWI Mask */
+
+#define MCIR_TLBWR_Pos                       28                                               /*!< MCIR: TLBWR Position */
+#define MCIR_TLBWR_Msk                       (0x1UL << MCIR_TLBWR_Pos)                        /*!< MCIR: TLBWR Mask */
+
+#define MCIR_TLBINV_Pos                      27                                               /*!< MCIR: TLBINV Position */
+#define MCIR_TLBINV_Msk                      (0x1UL << MCIR_TLBINV_Pos)                       /*!< MCIR: TLBINV Mask */
+
+#define MCIR_TLBINV_ALL_Pos                  26                                               /*!< MCIR: TLBINV_ALL Position */
+#define MCIR_TLBINV_ALL_Msk                  (0x1UL << MCIR_TLBINV_ALL_Pos)                   /*!< MCIR: TLBINV_ALL Mask */
+
+#define MCIR_TLBINV_INDEX_Pos                25                                               /*!< MCIR: TLBINV_INDEX Position */
+#define MCIR_TLBINV_INDEX_Msk                (0x1UL << MCIR_TLBINV_INDEX_Pos)                 /*!< MCIR: TLBINV_INDEX Mask */
+
+#define MCIR_ASID_Pos                        0                                                /*!< MCIR: ASID Position */
+#define MCIR_ASID_Msk                        (0xFFUL << MCIR_ASID_Pos)                        /*!< MCIR: ASID Mask */
+
+
+/*@} end of group CSI_CORE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+
+/**
+  \brief  Consortium definition for accessing protection area selection register(CFR,CR<17,0>).
+ */
+typedef union {
+    struct {
+        uint32_t CACHE_SEL: 2;                      /*!< bit:  0..1  Instruction and data cache selection */
+        uint32_t _reserved0: 2;                     /*!< bit:  2..3  Reserved */
+        uint32_t INV: 1;                            /*!< bit:     4  Invalid data in cache */
+        uint32_t CLR: 1;                            /*!< bit:     5  Clear the dirty tlb table */
+        uint32_t OMS: 1;                            /*!< bit:     6  Cache invalid and clear operation mode (one line or all lines) */
+        uint32_t ITS: 1;                            /*!< bit:     7  Cache invalid and clear operation mode (CIR used as virtual index or SET/WAY/LEVEL index) */
+        uint32_t UNLOCK: 1;                         /*!< bit:     8  Unlock data cache line */
+        uint32_t _reserved1: 7;                     /*!< bit: 9..15  Reserved */
+        uint32_t BHT_INV: 1;                        /*!< bit:    16  Invalid data in branch history table */
+        uint32_t BTB_INV: 1;                        /*!< bit:    17  Invalid data in branch table buffer */
+        uint32_t _reserved2: 13;                    /*!< bit: 18..30 Reserved */
+        uint32_t LICF: 1;                           /*!< bit:     31 Failure of clearing or invalid cache line */
+    } b;                                            /*!< Structure    Access by bit */
+    uint32_t w;                                     /*!< Type         Access by whole register */
+} CFR_Type;
+
+#define CFR_LICF_Pos                     31U                                             /*!< CFR: LICF Position */
+#define CFR_LICF_Msk                     (0x1UL << CFR_LICF_Pos)                         /*!< CFR: LICF Mask */
+
+#define CFR_BTB_INV_Pos                  17U                                             /*!< CFR: BTB Position */
+#define CFR_BTB_INV_Msk                  (0x1UL << CFR_BTB_INV_Pos)                      /*!< CFR: BTB Mask */
+
+#define CFR_BHT_INV_Pos                  16U                                             /*!< CFR: BHT Position */
+#define CFR_BHT_INV_Msk                  (0x1UL << CFR_BHT_INV_Pos)                      /*!< CFR: BHT Mask */
+
+#define CFR_UNLOCK_Pos                   8U                                              /*!< CFR: UNLOCK Position */
+#define CFR_UNLOCK_Msk                   (0x1UL << CFR_UNLOCK_Pos)                       /*!< CFR: UNLOCK Mask */
+
+#define CFR_ITS_Pos                      7U                                              /*!< CFR: ITS Position */
+#define CFR_ITS_Msk                      (0x1UL << CFR_ITS_Pos)                          /*!< CFR: ITS Mask */
+
+#define CFR_OMS_Pos                      6U                                              /*!< CFR: OMS Position */
+#define CFR_OMS_Msk                      (0x1UL << CFR_OMS_Pos)                          /*!< CFR: OMS Mask */
+
+#define CFR_CLR_Pos                      5U                                              /*!< CFR: CLR Position */
+#define CFR_CLR_Msk                      (0x1UL << CFR_CLR_Pos)                          /*!< CFR: CLR Mask */
+
+#define CFR_INV_Pos                      4U                                              /*!< CFR: INV Position */
+#define CFR_INV_Msk                      (0x1UL << CFR_INV_Pos)                          /*!< CFR: INV Mask */
+
+#define CFR_CACHE_SEL_Pos                0                                               /*!< CFR: CACHE_SEL Position */
+#define CFR_CACHE_SEL_Msk                (0x3UL << CFR_CACHE_SEL_Pos)                    /*!< CFR: CACHE_SEL Mask */
+
+/* CFR Register Definitions */
+/*@} end of group CSI_CACHE */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+#define SSEG0_BASE_ADDR         0x80000000
+#define CACHE_RANGE_MAX_SIZE    0x80000
+
+#define INS_CACHE               (1 << 0)
+#define DATA_CACHE              (1 << 1)
+#define CACHE_INV               (1 << 4)
+#define CACHE_CLR               (1 << 5)
+#define CACHE_OMS               (1 << 6)
+#define CACHE_ITS               (1 << 7)
+#define CACHE_LICF              (1 << 31)
+
+#define    L1_CACHE_SHIFT       4      /* 16 Bytes */
+#define    L1_CACHE_BYTES       (1 << L1_CACHE_SHIFT)
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)
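+
+/*
+  A minimal usage sketch (illustrative only, not part of the vendor API; the
+  MP value written is an assumption): reading and re-packing the CCR MP field
+  with the helpers above, using the __get_CCR()/__set_CCR() intrinsics from
+  <csi_gcc.h> that the cache functions below also rely on.
+  \code
+  uint32_t ccr = __get_CCR();
+  uint32_t mp  = _FLD2VAL(CCR_MP, ccr);              // extract MP bits [1:0]
+  ccr = (ccr & ~CCR_MP_Msk) | _VAL2FLD(CCR_MP, 1U);  // request MP setting 1 (assumed value)
+  __set_CCR(ccr);
+  \endcode
+*/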
+
+/*@} end of group CSI_core_bitfield */
+
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
+
+
+/* ##########################  Cache functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_CacheFunctions Cache Functions
+  \brief    Functions that configure Instruction and Data cache.
+  @{
+ */
+
+/**
+  \brief   Enable I-Cache
+  \details Turns on I-Cache
+  */
+__STATIC_INLINE void csi_icache_enable (void)
+{
+   __set_CCR(__get_CCR() | 0x00000004);
+}
+
+
+/**
+  \brief   Disable I-Cache
+  \details Turns off I-Cache
+  */
+__STATIC_INLINE void csi_icache_disable (void)
+{
+   __set_CCR(__get_CCR() & 0xFFFFFFFB);
+}
+
+
+/**
+  \brief   Invalidate I-Cache
+  \details Invalidates I-Cache
+  */
+__STATIC_INLINE void csi_icache_invalid (void)
+{
+    __set_CFR(0x11);
+    __set_CFR(INS_CACHE | CACHE_INV);
+}
+
+
+/**
+  \brief   Enable D-Cache
+  \details Turns on D-Cache
+  \note    I-Cache also turns on.
+  */
+__STATIC_INLINE void csi_dcache_enable (void)
+{
+   __set_CCR(__get_CCR() | 0x00000008);
+}
+
+
+/**
+  \brief   Disable D-Cache
+  \details Turns off D-Cache
+  \note    I-Cache also turns off.
+  */
+__STATIC_INLINE void csi_dcache_disable (void)
+{
+   __set_CCR(__get_CCR() & 0xFFFFFFF7);
+}
+
+
+/**
+  \brief   Invalidate D-Cache
+  \details Invalidates D-Cache
+  \note    I-Cache also invalid
+  */
+__STATIC_INLINE void csi_dcache_invalid (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_INV);
+}
+
+
+/**
+  \brief   Clean D-Cache
+  \details Cleans D-Cache
+  \note    I-Cache also cleans
+  */
+__STATIC_INLINE void csi_dcache_clean (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_CLR);
+}
+
+/**
+  \brief   Clean & Invalidate D-Cache
+  \details Cleans and Invalidates D-Cache
+  \note    I-Cache also flush.
+  */
+__STATIC_INLINE void csi_dcache_clean_invalid (void)
+{
+    __set_CFR(DATA_CACHE | CACHE_CLR | CACHE_INV);
+}
+
+__STATIC_INLINE  void set_cache_range (uint32_t start, uint32_t end, uint32_t value)
+{
+    if (!(start & SSEG0_BASE_ADDR) || (end - start) &~(CACHE_RANGE_MAX_SIZE - 1)) {
+        __set_CFR(value);
+    }
+
+    if (value & INS_CACHE) {
+        csi_icache_disable();
+    }
+    uint32_t i;
+    for (i = start; i < end; i += L1_CACHE_BYTES) {
+        __set_CIR(i);
+        __set_CFR(CACHE_OMS | value);
+    }
+
+    if (end & (L1_CACHE_BYTES-1)) {
+        __set_CIR(end);
+        __set_CFR(CACHE_OMS | value);
+    }
+
+    if (value & INS_CACHE) {
+        csi_icache_enable();
+    }
+
+}
+
+/**
+  \brief   D-Cache Invalidate by address
+  \details Invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_invalid_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_INV));
+}
+
+
+/**
+  \brief   D-Cache Clean by address
+  \details Cleans D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_clean_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_CLR));
+}
+
+
+/**
+  \brief   D-Cache Clean and Invalidate by address
+  \details Cleans and invalidates D_Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_clean_invalid_range (uint32_t *addr, int32_t dsize)
+{
+    set_cache_range((uint32_t)addr, (uint32_t)addr + dsize, (DATA_CACHE | CACHE_CLR | CACHE_INV));
+}
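+
+/*
+  Usage sketch (the buffer name and size are assumptions for illustration):
+  clean a 16-byte aligned buffer before a device reads it by DMA, and
+  invalidate it before the CPU reads data the device has written.
+  \code
+  static uint32_t dma_buf[64] __attribute__((aligned(L1_CACHE_BYTES)));
+  csi_dcache_clean_range(dma_buf, sizeof(dma_buf));    // CPU -> device
+  // ... device DMA completes ...
+  csi_dcache_invalid_range(dma_buf, sizeof(dma_buf));  // device -> CPU
+  \endcode
+*/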
+
+
+/*@} end of CSI_Core_CacheFunctions */
+
+/* ##########################  MMU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_MMUFunctions MMU Functions
+  \brief    Functions that configure MMU.
+  @{
+ */
+
+typedef struct {
+    uint32_t global: 1;         /* tlb page global access. */
+    uint32_t valid: 1;          /* tlb page valid */
+    uint32_t writeable: 1;      /* tlb page writeable */
+    uint32_t cacheable: 1;      /* tlb page cacheable*/
+    uint32_t is_secure: 1;      /* tlb page security access */
+    uint32_t strong_order: 1;   /* strong order enable: accesses to this tlb page follow program order */
+    uint32_t bufferable: 1;     /* tlb page bufferable */
+} page_attr_t;
+
+typedef enum {
+    PAGE_SIZE_4KB   = 0x000,
+    PAGE_SIZE_16KB  = 0x003,
+    PAGE_SIZE_64KB  = 0x00F,
+    PAGE_SIZE_256KB = 0x03F,
+    PAGE_SIZE_1MB   = 0x0FF,
+    PAGE_SIZE_4MB   = 0x3FF,
+    PAGE_SIZE_16MB  = 0xFFF
+} page_size_e;
+
+
+/**
+  \brief  enable mmu
+  \details
+  */
+__STATIC_INLINE void csi_mmu_enable(void)
+{
+    __set_CCR(__get_CCR() | (1u << CCR_MP_Pos));
+}
+
+/**
+  \brief  disable mmu
+  \details
+  */
+__STATIC_INLINE void csi_mmu_disable(void)
+{
+    __set_CCR(__get_CCR() & (~(1u << CCR_MP_Pos)));
+}
+
+/**
+  \brief  create page with feature.
+  \details
+  \param [in]  vaddr     virtual address.
+  \param [in]  paddr     physical address.
+  \param [in]  asid      address space id (default: 0).
+  \param [in]  attr      \ref page_attr_t. tlb page attribute.
+  */
+__STATIC_INLINE void csi_mmu_set_tlb(uint32_t vaddr, uint32_t paddr, uint32_t asid, page_attr_t attr)
+{
+     MPR_Type pgmask;
+     MEH_Type meh;
+     MEL_Type mel;
+     uint32_t vaddr_bit;
+     uint32_t page_feature = 0;
+
+     page_feature |= attr.global << MEL_G_Pos | attr.valid << MEL_V_Pos |
+                     attr.writeable << MEL_D_Pos | attr.cacheable << MEL_C_Pos |
+                     attr.is_secure << MEL_SEC_Pos | attr.strong_order << MEL_SO_Pos |
+                     attr.bufferable << MEL_B_Pos;
+
+     pgmask.w = __get_MPR();
+     vaddr_bit = 44 - __FF0(~((uint32_t)pgmask.b.page_mask));
+
+     meh.b.ASID = (uint8_t)asid;
+     meh.b.VPN  = (vaddr & ((~pgmask.w | 0xFE000000) & 0xFFFFE000)) >> MEH_VPN_Pos;
+     __set_MEH(meh.w);
+
+     __set_MCIR(1u << MCIR_TLBP_Pos);
+
+     mel.w = ((paddr & ~(pgmask.b.page_mask << 12)) | page_feature);
+     if (vaddr & (1 << vaddr_bit)) {
+         __set_MEL1(mel.w);
+     }
+     else {
+         __set_MEL0(mel.w);
+     }
+
+     if (__get_MIR() & (1 << MIR_P_Pos)) {
+        __set_MCIR(1u << MCIR_TLBWR_Pos);
+     } else {
+        __set_MCIR(1u << MCIR_TLBWI_Pos);
+     }
+}
+
+
+/**
+  \brief  set tlb page size
+  \details
+  \param [in]  size  tlb page size.
+  */
+__STATIC_INLINE void csi_mmu_set_pagesize(page_size_e size)
+{
+    MPR_Type pgmask;
+    pgmask.b.page_mask = size;
+    __set_MPR(pgmask.w);
+}
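+
+/*
+  Usage sketch (the addresses are illustrative assumptions): identity-map one
+  cacheable, bufferable 4 KB page for ASID 0 and then enable translation.
+  \code
+  page_attr_t attr = { .global = 1, .valid = 1, .writeable = 1,
+                       .cacheable = 1, .is_secure = 0,
+                       .strong_order = 0, .bufferable = 1 };
+  csi_mmu_set_pagesize(PAGE_SIZE_4KB);
+  csi_mmu_set_tlb(0x80100000U, 0x80100000U, 0U, attr);
+  csi_mmu_enable();
+  \endcode
+*/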
+
+
+/**
+  \brief  read MEH, MEL0, MEL1 by tlb index.
+  \details
+  \param [in]    index  tlb index(0, 1, 2, ...)
+  \param [out]   meh    pointer to variable for retrieving MEH.
+  \param [out]   mel0   pointer to variable for retrieving MEL0.
+  \param [out]   mel1   pointer to variable for retrieving MEL1.
+  */
+__STATIC_INLINE void csi_mmu_read_by_index(uint32_t index, uint32_t *meh, uint32_t *mel0, uint32_t *mel1)
+{
+    MIR_Type mir;
+
+    if (meh == NULL || mel0 == NULL || mel1 == NULL) {
+        return;
+    }
+
+    mir.b.Index = index;
+    __set_MIR(mir.w);
+    __set_MCIR(1u << MCIR_TLBR_Pos);
+
+    *meh = __get_MEH();
+    *mel0 = __get_MEL0();
+    *mel1 = __get_MEL1();
+}
+
+
+/**
+  \brief  flush all mmu tlb.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_all(void)
+{
+    __set_MCIR(1u << MCIR_TLBINV_ALL_Pos);
+}
+
+
+/**
+  \brief  flush mmu tlb by index.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_by_index(uint32_t index)
+{
+    MIR_Type mir;
+
+    mir.b.Index = index;
+    __set_MIR(mir.w);
+    __set_MCIR(1u << MCIR_TLBINV_INDEX_Pos);
+}
+
+
+/**
+  \brief  flush mmu tlb by virtual address.
+  \details
+  */
+__STATIC_INLINE void csi_mmu_invalid_tlb_by_vaddr(uint32_t vaddr, uint32_t asid)
+{
+    __set_MEH(vaddr | (asid & MEH_ASID_Msk));
+    __set_MCIR(1u << MCIR_TLBP_Pos);
+
+    if (__get_MIR() & (1 << MIR_P_Pos)) {
+        return;
+    } else {
+        __set_MCIR(1u << MCIR_TLBINV_INDEX_Pos);
+    }
+}
+
+/*@} end of CSI_Core_MMUFunctions */
+
+
+/* ##################################    IRQ Functions  ############################################ */
+
+/**
+  \brief   Save the Irq context
+  \details Save the PSR value before disabling irq.
+  \return           PSR value to be passed to csi_irq_restore().
+ */
+__STATIC_INLINE uint32_t csi_irq_save(void)
+{
+    uint32_t result;
+    result = __get_PSR();
+    __disable_irq();
+    return(result);
+}
+
+/**
+  \brief   Restore the Irq context
+  \details restore saved primask state.
+  \param [in]      irq_state  psr irq state.
+ */
+__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
+{
+    __set_PSR(irq_state);
+}
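+
+/*
+  Usage sketch: a short critical section protected by saving and restoring
+  the PSR interrupt state with the two functions above.
+  \code
+  uint32_t irq_state = csi_irq_save();
+  // ... code that must not be interrupted ...
+  csi_irq_restore(irq_state);
+  \endcode
+*/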
+
+/*@} end of IRQ Functions */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CK810_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 1109 - 0
lib/sec_library/include/core/core_rv32.h

@@ -0,0 +1,1109 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_rv32.h
+ * @brief    CSI RV32 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     01. Sep 2018
+ ******************************************************************************/
+
+#ifndef __CORE_RV32_H_GENERIC
+#define __CORE_RV32_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup RV32
+  @{
+ */
+
+#ifndef __RV32
+#define __RV32                (0x01U)
+#endif
+
+/** __FPU_USED indicates whether an FPU is used or not.
+    This core does not support an FPU at all
+*/
+#define __FPU_USED       0U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_RV32_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_RV32_H_DEPENDANT
+#define __CORE_RV32_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __RV32_REV
+#define __RV32_REV               0x0000U
+#endif
+
+#ifndef __VIC_PRIO_BITS
+#define __VIC_PRIO_BITS           2U
+#endif
+
+#ifndef __Vendor_SysTickConfig
+#define __Vendor_SysTickConfig    1U
+#endif
+
+#ifndef __MPU_PRESENT
+#define __MPU_PRESENT             1U
+#endif
+
+#ifndef __ICACHE_PRESENT
+#define __ICACHE_PRESENT          1U
+#endif
+
+#ifndef __DCACHE_PRESENT
+#define __DCACHE_PRESENT          1U
+#endif
+
+#include <core/csi_rv32_gcc.h>
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group RV32 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+  - Core CLIC Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK80X processor based devices.
+*/
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CLIC Core-Local Interrupt Controller (CLIC)
+  \brief      Type definitions for the CLIC Registers
+  @{
+ */
+
+/**
+  \brief Access to the structure of a vector interrupt controller.
+ */
+typedef struct {
+    __IOM uint8_t IP;           /*!< Offset: 0x000 (R/W)  Interrupt set pending register */
+    __IOM uint8_t IE;           /*!< Offset: 0x001 (R/W)  Interrupt set enable register */
+    __IOM uint8_t ATTR;         /*!< Offset: 0x002 (R/W)  Interrupt set attribute register */
+    __IOM uint8_t CTL;          /*!< Offset: 0x003 (R/W)  Interrupt control register */
+} CLIC_INT_Control;
+
+typedef struct {
+    __IOM uint32_t CLICCFG:8;                 /*!< Offset: 0x000 (R/W)  CLIC configure register */
+    __IM  uint32_t CLICINFO;
+    __IOM uint32_t MINTTHRESH;
+    uint32_t RESERVED[1021];
+    CLIC_INT_Control CLICINT[4096];
+} CLIC_Type;
+
+#define CLIC_INFO_CLICINTCTLBITS_Pos           21U
+#define CLIC_INFO_CLICINTCTLBITS_Msk           (0xFUL << CLIC_INFO_CLICINTCTLBITS_Pos)
+
+#define CLIC_INTIP_IP_Pos                      0U                                    /*!< CLIC INTIP: IP Position */
+#define CLIC_INTIP_IP_Msk                      (0x1UL << CLIC_INTIP_IP_Pos)          /*!< CLIC INTIP: IP Mask */
+
+#define CLIC_INTIE_IE_Pos                      0U                                    /*!< CLIC INTIE: IE Position */
+#define CLIC_INTIE_IE_Msk                      (0x1UL << CLIC_INTIE_IE_Pos)          /*!< CLIC INTIE: IE Mask */
+
+#define CLIC_INTIE_T_Pos                       7U                                    /*!< CLIC INTIE: T Position */
+#define CLIC_INTIE_T_Msk                       (0x1UL << CLIC_INTIE_T_Pos)           /*!< CLIC INTIE: T Mask */
+
+#define CLIC_INTATTR_TRIG_Pos                  1U                                    /*!< CLIC INTATTR: TRIG Position */
+#define CLIC_INTATTR_TRIG_Msk                  (0x3UL << CLIC_INTATTR_TRIG_Pos)      /*!< CLIC INTATTR: TRIG Mask */
+
+#define CLIC_INTATTR_SHV_Pos                   0U                                    /*!< CLIC INTATTR: SHV Position */
+#define CLIC_INTATTR_SHV_Msk                   (0x1UL << CLIC_INTATTR_SHV_Pos)       /*!< CLIC INTATTR: SHV Mask */
+
+#define CLIC_INTCFG_NVBIT_Pos                  5U                                    /*!< CLIC INTCFG: NVBIT Position */
+#define CLIC_INTCFG_NVBIT_Msk                  (0x1UL << CLIC_INTCFG_NVBIT_Pos)      /*!< CLIC INTCFG: NVBIT Mask */
+
+#define CLIC_INTCFG_PRIO_Pos                   5U                                    /*!< CLIC INTCFG: INTCFG Position */
+#define CLIC_INTCFG_PRIO_Msk                   (0x7UL << CLIC_INTCFG_PRIO_Pos)       /*!< CLIC INTCFG: INTCFG Mask */
+
+#define CLIC_CLICCFG_NVBIT_Pos                 0U                                    /*!< CLIC CLICCFG: NVBIT Position */
+#define CLIC_CLICCFG_NVBIT_Msk                 (0x1UL << CLIC_CLICCFG_NVBIT_Pos)     /*!< CLIC CLICCFG: NVBIT Mask */
+
+#define CLIC_CLICCFG_NLBIT_Pos                 1U                                    /*!< CLIC CLICCFG: NLBIT Position */
+#define CLIC_CLICCFG_NLBIT_Msk                 (0xFUL << CLIC_CLICCFG_NLBIT_Pos)     /*!< CLIC CLICCFG: NLBIT Mask */
+
+#define CLIC_CLICCFG_NMBIT_Pos                 5U                                    /*!< CLIC CLICCFG: NMBIT Position */
+#define CLIC_CLICCFG_NMBIT_Msk                 (0x3UL << CLIC_CLICCFG_NMBIT_Pos)     /*!< CLIC CLICCFG: NMBIT Mask */
+
+/*@} end of group CSI_CLIC */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_PMP Physical Memory Protection (PMP)
+  \brief      Type definitions for the PMP Registers
+  @{
+ */
+
+#define PMP_PMPCFG_R_Pos                       0U                                    /*!< PMP PMPCFG: R Position */
+#define PMP_PMPCFG_R_Msk                       (0x1UL << PMP_PMPCFG_R_Pos)           /*!< PMP PMPCFG: R Mask */
+
+#define PMP_PMPCFG_W_Pos                       1U                                    /*!< PMP PMPCFG: W Position */
+#define PMP_PMPCFG_W_Msk                       (0x1UL << PMP_PMPCFG_W_Pos)           /*!< PMP PMPCFG: W Mask */
+
+#define PMP_PMPCFG_X_Pos                       2U                                    /*!< PMP PMPCFG: X Position */
+#define PMP_PMPCFG_X_Msk                       (0x1UL << PMP_PMPCFG_X_Pos)           /*!< PMP PMPCFG: X Mask */
+
+#define PMP_PMPCFG_A_Pos                       3U                                    /*!< PMP PMPCFG: A Position */
+#define PMP_PMPCFG_A_Msk                       (0x3UL << PMP_PMPCFG_A_Pos)           /*!< PMP PMPCFG: A Mask */
+
+#define PMP_PMPCFG_L_Pos                       7U                                    /*!< PMP PMPCFG: L Position */
+#define PMP_PMPCFG_L_Msk                       (0x1UL << PMP_PMPCFG_L_Pos)           /*!< PMP PMPCFG: L Mask */
+
+typedef enum {
+    REGION_SIZE_4B       = -1,
+    REGION_SIZE_8B       = 0,
+    REGION_SIZE_16B      = 1,
+    REGION_SIZE_32B      = 2,
+    REGION_SIZE_64B      = 3,
+    REGION_SIZE_128B     = 4,
+    REGION_SIZE_256B     = 5,
+    REGION_SIZE_512B     = 6,
+    REGION_SIZE_1KB      = 7,
+    REGION_SIZE_2KB      = 8,
+    REGION_SIZE_4KB      = 9,
+    REGION_SIZE_8KB      = 10,
+    REGION_SIZE_16KB     = 11,
+    REGION_SIZE_32KB     = 12,
+    REGION_SIZE_64KB     = 13,
+    REGION_SIZE_128KB    = 14,
+    REGION_SIZE_256KB    = 15,
+    REGION_SIZE_512KB    = 16,
+    REGION_SIZE_1MB      = 17,
+    REGION_SIZE_2MB      = 18,
+    REGION_SIZE_4MB      = 19,
+    REGION_SIZE_8MB      = 20,
+    REGION_SIZE_16MB     = 21,
+    REGION_SIZE_32MB     = 22,
+    REGION_SIZE_64MB     = 23,
+    REGION_SIZE_128MB    = 24,
+    REGION_SIZE_256MB    = 25,
+    REGION_SIZE_512MB    = 26,
+    REGION_SIZE_1GB      = 27,
+    REGION_SIZE_2GB      = 28,
+    REGION_SIZE_4GB      = 29,
+    REGION_SIZE_8GB      = 30,
+    REGION_SIZE_16GB     = 31
+} region_size_e;
+
+typedef enum {
+    ADDRESS_MATCHING_TOR   = 1,
+    ADDRESS_MATCHING_NAPOT = 3
+} address_matching_e;
+
+typedef struct {
+    uint32_t r: 1;           /* readable enable */
+    uint32_t w: 1;           /* writeable enable */
+    uint32_t x: 1;           /* execable enable */
+    address_matching_e a: 2; /* address matching mode */
+    uint32_t reserved: 2;    /* reserved */
+    uint32_t l: 1;           /* lock enable */
+} mpu_region_attr_t;
+
+/*@} end of group CSI_PMP */
+
+/* CACHE Register Definitions */
+#define CACHE_MHCR_L0BTB_Pos                   12U                                           /*!< CACHE MHCR: L0BTB Position */
+#define CACHE_MHCR_L0BTB_Msk                   (0x1UL << CACHE_MHCR_L0BTB_Pos)               /*!< CACHE MHCR: WA Mask */
+
+#define CACHE_MHCR_BPE_Pos                     5U                                            /*!< CACHE MHCR: BPE Position */
+#define CACHE_MHCR_BPE_Msk                     (0x1UL << CACHE_MHCR_BPE_Pos)                 /*!< CACHE MHCR: BPE Mask */
+
+#define CACHE_MHCR_RS_Pos                      4U                                            /*!< CACHE MHCR: RS Position */
+#define CACHE_MHCR_RS_Msk                      (0x1UL << CACHE_MHCR_RS_Pos)                  /*!< CACHE MHCR: RS Mask */
+
+#define CACHE_MHCR_WA_Pos                      3U                                            /*!< CACHE MHCR: WA Position */
+#define CACHE_MHCR_WA_Msk                      (0x1UL << CACHE_MHCR_WA_Pos)                  /*!< CACHE MHCR: WA Mask */
+
+#define CACHE_MHCR_WB_Pos                      2U                                            /*!< CACHE MHCR: WB Position */
+#define CACHE_MHCR_WB_Msk                      (0x1UL << CACHE_MHCR_WB_Pos)                  /*!< CACHE MHCR: WB Mask */
+
+#define CACHE_MHCR_DE_Pos                      1U                                            /*!< CACHE MHCR: DE Position */
+#define CACHE_MHCR_DE_Msk                      (0x1UL << CACHE_MHCR_DE_Pos)                  /*!< CACHE MHCR: DE Mask */
+
+#define CACHE_MHCR_IE_Pos                      0U                                            /*!< CACHE MHCR: IE Position */
+#define CACHE_MHCR_IE_Msk                      (0x1UL << CACHE_MHCR_IE_Pos)                  /*!< CACHE MHCR: IE Mask */
+
+#define CACHE_INV_ADDR_Pos                     5U
+#define CACHE_INV_ADDR_Msk                     (0xFFFFFFFFUL << CACHE_INV_ADDR_Pos)
+
+/*@} end of group CSI_CACHE */
+
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_SysTick     System Tick Timer (CORET)
+  \brief    Type definitions for the System Timer Registers.
+  @{
+ */
+
+/**
+  \brief  The data structure of the access system timer.
+ */
+typedef struct {
+    __IOM uint32_t MTIMECMPLO;            /*!< Offset: 0x000 (R/W) Timer compare low 32-bits register */
+    __IOM uint32_t MTIMECMPHI;            /*!< Offset: 0x004 (R/W) Timer compare high 32-bits register */
+    uint32_t RESERVED[8188];
+    __IM  uint32_t MTIMELO;               /*!< Offset: 0x7FF8 (R)  Timer current low 32-bits register */
+    __IM  uint32_t MTIMEHI;               /*!< Offset: 0x7FFC (R)  Timer current high 32-bits register */
+} CORET_Type;
+
+/*@} end of group CSI_SysTick */
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_DCC
+  \brief    Type definitions for the DCC.
+  @{
+ */
+
+/**
+  \brief  Access to the data structure of DCC.
+ */
+typedef struct {
+    uint32_t RESERVED0[13U];
+    __IOM uint32_t HCR;                    /*!< Offset: 0x034 (R/W) */
+    uint32_t RESERVED1;
+    __IM uint32_t EHSR;                    /*!< Offset: 0x03C (R/ ) */
+    uint32_t RESERVED2[6U];
+    union {
+        __IM uint32_t DERJW;               /*!< Offset: 0x058 (R/ )  Data exchange register, CPU read */
+        __OM uint32_t DERJR;               /*!< Offset: 0x058 ( /W)  Data exchange register, CPU write */
+    };
+
+} DCC_Type;
+
+#define DCC_HCR_JW_Pos                   18U                                            /*!< DCC HCR: jw_int_en Position */
+#define DCC_HCR_JW_Msk                   (1UL << DCC_HCR_JW_Pos)                        /*!< DCC HCR: jw_int_en Mask */
+
+#define DCC_HCR_JR_Pos                   19U                                            /*!< DCC HCR: jr_int_en Position */
+#define DCC_HCR_JR_Msk                   (1UL << DCC_HCR_JR_Pos)                        /*!< DCC HCR: jr_int_en Mask */
+
+#define DCC_EHSR_JW_Pos                  1U                                             /*!< DCC EHSR: jw_vld Position */
+#define DCC_EHSR_JW_Msk                  (1UL << DCC_EHSR_JW_Pos)                       /*!< DCC EHSR: jw_vld Mask */
+
+#define DCC_EHSR_JR_Pos                  2U                                             /*!< DCC EHSR: jr_vld Position */
+#define DCC_EHSR_JR_Msk                  (1UL << DCC_EHSR_JR_Pos)                       /*!< DCC EHSR: jr_vld Mask */
+
+/*@} end of group CSI_DCC */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)
+
+/*@} end of group CSI_core_bitfield */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_base     Core Definitions
+  \brief      Definitions for base addresses, unions, and structures.
+  @{
+ */
+
+/* Memory mapping of RV32 Hardware */
+#define TCIP_BASE           (0xE000E000UL)                            /*!< Tightly Coupled IP Base Address */
+#define CORET_BASE          (0xE0004000UL)                            /*!< CORET Base Address */
+#define CLIC_BASE           (0xE0800000UL)                            /*!< CLIC Base Address */
+#define DCC_BASE            (0xE4010000UL)                            /*!< DCC Base Address */
+#define CACHE_BASE          (TCIP_BASE +  0x1000UL)                   /*!< CACHE Base Address */
+
+#define CORET               ((CORET_Type   *)     CORET_BASE  )       /*!< SysTick configuration struct */
+#define CLIC                ((CLIC_Type    *)     CLIC_BASE   )       /*!< CLIC configuration struct */
+#define DCC                 ((DCC_Type     *)     DCC_BASE    )       /*!< DCC configuration struct */
+#define CACHE               ((CACHE_Type   *)     CACHE_BASE  )       /*!< cache configuration struct */
+
+/*@} */
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+/**
+  \defgroup CSI_Core_FunctionInterface Functions and Instructions Reference
+*/
+
+/* ##########################   VIC functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_VICFunctions VIC Functions
+  \brief    Functions that manage interrupts and exceptions via the VIC.
+  @{
+ */
+
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    5UL)      )
+#define _IP2_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
+
+/**
+  \brief   Enable External Interrupt
+  \details Enable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_irq(int32_t IRQn)
+{
+    CLIC->CLICINT[IRQn].IE |= CLIC_INTIE_IE_Msk;
+}
+
+/**
+  \brief   Disable External Interrupt
+  \details Disable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_irq(int32_t IRQn)
+{
+    CLIC->CLICINT[IRQn].IE &= ~CLIC_INTIE_IE_Msk;
+}
+
+/**
+  \brief   Enable External Secure Interrupt
+  \details Enable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_sirq(int32_t IRQn)
+{
+    CLIC->CLICINT[IRQn].IE |= (CLIC_INTIE_IE_Msk | CLIC_INTIE_T_Msk);
+}
+
+/**
+  \brief   Disable External Secure Interrupt
+  \details Disable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_sirq(int32_t IRQn)
+{
+    CLIC->CLICINT[IRQn].IE &= ~(CLIC_INTIE_IE_Msk | CLIC_INTIE_T_Msk);
+}
+
+/**
+  \brief   Check Interrupt is Enabled or not
+  \details Read the enabled register in the VIC and returns the pending bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not enabled.
+  \return             1  Interrupt status is enabled.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_enabled_irq(int32_t IRQn)
+{
+    return (uint32_t)(CLIC->CLICINT[IRQn].IE & CLIC_INTIE_IE_Msk);
+}
+
+/**
+  \brief   Check Interrupt is Pending or not
+  \details Read the pending register in the VIC and returns the pending bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not pending.
+  \return             1  Interrupt status is pending.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_pending_irq(int32_t IRQn)
+{
+    return (uint32_t)(CLIC->CLICINT[IRQn].IP & CLIC_INTIP_IP_Msk);
+}
+
+/**
+  \brief   Set Pending Interrupt
+  \details Set the pending bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_pending_irq(int32_t IRQn)
+{
+    CLIC->CLICINT[IRQn].IP |= CLIC_INTIP_IP_Msk;
+}
+
+/**
+  \brief   Clear Pending Interrupt
+  \details Clear the pending bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_pending_irq(int32_t IRQn)
+{
+    CLIC->CLICINT[IRQn].IP &= ~CLIC_INTIP_IP_Msk;
+}
+
+/**
+  \brief   Set Wake up Interrupt
+  \details Set the wake up bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_wakeup_irq(int32_t IRQn)
+{
+}
+
+/**
+  \brief   Clear Wake up Interrupt
+  \details Clear the wake up bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_wakeup_irq(int32_t IRQn)
+{
+}
+
+/**
+  \brief   Set Interrupt Priority
+  \details Set the priority of an interrupt.
+  \note    The priority cannot be set for every core interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]  priority  Priority to set.
+ */
+__STATIC_INLINE void csi_vic_set_prio(int32_t IRQn, uint32_t priority)
+{
+    uint8_t val = 0xFFU;
+    uint8_t nlbits = (CLIC->CLICINFO & CLIC_INFO_CLICINTCTLBITS_Msk) >> CLIC_INFO_CLICINTCTLBITS_Pos;
+    CLIC->CLICINT[IRQn].CTL = (CLIC->CLICINT[IRQn].CTL & (~val)) | ((priority << (8 - nlbits)) & 0xFFU);
+}
+
+/**
+  \brief   Get Interrupt Priority
+  \details Read the priority of an interrupt.
+           The interrupt number can be positive to specify an external (device specific) interrupt,
+           or negative to specify an internal (core) interrupt.
+  \param [in]   IRQn  Interrupt number.
+  \return             Interrupt Priority.
+                      Value is aligned automatically to the implemented priority bits of the microcontroller.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_prio(int32_t IRQn)
+{
+    uint8_t nlbits = (CLIC->CLICINFO & CLIC_INFO_CLICINTCTLBITS_Msk) >> CLIC_INFO_CLICINTCTLBITS_Pos;
+    return CLIC->CLICINT[IRQn].CTL >> (8 - nlbits);
+}
+
+/**
+  \brief   Set interrupt handler
+  \details Set the interrupt handler according to the interrupt num, the handler will be filled in irq vectors.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]   handler  Interrupt handler.
+ */
+__STATIC_INLINE void csi_vic_set_vector(int32_t IRQn, uint32_t handler)
+{
+    if (IRQn >= 0 && IRQn < 1024) {
+        uint32_t *vectors = (uint32_t *)__get_MTVT();
+        vectors[IRQn] = handler;
+    }
+}
+
+/**
+  \brief   Get interrupt handler
+  \details Get the address of interrupt handler function.
+  \param [in]      IRQn  Interrupt number.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_vector(int32_t IRQn)
+{
+    if (IRQn >= 0 && IRQn < 1024) {
+        uint32_t *vectors = (uint32_t *)__get_MTVT();
+        return (uint32_t)vectors[IRQn];
+    }
+
+    return 0;
+}
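+
+/*
+  Usage sketch (the interrupt number 7 and handler name are placeholders):
+  install a handler in the vector table pointed to by MTVT, assign a
+  priority, and enable the interrupt in the CLIC.
+  \code
+  void timer_irq_handler(void);                        // hypothetical handler
+  csi_vic_set_vector(7, (uint32_t)timer_irq_handler);
+  csi_vic_set_prio(7, 1U);
+  csi_vic_enable_irq(7);
+  \endcode
+*/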
+
+/*@} end of CSI_Core_VICFunctions */
+
+/* ##########################   PMP functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_PMPFunctions PMP Functions
+  \brief    Functions that configure memory protection via the PMP.
+  @{
+ */
+
+/**
+  \brief  configure memory protected region.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 15).
+  \param [in]  base_addr  base address must be aligned with page size.
+  \param [in]  size       \ref region_size_e. memory protected region size.
+  \param [in]  attr       \ref mpu_region_attr_t. memory protected region attribute.
+  \param [in]  enable     enable or disable memory protected region.
+  */
+__STATIC_INLINE void csi_mpu_config_region(uint32_t idx, uint32_t base_addr, region_size_e size,
+                                           mpu_region_attr_t attr, uint32_t enable)
+{
+    uint8_t  pmpxcfg = 0;
+    uint32_t addr = 0;
+
+    if (idx > 15) {
+        return;
+    }
+
+    if (!enable) {
+        attr.a = 0;
+    }
+
+    if (attr.a == ADDRESS_MATCHING_TOR) {
+        addr = base_addr >> 2;
+    } else {
+        if (size == REGION_SIZE_4B) {
+            addr = base_addr >> 2;
+            attr.a = 2;
+        } else {
+            addr = ((base_addr >> 2) & (0xFFFFFFFFU - ((1 << (size + 1)) - 1))) | ((1 << size) - 1);
+        }
+    }
+
+    __set_PMPADDRx(idx, addr);
+
+    pmpxcfg |= (attr.r << PMP_PMPCFG_R_Pos) | (attr.w << PMP_PMPCFG_W_Pos) |
+               (attr.x << PMP_PMPCFG_X_Pos) | (attr.a << PMP_PMPCFG_A_Pos) |
+               (attr.l << PMP_PMPCFG_L_Pos);
+
+    __set_PMPxCFG(idx, pmpxcfg);
+}
+
+/**
+  \brief  disable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 15).
+  */
+__STATIC_INLINE void csi_mpu_disable_region(uint32_t idx)
+{
+    __set_PMPxCFG(idx, __get_PMPxCFG(idx) & (~PMP_PMPCFG_A_Msk));
+}
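+
+/*
+  Usage sketch (the base address is an illustrative assumption): grant locked
+  read/write, no-execute access to a 4 KB NAPOT region using PMP entry 0.
+  \code
+  mpu_region_attr_t attr = { .r = 1, .w = 1, .x = 0,
+                             .a = ADDRESS_MATCHING_NAPOT, .l = 1 };
+  csi_mpu_config_region(0, 0x20000000U, REGION_SIZE_4KB, attr, 1);
+  \endcode
+*/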
+
+/*@} end of CSI_Core_PMPFunctions */
+
+/* ##################################    SysTick function  ############################################ */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_SysTickFunctions SysTick Functions
+  \brief    Functions that configure the System.
+  @{
+ */
+
+
+/**
+  \brief   CORE timer Configuration
+  \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
+           Counter is in free running mode to generate periodic interrupts.
+  \param [in]  ticks  Number of ticks between two interrupts.
+  \param [in]  IRQn   core timer Interrupt number.
+  \return          0  Function succeeded.
+  \return          1  Function failed.
+  \note    When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
+           function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
+           must contain a vendor-specific implementation of this function.
+ */
+__STATIC_INLINE uint32_t csi_coret_config(uint32_t ticks, int32_t IRQn)
+{
+    if ((!CORET->MTIMECMPLO && !CORET->MTIMECMPHI) || ((CORET->MTIMECMPLO == 0xFFFFFFFF) && (CORET->MTIMECMPHI == 0xFFFFFFFF))) {
+        CORET->MTIMECMPLO = CORET->MTIMELO + ticks;
+        CORET->MTIMECMPHI = CORET->MTIMEHI;
+    } else {
+        CORET->MTIMECMPLO = CORET->MTIMECMPLO + ticks;
+    }
+    if (CORET->MTIMECMPLO < ticks) {
+        CORET->MTIMECMPHI++;
+    }
+
+    return (0UL);
+}
+
+/**
+  \brief   get CORE timer reload value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_load(void)
+{
+    return CORET->MTIMECMPLO;
+}
+
+/**
+  \brief   get CORE timer reload high value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_loadh(void)
+{
+    return CORET->MTIMECMPHI;
+}
+
+/**
+  \brief   get CORE timer counter value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_value(void)
+{
+    return CORET->MTIMELO;
+}
+
+/**
+  \brief   get CORE timer counter high value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_valueh(void)
+{
+    return CORET->MTIMEHI;
+}
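+
+/*
+  Usage sketch: assemble the 64-bit machine timer value from the two 32-bit
+  reads above, re-reading the high word until it is stable across the
+  low-word read.
+  \code
+  uint32_t hi, lo;
+  do {
+      hi = csi_coret_get_valueh();
+      lo = csi_coret_get_value();
+  } while (hi != csi_coret_get_valueh());
+  uint64_t mtime = ((uint64_t)hi << 32) | lo;
+  \endcode
+*/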
+
+/*@} end of CSI_Core_SysTickFunctions */
+
+/* ##################################### DCC function ########################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_core_DebugFunctions HAD Functions
+  \brief    Functions that access the HAD debug interface.
+  @{
+ */
+
+/**
+  \brief   HAD Send Character
+  \details Transmits a character via the HAD channel 0, and
+           \li Returns immediately when no debugger is connected that has claimed the output.
+           \li Blocks when a debugger is connected but the previously sent character has not yet been transmitted.
+  \param [in]     ch  Character to transmit.
+  \returns            Character to transmit.
+ */
+__STATIC_INLINE uint32_t csi_had_send_char(uint32_t ch)
+{
+    DCC->DERJR = (uint8_t)ch;
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Receive Character
+  \details Receives a character via the HAD channel 0.
+  \return             Received character.
+  \return         -1  No character pending.
+ */
+__STATIC_INLINE int32_t csi_had_receive_char(void)
+{
+    int32_t ch = -1;                           /* no character available */
+
+    if (_FLD2VAL(DCC_EHSR_JW, DCC->EHSR)) {
+        ch = DCC->DERJW;
+    }
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Check Character
+  \details Checks whether a character is pending for reading on the HAD channel 0.
+  \return          0  No character available.
+  \return          1  Character available.
+ */
+__STATIC_INLINE int32_t csi_had_check_char(void)
+{
+    return _FLD2VAL(DCC_EHSR_JW, DCC->EHSR);                              /* 1: character available, 0: no character */
+}
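+
+/*
+ * Illustrative sketch (not part of the original header): simple polled
+ * debug-console helpers built on the HAD channel accessors above.  The
+ * example_* names are hypothetical.
+ */
+#if 0   /* usage example only */
+static void example_had_puts(const char *s)
+{
+    while (*s) {
+        csi_had_send_char((uint32_t)*s++);
+    }
+}
+
+static void example_had_echo_once(void)
+{
+    if (csi_had_check_char()) {                    /* character pending? */
+        csi_had_send_char((uint32_t)csi_had_receive_char());
+    }
+}
+#endif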
+
+/*@} end of CSI_core_DebugFunctions */
+
+/* ##########################  Cache functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_CacheFunctions Cache Functions
+  \brief    Functions that configure Instruction and Data cache.
+  @{
+ */
+
+/**
+  \brief   Enable I-Cache
+  \details Turns on I-Cache
+  */
+__STATIC_INLINE void csi_icache_enable (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    uint32_t cache;
+    __DSB();
+    __ISB();
+    __ICACHE_IALL();
+    cache = __get_MHCR();
+    cache |= CACHE_MHCR_IE_Msk;
+    __set_MHCR(cache);
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Disable I-Cache
+  \details Turns off I-Cache
+  */
+__STATIC_INLINE void csi_icache_disable (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    uint32_t cache;
+    __DSB();
+    __ISB();
+    cache = __get_MHCR();
+    cache &= ~CACHE_MHCR_IE_Msk;            /* disable icache */
+    __set_MHCR(cache);
+    __ICACHE_IALL();                        /* invalidate all icache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Invalidate I-Cache
+  \details Invalidates I-Cache
+  */
+__STATIC_INLINE void csi_icache_invalid (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    __ICACHE_IALL();                        /* invalidate all icache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Enable D-Cache
+  \details Turns on D-Cache
+  \note    I-Cache also turns on.
+  */
+__STATIC_INLINE void csi_dcache_enable (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    uint32_t cache;
+    __DSB();
+    __ISB();
+    __DCACHE_IALL();                        /* invalidate all dcache */
+    cache = __get_MHCR();
+    cache |= (CACHE_MHCR_DE_Msk | CACHE_MHCR_WB_Msk | CACHE_MHCR_WA_Msk | CACHE_MHCR_RS_Msk | CACHE_MHCR_BPE_Msk | CACHE_MHCR_L0BTB_Msk);      /* enable all Cache */
+    __set_MHCR(cache);
+
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Disable D-Cache
+  \details Turns off D-Cache
+  \note    I-Cache also turns off.
+  */
+__STATIC_INLINE void csi_dcache_disable (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    uint32_t cache;
+    __DSB();
+    __ISB();
+    cache = __get_MHCR();
+    cache &= ~(uint32_t)CACHE_MHCR_DE_Msk; /* disable all Cache */
+    __set_MHCR(cache);
+    __DCACHE_IALL();                             /* invalidate all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Invalidate D-Cache
+  \details Invalidates D-Cache
+  \note    I-Cache is also invalidated.
+  */
+__STATIC_INLINE void csi_dcache_invalid (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    __DCACHE_IALL();                            /* invalidate all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Clean D-Cache
+  \details Cleans D-Cache
+  \note    I-Cache is also cleaned.
+  */
+__STATIC_INLINE void csi_dcache_clean (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    __DCACHE_CALL();                                     /* clean all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Clean & Invalidate D-Cache
+  \details Cleans and Invalidates D-Cache
+  \note    I-Cache is also flushed.
+  */
+__STATIC_INLINE void csi_dcache_clean_invalid (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    __DCACHE_CIALL();                                   /* clean and inv all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   D-Cache Invalidate by address
+  \details Invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 32-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_invalid_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int32_t op_size = dsize + (uint32_t)addr % 32;
+    uint32_t op_addr = (uint32_t)addr;
+    int32_t linesize = 32;
+
+    __DSB();
+
+    while (op_size > 0) {
+        __DCACHE_IPA(op_addr);
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean by address
+  \details Cleans D-Cache for the given address
+  \param[in]   addr    address (aligned to 32-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_clean_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1)
+    int32_t op_size = dsize + (uint32_t)addr % 32;
+    uint32_t op_addr = (uint32_t) addr & CACHE_INV_ADDR_Msk;
+    int32_t linesize = 32;
+
+    __DSB();
+
+    while (op_size > 0) {
+        __DCACHE_CPA(op_addr);
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean and Invalidate by address
+  \details Cleans and invalidates D_Cache for the given address
+  \param[in]   addr    address (aligned to 32-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_clean_invalid_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int32_t op_size = dsize + (uint32_t)addr % 32;
+    uint32_t op_addr = (uint32_t) addr;
+    int32_t linesize = 32;
+
+    __DSB();
+
+    while (op_size > 0) {
+        __DCACHE_CIPA(op_addr);
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+
+    __DSB();
+    __ISB();
+#endif
+}
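+
+/*
+ * Illustrative sketch (not part of the original header): typical cache
+ * maintenance around a DMA transfer on a non-coherent buffer.  The buffer and
+ * the dma_start()/dma_wait() calls are assumed, hypothetical driver helpers.
+ */
+#if 0   /* usage example only */
+static uint32_t dma_buf[256] __attribute__((aligned(32)));
+
+static void example_dma_transfer(void)
+{
+    /* CPU -> device: write back dirty lines so the device sees the CPU data */
+    csi_dcache_clean_range(dma_buf, sizeof(dma_buf));
+    dma_start(dma_buf, sizeof(dma_buf));           /* hypothetical DMA call */
+    dma_wait();                                    /* hypothetical DMA call */
+
+    /* device -> CPU: drop stale lines before reading the DMA result */
+    csi_dcache_invalid_range(dma_buf, sizeof(dma_buf));
+}
+#endif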
+
+/**
+  \brief   setup cacheable range Cache
+  \param [in]  index      cache scope index (0, 1, 2, 3).
+  \param [in]  baseAddr   base address must be aligned with size.
+  \param [in]  size       \ref CACHE Register Definitions. cache scope size.
+  \param [in]  enable     enable or disable cache scope.
+  \details setup Cache range
+  */
+__STATIC_INLINE void csi_cache_set_range (uint32_t index, uint32_t baseAddr, uint32_t size, uint32_t enable)
+{
+    ;
+}
+
+/**
+  \brief   Enable cache profile
+  \details Turns on Cache profile
+  */
+__STATIC_INLINE void csi_cache_enable_profile (void)
+{
+    ;
+}
+
+/**
+  \brief   Disable cache profile
+  \details Turns off Cache profile
+  */
+__STATIC_INLINE void csi_cache_disable_profile (void)
+{
+    ;
+}
+
+/**
+  \brief   Reset cache profile
+  \details Reset Cache profile
+  */
+__STATIC_INLINE void csi_cache_reset_profile (void)
+{
+    ;
+}
+
+/**
+  \brief   cache access times
+  \details Cache access times
+  \note    the counter increments once every 256 accesses.
+  \return          cache access times, actual times should be multiplied by 256
+  */
+__STATIC_INLINE uint32_t csi_cache_get_access_time (void)
+{
+    return 0;
+}
+
+/**
+  \brief   cache miss times
+  \details Cache miss times
+  \note    the counter increments once every 256 misses.
+  \return          cache miss times, actual times should be multiplied by 256
+  */
+__STATIC_INLINE uint32_t csi_cache_get_miss_time (void)
+{
+    return 0;
+}
+
+/*@} end of CSI_Core_CacheFunctions */
+
+/*@} end of CSI_core_DebugFunctions */
+
+/* ##################################    IRQ Functions  ############################################ */
+
+/**
+  \brief   Save the Irq context
+  \details Save the interrupt state (mstatus) before disabling interrupts.
+ */
+__STATIC_INLINE uint32_t csi_irq_save(void)
+{
+    uint32_t result;
+    result = __get_MSTATUS();
+    __disable_irq();
+    return(result);
+}
+
+/**
+  \brief   Restore the Irq context
+  \details Restore the previously saved interrupt state (mstatus).
+  \param [in]      irq_state  psr irq state.
+ */
+__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
+{
+    __set_MSTATUS(irq_state);
+}
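+
+/*
+ * Illustrative sketch (not part of the original header): csi_irq_save() and
+ * csi_irq_restore() bracket a short critical section; interrupts are only
+ * re-enabled on exit if they were enabled on entry, so nesting is safe.
+ */
+#if 0   /* usage example only */
+static volatile uint32_t shared_counter;
+
+static void example_critical_section(void)
+{
+    uint32_t state = csi_irq_save();    /* save mstatus and disable interrupts */
+
+    shared_counter++;                   /* protected read-modify-write */
+
+    csi_irq_restore(state);             /* restore previous interrupt state */
+}
+#endif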
+
+/*@} end of IRQ Functions */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_RV32_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 1187 - 0
lib/sec_library/include/core/core_rv32_old.h

@@ -0,0 +1,1187 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_rv32.h
+ * @brief    CSI RV32 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     01. Sep 2018
+ ******************************************************************************/
+
+#ifndef __CORE_RV32_H_GENERIC
+#define __CORE_RV32_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup RV32
+  @{
+ */
+
+#ifndef __RV32
+#define __RV32                (0x01U)
+#endif
+
+/** __FPU_USED indicates whether an FPU is used or not.
+    This core does not support an FPU at all
+*/
+#define __FPU_USED       0U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_RV32_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_RV32_H_DEPENDANT
+#define __CORE_RV32_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __RV32_REV
+#define __RV32_REV               0x0000U
+#endif
+
+#ifndef __VIC_PRIO_BITS
+#define __VIC_PRIO_BITS           2U
+#endif
+
+#ifndef __Vendor_SysTickConfig
+#define __Vendor_SysTickConfig    1U
+#endif
+
+#ifndef __MPU_PRESENT
+#define __MPU_PRESENT             1U
+#endif
+
+#ifndef __ICACHE_PRESENT
+#define __ICACHE_PRESENT          1U
+#endif
+
+#ifndef __DCACHE_PRESENT
+#define __DCACHE_PRESENT          1U
+#endif
+
+#include <core/csi_rv32_gcc.h>
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group CK802 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+  - Core CLIC Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK80X processor based devices.
+*/
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CLIC Core-Local Interrupt Controller (CLIC)
+  \brief      Type definitions for the CLIC Registers
+  @{
+ */
+
+/**
+  \brief Access to the structure of a vector interrupt controller.
+ */
+typedef struct {
+    __IOM uint8_t INTIP[1024U];            /*!< Offset: 0x000 (R/W)  Interrupt set pending register */
+    __IOM uint8_t INTIE[1024U];            /*!< Offset: 0x000 (R/W)  Interrupt set enable register */
+    __IOM uint8_t INTCFG[768U];            /*!< Offset: 0x000 (R/W)  Interrupt configure register */
+    __IOM uint8_t CLICCFG;                 /*!< Offset: 0x000 (R/W)  CLIC configure register */
+} CLIC_Type;
+
+#define CLIC_INTIP_IP_Pos                      0U                                    /*!< CLIC INTIP: IP Position */
+#define CLIC_INTIP_IP_Msk                      (0x1UL << CLIC_INTIP_IP_Pos)          /*!< CLIC INTIP: IP Mask */
+
+#define CLIC_INTIE_IE_Pos                      0U                                    /*!< CLIC INTIE: IE Position */
+#define CLIC_INTIE_IE_Msk                      (0x1UL << CLIC_INTIE_IE_Pos)          /*!< CLIC INTIE: IE Mask */
+
+#define CLIC_INTIE_T_Pos                       7U                                    /*!< CLIC INTIE: T Position */
+#define CLIC_INTIE_T_Msk                       (0x1UL << CLIC_INTIE_T_Pos)           /*!< CLIC INTIE: T Mask */
+
+#define CLIC_INTCFG_NVBIT_Pos                  5U                                    /*!< CLIC INTCFG: NVBIT Position */
+#define CLIC_INTCFG_NVBIT_Msk                  (0x1UL << CLIC_INTCFG_NVBIT_Pos)      /*!< CLIC INTCFG: NVBIT Mask */
+
+#define CLIC_INTCFG_PRIO_Pos                   6U                                    /*!< CLIC INTCFG: INTCFG Position */
+#define CLIC_INTCFG_PRIO_Msk                   (0x1UL << CLIC_INTCFG_PRIO_Pos)       /*!< CLIC INTCFG: INTCFG Mask */
+
+#define CLIC_CLICCFG_NVBIT_Pos                 0U                                    /*!< CLIC CLICCFG: NVBIT Position */
+#define CLIC_CLICCFG_NVBIT_Msk                 (0x1UL << CLIC_CLICCFG_NVBIT_Pos)     /*!< CLIC CLICCFG: NVBIT Mask */
+
+#define CLIC_CLICCFG_NLBIT_Pos                 1U                                    /*!< CLIC CLICCFG: NLBIT Position */
+#define CLIC_CLICCFG_NLBIT_Msk                 (0xFUL << CLIC_CLICCFG_NLBIT_Pos)     /*!< CLIC CLICCFG: NLBIT Mask */
+
+#define CLIC_CLICCFG_NMBIT_Pos                 5U                                    /*!< CLIC CLICCFG: NMBIT Position */
+#define CLIC_CLICCFG_NMBIT_Msk                 (0x3UL << CLIC_CLICCFG_NMBIT_Pos)     /*!< CLIC CLICCFG: NMBIT Mask */
+
+/*@} end of group CSI_CLIC */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_PMP Physical Memory Protection (PMP)
+  \brief      Type definitions for the PMP Registers
+  @{
+ */
+
+#define PMP_PMPCFG_R_Pos                       0U                                    /*!< PMP PMPCFG: R Position */
+#define PMP_PMPCFG_R_Msk                       (0x1UL << PMP_PMPCFG_R_Pos)           /*!< PMP PMPCFG: R Mask */
+
+#define PMP_PMPCFG_W_Pos                       1U                                    /*!< PMP PMPCFG: W Position */
+#define PMP_PMPCFG_W_Msk                       (0x1UL << PMP_PMPCFG_W_Pos)           /*!< PMP PMPCFG: W Mask */
+
+#define PMP_PMPCFG_X_Pos                       2U                                    /*!< PMP PMPCFG: X Position */
+#define PMP_PMPCFG_X_Msk                       (0x1UL << PMP_PMPCFG_X_Pos)           /*!< PMP PMPCFG: X Mask */
+
+#define PMP_PMPCFG_A_Pos                       3U                                    /*!< PMP PMPCFG: A Position */
+#define PMP_PMPCFG_A_Msk                       (0x3UL << PMP_PMPCFG_A_Pos)           /*!< PMP PMPCFG: A Mask */
+
+#define PMP_PMPCFG_L_Pos                       7U                                    /*!< PMP PMPCFG: L Position */
+#define PMP_PMPCFG_L_Msk                       (0x1UL << PMP_PMPCFG_L_Pos)           /*!< PMP PMPCFG: L Mask */
+
+typedef enum {
+    REGION_SIZE_4B       = -1,
+    REGION_SIZE_8B       = 0,
+    REGION_SIZE_16B      = 1,
+    REGION_SIZE_32B      = 2,
+    REGION_SIZE_64B      = 3,
+    REGION_SIZE_128B     = 4,
+    REGION_SIZE_256B     = 5,
+    REGION_SIZE_512B     = 6,
+    REGION_SIZE_1KB      = 7,
+    REGION_SIZE_2KB      = 8,
+    REGION_SIZE_4KB      = 9,
+    REGION_SIZE_8KB      = 10,
+    REGION_SIZE_16KB     = 11,
+    REGION_SIZE_32KB     = 12,
+    REGION_SIZE_64KB     = 13,
+    REGION_SIZE_128KB    = 14,
+    REGION_SIZE_256KB    = 15,
+    REGION_SIZE_512KB    = 16,
+    REGION_SIZE_1MB      = 17,
+    REGION_SIZE_2MB      = 18,
+    REGION_SIZE_4MB      = 19,
+    REGION_SIZE_8MB      = 20,
+    REGION_SIZE_16MB     = 21,
+    REGION_SIZE_32MB     = 22,
+    REGION_SIZE_64MB     = 23,
+    REGION_SIZE_128MB    = 24,
+    REGION_SIZE_256MB    = 25,
+    REGION_SIZE_512MB    = 26,
+    REGION_SIZE_1GB      = 27,
+    REGION_SIZE_2GB      = 28,
+    REGION_SIZE_4GB      = 29,
+    REGION_SIZE_8GB      = 30,
+    REGION_SIZE_16GB     = 31
+} region_size_e;
+
+typedef enum {
+    ADDRESS_MATCHING_TOR   = 1,
+    ADDRESS_MATCHING_NAPOT = 3
+} address_matching_e;
+
+typedef struct {
+    uint32_t r: 1;           /* read permission enable */
+    uint32_t w: 1;           /* write permission enable */
+    uint32_t x: 1;           /* execute permission enable */
+    address_matching_e a: 2; /* address matching mode */
+    uint32_t reserved: 2;    /* reserved */
+    uint32_t l: 1;           /* lock enable */
+} mpu_region_attr_t;
+
+/*@} end of group CSI_PMP */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CACHE
+  \brief      Type definitions for the cache Registers
+  @{
+ */
+
+/**
+  \brief On chip cache structure.
+ */
+typedef struct
+{
+    __IOM uint32_t CER;                    /*!< Offset: 0x000 (R/W)  Cache enable register */
+    __IOM uint32_t CIR;                    /*!< Offset: 0x004 (R/W)  Cache invalid register */
+    __IOM uint32_t CRCR[4U];               /*!< Offset: 0x008 (R/W)  Cache Configuration register */
+          uint32_t RSERVED0[1015U];
+    __IOM uint32_t CPFCR;                  /*!< Offset: 0xFF4 (R/W)  Cache performance analysis control register */
+    __IOM uint32_t CPFATR;                 /*!< Offset: 0xFF8 (R/W)  Cache access times register */
+    __IOM uint32_t CPFMTR;                 /*!< Offset: 0xFFC (R/W)  Cache missing times register */
+} CACHE_Type;
+
+/* CACHE Register Definitions */
+#define CACHE_CER_EN_Pos                       0U                                            /*!< CACHE CER: EN Position */
+#define CACHE_CER_EN_Msk                       (0x1UL << CACHE_CER_EN_Pos)                   /*!< CACHE CER: EN Mask */
+
+#define CACHE_CER_CFIG_Pos                     1U                                            /*!< CACHE CER: CFIG Position */
+#define CACHE_CER_CFIG_Msk                     (0x1UL << CACHE_CER_CFIG_Pos)                 /*!< CACHE CER: CFIG Mask */
+
+#define CACHE_CER_WB_Pos                       2U                                            /*!< CACHE CER: WB Position */
+#define CACHE_CER_WB_Msk                       (0x1UL << CACHE_CER_WB_Pos)                   /*!< CACHE CER: WB Mask */
+
+#define CACHE_CER_WCFIG_Pos                    3U                                            /*!< CACHE CER: WCFIG Position */
+#define CACHE_CER_WCFIG_Msk                    (0x1UL << CACHE_CER_WCFIG_Pos)                /*!< CACHE CER: WCFIG Mask */
+
+#define CACHE_CER_DCW_Pos                      4U                                            /*!< CACHE CER: DCW Position */
+#define CACHE_CER_DCW_Msk                      (0x1UL << CACHE_CER_DCW_Pos)                  /*!< CACHE CER: DCW Mask */
+
+#define CACHE_CER_CS_Pos                       31U                                           /*!< CACHE CER: CS Position */
+#define CACHE_CER_CS_Msk                       (0x1UL << CACHE_CER_CS_Pos)                   /*!< CACHE CER: CS Mask */
+
+#define CACHE_CIR_INV_ALL_Pos                  0U                                            /*!< CACHE CIR: INV_ALL Position */
+#define CACHE_CIR_INV_ALL_Msk                  (0x1UL << CACHE_CIR_INV_ALL_Pos)              /*!< CACHE CIR: INV_ALL Mask */
+
+#define CACHE_CIR_INV_ONE_Pos                  1U                                            /*!< CACHE CIR: INV_ONE Position */
+#define CACHE_CIR_INV_ONE_Msk                  (0x1UL << CACHE_CIR_INV_ONE_Pos)              /*!< CACHE CIR: INV_ONE Mask */
+
+#define CACHE_CIR_CLR_ALL_Pos                  2U                                            /*!< CACHE CIR: CLR_ALL Position */
+#define CACHE_CIR_CLR_ALL_Msk                  (0x1UL << CACHE_CIR_CLR_ALL_Pos)              /*!< CACHE CIR: CLR_ALL Mask */
+
+#define CACHE_CIR_CLR_ONE_Pos                  3U                                            /*!< CACHE CIR: CLR_ONE Position */
+#define CACHE_CIR_CLR_ONE_Msk                  (0x1UL << CACHE_CIR_CLR_ONE_Pos)              /*!< CACHE CIR: CLR_ONE Mask */
+
+#define CACHE_CIR_INV_ADDR_Pos                 4U                                            /*!< CACHE CIR: INV_ADDR Position */
+#define CACHE_CIR_INV_ADDR_Msk                 (0xFFFFFFFUL << CACHE_CIR_INV_ADDR_Pos)       /*!< CACHE CIR: INV_ADDR Mask */
+
+#define CACHE_CRCR_EN_Pos                      0U                                            /*!< CACHE CRCR: EN Position */
+#define CACHE_CRCR_EN_Msk                      (0x1UL << CACHE_CRCR_EN_Pos)                  /*!< CACHE CRCR: EN Mask */
+
+#define CACHE_CRCR_SIZE_Pos                    1U                                            /*!< CACHE CRCR: Size Position */
+#define CACHE_CRCR_SIZE_Msk                    (0x1FUL << CACHE_CRCR_SIZE_Pos)               /*!< CACHE CRCR: Size Mask */
+
+#define CACHE_CRCR_BASE_ADDR_Pos               10U                                           /*!< CACHE CRCR: base addr Position */
+#define CACHE_CRCR_BASE_ADDR_Msk               (0x3FFFFFUL << CACHE_CRCR_BASE_ADDR_Pos)      /*!< CACHE CRCR: base addr Mask */
+
+#define CACHE_CPFCR_PFEN_Pos                   0U                                            /*!< CACHE CPFCR: PFEN Position */
+#define CACHE_CPFCR_PFEN_Msk                   (0x1UL << CACHE_CPFCR_PFEN_Pos)               /*!< CACHE CPFCR: PFEN Mask */
+
+#define CACHE_CPFCR_PFRST_Pos                  1U                                            /*!< CACHE CPFCR: PFRST Position */
+#define CACHE_CPFCR_PFRST_Msk                  (0x1UL << CACHE_CPFCR_PFRST_Pos)              /*!< CACHE CPFCR: PFRST Mask */
+
+#define CACHE_CRCR_4K                          0xB                                           /* 01011 */
+#define CACHE_CRCR_8K                          0xC                                           /* 01100 */
+#define CACHE_CRCR_16K                         0xD                                           /* 01101 */
+#define CACHE_CRCR_32K                         0xE                                           /* 01110 */
+#define CACHE_CRCR_64K                         0xF                                           /* 01111 */
+#define CACHE_CRCR_128K                        0x10                                          /* 10000 */
+#define CACHE_CRCR_256K                        0x11                                          /* 10001 */
+#define CACHE_CRCR_512K                        0x12                                          /* 10010 */
+#define CACHE_CRCR_1M                          0x13                                          /* 10011 */
+#define CACHE_CRCR_2M                          0x14                                          /* 10100 */
+#define CACHE_CRCR_4M                          0x15                                          /* 10101 */
+#define CACHE_CRCR_8M                          0x16                                          /* 10110 */
+#define CACHE_CRCR_16M                         0x17                                          /* 10111 */
+#define CACHE_CRCR_32M                         0x18                                          /* 11000 */
+#define CACHE_CRCR_64M                         0x19                                          /* 11001 */
+#define CACHE_CRCR_128M                        0x1A                                          /* 11010 */
+#define CACHE_CRCR_256M                        0x1B                                          /* 11011 */
+#define CACHE_CRCR_512M                        0x1C                                          /* 11100 */
+#define CACHE_CRCR_1G                          0x1D                                          /* 11101 */
+#define CACHE_CRCR_2G                          0x1E                                          /* 11110 */
+#define CACHE_CRCR_4G                          0x1F                                          /* 11111 */
+
+/*@} end of group CSI_CACHE */
+
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_SysTick     System Tick Timer (CORET)
+  \brief    Type definitions for the System Timer Registers.
+  @{
+ */
+
+/**
+  \brief  The data structure of the access system timer.
+ */
+typedef struct {
+    __IOM uint32_t CTRL;                   /*!< Offset: 0x000 (R/W)  Control register */
+    __IOM uint32_t LOAD;                   /*!< Offset: 0x004 (R/W)  Backfill register */
+    __IOM uint32_t VAL;                    /*!< Offset: 0x008 (R/W)  Current register */
+    __IM  uint32_t CALIB;                  /*!< Offset: 0x00C (R/ )  Calibration register */
+} CORET_Type;
+
+/* CORET Control / Status Register Definitions */
+#define CORET_CTRL_COUNTFLAG_Pos           16U                                            /*!< CORET CTRL: COUNTFLAG Position */
+#define CORET_CTRL_COUNTFLAG_Msk           (1UL << CORET_CTRL_COUNTFLAG_Pos)              /*!< CORET CTRL: COUNTFLAG Mask */
+
+#define CORET_CTRL_CLKSOURCE_Pos           2U                                             /*!< CORET CTRL: CLKSOURCE Position */
+#define CORET_CTRL_CLKSOURCE_Msk           (1UL << CORET_CTRL_CLKSOURCE_Pos)              /*!< CORET CTRL: CLKSOURCE Mask */
+
+#define CORET_CTRL_TICKINT_Pos             1U                                             /*!< CORET CTRL: TICKINT Position */
+#define CORET_CTRL_TICKINT_Msk             (1UL << CORET_CTRL_TICKINT_Pos)                /*!< CORET CTRL: TICKINT Mask */
+
+#define CORET_CTRL_ENABLE_Pos              0U                                             /*!< CORET CTRL: ENABLE Position */
+#define CORET_CTRL_ENABLE_Msk              (1UL /*<< CORET_CTRL_ENABLE_Pos*/)             /*!< CORET CTRL: ENABLE Mask */
+
+    /* CORET Reload Register Definitions */
+#define CORET_LOAD_RELOAD_Pos              0U                                             /*!< CORET LOAD: RELOAD Position */
+#define CORET_LOAD_RELOAD_Msk              (0xFFFFFFUL /*<< CORET_LOAD_RELOAD_Pos*/)      /*!< CORET LOAD: RELOAD Mask */
+
+    /* CORET Current Register Definitions */
+#define CORET_VAL_CURRENT_Pos              0U                                             /*!< CORET VAL: CURRENT Position */
+#define CORET_VAL_CURRENT_Msk              (0xFFFFFFUL /*<< CORET_VAL_CURRENT_Pos*/)      /*!< CORET VAL: CURRENT Mask */
+
+    /* CORET Calibration Register Definitions */
+#define CORET_CALIB_NOREF_Pos              31U                                            /*!< CORET CALIB: NOREF Position */
+#define CORET_CALIB_NOREF_Msk              (1UL << CORET_CALIB_NOREF_Pos)                 /*!< CORET CALIB: NOREF Mask */
+
+#define CORET_CALIB_SKEW_Pos               30U                                            /*!< CORET CALIB: SKEW Position */
+#define CORET_CALIB_SKEW_Msk               (1UL << CORET_CALIB_SKEW_Pos)                  /*!< CORET CALIB: SKEW Mask */
+
+#define CORET_CALIB_TENMS_Pos              0U                                             /*!< CORET CALIB: TENMS Position */
+#define CORET_CALIB_TENMS_Msk              (0xFFFFFFUL /*<< CORET_CALIB_TENMS_Pos*/)      /*!< CORET CALIB: TENMS Mask */
+
+/*@} end of group CSI_SysTick */
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_DCC
+  \brief    Type definitions for the DCC.
+  @{
+ */
+
+/**
+  \brief  Access to the data structure of DCC.
+ */
+typedef struct {
+    uint32_t RESERVED0[13U];
+    __IOM uint32_t HCR;                    /*!< Offset: 0x034 (R/W) */
+    __IM  uint32_t EHSR;                   /*!< Offset: 0x03C (R/ ) */
+    uint32_t RESERVED1[6U];
+    union {
+        __IM uint32_t DERJW;               /*!< Offset: 0x058 (R/ )  Data exchange register CPU read*/
+        __OM uint32_t DERJR;               /*!< Offset: 0x058 ( /W)  Data exchange register CPU write */
+    };
+
+} DCC_Type;
+
+#define DCC_HCR_JW_Pos                   18U                                            /*!< DCC HCR: jw_int_en Position */
+#define DCC_HCR_JW_Msk                   (1UL << DCC_HCR_JW_Pos)                        /*!< DCC HCR: jw_int_en Mask */
+
+#define DCC_HCR_JR_Pos                   19U                                            /*!< DCC HCR: jr_int_en Position */
+#define DCC_HCR_JR_Msk                   (1UL << DCC_HCR_JR_Pos)                        /*!< DCC HCR: jr_int_en Mask */
+
+#define DCC_EHSR_JW_Pos                  1U                                             /*!< DCC EHSR: jw_vld Position */
+#define DCC_EHSR_JW_Msk                  (1UL << DCC_EHSR_JW_Pos)                       /*!< DCC EHSR: jw_vld Mask */
+
+#define DCC_EHSR_JR_Pos                  2U                                             /*!< DCC EHSR: jr_vld Position */
+#define DCC_EHSR_JR_Msk                  (1UL << DCC_EHSR_JR_Pos)                       /*!< DCC EHSR: jr_vld Mask */
+
+/*@} end of group CSI_DCC */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    ((value << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    ((value & field ## _Msk) >> field ## _Pos)
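+
+/*
+ * Illustrative sketch (not part of the original header): _VAL2FLD()/_FLD2VAL()
+ * pair a field value with its xxx_Pos/xxx_Msk definitions, e.g. the CLICCFG
+ * NLBIT field defined earlier in this file.
+ */
+#if 0   /* usage example only */
+static void example_bitfield_access(void)
+{
+    /* write: set the number-of-level bits to 3 without touching other fields */
+    CLIC->CLICCFG = (CLIC->CLICCFG & ~CLIC_CLICCFG_NLBIT_Msk) |
+                    _VAL2FLD(CLIC_CLICCFG_NLBIT, 3U);
+
+    /* read: extract the same field back out of the register */
+    uint32_t nlbit = _FLD2VAL(CLIC_CLICCFG_NLBIT, CLIC->CLICCFG);
+    (void)nlbit;
+}
+#endif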
+
+/*@} end of group CSI_core_bitfield */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_base     Core Definitions
+  \brief      Definitions for base addresses, unions, and structures.
+  @{
+ */
+
+/* Memory mapping of CK802 Hardware */
+#define TCIP_BASE           (0xE000E000UL)                            /*!< Tightly Coupled IP Base Address */
+#define CORET_BASE          (TCIP_BASE +  0x0010UL)                   /*!< CORET Base Address */
+#define CLIC_BASE           (TCIP_BASE +  0x0100UL)                   /*!< CLIC Base Address */
+#define DCC_BASE            (0xE0011000UL)                            /*!< DCC Base Address */
+#define CACHE_BASE          (TCIP_BASE +  0x1000UL)                   /*!< CACHE Base Address */
+
+#define CORET               ((CORET_Type   *)     CORET_BASE  )       /*!< SysTick configuration struct */
+#define CLIC                ((CLIC_Type    *)     CLIC_BASE   )       /*!< CLIC configuration struct */
+#define DCC                 ((DCC_Type     *)     DCC_BASE    )       /*!< DCC configuration struct */
+#define CACHE               ((CACHE_Type   *)     CACHE_BASE  )       /*!< cache configuration struct */
+
+/*@} */
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+/**
+  \defgroup CSI_Core_FunctionInterface Functions and Instructions Reference
+*/
+
+/* ##########################   VIC functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_VICFunctions VIC Functions
+  \brief    Functions that manage interrupts and exceptions via the VIC.
+  @{
+ */
+
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    5UL)      )
+#define _IP2_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
+
+/**
+  \brief   Enable External Interrupt
+  \details Enable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_irq(int32_t IRQn)
+{
+    CLIC->INTIE[IRQn] |= CLIC_INTIE_IE_Msk;
+}
+
+/**
+  \brief   Disable External Interrupt
+  \details Disable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_irq(int32_t IRQn)
+{
+    CLIC->INTIE[IRQn] &= ~CLIC_INTIE_IE_Msk;
+}
+
+/**
+  \brief   Enable External Secure Interrupt
+  \details Enable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_sirq(int32_t IRQn)
+{
+    CLIC->INTIE[IRQn] |= (CLIC_INTIE_IE_Msk | CLIC_INTIE_T_Msk);
+}
+
+/**
+  \brief   Disable External Secure Interrupt
+  \details Disable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_sirq(int32_t IRQn)
+{
+    CLIC->INTIE[IRQn] &= ~(CLIC_INTIE_IE_Msk | CLIC_INTIE_T_Msk);
+}
+
+/**
+  \brief   Check Interrupt is Enabled or not
+  \details Read the enabled register in the VIC and returns the pending bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not enabled.
+  \return             1  Interrupt status is enabled.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_enabled_irq(int32_t IRQn)
+{
+    return (uint32_t)(CLIC->INTIE[IRQn] & CLIC_INTIE_IE_Msk);
+}
+
+/**
+  \brief   Check Interrupt is Pending or not
+  \details Read the pending register in the VIC and returns the pending bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not pending.
+  \return             1  Interrupt status is pending.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_pending_irq(int32_t IRQn)
+{
+    return (uint32_t)(CLIC->INTIP[IRQn] & CLIC_INTIP_IP_Msk);
+}
+
+/**
+  \brief   Set Pending Interrupt
+  \details Set the pending bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_pending_irq(int32_t IRQn)
+{
+    CLIC->INTIP[IRQn] |= CLIC_INTIP_IP_Msk;
+}
+
+/**
+  \brief   Clear Pending Interrupt
+  \details Clear the pending bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_pending_irq(int32_t IRQn)
+{
+    CLIC->INTIP[IRQn] &= ~CLIC_INTIP_IP_Msk;
+}
+
+/**
+  \brief   Set Interrupt Priority
+  \details Set the priority of an interrupt.
+  \note    The priority cannot be set for every core interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]  priority  Priority to set.
+ */
+__STATIC_INLINE void csi_vic_set_prio(int32_t IRQn, uint32_t priority)
+{
+    CLIC->INTCFG[IRQn] = (CLIC->INTCFG[IRQn] & (~CLIC_INTCFG_PRIO_Msk)) | (priority << CLIC_INTCFG_PRIO_Pos);
+}
+
+/**
+  \brief   Get Interrupt Priority
+  \details Read the priority of an interrupt.
+           The interrupt number can be positive to specify an external (device specific) interrupt,
+           or negative to specify an internal (core) interrupt.
+  \param [in]   IRQn  Interrupt number.
+  \return             Interrupt Priority.
+                      Value is aligned automatically to the implemented priority bits of the microcontroller.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_prio(int32_t IRQn)
+{
+    return CLIC->INTCFG[IRQn] >> CLIC_INTCFG_PRIO_Pos;
+}
+
+/**
+  \brief   Set interrupt handler
+  \details Set the interrupt handler according to the interrupt num, the handler will be filled in irq vectors.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]   handler  Interrupt handler.
+ */
+__STATIC_INLINE void csi_vic_set_vector(int32_t IRQn, uint32_t handler)
+{
+    if (IRQn >= 0 && IRQn < 1024) {
+        uint32_t *vectors = (uint32_t *)__get_MTVT();
+        vectors[32 + IRQn] = handler;
+    }
+}
+
+/**
+  \brief   Get interrupt handler
+  \details Get the address of interrupt handler function.
+  \param [in]      IRQn  Interrupt number.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_vector(int32_t IRQn)
+{
+    if (IRQn >= 0 && IRQn < 1024) {
+        uint32_t *vectors = (uint32_t *)__get_MTVT();
+        return (uint32_t)vectors[32 + IRQn];
+    }
+
+    return 0;
+}
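+
+/*
+ * Illustrative sketch (not part of the original header): typical sequence for
+ * installing and enabling an external interrupt through the VIC helpers.
+ * EXAMPLE_IRQn and example_irq_handler are assumed, device-specific names.
+ */
+#if 0   /* usage example only */
+#define EXAMPLE_IRQn    (16)
+
+static void example_irq_handler(void)
+{
+    /* clear the device interrupt source here */
+}
+
+static void example_irq_setup(void)
+{
+    csi_vic_set_vector(EXAMPLE_IRQn, (uint32_t)example_irq_handler);
+    csi_vic_set_prio(EXAMPLE_IRQn, 1U);     /* priority within __VIC_PRIO_BITS */
+    csi_vic_enable_irq(EXAMPLE_IRQn);
+}
+#endif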
+
+/*@} end of CSI_Core_VICFunctions */
+
+/* ##########################   PMP functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_PMPFunctions PMP Functions
+  \brief    Functions that configure the physical memory protection (PMP) unit.
+  @{
+ */
+
+/**
+  \brief  configure memory protected region.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 15).
+  \param [in]  base_addr  base address, must be aligned to the region size.
+  \param [in]  size       \ref region_size_e. memory protected region size.
+  \param [in]  attr       \ref mpu_region_attr_t. memory protected region attribute.
+  \param [in]  enable     enable or disable memory protected region.
+  */
+__STATIC_INLINE void csi_mpu_config_region(uint32_t idx, uint32_t base_addr, region_size_e size,
+                                           mpu_region_attr_t attr, uint32_t enable)
+{
+    uint8_t  pmpxcfg = 0;
+    uint32_t addr = 0;
+
+    if (idx > 15) {
+        return;
+    }
+
+    if (!enable) {
+        attr.a = 0;
+    }
+
+    if (attr.a == ADDRESS_MATCHING_TOR) {
+        addr = base_addr >> 2;
+    } else {
+        if (size == REGION_SIZE_4B) {
+            addr = base_addr >> 2;
+            attr.a = 2;
+        } else {
+            addr = ((base_addr >> 2) & (0xFFFFFFFFU - ((1 << (size + 1)) - 1))) | ((1 << size) - 1);
+        }
+    }
+
+    __set_PMPADDRx(idx, addr);
+
+    pmpxcfg |= (attr.r << PMP_PMPCFG_R_Pos) | (attr.w << PMP_PMPCFG_W_Pos) |
+               (attr.x << PMP_PMPCFG_X_Pos) | (attr.a << PMP_PMPCFG_A_Pos) |
+               (attr.l << PMP_PMPCFG_L_Pos);
+
+    __set_PMPxCFG(idx, pmpxcfg);
+}
+
+/**
+  \brief  disable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 15).
+  */
+__STATIC_INLINE void csi_mpu_disable_region(uint32_t idx)
+{
+    __set_PMPxCFG(idx, __get_PMPxCFG(idx) & (~PMP_PMPCFG_A_Msk));
+}
+
+/*@} end of CSI_Core_PMPFunctions */
+
+/* ##################################    SysTick function  ############################################ */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_SysTickFunctions SysTick Functions
+  \brief    Functions that configure the System.
+  @{
+ */
+
+
+/**
+  \brief   CORE timer Configuration
+  \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
+           Counter is in free running mode to generate periodic interrupts.
+  \param [in]  ticks  Number of ticks between two interrupts.
+  \param [in]  IRQn   core timer Interrupt number.
+  \return          0  Function succeeded.
+  \return          1  Function failed.
+  \note    When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
+           function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
+           must contain a vendor-specific implementation of this function.
+ */
+__STATIC_INLINE uint32_t csi_coret_config(uint32_t ticks, int32_t IRQn)
+{
+    if ((ticks - 1UL) > CORET_LOAD_RELOAD_Msk) {
+        return (1UL);                                                   /* Reload value impossible */
+    }
+
+    CORET->LOAD = (uint32_t)(ticks - 1UL);                              /* set reload register */
+    CORET->VAL  = 0UL;                                                  /* Load the CORET Counter Value */
+    CORET->CTRL = CORET_CTRL_CLKSOURCE_Msk |
+                  CORET_CTRL_TICKINT_Msk |
+                  CORET_CTRL_ENABLE_Msk;                                /* Enable CORET IRQ and CORET Timer */
+    return (0UL);                                                       /* Function successful */
+}
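+
+/*
+ * Illustrative sketch (not part of the original header): converting a system
+ * clock frequency into a tick count for csi_coret_config().  SYSTEM_CLOCK and
+ * CORET_IRQn are assumed, board-specific values.
+ */
+#if 0   /* usage example only */
+#define SYSTEM_CLOCK    (24000000U)     /* assumed core timer clock in Hz */
+#define CORET_IRQn      (1)             /* assumed core timer IRQ number */
+
+static void example_start_1ms_tick(void)
+{
+    /* 24 MHz / 1000 = 24000 ticks per interrupt -> 1 ms period */
+    if (csi_coret_config(SYSTEM_CLOCK / 1000U, CORET_IRQn) != 0U) {
+        /* requested period does not fit in the 24-bit reload register */
+    }
+}
+#endif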
+
+/**
+  \brief   get CORE timer reload value
+  \return          CORE timer reload value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_load(void)
+{
+    return CORET->LOAD;
+}
+
+/**
+  \brief   get CORE timer counter value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_value(void)
+{
+    return CORET->VAL;
+}
+
+/*@} end of CSI_Core_SysTickFunctions */
+
+/* ##################################### DCC function ########################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_core_DebugFunctions HAD Functions
+  \brief    Functions that access the HAD debug interface.
+  @{
+ */
+
+/**
+  \brief   HAD Send Character
+  \details Transmits a character via the HAD channel 0, and
+           \li Returns immediately when no debugger is connected that has claimed the output.
+           \li Blocks when a debugger is connected but the previously sent character has not yet been transmitted.
+  \param [in]     ch  Character to transmit.
+  \returns            Character to transmit.
+ */
+__STATIC_INLINE uint32_t csi_had_send_char(uint32_t ch)
+{
+    DCC->DERJR = (uint8_t)ch;
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Receive Character
+  \details Receives a character via the HAD channel 0.
+  \return             Received character.
+  \return         -1  No character pending.
+ */
+__STATIC_INLINE int32_t csi_had_receive_char(void)
+{
+    int32_t ch = -1;                           /* no character available */
+
+    if (_FLD2VAL(DCC_EHSR_JW, DCC->EHSR)) {
+        ch = DCC->DERJW;
+    }
+
+    return (ch);
+}
+
+
+/**
+  \brief   HAD Check Character
+  \details Checks whether a character is pending for reading on the HAD channel 0.
+  \return          0  No character available.
+  \return          1  Character available.
+ */
+__STATIC_INLINE int32_t csi_had_check_char(void)
+{
+    return _FLD2VAL(DCC_EHSR_JW, DCC->EHSR);                              /* 1: character available, 0: no character */
+}
+
+/*@} end of CSI_core_DebugFunctions */
+
+/* ##########################  Cache functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_CacheFunctions Cache Functions
+  \brief    Functions that configure Instruction and Data cache.
+  @{
+ */
+
+/**
+  \brief   Enable I-Cache
+  \details Turns on I-Cache
+  */
+__STATIC_INLINE void csi_icache_enable (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+    CACHE->CER |=  (uint32_t)(CACHE_CER_EN_Msk | CACHE_CER_CFIG_Msk);  /* enable all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Disable I-Cache
+  \details Turns off I-Cache
+  */
+__STATIC_INLINE void csi_icache_disable (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    CACHE->CER &=  ~(uint32_t)(CACHE_CER_EN_Msk | CACHE_CER_CFIG_Msk);  /* disable all Cache */
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;          /* invalidate all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Invalidate I-Cache
+  \details Invalidates I-Cache
+  */
+__STATIC_INLINE void csi_icache_invalid (void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Enable D-Cache
+  \details Turns on D-Cache
+  \note    I-Cache also turns on.
+  */
+__STATIC_INLINE void csi_dcache_enable (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+    CACHE->CER =  (uint32_t)(CACHE_CER_EN_Msk | CACHE_CER_WB_Msk | CACHE_CER_DCW_Msk) & (~CACHE_CER_CFIG_Msk);  /* enable all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Disable D-Cache
+  \details Turns off D-Cache
+  \note    I-Cache also turns off.
+  */
+__STATIC_INLINE void csi_dcache_disable (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    CACHE->CER &=  ~(uint32_t)CACHE_CER_EN_Msk;  /* disable all Cache */
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;          /* invalidate all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Invalidate D-Cache
+  \details Invalidates D-Cache
+  \note    I-Cache is also invalidated.
+  */
+__STATIC_INLINE void csi_dcache_invalid (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    CACHE->CIR = CACHE_CIR_INV_ALL_Msk;         /* invalidate all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Clean D-Cache
+  \details Cleans D-Cache
+  \note    I-Cache is also cleaned.
+  */
+__STATIC_INLINE void csi_dcache_clean (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    CACHE->CIR = _VAL2FLD(CACHE_CIR_CLR_ALL, 1);         /* clean all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Clean & Invalidate D-Cache
+  \details Cleans and Invalidates D-Cache
+  \note    I-Cache is also flushed.
+  */
+__STATIC_INLINE void csi_dcache_clean_invalid (void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    CACHE->CIR = _VAL2FLD(CACHE_CIR_INV_ALL, 1) | _VAL2FLD(CACHE_CIR_CLR_ALL, 1);         /* clean and inv all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   D-Cache Invalidate by address
+  \details Invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_invalid_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_INV_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= _VAL2FLD(CACHE_CIR_INV_ONE, 1);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean by address
+  \details Cleans D-Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_INV_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= _VAL2FLD(CACHE_CIR_CLR_ONE, 1);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean and Invalidate by address
+  \details Cleans and invalidates D_Cache for the given address
+  \param[in]   addr    address (aligned to 16-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 16-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_invalid_range (uint32_t *addr, int32_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int32_t op_size = dsize + (uint32_t)addr % 16;
+    uint32_t op_addr = (uint32_t)addr & CACHE_CIR_INV_ADDR_Msk;
+    int32_t linesize = 16;
+
+    op_addr |= _VAL2FLD(CACHE_CIR_CLR_ONE, 1) | _VAL2FLD(CACHE_CIR_INV_ONE, 1);
+
+    while (op_size >= 128) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+
+        op_size -= 128;
+    }
+
+    while (op_size > 0) {
+        CACHE->CIR = op_addr;
+        op_addr += linesize;
+        op_size -= linesize;
+    }
+#endif
+}
+
+/**
+  \brief   setup cacheable range Cache
+  \details setup Cache range
+  */
+__STATIC_INLINE void csi_cache_set_range (uint32_t index, uint32_t baseAddr, uint32_t size, uint32_t enable)
+{
+    CACHE->CRCR[index] =  ((baseAddr & CACHE_CRCR_BASE_ADDR_Msk) |
+                           (_VAL2FLD(CACHE_CRCR_SIZE, size)) |
+                           (_VAL2FLD(CACHE_CRCR_EN, enable)));
+}
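+
+/*
+ * Illustrative sketch (not part of the original header): marking a 4 KB SRAM
+ * window as cacheable through CRCR[0] with one of the CACHE_CRCR_* size codes
+ * defined above.  The base address 0x20000000 is an arbitrary example value
+ * and must be aligned to the chosen size.
+ */
+#if 0   /* usage example only */
+static void example_enable_cacheable_window(void)
+{
+    csi_cache_set_range(0U, 0x20000000U, CACHE_CRCR_4K, 1U);
+    csi_icache_enable();
+    csi_dcache_enable();
+}
+#endif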
+
+/**
+  \brief   Enable cache profile
+  \details Turns on Cache profile
+  */
+__STATIC_INLINE void csi_cache_enable_profile (void)
+{
+    CACHE->CPFCR |=  (uint32_t)CACHE_CPFCR_PFEN_Msk;
+}
+
+/**
+  \brief   Disable cache profile
+  \details Turns off Cache profile
+  */
+__STATIC_INLINE void csi_cache_disable_profile (void)
+{
+    CACHE->CPFCR &=  ~(uint32_t)CACHE_CPFCR_PFEN_Msk;
+}
+
+/**
+  \brief   Reset cache profile
+  \details Reset Cache profile
+  */
+__STATIC_INLINE void csi_cache_reset_profile (void)
+{
+    CACHE->CPFCR |=  (uint32_t)CACHE_CPFCR_PFRST_Msk;
+}
+
+/**
+  \brief   cache access times
+  \details Cache access times
+  \note    the counter increments once every 256 accesses.
+  \return          cache access times, actual times should be multiplied by 256
+  */
+__STATIC_INLINE uint32_t csi_cache_get_access_time (void)
+{
+    return CACHE->CPFATR;
+}
+
+/**
+  \brief   cache miss times
+  \details Cache miss times
+  \note    the counter increments once every 256 misses.
+  \return          cache miss times, actual times should be multiplied by 256
+  */
+__STATIC_INLINE uint32_t csi_cache_get_miss_time (void)
+{
+    return CACHE->CPFMTR;
+}
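+
+/*
+ * Illustrative sketch (not part of the original header): deriving an
+ * approximate hit rate from the profile counters.  Both counters advance once
+ * per 256 events, so the scaling factor cancels out of the ratio.
+ */
+#if 0   /* usage example only */
+static uint32_t example_cache_hit_rate_percent(void)
+{
+    uint32_t access = csi_cache_get_access_time();
+    uint32_t miss   = csi_cache_get_miss_time();
+
+    if (access == 0U) {
+        return 0U;
+    }
+
+    return (uint32_t)(((uint64_t)(access - miss) * 100U) / access);
+}
+#endif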
+
+/*@} end of CSI_Core_CacheFunctions */
+
+/*@} end of CSI_core_DebugFunctions */
+
+/* ##################################    IRQ Functions  ############################################ */
+
+/**
+  \brief   Save the Irq context
+  \details Save the interrupt state (mstatus) before disabling interrupts.
+ */
+__STATIC_INLINE uint32_t csi_irq_save(void)
+{
+    uint32_t result;
+    result = __get_MSTATUS();
+    __disable_irq();
+    return(result);
+}
+
+/**
+  \brief   Restore the Irq context
+  \details Restore the previously saved interrupt state (mstatus).
+  \param [in]      irq_state  psr irq state.
+ */
+__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
+{
+    __set_MSTATUS(irq_state);
+}
+
+/*@} end of IRQ Functions */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_RV32_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 1119 - 0
lib/sec_library/include/core/core_rv64.h

@@ -0,0 +1,1119 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     core_rv64.h
+ * @brief    CSI RV64 Core Peripheral Access Layer Header File
+ * @version  V1.0
+ * @date     01. Sep 2018
+ ******************************************************************************/
+
+#ifndef __CORE_RV32_H_GENERIC
+#define __CORE_RV32_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ *                 CSI definitions
+ ******************************************************************************/
+/**
+  \ingroup RV32
+  @{
+ */
+
+#ifndef __RV64
+#define __RV64                (0x01U)
+#endif
+
+/** __FPU_USED indicates whether an FPU is used or not.
+    This core does not support an FPU at all
+*/
+#define __FPU_USED       0U
+
+#if defined ( __GNUC__ )
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_RV32_H_GENERIC */
+
+#ifndef __CSI_GENERIC
+
+#ifndef __CORE_RV32_H_DEPENDANT
+#define __CORE_RV32_H_DEPENDANT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* check device defines and use defaults */
+#ifndef __RV64_REV
+#define __RV64_REV               0x0000U
+#endif
+
+#ifndef __VIC_PRIO_BITS
+#define __VIC_PRIO_BITS           2U
+#endif
+
+#ifndef __Vendor_SysTickConfig
+#define __Vendor_SysTickConfig    1U
+#endif
+
+#ifndef __MPU_PRESENT
+#define __MPU_PRESENT             1U
+#endif
+
+#ifndef __ICACHE_PRESENT
+#define __ICACHE_PRESENT          1U
+#endif
+
+#ifndef __DCACHE_PRESENT
+#define __DCACHE_PRESENT          1U
+#endif
+
+
+#ifndef __L2CACHE_PRESENT
+#define __L2CACHE_PRESENT          1U
+#endif
+
+#include "csi_rv64_gcc.h"
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+    \defgroup CSI_glob_defs CSI Global Defines
+
+    <strong>IO Type Qualifiers</strong> are used
+    \li to specify the access to peripheral variables.
+    \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+#define     __I      volatile             /*!< Defines 'read only' permissions */
+#else
+#define     __I      volatile const       /*!< Defines 'read only' permissions */
+#endif
+#define     __O      volatile             /*!< Defines 'write only' permissions */
+#define     __IO     volatile             /*!< Defines 'read / write' permissions */
+
+/* following defines should be used for structure members */
+#define     __IM     volatile const       /*! Defines 'read only' structure member permissions */
+#define     __OM     volatile             /*! Defines 'write only' structure member permissions */
+#define     __IOM    volatile             /*! Defines 'read / write' structure member permissions */
+
+/*@} end of group C910 */
+
+/*******************************************************************************
+ *                 Register Abstraction
+  Core Register contain:
+  - Core Register
+  - Core CLINT Register
+ ******************************************************************************/
+/**
+  \defgroup CSI_core_register Defines and Type Definitions
+  \brief Type definitions and defines for CK80X processor based devices.
+*/
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CORE  Status and Control Registers
+  \brief      Core Register type definitions.
+  @{
+ */
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_CLINT Core-Local Interrupt Controller (CLINT)
+  \brief      Type definitions for the CLINT Registers
+  @{
+ */
+
+/**
+  \brief Access to the structure of a vector interrupt controller.
+ */
+
+typedef struct {
+    uint32_t RESERVED0;                 /*!< Offset: 0x000 (R/W)  CLINT configure register */
+    __IOM uint32_t PLIC_PRIO[1023];
+    __IOM uint32_t PLIC_IP[32];
+    uint32_t RESERVED1[3972 / 4 - 1];
+    __IOM uint32_t PLIC_H0_MIE[32];
+    __IOM uint32_t PLIC_H0_SIE[32];
+    __IOM uint32_t PLIC_H1_MIE[32];
+    __IOM uint32_t PLIC_H1_SIE[32];
+    __IOM uint32_t PLIC_H2_MIE[32];
+    __IOM uint32_t PLIC_H2_SIE[32];
+    __IOM uint32_t PLIC_H3_MIE[32];
+    __IOM uint32_t PLIC_H3_SIE[32];
+    uint32_t RESERVED2[(0x01FFFFC - 0x00023FC) / 4 - 1];
+    __IOM uint32_t PLIC_PER;
+    __IOM uint32_t PLIC_H0_MTH;
+    __IOM uint32_t PLIC_H0_MCLAIM;
+    uint32_t RESERVED3[0xFFC / 4 - 1];
+    __IOM uint32_t PLIC_H0_STH;
+    __IOM uint32_t PLIC_H0_SCLAIM;
+    uint32_t RESERVED4[0xFFC / 4 - 1];
+
+    __IOM uint32_t PLIC_H1_MTH;
+    __IOM uint32_t PLIC_H1_MCLAIM;
+    uint32_t RESERVED5[0xFFC / 4 - 1];
+    __IOM uint32_t PLIC_H1_STH;
+    __IOM uint32_t PLIC_H1_SCLAIM;
+    uint32_t RESERVED6[0xFFC / 4 - 1];
+
+    __IOM uint32_t PLIC_H2_MTH;
+    __IOM uint32_t PLIC_H2_MCLAIM;
+    uint32_t RESERVED7[0xFFC / 4 - 1];
+    __IOM uint32_t PLIC_H2_STH;
+    __IOM uint32_t PLIC_H2_SCLAIM;
+    uint32_t RESERVED8[0xFFC / 4 - 1];
+
+    __IOM uint32_t PLIC_H3_MTH;
+    __IOM uint32_t PLIC_H3_MCLAIM;
+    uint32_t RESERVED9[0xFFC / 4 - 1];
+    __IOM uint32_t PLIC_H3_STH;
+    __IOM uint32_t PLIC_H3_SCLAIM;
+    uint32_t RESERVED10[0xFFC / 4 - 1];
+} PLIC_Type;
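+
+/**
+  \brief   Layout sanity check (illustrative sketch, not part of the original header)
+  \details The RESERVED arrays above only pad the structure so that the named members land on
+           the conventional PLIC offsets (pending bits at 0x1000, hart 0 M-mode threshold and
+           claim at 0x200000/0x200004). A compile-time check such as the one below
+           (C11 and <stddef.h> assumed) can be used to verify the padding arithmetic.
+  \code
+    #include <stddef.h>
+
+    _Static_assert(offsetof(PLIC_Type, PLIC_IP)        == 0x001000U, "PLIC pending offset");
+    _Static_assert(offsetof(PLIC_Type, PLIC_H0_MIE)    == 0x002000U, "PLIC hart0 M-mode enable offset");
+    _Static_assert(offsetof(PLIC_Type, PLIC_H0_MTH)    == 0x200000U, "PLIC hart0 M-mode threshold offset");
+    _Static_assert(offsetof(PLIC_Type, PLIC_H0_MCLAIM) == 0x200004U, "PLIC hart0 M-mode claim offset");
+  \endcode
+ */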
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_PMP Physical Memory Protection (PMP)
+  \brief      Type definitions for the PMP Registers
+  @{
+ */
+
+#define PMP_PMPCFG_R_Pos                       0U                                    /*!< PMP PMPCFG: R Position */
+#define PMP_PMPCFG_R_Msk                       (0x1UL << PMP_PMPCFG_R_Pos)           /*!< PMP PMPCFG: R Mask */
+
+#define PMP_PMPCFG_W_Pos                       1U                                    /*!< PMP PMPCFG: W Position */
+#define PMP_PMPCFG_W_Msk                       (0x1UL << PMP_PMPCFG_W_Pos)           /*!< PMP PMPCFG: W Mask */
+
+#define PMP_PMPCFG_X_Pos                       2U                                    /*!< PMP PMPCFG: X Position */
+#define PMP_PMPCFG_X_Msk                       (0x1UL << PMP_PMPCFG_X_Pos)           /*!< PMP PMPCFG: X Mask */
+
+#define PMP_PMPCFG_A_Pos                       3U                                    /*!< PMP PMPCFG: A Position */
+#define PMP_PMPCFG_A_Msk                       (0x3UL << PMP_PMPCFG_A_Pos)           /*!< PMP PMPCFG: A Mask */
+
+#define PMP_PMPCFG_L_Pos                       7U                                    /*!< PMP PMPCFG: L Position */
+#define PMP_PMPCFG_L_Msk                       (0x1UL << PMP_PMPCFG_L_Pos)           /*!< PMP PMPCFG: L Mask */
+
+typedef enum {
+    REGION_SIZE_4B       = -1,
+    REGION_SIZE_8B       = 0,
+    REGION_SIZE_16B      = 1,
+    REGION_SIZE_32B      = 2,
+    REGION_SIZE_64B      = 3,
+    REGION_SIZE_128B     = 4,
+    REGION_SIZE_256B     = 5,
+    REGION_SIZE_512B     = 6,
+    REGION_SIZE_1KB      = 7,
+    REGION_SIZE_2KB      = 8,
+    REGION_SIZE_4KB      = 9,
+    REGION_SIZE_8KB      = 10,
+    REGION_SIZE_16KB     = 11,
+    REGION_SIZE_32KB     = 12,
+    REGION_SIZE_64KB     = 13,
+    REGION_SIZE_128KB    = 14,
+    REGION_SIZE_256KB    = 15,
+    REGION_SIZE_512KB    = 16,
+    REGION_SIZE_1MB      = 17,
+    REGION_SIZE_2MB      = 18,
+    REGION_SIZE_4MB      = 19,
+    REGION_SIZE_8MB      = 20,
+    REGION_SIZE_16MB     = 21,
+    REGION_SIZE_32MB     = 22,
+    REGION_SIZE_64MB     = 23,
+    REGION_SIZE_128MB    = 24,
+    REGION_SIZE_256MB    = 25,
+    REGION_SIZE_512MB    = 26,
+    REGION_SIZE_1GB      = 27,
+    REGION_SIZE_2GB      = 28,
+    REGION_SIZE_4GB      = 29,
+    REGION_SIZE_8GB      = 30,
+    REGION_SIZE_16GB     = 31
+} region_size_e;
+
+typedef enum {
+    ADDRESS_MATCHING_TOR   = 1,
+    ADDRESS_MATCHING_NAPOT = 3
+} address_matching_e;
+
+typedef struct {
+    uint32_t r: 1;           /* read access enable */
+    uint32_t w: 1;           /* write access enable */
+    uint32_t x: 1;           /* execute access enable */
+    address_matching_e a: 2; /* address matching mode */
+    uint32_t reserved: 2;    /* reserved */
+    uint32_t l: 1;           /* lock enable */
+} mpu_region_attr_t;
+
+/*@} end of group CSI_PMP */
+
+/* CACHE Register Definitions */
+#define CACHE_MHCR_L0BTB_Pos                   12U                                           /*!< CACHE MHCR: L0BTB Position */
+#define CACHE_MHCR_L0BTB_Msk                   (0x1UL << CACHE_MHCR_L0BTB_Pos)               /*!< CACHE MHCR: L0BTB Mask */
+
+#define CACHE_MHCR_WBR_Pos                     8U                                            /*!< CACHE MHCR: WBR Position */
+#define CACHE_MHCR_WBR_Msk                     (0x1UL << CACHE_MHCR_WBR_Pos)                 /*!< CACHE MHCR: WBR Mask */
+
+#define CACHE_MHCR_IBPE_Pos                    7U                                            /*!< CACHE MHCR: IBPE Position */
+#define CACHE_MHCR_IBPE_Msk                    (0x1UL << CACHE_MHCR_IBPE_Pos)                /*!< CACHE MHCR: IBPE Mask */
+
+#define CACHE_MHCR_BTB_Pos                     6U                                            /*!< CACHE MHCR: BTB Position */
+#define CACHE_MHCR_BTB_Msk                     (0x1UL << CACHE_MHCR_BTB_Pos)                 /*!< CACHE MHCR: BTB Mask */
+
+#define CACHE_MHCR_BPE_Pos                     5U                                            /*!< CACHE MHCR: BPE Position */
+#define CACHE_MHCR_BPE_Msk                     (0x1UL << CACHE_MHCR_BPE_Pos)                 /*!< CACHE MHCR: BPE Mask */
+
+#define CACHE_MHCR_RS_Pos                      4U                                            /*!< CACHE MHCR: RS Position */
+#define CACHE_MHCR_RS_Msk                      (0x1UL << CACHE_MHCR_RS_Pos)                  /*!< CACHE MHCR: RS Mask */
+
+#define CACHE_MHCR_WB_Pos                      3U                                            /*!< CACHE MHCR: WB Position */
+#define CACHE_MHCR_WB_Msk                      (0x1UL << CACHE_MHCR_WB_Pos)                  /*!< CACHE MHCR: WB Mask */
+
+#define CACHE_MHCR_WA_Pos                      2U                                            /*!< CACHE MHCR: WA Position */
+#define CACHE_MHCR_WA_Msk                      (0x1UL << CACHE_MHCR_WA_Pos)                  /*!< CACHE MHCR: WA Mask */
+
+#define CACHE_MHCR_DE_Pos                      1U                                            /*!< CACHE MHCR: DE Position */
+#define CACHE_MHCR_DE_Msk                      (0x1UL << CACHE_MHCR_DE_Pos)                  /*!< CACHE MHCR: DE Mask */
+
+#define CACHE_MHCR_IE_Pos                      0U                                            /*!< CACHE MHCR: IE Position */
+#define CACHE_MHCR_IE_Msk                      (0x1UL << CACHE_MHCR_IE_Pos)                  /*!< CACHE MHCR: IE Mask */
+
+#define CACHE_INV_ADDR_Pos                     5U
+#define CACHE_INV_ADDR_Msk                     (0xFFFFFFFFUL << CACHE_INV_ADDR_Pos)
+
+/*@} end of group CSI_CACHE */
+
+// MSTATUS Register
+#define MSTATUS_MPP_MASK (3L << 11)     // mstatus.MPP                      [12:11]
+#define MSTATUS_MPP_M    (3L << 11)     // Machine mode                     11
+#define MSTATUS_MPP_S    (1L << 11)     // Supervisor mode                  01
+#define MSTATUS_MPP_U    (0L << 11)     // User mode                        00
+
+// SSTATUS Register
+#define SSTATUS_SPP_MASK (3L << 8)      // sstatus.SPP                      [9:8]
+#define SSTATUS_SPP_S    (1L << 8)      // Supervisor mode                  01
+#define SSTATUS_SPP_U    (0L << 8)      // User mode                        00
+
+typedef enum {
+    USER_MODE        = 0,
+    SUPERVISOR_MODE  = 1,
+    MACHINE_MODE     = 3,
+} cpu_work_mode_t;
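+
+/**
+  \brief   Privilege mode decode (illustrative sketch, not part of the original header)
+  \details Assuming __get_MSTATUS() is provided by the included csi_rv64_gcc.h (as used by
+           csi_irq_save() below), the previous privilege mode can be decoded from mstatus.MPP
+           with the masks defined above.
+  \code
+    uint32_t mstatus = __get_MSTATUS();
+
+    if ((mstatus & MSTATUS_MPP_MASK) == MSTATUS_MPP_M) {
+        // trap was taken from machine mode
+    } else if ((mstatus & MSTATUS_MPP_MASK) == MSTATUS_MPP_S) {
+        // trap was taken from supervisor mode
+    } else {
+        // trap was taken from user mode
+    }
+  \endcode
+ */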
+
+/**
+  \ingroup  CSI_core_register
+  \defgroup CSI_SysTick     System Tick Timer (CORET)
+  \brief    Type definitions for the System Timer Registers.
+  @{
+ */
+
+/**
+  \brief  The data structure for accessing the system timer.
+ */
+typedef struct {
+    __IOM uint32_t MSIP0;
+    __IOM uint32_t MSIP1;
+    __IOM uint32_t MSIP2;
+    __IOM uint32_t MSIP3;
+    uint32_t RESERVED0[(0x4004000 - 0x400000C) / 4 - 1];
+    __IOM uint32_t MTIMECMPL0;
+    __IOM uint32_t MTIMECMPH0;
+    __IOM uint32_t MTIMECMPL1;
+    __IOM uint32_t MTIMECMPH1;
+    __IOM uint32_t MTIMECMPL2;
+    __IOM uint32_t MTIMECMPH2;
+    __IOM uint32_t MTIMECMPL3;
+    __IOM uint32_t MTIMECMPH3;
+    uint32_t RESERVED1[(0x400C000 - 0x400401C) / 4 - 1];
+    __IOM uint32_t SSIP0;
+    __IOM uint32_t SSIP1;
+    __IOM uint32_t SSIP2;
+    __IOM uint32_t SSIP3;
+    uint32_t RESERVED2[(0x400D000 - 0x400C00C) / 4 - 1];
+    __IOM uint32_t STIMECMPL0;
+    __IOM uint32_t STIMECMPH0;
+    __IOM uint32_t STIMECMPL1;
+    __IOM uint32_t STIMECMPH1;
+    __IOM uint32_t STIMECMPL2;
+    __IOM uint32_t STIMECMPH2;
+    __IOM uint32_t STIMECMPL3;
+    __IOM uint32_t STIMECMPH3;
+} CORET_Type;
+/*@} end of group CSI_SysTick */
+
+
+/**
+  \ingroup    CSI_core_register
+  \defgroup   CSI_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    (((value) << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    (((value) & field ## _Msk) >> field ## _Pos)
+
+/*@} end of group CSI_core_bitfield */
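+
+/**
+  \brief   Bit field macro usage (illustrative sketch, not part of the original header)
+  \details The two helpers above can be combined with any xxx_Pos/xxx_Msk pair in this file,
+           for example the CACHE MHCR fields defined earlier.
+  \code
+    uint32_t mhcr = 0U;
+
+    mhcr |= _VAL2FLD(CACHE_MHCR_IE, 1U);            // set the I-cache enable field
+    mhcr |= _VAL2FLD(CACHE_MHCR_WA, 1U);            // set the write-allocate field
+
+    uint32_t wa = _FLD2VAL(CACHE_MHCR_WA, mhcr);    // read the field back, wa == 1
+  \endcode
+ */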
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core VIC Functions
+  - Core CORET Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+/**
+  \defgroup CSI_Core_FunctionInterface Functions and Instructions Reference
+*/
+
+/* ##########################   VIC functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_VICFunctions VIC Functions
+  \brief    Functions that manage interrupts and exceptions via the VIC.
+  @{
+ */
+
+/* The following MACROS handle generation of the register offset and byte masks */
+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)
+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    5UL)      )
+#define _IP2_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )
+
+/**
+  \brief   Enable External Interrupt
+  \details Enable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_irq(int32_t IRQn)
+{
+}
+
+/**
+  \brief   Disable External Interrupt
+  \details Disable a device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_irq(int32_t IRQn)
+{
+}
+
+/**
+  \brief   Enable External Secure Interrupt
+  \details Enable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_enable_sirq(int32_t IRQn)
+{
+}
+
+/**
+  \brief   Disable External Secure Interrupt
+  \details Disable a secure device-specific interrupt in the VIC interrupt controller.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_disable_sirq(int32_t IRQn)
+{
+}
+
+/**
+  \brief   Check whether an interrupt is enabled
+  \details Read the enable register in the VIC and return the enable bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not enabled.
+  \return             1  Interrupt status is enabled.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_enabled_irq(int32_t IRQn)
+{
+    return 0;
+}
+
+/**
+  \brief   Check whether an interrupt is pending
+  \details Read the pending register in the VIC and return the pending bit for the specified interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \return             0  Interrupt status is not pending.
+  \return             1  Interrupt status is pending.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_pending_irq(int32_t IRQn)
+{
+    return 0;
+}
+
+/**
+  \brief   Set Pending Interrupt
+  \details Set the pending bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_pending_irq(int32_t IRQn)
+{
+}
+
+/**
+  \brief   Clear Pending Interrupt
+  \details Clear the pending bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_pending_irq(int32_t IRQn)
+{
+}
+
+/**
+  \brief   Set Wake up Interrupt
+  \details Set the wake up bit of an external interrupt.
+  \param [in]      IRQn  Interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_set_wakeup_irq(int32_t IRQn)
+{
+}
+
+/**
+  \brief   Clear Wake up Interrupt
+  \details Clear the wake up bit of an external interrupt.
+  \param [in]      IRQn  External interrupt number. Value cannot be negative.
+ */
+__STATIC_INLINE void csi_vic_clear_wakeup_irq(int32_t IRQn)
+{
+}
+
+/**
+  \brief   Set Interrupt Priority
+  \details Set the priority of an interrupt.
+  \note    The priority cannot be set for every core interrupt.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]  priority  Priority to set.
+ */
+__STATIC_INLINE void csi_vic_set_prio(int32_t IRQn, uint32_t priority)
+{
+}
+
+/**
+  \brief   Get Interrupt Priority
+  \details Read the priority of an interrupt.
+           The interrupt number can be positive to specify an external (device specific) interrupt,
+           or negative to specify an internal (core) interrupt.
+  \param [in]   IRQn  Interrupt number.
+  \return             Interrupt Priority.
+                      Value is aligned automatically to the implemented priority bits of the microcontroller.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_prio(int32_t IRQn)
+{
+    return 0;
+}
+
+/**
+  \brief   Set interrupt handler
+  \details Set the interrupt handler for the given interrupt number; the handler address is written into the interrupt vector table.
+  \param [in]      IRQn  Interrupt number.
+  \param [in]   handler  Interrupt handler.
+ */
+__STATIC_INLINE void csi_vic_set_vector(int32_t IRQn, uint64_t handler)
+{
+    if (IRQn >= 0 && IRQn < 1024) {
+        uint64_t *vectors = (uint64_t *)__get_MTVT();
+        vectors[IRQn] = handler;
+    }
+}
+
+/**
+  \brief   Get interrupt handler
+  \details Get the address of interrupt handler function.
+  \param [in]      IRQn  Interrupt number.
+ */
+__STATIC_INLINE uint32_t csi_vic_get_vector(int32_t IRQn)
+{
+    if (IRQn >= 0 && IRQn < 1024) {
+        uint64_t *vectors = (uint64_t *)__get_MTVT();
+        return (uint32_t)vectors[IRQn];
+    }
+
+    return 0;
+}
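+
+/**
+  \brief   Handler registration (illustrative sketch, not part of the original header)
+  \details A typical use of the functions above: write a handler address into the vector table
+           pointed to by MTVT, then unmask the interrupt. EXAMPLE_IRQn and example_irq_handler
+           are hypothetical placeholders for a real device interrupt.
+  \code
+    #define EXAMPLE_IRQn    16      // hypothetical device interrupt number
+
+    void example_irq_handler(void);
+
+    csi_vic_set_vector(EXAMPLE_IRQn, (uint64_t)example_irq_handler);
+    csi_vic_enable_irq(EXAMPLE_IRQn);
+  \endcode
+ */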
+
+/*@} end of CSI_Core_VICFunctions */
+
+/* ##########################   PMP functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_PMPFunctions PMP Functions
+  \brief    Functions that configure the Physical Memory Protection (PMP) unit.
+  @{
+ */
+
+/**
+  \brief  configure memory protected region.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 15).
+  \param [in]  base_addr  base address must be aligned with page size.
+  \param [in]  size       \ref region_size_e. memory protected region size.
+  \param [in]  attr       \ref mpu_region_attr_t. memory protected region attribute.
+  \param [in]  enable     enable or disable memory protected region.
+  */
+__STATIC_INLINE void csi_mpu_config_region(uint32_t idx, uint64_t base_addr, region_size_e size,
+        mpu_region_attr_t attr, uint32_t enable)
+{
+    uint8_t  pmpxcfg = 0;
+    uint64_t addr = 0;
+
+    if (idx > 15) {
+        return;
+    }
+
+    if (!enable) {
+        attr.a = 0;
+    }
+
+    if (attr.a == ADDRESS_MATCHING_TOR) {
+        addr = base_addr >> 2;
+    } else {
+        if (size == REGION_SIZE_4B) {
+            addr = base_addr >> 2;
+            attr.a = 2;
+        } else {
+            addr = ((base_addr >> 2) & (0xFFFFFFFFFFFFFFFFULL - ((1ULL << (size + 1)) - 1))) | ((1ULL << size) - 1);
+        }
+    }
+
+    __set_PMPADDRx(idx, addr);
+
+    pmpxcfg |= (attr.r << PMP_PMPCFG_R_Pos) | (attr.w << PMP_PMPCFG_W_Pos) |
+               (attr.x << PMP_PMPCFG_X_Pos) | (attr.a << PMP_PMPCFG_A_Pos) |
+               (attr.l << PMP_PMPCFG_L_Pos);
+
+    __set_PMPxCFG(idx, pmpxcfg);
+}
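+
+/**
+  \brief   PMP region setup (illustrative sketch, not part of the original header)
+  \details A minimal use of csi_mpu_config_region(): map one 4KB NAPOT region as read-only,
+           non-executable and locked. The base address 0x20000000 is a hypothetical example;
+           it must be aligned to the region size.
+  \code
+    mpu_region_attr_t attr = {
+        .r = 1U,                        // readable
+        .w = 0U,                        // not writable
+        .x = 0U,                        // not executable
+        .a = ADDRESS_MATCHING_NAPOT,    // naturally aligned power-of-two region
+        .l = 1U,                        // locked
+    };
+
+    csi_mpu_config_region(0U, 0x20000000ULL, REGION_SIZE_4KB, attr, 1U);
+  \endcode
+ */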
+
+/**
+  \brief  disable mpu region by idx.
+  \details
+  \param [in]  idx        memory protected region (0, 1, 2, ..., 15).
+  */
+__STATIC_INLINE void csi_mpu_disable_region(uint32_t idx)
+{
+    __set_PMPxCFG(idx, __get_PMPxCFG(idx) & (~PMP_PMPCFG_A_Msk));
+}
+
+/*@} end of CSI_Core_PMPFunctions */
+
+/* ##################################    SysTick function  ############################################ */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_SysTickFunctions SysTick Functions
+  \brief    Functions that configure the System.
+  @{
+ */
+
+
+/**
+  \brief   CORE timer Configuration
+  \details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
+           Counter is in free running mode to generate periodic interrupts.
+  \param [in]  ticks  Number of ticks between two interrupts.
+  \param [in]  IRQn   core timer Interrupt number.
+  \return          0  Function succeeded.
+  \return          1  Function failed.
+  \note    When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
+           function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
+           must contain a vendor-specific implementation of this function.
+ */
+__STATIC_INLINE uint32_t csi_coret_config(uint32_t ticks, int32_t IRQn)
+{
+    return (0UL);
+}
+
+
+
+
+/**
+  \brief   get CORE timer reload value
+  \return          CORE timer reload value.
+ */
+__STATIC_INLINE uint64_t csi_coret_get_load(void)
+{
+    return 0;
+}
+
+/**
+  \brief   get CORE timer reload high value
+  \return          CORE timer reload high value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_loadh(void)
+{
+    return 0;
+}
+
+/**
+  \brief   get CORE timer counter value
+  \return          CORE timer counter value.
+ */
+__STATIC_INLINE uint64_t csi_coret_get_value(void)
+{
+    uint64_t result;
+    __ASM volatile("csrr %0, 0xc01" : "=r"(result));
+    return result;
+}
+
+/**
+  \brief   get CORE timer counter high value
+  \return          CORE timer counter high value.
+ */
+__STATIC_INLINE uint32_t csi_coret_get_valueh(void)
+{
+    uint64_t result;
+    __ASM volatile("csrr %0, time" : "=r"(result));
+    return (result >> 32) & 0xFFFFFFFF;
+}
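+
+/**
+  \brief   Tick based delay (illustrative sketch, not part of the original header)
+  \details csi_coret_get_value() reads the free-running 64-bit time CSR, so a busy-wait delay
+           can be built on top of it. EXAMPLE_TIMER_FREQ_HZ is a hypothetical board-specific
+           counter frequency.
+  \code
+    #define EXAMPLE_TIMER_FREQ_HZ   25000000ULL     // hypothetical counter frequency
+
+    static void example_delay_us(uint64_t us)
+    {
+        uint64_t start = csi_coret_get_value();
+        uint64_t ticks = us * EXAMPLE_TIMER_FREQ_HZ / 1000000ULL;
+
+        while ((csi_coret_get_value() - start) < ticks) {
+            // busy wait
+        }
+    }
+  \endcode
+ */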
+
+/*@} end of CSI_Core_SysTickFunctions */
+
+/* ##########################  Cache functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_CacheFunctions Cache Functions
+  \brief    Functions that configure Instruction and Data cache.
+  @{
+ */
+
+/**
+  \brief   Enable I-Cache
+  \details Turns on I-Cache
+  */
+__STATIC_INLINE void csi_icache_enable(void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    uint32_t cache;
+    __DSB();
+    __ISB();
+    __ICACHE_IALL();
+    cache = __get_MHCR();
+    cache |= CACHE_MHCR_IE_Msk;
+    __set_MHCR(cache);
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Disable I-Cache
+  \details Turns off I-Cache
+  */
+__STATIC_INLINE void csi_icache_disable(void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    uint32_t cache;
+    __DSB();
+    __ISB();
+    cache = __get_MHCR();
+    cache &= ~CACHE_MHCR_IE_Msk;            /* disable icache */
+    __set_MHCR(cache);
+    __ICACHE_IALL();                        /* invalidate all icache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Invalidate I-Cache
+  \details Invalidates I-Cache
+  */
+__STATIC_INLINE void csi_icache_invalid(void)
+{
+#if (__ICACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    __ICACHE_IALL();                        /* invalidate all icache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Enable D-Cache
+  \details Turns on D-Cache
+  \note    I-Cache also turns on.
+  */
+__STATIC_INLINE void csi_dcache_enable(void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    uint32_t cache;
+    __DSB();
+    __ISB();
+    __DCACHE_IALL();                        /* invalidate all dcache */
+    cache = __get_MHCR();
+    cache |= (CACHE_MHCR_DE_Msk | CACHE_MHCR_WB_Msk | CACHE_MHCR_WA_Msk | CACHE_MHCR_RS_Msk | CACHE_MHCR_BPE_Msk | CACHE_MHCR_BTB_Msk | CACHE_MHCR_IBPE_Msk | CACHE_MHCR_WBR_Msk | CACHE_MHCR_L0BTB_Msk);      /* enable all Cache */
+    __set_MHCR(cache);
+
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Disable D-Cache
+  \details Turns off D-Cache
+  \note    I-Cache also turns off.
+  */
+__STATIC_INLINE void csi_dcache_disable(void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    uint32_t cache;
+    __DSB();
+    __ISB();
+    cache = __get_MHCR();
+    cache &= ~(uint32_t)CACHE_MHCR_DE_Msk; /* disable all Cache */
+    __set_MHCR(cache);
+    __DCACHE_IALL();                             /* invalidate all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+/**
+  \brief   Invalidate D-Cache
+  \details Invalidates D-Cache
+  \note    I-Cache is also invalidated.
+  */
+__STATIC_INLINE void csi_dcache_invalid(void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    __DCACHE_IALL();                            /* invalidate all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Clean D-Cache
+  \details Cleans D-Cache
+  \note    I-Cache is also cleaned.
+  */
+__STATIC_INLINE void csi_dcache_clean(void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    __DCACHE_CALL();                                     /* clean all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Clean & Invalidate D-Cache
+  \details Cleans and Invalidates D-Cache
+  \note    I-Cache is also flushed.
+  */
+__STATIC_INLINE void csi_dcache_clean_invalid(void)
+{
+#if (__DCACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    __DCACHE_CIALL();                                   /* clean and inv all Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Invalidate L2-Cache
+  \details Invalidates L2-Cache
+  \note
+  */
+__STATIC_INLINE void csi_l2cache_invalid(void)
+{
+#if (__L2CACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    __L2CACHE_IALL();                            /* invalidate l2 Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Clean L2-Cache
+  \details Cleans L2-Cache
+  \note
+  */
+__STATIC_INLINE void csi_l2cache_clean(void)
+{
+#if (__L2CACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    __L2CACHE_CALL();                                     /* clean l2 Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   Clean & Invalidate L2-Cache
+  \details Cleans and Invalidates L2-Cache
+  \note
+  */
+__STATIC_INLINE void csi_l2cache_clean_invalid(void)
+{
+#if (__L2CACHE_PRESENT == 1U)
+    __DSB();
+    __ISB();
+    __L2CACHE_CIALL();                                   /* clean and inv l2 Cache */
+    __DSB();
+    __ISB();
+#endif
+}
+
+/**
+  \brief   D-Cache Invalidate by address
+  \details Invalidates D-Cache for the given address
+  \param[in]   addr    address (aligned to 64-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_invalid_range(uint64_t *addr, int64_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int64_t op_size = dsize + (uint64_t)addr % 64;
+    uint64_t op_addr = (uint64_t)addr;
+    int64_t linesize = 64;
+    cpu_work_mode_t cpu_work_mode;
+    cpu_work_mode = (cpu_work_mode_t)__get_CPU_WORK_MODE();
+
+    __DSB();
+
+    if (cpu_work_mode == MACHINE_MODE) {
+        while (op_size > 0) {
+            __DCACHE_IPA(op_addr);
+            op_addr += linesize;
+            op_size -= linesize;
+        }
+    } else if (cpu_work_mode == SUPERVISOR_MODE) {
+        while (op_size > 0) {
+            __DCACHE_IVA(op_addr);
+            op_addr += linesize;
+            op_size -= linesize;
+        }
+    }
+
+    __SYNC_IS();
+    __DSB();
+    __ISB();
+#endif
+}
+
+
+/**
+  \brief   D-Cache Clean by address
+  \details Cleans D-Cache for the given address
+  \param[in]   addr    address (aligned to 64-byte boundary)
+  \param[in]   dsize   size of memory block (in number of bytes)
+*/
+__STATIC_INLINE void csi_dcache_clean_range(uint64_t *addr, int64_t dsize)
+{
+
+#if (__DCACHE_PRESENT == 1)
+    int64_t op_size = dsize + (uint64_t)addr % 64;
+    uint64_t op_addr = (uint64_t) addr & CACHE_INV_ADDR_Msk;
+    int64_t linesize = 64;
+    cpu_work_mode_t cpu_work_mode;
+    cpu_work_mode = (cpu_work_mode_t)__get_CPU_WORK_MODE();
+
+    __DSB();
+
+    if (cpu_work_mode == MACHINE_MODE) {
+        while (op_size > 0) {
+            __DCACHE_CPA(op_addr);
+            op_addr += linesize;
+            op_size -= linesize;
+        }
+    } else if (cpu_work_mode == SUPERVISOR_MODE) {
+        while (op_size > 0) {
+            __DCACHE_CVA(op_addr);
+            op_addr += linesize;
+            op_size -= linesize;
+        }
+    }
+
+    __SYNC_IS();
+    __DSB();
+    __ISB();
+#endif
+
+}
+
+
+/**
+  \brief   D-Cache Clean and Invalidate by address
+  \details Cleans and invalidates D_Cache for the given address
+  \param[in]   addr    address (aligned to 64-byte boundary)
+  \param[in]   dsize   size of memory block (aligned to 64-byte boundary)
+*/
+__STATIC_INLINE void csi_dcache_clean_invalid_range(uint64_t *addr, int64_t dsize)
+{
+#if (__DCACHE_PRESENT == 1U)
+    int64_t op_size = dsize + (uint64_t)addr % 64;
+    uint64_t op_addr = (uint64_t) addr;
+    int64_t linesize = 64;
+    cpu_work_mode_t cpu_work_mode;
+    cpu_work_mode = (cpu_work_mode_t)__get_CPU_WORK_MODE();
+
+    __DSB();
+
+    if (cpu_work_mode == MACHINE_MODE) {
+        while (op_size > 0) {
+            __DCACHE_CIPA(op_addr);
+            op_addr += linesize;
+            op_size -= linesize;
+        }
+    } else if (cpu_work_mode == SUPERVISOR_MODE) {
+        while (op_size > 0) {
+            __DCACHE_CIVA(op_addr);
+            op_addr += linesize;
+            op_size -= linesize;
+        }
+    }
+
+    __SYNC_IS();
+    __DSB();
+    __ISB();
+#endif
+}
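+
+/**
+  \brief   DMA buffer maintenance (illustrative sketch, not part of the original header)
+  \details A common pattern for the range functions above: clean the buffer before a device
+           reads it via DMA, and invalidate it before the CPU reads data a device has written.
+           The example_dma_* calls are hypothetical placeholders for a device driver.
+  \code
+    static uint64_t example_buf[512] __attribute__((aligned(64)));
+
+    // CPU -> device: make the CPU's writes visible to the DMA engine
+    csi_dcache_clean_range(example_buf, sizeof(example_buf));
+    example_dma_start_tx(example_buf, sizeof(example_buf));
+
+    // device -> CPU: drop stale cache lines before reading the DMA result
+    example_dma_wait_rx_done();
+    csi_dcache_invalid_range(example_buf, sizeof(example_buf));
+  \endcode
+ */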
+
+/**
+  \brief   Set up cacheable range
+  \details Configure a cacheable address range for the cache.
+  */
+__STATIC_INLINE void csi_cache_set_range(uint32_t index, uint32_t baseAddr, uint32_t size, uint32_t enable)
+{
+    ;
+}
+
+/**
+  \brief   Enable cache profile
+  \details Turns on Cache profile
+  */
+__STATIC_INLINE void csi_cache_enable_profile(void)
+{
+    ;
+}
+
+/**
+  \brief   Disable cache profile
+  \details Turns off Cache profile
+  */
+__STATIC_INLINE void csi_cache_disable_profile(void)
+{
+    ;
+}
+
+/**
+  \brief   Reset cache profile
+  \details Reset Cache profile
+  */
+__STATIC_INLINE void csi_cache_reset_profile(void)
+{
+    ;
+}
+
+/**
+  \brief   Get cache access count
+  \details Returns the cache access counter value.
+  \note    The counter increments once every 256 accesses.
+  \return          cache access count; multiply by 256 for the actual number of accesses
+  */
+__STATIC_INLINE uint32_t csi_cache_get_access_time(void)
+{
+    return 0;
+}
+
+/**
+  \brief   Get cache miss count
+  \details Returns the cache miss counter value.
+  \note    The counter increments once every 256 misses.
+  \return          cache miss count; multiply by 256 for the actual number of misses
+  */
+__STATIC_INLINE uint32_t csi_cache_get_miss_time(void)
+{
+    return 0;
+}
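+
+/**
+  \brief   Profile counter usage (illustrative sketch, not part of the original header)
+  \details Both counters above advance once per 256 events, so the factor cancels out when
+           computing a miss ratio; only the absolute counts need to be multiplied by 256.
+  \code
+    csi_cache_reset_profile();
+    csi_cache_enable_profile();
+
+    // ... run the workload to be measured ...
+
+    csi_cache_disable_profile();
+
+    uint32_t access = csi_cache_get_access_time();   // in units of 256 accesses
+    uint32_t miss   = csi_cache_get_miss_time();     // in units of 256 misses
+    uint32_t miss_permille = access ? (uint32_t)(((uint64_t)miss * 1000U) / access) : 0U;
+  \endcode
+ */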
+
+/*@} end of CSI_Core_CacheFunctions */
+
+/*@} end of CSI_core_DebugFunctions */
+
+/* ##################################    IRQ Functions  ############################################ */
+
+/**
+  \brief   Save the Irq context
+  \details Save the status register value before disabling interrupts.
+ */
+__STATIC_INLINE uint32_t csi_irq_save(void)
+{
+    uint32_t result;
+#ifdef CONFIG_MMU
+    result = __get_SSTATUS();
+    __disable_supervisor_irq();
+#else
+    result = __get_MSTATUS();
+    __disable_irq();
+#endif
+    return (result);
+}
+
+/**
+  \brief   Restore the Irq context
+  \details Restore the previously saved interrupt state.
+  \param [in]      irq_state  psr irq state.
+ */
+__STATIC_INLINE void csi_irq_restore(uint32_t irq_state)
+{
+#ifdef CONFIG_MMU
+    __set_SSTATUS(irq_state);
+#else
+    __set_MSTATUS(irq_state);
+#endif
+}
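+
+/**
+  \brief   Critical section (illustrative sketch, not part of the original header)
+  \details csi_irq_save()/csi_irq_restore() are intended to bracket short critical sections:
+           save the status register, disable interrupts, do the work, then restore the saved
+           state so nesting is preserved.
+  \code
+    uint32_t flags = csi_irq_save();
+
+    // ... short critical section, interrupts disabled ...
+
+    csi_irq_restore(flags);
+  \endcode
+ */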
+
+/*@} end of IRQ Functions */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_RV32_H_DEPENDANT */
+
+#endif /* __CSI_GENERIC */

+ 3279 - 0
lib/sec_library/include/core/csi_gcc.h

@@ -0,0 +1,3279 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+/******************************************************************************
+ * @file     csi_gcc.h
+ * @brief    CSI Header File for GCC.
+ * @version  V1.0
+ * @date     02. June 2020
+ ******************************************************************************/
+
+#ifndef _CSI_GCC_H_
+#define _CSI_GCC_H_
+
+#include <stdlib.h>
+#include <stdint.h>
+
+#ifndef __ASM
+#define __ASM                   __asm    /*!< asm keyword for GNU Compiler */
+#endif
+
+#ifndef __INLINE
+#define __INLINE                inline   /*!< inline keyword for GNU Compiler */
+#endif
+
+#ifndef __ALWAYS_STATIC_INLINE
+#define __ALWAYS_STATIC_INLINE  __attribute__((always_inline)) static inline
+#endif
+
+#ifndef __STATIC_INLINE
+#define __STATIC_INLINE         static inline
+#endif
+
+#ifndef __NO_RETURN
+#define __NO_RETURN             __attribute__((__noreturn__))
+#endif
+
+#ifndef __USED
+#define __USED                  __attribute__((used))
+#endif
+
+#ifndef __WEAK
+#define __WEAK                  __attribute__((weak))
+#endif
+
+#ifndef __PACKED
+#define __PACKED                __attribute__((packed, aligned(1)))
+#endif
+
+#ifndef __PACKED_STRUCT
+#define __PACKED_STRUCT         struct __attribute__((packed, aligned(1)))
+#endif
+
+#ifndef __PACKED_UNION
+#define __PACKED_UNION          union __attribute__((packed, aligned(1)))
+#endif
+
+
+/* ###########################  Core Function Access  ########################### */
+/** \ingroup  CSI_Core_FunctionInterface
+    \defgroup CSI_Core_RegAccFunctions CSI Core Register Access Functions
+  @{
+ */
+/**
+  \brief   Enable IRQ Interrupts
+  \details Enables IRQ interrupts by setting the IE-bit in the PSR.
+           Can only be executed in Privileged modes.
+ */
+__ALWAYS_STATIC_INLINE void __enable_irq(void)
+{
+    __ASM volatile("psrset ie");
+}
+
+
+
+/**
+  \brief   Disable IRQ Interrupts
+  \details Disables IRQ interrupts by clearing the IE-bit in the PSR.
+  Can only be executed in Privileged modes.
+ */
+__ALWAYS_STATIC_INLINE void __disable_irq(void)
+{
+    __ASM volatile("psrclr ie");
+}
+
+/**
+  \brief   Get PSR
+  \details Returns the content of the PSR Register.
+  \return               PSR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_PSR(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, psr" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set PSR
+  \details Writes the given value to the PSR Register.
+  \param [in]    psr  PSR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_PSR(uint32_t psr)
+{
+    __ASM volatile("mtcr %0, psr" : : "r"(psr));
+}
+
+/**
+  \brief   Get SP
+  \details Returns the content of the SP Register.
+  \return               SP Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_SP(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mov %0, sp" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set SP
+  \details Writes the given value to the SP Register.
+  \param [in]    sp  SP Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_SP(uint32_t sp)
+{
+    __ASM volatile("mov sp, %0" : : "r"(sp): "sp");
+}
+
+/**
+  \brief   Get Int SP
+  \details Returns the content of the Int SP Register.
+  \return               Int SP Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_Int_SP(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<15, 1>" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set Int SP
+  \details Writes the given value to the Int SP Register.
+  \param [in]    sp  Int SP Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_Int_SP(uint32_t sp)
+{
+    __ASM volatile("mtcr %0, cr<15, 1>" : : "r"(sp));
+}
+
+/**
+  \brief   Get VBR Register
+  \details Returns the content of the VBR Register.
+  \return               VBR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_VBR(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, vbr" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set VBR
+  \details Writes the given value to the VBR Register.
+  \param [in]    vbr  VBR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_VBR(uint32_t vbr)
+{
+    __ASM volatile("mtcr %0, vbr" : : "r"(vbr));
+}
+
+/**
+  \brief   Get EPC Register
+  \details Returns the content of the EPC Register.
+  \return               EPC Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_EPC(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, epc" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set EPC
+  \details Writes the given value to the EPC Register.
+  \param [in]    epc  EPC Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_EPC(uint32_t epc)
+{
+    __ASM volatile("mtcr %0, epc" : : "r"(epc));
+}
+
+/**
+  \brief   Get EPSR
+  \details Returns the content of the EPSR Register.
+  \return               EPSR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_EPSR(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, epsr" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set EPSR
+  \details Writes the given value to the EPSR Register.
+  \param [in]    epsr  EPSR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_EPSR(uint32_t epsr)
+{
+    __ASM volatile("mtcr %0, epsr" : : "r"(epsr));
+}
+
+/**
+  \brief   Get CPUID Register
+  \details Returns the content of the CPUID Register.
+  \return               CPUID Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_CPUID(void)
+{
+    uint32_t result;
+
+#ifdef __CK610
+    __ASM volatile("mfcr %0, cr13" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<13, 0>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Get CCR
+  \details Returns the current value of the CCR.
+  \return               CCR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_CCR(void)
+{
+    register uint32_t result;
+
+#ifdef __CK610
+    __ASM volatile("mfcr %0, cr18\n"  : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<18, 0>\n"  : "=r"(result));
+#endif
+    return (result);
+}
+
+
+/**
+  \brief   Set CCR
+  \details Assigns the given value to the CCR.
+  \param [in]    ccr  CCR value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_CCR(uint32_t ccr)
+{
+#ifdef __CK610
+    __ASM volatile("mtcr %0, cr18\n" : : "r"(ccr));
+#else
+    __ASM volatile("mtcr %0, cr<18, 0>\n" : : "r"(ccr));
+#endif
+}
+
+/**
+  \brief   Get CCR2
+  \details Returns the current value of the CCR2.
+  \return               CCR2 Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_CCR2(void)
+{
+    register uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<31, 0>\n"  : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set CCR2
+  \details Assigns the given value to the CCR2.
+  \param [in]    ccr2  CCR2 value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_CCR2(uint32_t ccr2)
+{
+    __ASM volatile("mtcr %0, cr<31, 0>\n" : : "r"(ccr2));
+}
+
+/**
+  \brief   Get DCSR
+  \details Returns the content of the DCSR Register.
+  \return               DCSR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_DCSR(void)
+{
+    uint32_t result;
+#ifdef __CK610
+    __ASM volatile("mfcr %0, cr14" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<14, 0>" : "=r"(result));
+#endif
+    return (result);
+}
+
+
+/**
+  \brief   Set DCSR
+  \details Writes the given value to the DCSR Register.
+  \param [in]    dcsr  DCSR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_DCSR(uint32_t dcsr)
+{
+#ifdef __CK610
+    __ASM volatile("mtcr %0, cr14" : : "r"(dcsr));
+#else
+    __ASM volatile("mtcr %0, cr<14, 0>" : : "r"(dcsr));
+#endif
+}
+
+
+/**
+  \brief   Get CFR
+  \details Returns the content of the CFR Register.
+  \return               CFR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_CFR(void)
+{
+    uint32_t result;
+#ifdef __CK610
+    __ASM volatile("mfcr %0, cr17" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<17, 0>" : "=r"(result));
+#endif
+
+    return (result);
+}
+
+
+/**
+  \brief   Set CFR
+  \details Writes the given value to the CFR Register.
+  \param [in]    cfr  CFR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_CFR(uint32_t cfr)
+{
+#ifdef __CK610
+    __ASM volatile("mtcr %0, cr17" : : "r"(cfr));
+#else
+    __ASM volatile("mtcr %0, cr<17, 0>" : : "r"(cfr));
+#endif
+}
+
+
+/**
+  \brief   Get CIR
+  \details Returns the content of the CIR Register.
+  \return               CIR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_CIR(void)
+{
+    uint32_t result;
+#ifdef __CK610
+    __ASM volatile("mfcr %0, cr22" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<22, 0>" : "=r"(result));
+#endif
+    return (result);
+}
+
+
+/**
+  \brief   Set CIR
+  \details Writes the given value to the CIR Register.
+  \param [in]    cir  CIR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_CIR(uint32_t cir)
+{
+#ifdef __CK610
+    __ASM volatile("mtcr %0, cr22" : : "r"(cir));
+#else
+    __ASM volatile("mtcr %0, cr<22, 0>" : : "r"(cir));
+#endif
+}
+
+/**
+  \brief   Get ERRLC
+  \details Returns the current value of the ERRLC.
+  \return               ERRLC Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_ERRLC(void)
+{
+    register uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<6, 1>\n"  : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set ERRLC
+  \details Assigns the given value to the ERRLC.
+  \param [in]    errlc  ERRLC value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_ERRLC(uint32_t errlc)
+{
+    __ASM volatile("mtcr %0, cr<6, 1>\n" : : "r"(errlc));
+}
+
+/**
+  \brief   Get ERRADDR
+  \details Returns the current value of the ERRADDR.
+  \return               ERRADDR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_ERRADDR(void)
+{
+    register uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<7, 1>\n"  : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set ERRADDR
+  \details Assigns the given value to the ERRADDR.
+  \param [in]    erraddr  ERRADDR value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_ERRADDR(uint32_t erraddr)
+{
+    __ASM volatile("mtcr %0, cr<7, 1>\n" : : "r"(erraddr));
+}
+
+/**
+  \brief   Get ERRSTS
+  \details Returns the current value of the ERRSTS.
+  \return               ERRSTS Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_ERRSTS(void)
+{
+    register uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<8, 1>\n"  : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set ERRSTS
+  \details Assigns the given value to the ERRSTS.
+  \param [in]    errsts  ERRSTS value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_ERRSTS(uint32_t errsts)
+{
+    __ASM volatile("mtcr %0, cr<8, 1>\n" : : "r"(errsts));
+}
+
+/**
+  \brief   Get ERRINJCR
+  \details Returns the current value of the ERRINJCR.
+  \return               ERRINJCR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_ERRINJCR(void)
+{
+    register uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<9, 1>\n"  : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set ERRINJCR
+  \details Assigns the given value to the ERRINJCR.
+  \param [in]    errinjcr  ERRINJCR value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_ERRINJCR(uint32_t errinjcr)
+{
+    __ASM volatile("mtcr %0, cr<9, 1>\n" : : "r"(errinjcr));
+}
+
+/**
+  \brief   Get ERRINJCNT
+  \details Returns the current value of the ERRINJCNT.
+  \return               ERRINJCNT Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_ERRINJCNT(void)
+{
+    register uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<10, 1>\n"  : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set ERRINJCNT
+  \details Assigns the given value to the ERRINJCNT.
+  \param [in]    errinjcnt  ERRINJCNT value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_ERRINJCNT(uint32_t errinjcnt)
+{
+    __ASM volatile("mtcr %0, cr<10, 1>\n" : : "r"(errinjcnt));
+}
+
+/**
+  \brief   Get ITCMCR
+  \details Returns the content of the ITCMCR Register.
+  \return               ITCMCR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_ITCMCR(void)
+{
+    uint32_t result;
+    __ASM volatile("mfcr %0, cr<22, 1>" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set ITCMCR
+  \details Writes the given value to the ITCMCR Register.
+  \param [in]    itcmcr  ITCMCR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_ITCMCR(uint32_t itcmcr)
+{
+    __ASM volatile("mtcr %0, cr<22, 1>" : : "r"(itcmcr));
+}
+
+/**
+  \brief   Get DTCMCR
+  \details Returns the content of the DTCMCR Register.
+  \return               DTCMCR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_DTCMCR(void)
+{
+    uint32_t result;
+    __ASM volatile("mfcr %0, cr<23, 1>" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set DTCMCR
+  \details Writes the given value to the DTCMCR Register.
+  \param [in]    dtcmcr  DTCMCR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_DTCMCR(uint32_t dtcmcr)
+{
+    __ASM volatile("mtcr %0, cr<23, 1>" : : "r"(dtcmcr));
+}
+
+/**
+  \brief   Get CINDEX
+  \details Returns the current value of the CINDEX.
+  \return               CINDEX Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_CINDEX(void)
+{
+    register uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<26, 1>\n"  : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set CINDEX
+  \details Assigns the given value to the CINDEX.
+  \param [in]    cindex  CINDEX value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_CINDEX(uint32_t cindex)
+{
+    __ASM volatile("mtcr %0, cr<26, 1>\n" : : "r"(cindex));
+}
+
+/**
+  \brief   Get CDATAx
+  \details Returns the current value of the CDATAx.
+  \return               CDATAx Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_CDATA0(void)
+{
+    register uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<27, 1>\n"  : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_CDATA1(void)
+{
+    register uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<28, 1>\n"  : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_CDATA2(void)
+{
+    register uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<29, 1>\n"  : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set CDATAx
+  \details Assigns the given value to the CDATAx.
+  \param [in]    cdata  CDATAx value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_CDATA0(uint32_t cdata)
+{
+    __ASM volatile("mtcr %0, cr<27, 1>\n" : : "r"(cdata));
+}
+
+__ALWAYS_STATIC_INLINE void __set_CDATA1(uint32_t cdata)
+{
+    __ASM volatile("mtcr %0, cr<28, 1>\n" : : "r"(cdata));
+}
+
+__ALWAYS_STATIC_INLINE void __set_CDATA2(uint32_t cdata)
+{
+    __ASM volatile("mtcr %0, cr<29, 1>\n" : : "r"(cdata));
+}
+
+/**
+  \brief   Get CINS
+  \details Returns the current value of the CINS.
+  \return               CINS Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_CINS(void)
+{
+    register uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<31, 1>\n"  : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set CINS
+  \details Assigns the given value to the CINS.
+  \param [in]    cins  CINS value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_CINS(uint32_t cins)
+{
+    __ASM volatile("mtcr %0, cr<31, 1>\n" : : "r"(cins));
+}
+
+/**
+  \brief   Get CAPR
+  \details Returns the current value of the CAPR.
+  \return               CAPR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_CAPR(void)
+{
+    register uint32_t result;
+
+#ifdef __CK610
+    __ASM volatile("mfcr %0, cr19\n" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<19, 0>\n" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set CAPR
+  \details Assigns the given value to the CAPR.
+  \param [in]    capr  CAPR value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_CAPR(uint32_t capr)
+{
+#ifdef __CK610
+    __ASM volatile("mtcr %0, cr19\n" : : "r"(capr));
+#else
+    __ASM volatile("mtcr %0, cr<19, 0>\n" : : "r"(capr));
+#endif
+}
+
+/**
+  \brief   Get CAPR1
+  \details Returns the current value of the CAPR1.
+  \return               CAPR1 Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_CAPR1(void)
+{
+    register uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<16, 0>\n" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set CAPR1
+  \details Assigns the given value to the CAPR1.
+  \param [in]    capr1  CAPR1 value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_CAPR1(uint32_t capr1)
+{
+    __ASM volatile("mtcr %0, cr<16, 0>\n" : : "r"(capr1));
+}
+
+/**
+  \brief   Set PACR
+  \details Assigns the given value to the PACR.
+
+    \param [in]    pacr  PACR value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_PACR(uint32_t pacr)
+{
+#ifdef __CK610
+    __ASM volatile("mtcr %0, cr20\n" : : "r"(pacr));
+#else
+    __ASM volatile("mtcr %0, cr<20, 0>\n" : : "r"(pacr));
+#endif
+}
+
+
+/**
+  \brief   Get PACR
+  \details Returns the current value of PACR.
+  \return               PACR value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_PACR(void)
+{
+    uint32_t result;
+
+#ifdef __CK610
+    __ASM volatile("mfcr %0, cr20" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<20, 0>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set PRSR
+  \details Assigns the given value to the PRSR.
+
+    \param [in]    prsr  PRSR value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_PRSR(uint32_t prsr)
+{
+#ifdef __CK610
+    __ASM volatile("mtcr %0, cr21\n" : : "r"(prsr));
+#else
+    __ASM volatile("mtcr %0, cr<21, 0>\n" : : "r"(prsr));
+#endif
+}
+
+/**
+  \brief   Get PRSR
+  \details Returns the current value of PRSR.
+  \return               PRSR value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_PRSR(void)
+{
+    uint32_t result;
+
+#ifdef __CK610
+    __ASM volatile("mfcr %0, cr21" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<21, 0>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set ATTR0
+  \details Assigns the given value to the ATTR0.
+
+    \param [in]    attr0  ATTR0 value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_ATTR0(uint32_t attr0)
+{
+    __ASM volatile("mtcr %0, cr<26, 0>\n" : : "r"(attr0));
+}
+
+/**
+  \brief   Get ATTR0
+  \details Returns the current value of ATTR0.
+  \return               ATTR0 value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_ATTR0(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<26, 0>" : "=r"(result));
+
+    return (result);
+}
+
+/**
+  \brief   Set ATTR1
+  \details Assigns the given value to the ATTR1.
+
+    \param [in]    attr1  ATTR1 value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_ATTR1(uint32_t attr1)
+{
+    __ASM volatile("mtcr %0, cr<27, 0>\n" : : "r"(attr1));
+}
+
+/**
+  \brief   Get ATTR1
+  \details Returns the current value of ATTR1.
+  \return               ATTR1 value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_ATTR1(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<27, 0>" : "=r"(result));
+
+    return (result);
+}
+
+/**
+  \brief   Get user sp
+  \details Returns the current value of user r14.
+  \return               UR14 value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_UR14(void)
+{
+    uint32_t result;
+
+#ifdef __CK610
+    __ASM volatile("mov %0, sp" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<14, 1>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set UR14
+  \details Assigns the given value to the UR14.
+  \param [in]    ur14  UR14 value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_UR14(uint32_t ur14)
+{
+#ifdef __CK610
+    __ASM volatile("mov sp, %0" : "=r"(ur14));
+#else
+    __ASM volatile("mtcr %0, cr<14, 1>\n" : : "r"(ur14));
+#endif
+}
+
+/**
+  \brief   Get CHR Register
+  \details Returns the content of the CHR Register.
+  \return               CHR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_CHR(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<31, 0>\n" :"=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set CHR
+  \details Assigns the given value to the CHR.
+  \param [in]    chr  CHR value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_CHR(uint32_t chr)
+{
+    __ASM volatile("mtcr %0, cr<31, 0>\n" : : "r"(chr));
+}
+
+/**
+  \brief   Get HINT
+  \details Returns the content of the HINT Register.
+  \return               HINT Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_HINT(void)
+{
+    uint32_t result;
+#ifdef __CK610
+    __ASM volatile("mfcr %0, cr<30, 0>" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<31, 0>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set HINT
+  \details Writes the given value to the HINT Register.
+  \param [in]    hint  HINT Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_HINT(uint32_t hint)
+{
+#ifdef __CK610
+    __ASM volatile("mtcr %0, cr<30, 0>" : "=r"(hint));
+#else
+    __ASM volatile("mtcr %0, cr<31, 0>" : : "r"(hint));
+#endif
+}
+
+/**
+  \brief   Get MIR
+  \details Returns the content of the MIR Register.
+  \return               MIR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MIR(void)
+{
+    uint32_t result;
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cprcr %0, cpcr0" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<0, 15>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set MIR
+  \details Writes the given value to the MIR Register.
+  \param [in]    mir  MIR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MIR(uint32_t mir)
+{
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cpwcr %0, cpcr0" : : "b"(mir));
+#else
+    __ASM volatile("mtcr %0, cr<0, 15>" : : "r"(mir));
+#endif
+}
+
+
+/**
+  \brief   Get MEL0
+  \details Returns the content of the MEL0 Register.
+  \return               MEL0 Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MEL0(void)
+{
+    uint32_t result;
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cprcr %0, cpcr2" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<2, 15>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set MEL0
+  \details Writes the given value to the MEL0 Register.
+  \param [in]    mel0  MEL0 Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MEL0(uint32_t mel0)
+{
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cpwcr %0, cpcr2" : : "b"(mel0));
+#else
+    __ASM volatile("mtcr %0, cr<2, 15>" : : "r"(mel0));
+#endif
+}
+
+
+/**
+  \brief   Get MEL1
+  \details Returns the content of the MEL1 Register.
+  \return               MEL1 Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MEL1(void)
+{
+    uint32_t result;
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cprcr %0, cpcr3" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<3, 15>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set MEL1
+  \details Writes the given value to the MEL1 Register.
+  \param [in]    mel1  MEL1 Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MEL1(uint32_t mel1)
+{
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cpwcr %0, cpcr3" : : "b"(mel1));
+#else
+    __ASM volatile("mtcr %0, cr<3, 15>" : : "r"(mel1));
+#endif
+}
+
+
+/**
+  \brief   Get MEH
+  \details Returns the content of the MEH Register.
+  \return               MEH Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MEH(void)
+{
+    uint32_t result;
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cprcr %0, cpcr4" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<4, 15>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set MEH
+  \details Writes the given value to the MEH Register.
+  \param [in]    meh  MEH Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MEH(uint32_t meh)
+{
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cpwcr %0, cpcr4" : : "b"(meh));
+#else
+    __ASM volatile("mtcr %0, cr<4, 15>" : : "r"(meh));
+#endif
+}
+
+
+/**
+  \brief   Get MPR
+  \details Returns the content of the MPR Register.
+  \return               MPR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MPR(void)
+{
+    uint32_t result;
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cprcr %0, cpcr6" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<6, 15>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set MPR
+  \details Writes the given value to the MPR Register.
+  \param [in]    mpr  MPR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MPR(uint32_t mpr)
+{
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cpwcr %0, cpcr6" : : "b"(mpr));
+#else
+    __ASM volatile("mtcr %0, cr<6, 15>" : : "r"(mpr));
+#endif
+}
+
+
+/**
+  \brief   Get MCIR
+  \details Returns the content of the MCIR Register.
+  \return               MCIR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MCIR(void)
+{
+    uint32_t result;
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cprcr %0, cpcr8" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<8, 15>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set MCIR
+  \details Writes the given value to the MCIR Register.
+  \param [in]    mcir  MCIR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MCIR(uint32_t mcir)
+{
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cpwcr %0, cpcr8" : : "b"(mcir));
+#else
+    __ASM volatile("mtcr %0, cr<8, 15>" : : "r"(mcir));
+#endif
+}
+
+
+/**
+  \brief   Get MPGD
+  \details Returns the content of the MPGD Register.
+  \return               MPGD Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MPGD(void)
+{
+    uint32_t result;
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cprcr %0, cpcr29" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<29, 15>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set MPGD
+  \details Writes the given value to the MPGD Register.
+  \param [in]    mpgd  MPGD Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MPGD(uint32_t mpgd)
+{
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cpwcr %0, cpcr29" : : "b"(mpgd));
+#else
+    __ASM volatile("mtcr %0, cr<29, 15>" : : "r"(mpgd));
+#endif
+}
+
+
+/**
+  \brief   Get MSA0
+  \details Returns the content of the MSA0 Register.
+  \return               MSA0 Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MSA0(void)
+{
+    uint32_t result;
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cprcr %0, cpcr30" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<30, 15>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set MSA0
+  \details Writes the given value to the MSA0 Register.
+  \param [in]    msa0  MSA0 Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MSA0(uint32_t msa0)
+{
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cpwcr %0, cpcr30" : : "b"(msa0));
+#else
+    __ASM volatile("mtcr %0, cr<30, 15>" : : "r"(msa0));
+#endif
+}
+
+
+/**
+  \brief   Get MSA1
+  \details Returns the content of the MSA1 Register.
+  \return               MSA1 Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MSA1(void)
+{
+    uint32_t result;
+
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cprcr %0, cpcr31" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<31, 15>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set MSA1
+  \details Writes the given value to the MSA1 Register.
+  \param [in]    msa1  MSA1 Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MSA1(uint32_t msa1)
+{
+#ifdef __CK610
+    __ASM volatile("cpseti 15");
+    __ASM volatile("cpwcr %0, cpcr31" : : "b"(msa1));
+#else
+    __ASM volatile("mtcr %0, cr<31, 15>" : : "r"(msa1));
+#endif
+}
+
+
+/**
+  \brief   Enable interrupts and exceptions
+  \details Enables interrupts and exceptions by setting the IE-bit and EE-bit in the PSR.
+           Can only be executed in Privileged modes.
+ */
+__ALWAYS_STATIC_INLINE void __enable_excp_irq(void)
+{
+    __ASM volatile("psrset ee, ie");
+}
+
+
+/**
+  \brief   Disable interrupts and exceptions
+  \details Disables interrupts and exceptions by clearing the IE-bit and EE-bit in the PSR.
+           Can only be executed in Privileged modes.
+ */
+__ALWAYS_STATIC_INLINE void __disable_excp_irq(void)
+{
+    __ASM volatile("psrclr ee, ie");
+}
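A minimal sketch of a critical section built from the two helpers above (illustrative only; shared_counter is a hypothetical variable, and it is assumed interrupts may be re-enabled unconditionally afterwards):

    extern volatile uint32_t shared_counter;   /* hypothetical shared data */

    __disable_excp_irq();     /* mask interrupts and exceptions (privileged mode only) */
    shared_counter++;         /* update shared state without interference from handlers */
    __enable_excp_irq();      /* unmask interrupts and exceptions again */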
+
+/**
+  \brief   Get GSR
+  \details Returns the content of the GSR Register.
+  \return               GSR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_GSR(void)
+{
+    uint32_t result;
+
+#ifdef __CK610
+    __ASM volatile("mfcr %0, cr12" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<12, 0>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Get GCR
+  \details Returns the content of the GCR Register.
+  \return               GCR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_GCR(void)
+{
+    uint32_t result;
+
+#ifdef __CK610
+    __ASM volatile("mfcr %0, cr11" : "=r"(result));
+#else
+    __ASM volatile("mfcr %0, cr<11, 0>" : "=r"(result));
+#endif
+    return (result);
+}
+
+/**
+  \brief   Set GCR
+  \details Writes the given value to the GCR Register.
+  \param [in]    gcr  GCR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_GCR(uint32_t gcr)
+{
+#ifdef __CK610
+    __ASM volatile("mtcr %0, cr11" : : "r"(gcr));
+#else
+    __ASM volatile("mtcr %0, cr<11, 0>" : : "r"(gcr));
+#endif
+}
+
+/**
+  \brief   Get WSSR
+  \details Returns the content of the WSSR Register, must be accessed in TEE
+  \return               WSSR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_WSSR(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<0, 3>" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get WRCR
+  \details Returns the content of the WRCR Register, must be accessed in TEE
+  \return               WRCR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_WRCR(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<1, 3>" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set WRCR
+  \details Writes the given value to the WRCR Register, must be accessed in TEE
+  \param [in]    wrcr  WRCR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_WRCR(uint32_t wrcr)
+{
+    __ASM volatile("mtcr %0, cr<1, 3>" : : "r"(wrcr));
+}
+
+/**
+  \brief   Get DCR
+  \details Returns the content of the DCR Register, must be accessed in TEE
+  \return               DCR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_DCR(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<8, 3>" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set DCR
+  \details Writes the given value to the DCR Register, must be accessed in TEE
+  \param [in]    dcr  DCR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_DCR(uint32_t dcr)
+{
+    __ASM volatile("mtcr %0, cr<8, 3>" : : "r"(dcr));
+}
+
+/**
+  \brief   Get PCR
+  \details Returns the content of the PCR Register, must be accessed in TEE
+  \return               PCR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_PCR(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<9, 3>" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set PCR
+  \details Writes the given value to the PCR Register, must be accessed in TEE
+  \param [in]    pcr  PCR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_PCR(uint32_t pcr)
+{
+    __ASM volatile("mtcr %0, cr<9, 3>" : : "r"(pcr));
+}
+
+/**
+  \brief   Get EBR
+  \details Returns the content of the EBR Register.
+  \return               EBR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_EBR(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mfcr %0, cr<1, 1>" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set EBR
+  \details Writes the given value to the EBR Register.
+  \param [in]    ebr  EBR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_EBR(uint32_t ebr)
+{
+    __ASM volatile("mtcr %0, cr<1, 1>" : : "r"(ebr));
+}
+
+/*@} end of CSI_Core_RegAccFunctions */
+
+/* ##########################  Core Instruction Access  ######################### */
+/** \defgroup CSI_Core_InstructionInterface CSI Core Instruction Interface
+  Access to dedicated instructions
+  @{
+*/
+
+#define __CSI_GCC_OUT_REG(r) "=r" (r)
+#define __CSI_GCC_USE_REG(r) "r" (r)
+
+/**
+  \brief   No Operation
+  \details No Operation does nothing. This instruction can be used for code alignment purposes.
+ */
+__ALWAYS_STATIC_INLINE void __NOP(void)
+{
+    __ASM volatile("nop");
+}
+
+
+/**
+  \brief   Wait For Interrupt
+  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
+ */
+__ALWAYS_STATIC_INLINE void __WFI(void)
+{
+    __ASM volatile("wait");
+}
+
+/**
+  \brief   Wait For Interrupt
+  \details Wait For Interrupt is a hint instruction that suspends execution until an interrupt occurs.
+ */
+__ALWAYS_STATIC_INLINE void __WAIT(void)
+{
+    __ASM volatile("wait");
+}
+
+/**
+  \brief   Doze For Interrupt
+  \details Doze For Interrupt is a hint instruction that suspends execution until an interrupt occurs.
+ */
+__ALWAYS_STATIC_INLINE void __DOZE(void)
+{
+    __ASM volatile("doze");
+}
+
+/**
+  \brief   Stop For Interrupt
+  \details Stop For Interrupt is a hint instruction that suspends execution until an interrupt occurs.
+ */
+__ALWAYS_STATIC_INLINE void __STOP(void)
+{
+    __ASM volatile("stop");
+}
+
+/**
+  \brief   Instruction Synchronization Barrier
+  \details Instruction Synchronization Barrier flushes the pipeline of the processor,
+           so that all instructions following the ISB are fetched from cache or memory
+           only after the barrier itself has completed.
+ */
+__ALWAYS_STATIC_INLINE void __ISB(void)
+{
+    __ASM volatile("sync"::: "memory");
+}
+
+
+/**
+  \brief   Data Synchronization Barrier
+  \details Acts as a special kind of Data Memory Barrier.
+           It completes when all explicit memory accesses before this instruction complete.
+ */
+__ALWAYS_STATIC_INLINE void __DSB(void)
+{
+    __ASM volatile("sync"::: "memory");
+}
+
+
+/**
+  \brief   Data Memory Barrier
+  \details Ensures the apparent order of the explicit memory operations before
+           and after the instruction, without ensuring their completion.
+ */
+__ALWAYS_STATIC_INLINE void __DMB(void)
+{
+    __ASM volatile("sync"::: "memory");
+}
+
+/**
+  \brief   Search, starting from the most-significant bit, for the first bit whose value is 1.
+  \param [in]    value  Value to search.
+  \return               0 if the most-significant bit is 1, 31 if only the least-significant bit is 1, and 32 if no bit is 1.
+ */
+#if !defined(__CK610) || !(__CK80X == 1)
+__ALWAYS_STATIC_INLINE uint32_t __FF0(uint32_t value)
+{
+    uint32_t ret;
+
+    __ASM volatile("ff0 %0, %1" : "=r"(ret) : "r"(value));
+    return ret;
+}
+#endif
+
+/**
+  \brief   Search, starting from the most-significant bit, for the first bit whose value is 0.
+  \param [in]    value  Value to search.
+  \return               0 if the most-significant bit is 0, 31 if only the least-significant bit is 0, and 32 if no bit is 0.
+ */
+#if !(__CK80X == 1)
+__ALWAYS_STATIC_INLINE uint32_t __FF1(uint32_t value)
+{
+    uint32_t ret;
+#if !defined (__CK610)
+    __ASM volatile("ff1 %0, %1" : "=r"(ret) : "r"(value));
+#else
+    ret = value;
+    __ASM volatile("ff1 %0" : "=r"(ret):);
+#endif
+    return ret;
+}
+#endif
+
+/**
+  \brief   Reverse byte order (32 bit)
+  \details Reverses the byte order in an integer value.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __REV(uint32_t value)
+{
+    return __builtin_bswap32(value);
+}
+
+
+/**
+  \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order in two unsigned short values.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __REV16(uint32_t value)
+{
+    uint32_t result;
+#if (__CK80X >= 2)
+    __ASM volatile("revh %0, %1" : __CSI_GCC_OUT_REG(result) : __CSI_GCC_USE_REG(value));
+#else
+    result = ((value & 0xFF000000) >> 8) | ((value & 0x00FF0000) << 8) |
+             ((value & 0x0000FF00) >> 8) | ((value & 0x000000FF) << 8);
+#endif
+    return (result);
+}
+
+
+/**
+  \brief   Reverse byte order in signed short value
+  \details Reverses the byte order in a signed short value with sign extension to integer.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__ALWAYS_STATIC_INLINE int32_t __REVSH(int32_t value)
+{
+    return (short)(((value & 0xFF00) >> 8) | ((value & 0x00FF) << 8));
+}
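Illustrative results for the byte-reversal helpers above (a sketch with arbitrary example values, assuming this header is included):

    uint32_t a = __REV(0x12345678U);    /* a == 0x78563412: whole word reversed          */
    uint32_t b = __REV16(0x12345678U);  /* b == 0x34127856: bytes swapped per halfword   */
    int32_t  c = __REVSH(0x0080);       /* c == -32768: 0x0080 -> 0x8000, sign-extended  */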
+
+
+/**
+  \brief   Rotate Right in unsigned value (32 bit)
+  \details Rotate Right provides the value of the contents of a register rotated by a variable number of bits.
+  \param [in]    op1  Value to rotate
+  \param [in]    op2  Number of Bits to rotate
+  \return               Rotated value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
+{
+    op2 &= 31U;                 /* avoid undefined behaviour when op2 is 0 or a multiple of 32 */
+    return op2 ? ((op1 >> op2) | (op1 << (32U - op2))) : op1;
+}
+
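A quick sketch of the rotate (illustrative value only):

    uint32_t r = __ROR(0x80000001U, 1U);   /* r == 0xC0000000: bit 0 rotates into bit 31 */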
+
+/**
+  \brief   Breakpoint
+  \details Causes the processor to enter Debug state
+           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+ */
+__ALWAYS_STATIC_INLINE void __BKPT(void)
+{
+    __ASM volatile("bkpt");
+}
+
+/**
+  \brief   Reverse bit order of value
+  \details Reverses the bit order of the given value.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __RBIT(uint32_t value)
+{
+    uint32_t result;
+
+#if (__CK80X >= 0x03U)
+    __ASM volatile("brev %0, %1" : "=r"(result) : "r"(value));
+#else
+    int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */
+
+    result = value;                      /* r will be reversed bits of v; first get LSB of v */
+
+    for (value >>= 1U; value; value >>= 1U) {
+        result <<= 1U;
+        result |= value & 1U;
+        s--;
+    }
+
+    result <<= s;                        /* shift when v's highest bits are zero */
+#endif
+    return (result);
+}
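For illustration, a single bit-reversal case (assuming this header is included):

    uint32_t r = __RBIT(0x00000001U);   /* r == 0x80000000: bit 0 becomes bit 31 */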
+
+
+/**
+  \brief   Count leading zeros
+  \details Counts the number of leading zeros of a data value.
+  \param [in]  value  Value to count the leading zeros
+  \return             number of leading zeros in value
+ */
+#define __CLZ             __builtin_clz
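__CLZ maps directly onto the compiler builtin; note that __builtin_clz(0) is undefined, so callers should avoid a zero argument. A small sketch:

    uint32_t n = __CLZ(0x00010000U);   /* n == 15: fifteen leading zero bits */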
+/**
+  \brief   Signed Saturate
+  \details This function saturates a signed value.
+  \param [in]    x   Value to be saturated
+  \param [in]    y   Bit position to saturate to [1..32]
+  \return            Saturated value.
+ */
+__ALWAYS_STATIC_INLINE int32_t __SSAT(int32_t x, uint32_t y)
+{
+    int32_t posMax, negMin;
+    uint32_t i;
+
+    posMax = 1;
+
+    for (i = 0; i < (y - 1); i++) {
+        posMax = posMax * 2;
+    }
+
+    if (x > 0) {
+        posMax = (posMax - 1);
+
+        if (x > posMax) {
+            x = posMax;
+        }
+
+//    x &= (posMax * 2 + 1);
+    } else {
+        negMin = -posMax;
+
+        if (x < negMin) {
+            x = negMin;
+        }
+
+//    x &= (posMax * 2 - 1);
+    }
+
+    return (x);
+}
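A minimal usage sketch of the signed saturation above (illustrative values):

    int32_t a = __SSAT(200, 8U);    /* a == 127: clamped to the signed 8-bit maximum  */
    int32_t b = __SSAT(-300, 8U);   /* b == -128: clamped to the signed 8-bit minimum */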
+
+/**
+  \brief   Unsigned Saturate
+  \details Saturates an unsigned value.
+  \param [in]  value  Value to be saturated
+  \param [in]    sat  Bit position to saturate to (0..31)
+  \return             Saturated value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USAT(uint32_t value, uint32_t sat)
+{
+    uint32_t result;
+
+    if ((((0xFFFFFFFF >> sat) << sat) & value) != 0) {
+        result = 0xFFFFFFFF >> (32 - sat);
+    } else {
+        result = value;
+    }
+
+    return (result);
+}
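Similarly, a sketch of the unsigned saturation (illustrative values):

    uint32_t a = __USAT(300U, 8U);   /* a == 255: clamped to the unsigned 8-bit maximum */
    uint32_t b = __USAT(200U, 8U);   /* b == 200: already in range, returned unchanged  */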
+
+/**
+  \brief   Unsigned Saturate for internal use
+  \details Saturates an unsigned value; it should not be called directly.
+  \param [in]  value  Value to be saturated
+  \param [in]    sat  Bit position to saturate to (0..31)
+  \return             Saturated value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __IUSAT(uint32_t value, uint32_t sat)
+{
+    uint32_t result;
+
+    if (value & 0x80000000) { /* only overflow set bit-31 */
+        result = 0;
+    } else if ((((0xFFFFFFFF >> sat) << sat) & value) != 0) {
+        result = 0xFFFFFFFF >> (32 - sat);
+    } else {
+        result = value;
+    }
+
+    return (result);
+}
+
+/**
+  \brief   Rotate Right with Extend
+  \details This function moves each bit of a bitstring right by one bit.
+           The carry input is shifted in at the left end of the bitstring.
+  \note    The carry input is always 0.
+  \param [in]    op1  Value to rotate
+  \return               Rotated value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __RRX(uint32_t op1)
+{
+#if (__CK80X >= 2)
+    uint32_t res = 0;
+    __ASM volatile("bgeni    t0, 31\n\t"
+                   "lsri     %0, 1\n\t"
+                   "movt     %1, t0\n\t"
+                   "or       %1, %1, %0\n\t"
+               : "=r"(op1), "=r"(res): "0"(op1), "1"(res): "t0");
+    return res;
+#else
+    uint32_t res = 0;
+    __ASM volatile("movi     r7, 0\n\t"
+                   "bseti    r7, 31\n\t"
+                   "lsri     %0, 1\n\t"
+                   "bf       1f\n\t"
+                   "mov     %1, r7\n\t"
+                   "1:\n\t"
+                   "or       %1, %1, %0\n\t"
+               : "=r"(op1), "=r"(res): "0"(op1), "1"(res): "r7");
+    return res;
+#endif
+}
+
+/**
+  \brief   LDRT Unprivileged (8 bit)
+  \details Executes an unprivileged LDRT instruction for an 8-bit value.
+  \param [in]    addr  Pointer to location
+  \return             value of type uint8_t at (*addr)
+ */
+__ALWAYS_STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
+{
+    uint32_t result;
+//#warning "__LDRBT"
+    __ASM volatile("ldb %0, (%1, 0)" : "=r"(result) : "r"(addr));
+    return ((uint8_t) result);    /* Add explicit type cast here */
+}
+
+
+/**
+  \brief   LDRT Unprivileged (16 bit)
+  \details Executes an unprivileged LDRT instruction for 16-bit values.
+  \param [in]    addr  Pointer to location
+  \return        value of type uint16_t at (*addr)
+ */
+__ALWAYS_STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
+{
+    uint32_t result;
+
+//#warning "__LDRHT"
+    __ASM volatile("ldh %0, (%1, 0)" : "=r"(result) : "r"(addr));
+    return ((uint16_t) result);    /* Add explicit type cast here */
+}
+
+
+/**
+  \brief   LDRT Unprivileged (32 bit)
+  \details Executes an unprivileged LDRT instruction for 32-bit values.
+  \param [in]    addr  Pointer to location
+  \return        value of type uint32_t at (*addr)
+ */
+__ALWAYS_STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
+{
+    uint32_t result;
+
+//#warning "__LDRT"
+    __ASM volatile("ldw %0, (%1, 0)" : "=r"(result) : "r"(addr));
+    return (result);
+}
+
+
+/**
+  \brief   STRT Unprivileged (8 bit)
+  \details Executes an unprivileged STRT instruction for 8-bit values.
+  \param [in]  value  Value to store
+  \param [in]    addr  Pointer to location
+ */
+__ALWAYS_STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
+{
+//#warning "__STRBT"
+    __ASM volatile("stb %1, (%0, 0)" :: "r"(addr), "r"((uint32_t)value) : "memory");
+}
+
+
+/**
+  \brief   STRT Unprivileged (16 bit)
+  \details Executes an unprivileged STRT instruction for 16-bit values.
+  \param [in]  value  Value to store
+  \param [in]    addr  Pointer to location
+ */
+__ALWAYS_STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
+{
+//#warning "__STRHT"
+    __ASM volatile("sth %1, (%0, 0)" :: "r"(addr), "r"((uint32_t)value) : "memory");
+}
+
+
+/**
+  \brief   STRT Unprivileged (32 bit)
+  \details Executes an unprivileged STRT instruction for 32-bit values.
+  \param [in]  value  Value to store
+  \param [in]    addr  Pointer to location
+ */
+__ALWAYS_STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
+{
+//#warning "__STRT"
+    __ASM volatile("stw %1, (%0, 0)" :: "r"(addr), "r"(value) : "memory");
+}
+
+/*@}*/ /* end of group CSI_Core_InstructionInterface */
+
+
+/* ##########################  FPU functions  #################################### */
+/**
+  \ingroup  CSI_Core_FunctionInterface
+  \defgroup CSI_Core_FpuFunctions FPU Functions
+  \brief    Function that provides FPU type.
+  @{
+ */
+
+/**
+  \brief   Get FPU type
+  \details Returns the FPU type (this implementation always returns 0).
+  \returns
+   - \b  0: No FPU
+   - \b  1: Single precision FPU
+   - \b  2: Double + Single precision FPU
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_FPUType(void)
+{
+//FIXME:
+    return 0;
+}
+
+/*@} end of CSI_Core_FpuFunctions */
+
+/* ###################  Compiler specific Intrinsics  ########################### */
+/** \defgroup CSI_SIMD_intrinsics CSI SIMD Intrinsics
+  Access to dedicated SIMD instructions \n
+  Single Instruction Multiple Data (SIMD) extensions are provided to simplify development of application software. SIMD extensions increase the processing capability without materially increasing the power consumption. The SIMD extensions are completely transparent to the operating system (OS), allowing existing OS ports to be used.
+
+  @{
+*/
+
+/**
+  \brief   Halfword packing instruction. Combines bits[15:0] of val1 with bits[31:16]
+           of val2 left-shifted by val3.
+  \details Combine a halfword from one register with a halfword from another register.
+           The second argument can be left-shifted before extraction of the halfword.
+  \param [in]    val1   first 16-bit operand
+  \param [in]    val2   second 16-bit operand
+  \param [in]    val3   value for left-shifting val2. Value range [0..31].
+  \return               the combination of halfwords.
+  \remark
+                 res[15:0]  = val1[15:0]              \n
+                 res[31:16] = val2[31:16] << val3
+ */
+__ALWAYS_STATIC_INLINE uint32_t __PKHBT(uint32_t val1, uint32_t val2, uint32_t val3)
+{
+    return ((((int32_t)(val1) << 0) & (int32_t)0x0000FFFF) | (((int32_t)(val2) << val3) & (int32_t)0xFFFF0000));
+}
+
+/**
+  \brief   Halfword packing instruction. Combines bits[31:16] of val1 with bits[15:0]
+           of val2 right-shifted by val3.
+  \details Combine a halfword from one register with a halfword from another register.
+           The second argument can be right-shifted before extraction of the halfword.
+  \param [in]    val1   first 16-bit operand
+  \param [in]    val2   second 16-bit operand
+  \param [in]    val3   value for right-shifting val2. Value range [1..32].
+  \return               the combination of halfwords.
+  \remark
+                 res[15:0]  = val2[15:0] >> val3        \n
+                 res[31:16] = val1[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __PKHTB(uint32_t val1, uint32_t val2, uint32_t val3)
+{
+    return ((((int32_t)(val1) << 0) & (int32_t)0xFFFF0000) | (((int32_t)(val2) >> val3) & (int32_t)0x0000FFFF));
+}
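A short sketch of the two packing helpers above (illustrative values, shift amounts within the documented ranges):

    uint32_t lo_hi = __PKHBT(0x00001234U, 0x00005678U, 16U);  /* lo_hi == 0x56781234 */
    uint32_t hi_lo = __PKHTB(0xABCD0000U, 0x12340000U, 16U);  /* hi_lo == 0xABCD1234 */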
+
+/**
+  \brief   Dual 16-bit signed saturate.
+  \details This function saturates a signed value.
+  \param [in]    x   two signed 16-bit values to be saturated.
+  \param [in]    y   bit position for saturation, an integral constant expression in the range 1 to 16.
+  \return        the saturation of the two signed 16-bit values:\n
+                 the signed saturation of the low halfword in val1, saturated to the bit position specified in
+                 val2 and returned in the low halfword of the return value.\n
+                 the signed saturation of the high halfword in val1, saturated to the bit position specified in
+                 val2 and returned in the high halfword of the return value.
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SSAT16(int32_t x, const uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = __SSAT((((int32_t)x << 16) >> 16), y) & (int32_t)0x0000FFFF;
+    s = __SSAT((((int32_t)x) >> 16), y) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned saturate.
+  \details This function enables you to saturate two signed 16-bit values to a selected unsigned range.
+  \param [in]    x   two signed 16-bit values to be saturated.
+  \param [in]    y   bit position for saturation, an integral constant expression in the range 1 to 16.
+  \return        the saturation of the two signed 16-bit values, as non-negative values:
+                 the saturation of the low halfword in val1, saturated to the bit position specified in
+                 val2 and returned in the low halfword of the return value.\n
+                 the saturation of the high halfword in val1, saturated to the bit position specified in
+                 val2 and returned in the high halfword of the return value.
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USAT16(uint32_t x, const uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = __IUSAT(((x << 16) >> 16), y) & 0x0000FFFF;
+    s = __IUSAT(((x) >> 16), y) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Quad 8-bit saturating addition.
+  \details This function enables you to perform four 8-bit integer additions,
+           saturating the results to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the saturated addition of the first byte of each operand in the first byte of the return value.\n
+                 the saturated addition of the second byte of each operand in the second byte of the return value.\n
+                 the saturated addition of the third byte of each operand in the third byte of the return value.\n
+                 the saturated addition of the fourth byte of each operand in the fourth byte of the return value.\n
+                 The returned results are saturated to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
+  \remark
+                 res[7:0]   = val1[7:0]   + val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  + val2[15:8]       \n
+                 res[23:16] = val1[23:16] + val2[23:16]      \n
+                 res[31:24] = val1[31:24] + val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = __SSAT(((((int32_t)x << 24) >> 24) + (((int32_t)y << 24) >> 24)), 8) & (int32_t)0x000000FF;
+    s = __SSAT(((((int32_t)x << 16) >> 24) + (((int32_t)y << 16) >> 24)), 8) & (int32_t)0x000000FF;
+    t = __SSAT(((((int32_t)x <<  8) >> 24) + (((int32_t)y <<  8) >> 24)), 8) & (int32_t)0x000000FF;
+    u = __SSAT(((((int32_t)x) >> 24) + (((int32_t)y) >> 24)), 8) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
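For illustration, one saturating quad-byte addition (arbitrary example operands):

    uint32_t r = __QADD8(0x7F010203U, 0x01010101U);   /* r == 0x7F020304: the 0x7F + 0x01 lane saturates at +127 */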
+
+/**
+  \brief   Quad 8-bit unsigned saturating addition.
+  \details This function enables you to perform four unsigned 8-bit integer additions,
+           saturating the results to the 8-bit unsigned integer range 0 <= x <= 2^8 - 1.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the saturated addition of the first byte of each operand in the first byte of the return value.\n
+                 the saturated addition of the second byte of each operand in the second byte of the return value.\n
+                 the saturated addition of the third byte of each operand in the third byte of the return value.\n
+                 the saturated addition of the fourth byte of each operand in the fourth byte of the return value.\n
+                 The returned results are saturated to the 8-bit unsigned integer range 0 <= x <= 2^8 - 1.
+  \remark
+                 res[7:0]   = val1[7:0]   + val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  + val2[15:8]       \n
+                 res[23:16] = val1[23:16] + val2[23:16]      \n
+                 res[31:24] = val1[31:24] + val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = __IUSAT((((x << 24) >> 24) + ((y << 24) >> 24)), 8) & 0x000000FF;
+    s = __IUSAT((((x << 16) >> 24) + ((y << 16) >> 24)), 8) & 0x000000FF;
+    t = __IUSAT((((x <<  8) >> 24) + ((y <<  8) >> 24)), 8) & 0x000000FF;
+    u = __IUSAT((((x) >> 24) + ((y) >> 24)), 8) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Quad 8-bit signed addition.
+  \details This function performs four 8-bit signed integer additions.
+  \param [in]    x  first four 8-bit summands.
+  \param [in]    y  second four 8-bit summands.
+  \return        the addition of the first bytes from each operand, in the first byte of the return value.\n
+                 the addition of the second bytes of each operand, in the second byte of the return value.\n
+                 the addition of the third bytes of each operand, in the third byte of the return value.\n
+                 the addition of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = val1[7:0]   + val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  + val2[15:8]       \n
+                 res[23:16] = val1[23:16] + val2[23:16]      \n
+                 res[31:24] = val1[31:24] + val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = ((((int32_t)x << 24) >> 24) + (((int32_t)y << 24) >> 24)) & (int32_t)0x000000FF;
+    s = ((((int32_t)x << 16) >> 24) + (((int32_t)y << 16) >> 24)) & (int32_t)0x000000FF;
+    t = ((((int32_t)x <<  8) >> 24) + (((int32_t)y <<  8) >> 24)) & (int32_t)0x000000FF;
+    u = ((((int32_t)x) >> 24) + (((int32_t)y) >> 24)) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned addition.
+  \details This function performs four unsigned 8-bit integer additions.
+  \param [in]    x  first four 8-bit summands.
+  \param [in]    y  second four 8-bit summands.
+  \return        the addition of the first bytes from each operand, in the first byte of the return value.\n
+                 the addition of the second bytes of each operand, in the second byte of the return value.\n
+                 the addition of the third bytes of each operand, in the third byte of the return value.\n
+                 the addition of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = val1[7:0]   + val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  + val2[15:8]       \n
+                 res[23:16] = val1[23:16] + val2[23:16]      \n
+                 res[31:24] = val1[31:24] + val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((x << 24) >> 24) + ((y << 24) >> 24)) & 0x000000FF;
+    s = (((x << 16) >> 24) + ((y << 16) >> 24)) & 0x000000FF;
+    t = (((x <<  8) >> 24) + ((y <<  8) >> 24)) & 0x000000FF;
+    u = (((x) >> 24) + ((y) >> 24)) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Quad 8-bit saturating subtract.
+  \details This function enables you to perform four 8-bit integer subtractions,
+           saturating the results to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
+  \param [in]    x   first four 8-bit operands of each subtraction.
+  \param [in]    y   second four 8-bit operands of each subtraction.
+  \return        the subtraction of the first byte of each operand in the first byte of the return value.\n
+                 the subtraction of the second byte of each operand in the second byte of the return value.\n
+                 the subtraction of the third byte of each operand in the third byte of the return value.\n
+                 the subtraction of the fourth byte of each operand in the fourth byte of the return value.\n
+                 The returned results are saturated to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
+  \remark
+                 res[7:0]   = val1[7:0]   - val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  - val2[15:8]       \n
+                 res[23:16] = val1[23:16] - val2[23:16]      \n
+                 res[31:24] = val1[31:24] - val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = __SSAT(((((int32_t)x << 24) >> 24) - (((int32_t)y << 24) >> 24)), 8) & (int32_t)0x000000FF;
+    s = __SSAT(((((int32_t)x << 16) >> 24) - (((int32_t)y << 16) >> 24)), 8) & (int32_t)0x000000FF;
+    t = __SSAT(((((int32_t)x <<  8) >> 24) - (((int32_t)y <<  8) >> 24)), 8) & (int32_t)0x000000FF;
+    u = __SSAT(((((int32_t)x) >> 24) - (((int32_t)y) >> 24)), 8) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned saturating subtraction.
+  \details This function enables you to perform four unsigned 8-bit integer subtractions,
+           saturating the results to the 8-bit unsigned integer range 0 <= x <= 2^8 - 1.
+  \param [in]    x   first four 8-bit operands of each subtraction.
+  \param [in]    y   second four 8-bit operands of each subtraction.
+  \return        the subtraction of the first byte of each operand in the first byte of the return value.\n
+                 the subtraction of the second byte of each operand in the second byte of the return value.\n
+                 the subtraction of the third byte of each operand in the third byte of the return value.\n
+                 the subtraction of the fourth byte of each operand in the fourth byte of the return value.\n
+                 The returned results are saturated to the 8-bit unsigned integer range 0 <= x <= 2^8 - 1.
+  \remark
+                 res[7:0]   = val1[7:0]   - val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  - val2[15:8]       \n
+                 res[23:16] = val1[23:16] - val2[23:16]      \n
+                 res[31:24] = val1[31:24] - val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = __IUSAT((((x << 24) >> 24) - ((y << 24) >> 24)), 8) & 0x000000FF;
+    s = __IUSAT((((x << 16) >> 24) - ((y << 16) >> 24)), 8) & 0x000000FF;
+    t = __IUSAT((((x <<  8) >> 24) - ((y <<  8) >> 24)), 8) & 0x000000FF;
+    u = __IUSAT((((x) >> 24) - ((y) >> 24)), 8) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Quad 8-bit signed subtraction.
+  \details This function enables you to perform four 8-bit signed integer subtractions.
+  \param [in]    x  first four 8-bit operands of each subtraction.
+  \param [in]    y  second four 8-bit operands of each subtraction.
+  \return        the subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the subtraction of the second bytes of each operand, in the second byte of the return value.\n
+                 the subtraction of the third bytes of each operand, in the third byte of the return value.\n
+                 the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = val1[7:0]   - val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  - val2[15:8]       \n
+                 res[23:16] = val1[23:16] - val2[23:16]      \n
+                 res[31:24] = val1[31:24] - val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = ((((int32_t)x << 24) >> 24) - (((int32_t)y << 24) >> 24)) & (int32_t)0x000000FF;
+    s = ((((int32_t)x << 16) >> 24) - (((int32_t)y << 16) >> 24)) & (int32_t)0x000000FF;
+    t = ((((int32_t)x <<  8) >> 24) - (((int32_t)y <<  8) >> 24)) & (int32_t)0x000000FF;
+    u = ((((int32_t)x) >> 24) - (((int32_t)y) >> 24)) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned subtract.
+  \details This function enables you to perform four 8-bit unsigned integer subtractions.
+  \param [in]    x  first four 8-bit operands of each subtraction.
+  \param [in]    y  second four 8-bit operands of each subtraction.
+  \return        the subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the subtraction of the second bytes of each operand, in the second byte of the return value.\n
+                 the subtraction of the third bytes of each operand, in the third byte of the return value.\n
+                 the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = val1[7:0]   - val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  - val2[15:8]       \n
+                 res[23:16] = val1[23:16] - val2[23:16]      \n
+                 res[31:24] = val1[31:24] - val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((x << 24) >> 24) - ((y << 24) >> 24)) & 0x000000FF;
+    s = (((x << 16) >> 24) - ((y << 16) >> 24)) & 0x000000FF;
+    t = (((x <<  8) >> 24) - ((y <<  8) >> 24)) & 0x000000FF;
+    u = (((x) >> 24) - ((y) >> 24)) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Unsigned sum of quad 8-bit unsigned absolute difference.
+  \details This function enables you to perform four unsigned 8-bit subtractions, and add the absolute values
+           of the differences together, returning the result as a single unsigned integer.
+  \param [in]    x  first four 8-bit operands of each subtraction.
+  \param [in]    y  second four 8-bit operands of each subtraction.
+  \return        the sum of the absolute differences of the four byte pairs,
+                 returned as a single unsigned integer.
+  \remark
+                 absdiff1   = val1[7:0]   - val2[7:0]        \n
+                 absdiff2   = val1[15:8]  - val2[15:8]       \n
+                 absdiff3   = val1[23:16] - val2[23:16]      \n
+                 absdiff4   = val1[31:24] - val2[31:24]      \n
+                 res[31:0]  = absdiff1 + absdiff2 + absdiff3 + absdiff4
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USAD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (abs(((x << 24) >> 24) - ((y << 24) >> 24))) & 0x000000FF;
+    s = (abs(((x << 16) >> 24) - ((y << 16) >> 24))) & 0x000000FF;
+    t = (abs(((x <<  8) >> 24) - ((y <<  8) >> 24))) & 0x000000FF;
+    u = (abs(((x) >> 24) - ((y) >> 24))) & 0x000000FF;
+
+    return (u + t + s + r);
+}
+
+/**
+  \brief   Unsigned sum of quad 8-bit unsigned absolute difference with 32-bit accumulate.
+  \details This function enables you to perform four unsigned 8-bit subtractions, and add the absolute values
+           of the differences to a 32-bit accumulate operand.
+  \param [in]    x  first four 8-bit operands of each subtraction.
+  \param [in]    y  second four 8-bit operands of each subtraction.
+  \param [in]  sum  accumulation value.
+  \return        the sum of the absolute differences of the four byte pairs,
+                 added to the accumulation value and returned as a single unsigned integer.
+  \remark
+                 absdiff1 = val1[7:0]   - val2[7:0]        \n
+                 absdiff2 = val1[15:8]  - val2[15:8]       \n
+                 absdiff3 = val1[23:16] - val2[23:16]      \n
+                 absdiff4 = val1[31:24] - val2[31:24]      \n
+                 sum = absdiff1 + absdiff2 + absdiff3 + absdiff4 \n
+                 res[31:0] = sum[31:0] + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USADA8(uint32_t x, uint32_t y, uint32_t sum)
+{
+    int32_t r, s, t, u;
+
+#ifdef __cplusplus
+    r = (abs((long long)((x << 24) >> 24) - ((y << 24) >> 24))) & 0x000000FF;
+    s = (abs((long long)((x << 16) >> 24) - ((y << 16) >> 24))) & 0x000000FF;
+    t = (abs((long long)((x <<  8) >> 24) - ((y <<  8) >> 24))) & 0x000000FF;
+    u = (abs((long long)((x) >> 24) - ((y) >> 24))) & 0x000000FF;
+#else
+    r = (abs(((x << 24) >> 24) - ((y << 24) >> 24))) & 0x000000FF;
+    s = (abs(((x << 16) >> 24) - ((y << 16) >> 24))) & 0x000000FF;
+    t = (abs(((x <<  8) >> 24) - ((y <<  8) >> 24))) & 0x000000FF;
+    u = (abs(((x) >> 24) - ((y) >> 24))) & 0x000000FF;
+#endif
+    return (u + t + s + r + sum);
+}
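A small sketch of the accumulating sum-of-absolute-differences (illustrative operands):

    uint32_t acc = __USADA8(0x01020304U, 0x04030201U, 100U);  /* acc == 108: |1-4|+|2-3|+|3-2|+|4-1| = 8, plus 100 */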
+
+/**
+  \brief   Dual 16-bit saturating addition.
+  \details This function enables you to perform two 16-bit integer arithmetic additions in parallel,
+           saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the saturated addition of the low halfwords, in the low halfword of the return value.\n
+                 the saturated addition of the high halfwords, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[15:0]        \n
+                 res[31:16] = val1[31:16] + val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QADD16(uint32_t x, uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = __SSAT(((((int32_t)x << 16) >> 16) + (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
+    s = __SSAT(((((int32_t)x) >> 16) + (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
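For illustration, one dual-halfword saturating addition (arbitrary example operands):

    uint32_t r = __QADD16(0x7FFF0001U, 0x00010002U);   /* r == 0x7FFF0003: the high lane saturates at +32767 */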
+
+/**
+  \brief   Dual 16-bit unsigned saturating addition.
+  \details This function enables you to perform two unsigned 16-bit integer additions, saturating
+           the results to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the saturated addition of the low halfwords, in the low halfword of the return value.\n
+                 the saturated addition of the high halfwords, in the high halfword of the return value.\n
+                 The results are saturated to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[15:0]        \n
+                 res[31:16] = val1[31:16] + val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQADD16(uint32_t x, uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = __IUSAT((((x << 16) >> 16) + ((y << 16) >> 16)), 16) & 0x0000FFFF;
+    s = __IUSAT((((x) >> 16) + ((y) >> 16)), 16) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed addition.
+  \details This function enables you to perform two 16-bit signed integer additions.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the addition of the low halfwords in the low halfword of the return value.\n
+                 the addition of the high halfwords in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[15:0]        \n
+                 res[31:16] = val1[31:16] + val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SADD16(uint32_t x, uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = ((((int32_t)x << 16) >> 16) + (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
+    s = ((((int32_t)x) >> 16) + (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned addition
+  \details This function enables you to perform two 16-bit unsigned integer additions.
+  \param [in]    x   first two 16-bit summands for each addition.
+  \param [in]    y   second two 16-bit summands for each addition.
+  \return        the addition of the low halfwords in the low halfword of the return value.\n
+                 the addition of the high halfwords in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[15:0]        \n
+                 res[31:16] = val1[31:16] + val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UADD16(uint32_t x, uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = (((x << 16) >> 16) + ((y << 16) >> 16)) & 0x0000FFFF;
+    s = (((x) >> 16) + ((y) >> 16)) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+
+/**
+  \brief   Dual 16-bit signed addition with halved results.
+  \details This function enables you to perform two signed 16-bit integer additions, halving the results.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the halved addition of the low halfwords, in the low halfword of the return value.\n
+                 the halved addition of the high halfwords, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  + val2[15:0]) >> 1        \n
+                 res[31:16] = (val1[31:16] + val2[31:16]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHADD16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((((int32_t)x << 16) >> 16) + (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+    s = (((((int32_t)x) >> 16) + (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
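A quick sketch of the halving addition (illustrative values):

    uint32_t r = __SHADD16(0x00020004U, 0x00060008U);   /* r == 0x00040006: each halfword sum is halved */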
+
+/**
+  \brief   Dual 16-bit unsigned addition with halved results.
+  \details This function enables you to perform two unsigned 16-bit integer additions, halving the results.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the halved addition of the low halfwords, in the low halfword of the return value.\n
+                 the halved addition of the high halfwords, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  + val2[15:0]) >> 1        \n
+                 res[31:16] = (val1[31:16] + val2[31:16]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHADD16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((x << 16) >> 16) + ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
+    s = ((((x) >> 16) + ((y) >> 16)) >> 1) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Quad 8-bit signed addition with halved results.
+  \details This function enables you to perform four signed 8-bit integer additions, halving the results.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the halved addition of the first bytes from each operand, in the first byte of the return value.\n
+                 the halved addition of the second bytes from each operand, in the second byte of the return value.\n
+                 the halved addition of the third bytes from each operand, in the third byte of the return value.\n
+                 the halved addition of the fourth bytes from each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = (val1[7:0]   + val2[7:0]  ) >> 1    \n
+                 res[15:8]  = (val1[15:8]  + val2[15:8] ) >> 1    \n
+                 res[23:16] = (val1[23:16] + val2[23:16]) >> 1    \n
+                 res[31:24] = (val1[31:24] + val2[31:24]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((((int32_t)x << 24) >> 24) + (((int32_t)y << 24) >> 24)) >> 1) & (int32_t)0x000000FF;
+    s = (((((int32_t)x << 16) >> 24) + (((int32_t)y << 16) >> 24)) >> 1) & (int32_t)0x000000FF;
+    t = (((((int32_t)x <<  8) >> 24) + (((int32_t)y <<  8) >> 24)) >> 1) & (int32_t)0x000000FF;
+    u = (((((int32_t)x) >> 24) + (((int32_t)y) >> 24)) >> 1) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned addition with halved results.
+  \details This function enables you to perform four unsigned 8-bit integer additions, halving the results.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the halved addition of the first bytes from each operand, in the first byte of the return value.\n
+                 the halved addition of the second bytes from each operand, in the second byte of the return value.\n
+                 the halved addition of the third bytes from each operand, in the third byte of the return value.\n
+                 the halved addition of the fourth bytes from each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = (val1[7:0]   + val2[7:0]  ) >> 1    \n
+                 res[15:8]  = (val1[15:8]  + val2[15:8] ) >> 1    \n
+                 res[23:16] = (val1[23:16] + val2[23:16]) >> 1    \n
+                 res[31:24] = (val1[31:24] + val2[31:24]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = ((((x << 24) >> 24) + ((y << 24) >> 24)) >> 1) & 0x000000FF;
+    s = ((((x << 16) >> 24) + ((y << 16) >> 24)) >> 1) & 0x000000FF;
+    t = ((((x <<  8) >> 24) + ((y <<  8) >> 24)) >> 1) & 0x000000FF;
+    u = ((((x) >> 24) + ((y) >> 24)) >> 1) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Dual 16-bit saturating subtract.
+  \details This function enables you to perform two 16-bit integer subtractions in parallel,
+           saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \param [in]    x   first two 16-bit operands of each subtraction.
+  \param [in]    y   second two 16-bit operands of each subtraction.
+  \return        the saturated subtraction of the low halfwords, in the low halfword of the return value.\n
+                 the saturated subtraction of the high halfwords, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[15:0]        \n
+                 res[31:16] = val1[31:16] - val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __SSAT(((((int32_t)x << 16) >> 16) - (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
+    s = __SSAT(((((int32_t)x) >> 16) - (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned saturating subtraction.
+  \details This function enables you to perform two unsigned 16-bit integer subtractions,
+           saturating the results to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \param [in]    x   first two 16-bit operands for each subtraction.
+  \param [in]    y   second two 16-bit operands for each subtraction.
+  \return        the saturated subtraction of the low halfwords, in the low halfword of the return value.\n
+                 the saturated subtraction of the high halfwords, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[15:0]        \n
+                 res[31:16] = val1[31:16] - val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __IUSAT((((x << 16) >> 16) - ((y << 16) >> 16)), 16) & 0x0000FFFF;
+    s = __IUSAT((((x) >> 16) - ((y) >> 16)), 16) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed subtraction.
+  \details This function enables you to perform two 16-bit signed integer subtractions.
+  \param [in]    x   first two 16-bit operands of each subtraction.
+  \param [in]    y   second two 16-bit operands of each subtraction.
+  \return        the subtraction of the low halfword in the second operand from the low
+                 halfword in the first operand, in the low halfword of the return value. \n
+                 the subtraction of the high halfword in the second operand from the high
+                 halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[15:0]        \n
+                 res[31:16] = val1[31:16] - val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((int32_t)x << 16) >> 16) - (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
+    s = ((((int32_t)x) >> 16) - (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned subtract.
+  \details This function enables you to perform two 16-bit unsigned integer subtractions.
+  \param [in]    x   first two 16-bit operands of each subtraction.
+  \param [in]    y   second two 16-bit operands of each subtraction.
+  \return        the subtraction of the low halfword in the second operand from the low
+                 halfword in the first operand, in the low halfword of the return value. \n
+                 the subtraction of the high halfword in the second operand from the high
+                 halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[15:0]        \n
+                 res[31:16] = val1[31:16] - val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((x << 16) >> 16) - ((y << 16) >> 16)) & 0x0000FFFF;
+    s = (((x) >> 16) - ((y) >> 16)) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed subtraction with halved results.
+  \details This function enables you to perform two signed 16-bit integer subtractions, halving the results.
+  \param [in]    x   first two 16-bit operands of each subtraction.
+  \param [in]    y   second two 16-bit operands of each subtraction.
+  \return        the halved subtraction of the low halfwords, in the low halfword of the return value.\n
+                 the halved subtraction of the high halfwords, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  - val2[15:0]) >> 1        \n
+                 res[31:16] = (val1[31:16] - val2[31:16]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((((int32_t)x << 16) >> 16) - (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+    s = (((((int32_t)x) >> 16) - (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned subtraction with halved results.
+  \details This function enables you to perform two unsigned 16-bit integer subtractions, halving the results.
+  \param [in]    x   first two 16-bit operands of each subtraction.
+  \param [in]    y   second two 16-bit operands of each subtraction.
+  \return        the halved subtraction of the low halfwords, in the low halfword of the return value.\n
+                 the halved subtraction of the high halfwords, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  - val2[15:0]) >> 1        \n
+                 res[31:16] = (val1[31:16] - val2[31:16]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((x << 16) >> 16) - ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
+    s = ((((x) >> 16) - ((y) >> 16)) >> 1) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Quad 8-bit signed subtraction with halved results.
+  \details This function enables you to perform four signed 8-bit integer subtractions, halving the results.
+  \param [in]    x   first four 8-bit operands of each subtraction.
+  \param [in]    y   second four 8-bit operands of each subtraction.
+  \return        the halved subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the halved subtraction of the second bytes from each operand, in the second byte of the return value.\n
+                 the halved subtraction of the third bytes from each operand, in the third byte of the return value.\n
+                 the halved subtraction of the fourth bytes from each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = (val1[7:0]   - val2[7:0]  ) >> 1    \n
+                 res[15:8]  = (val1[15:8]  - val2[15:8] ) >> 1    \n
+                 res[23:16] = (val1[23:16] - val2[23:16]) >> 1    \n
+                 res[31:24] = (val1[31:24] - val2[31:24]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((((int32_t)x << 24) >> 24) - (((int32_t)y << 24) >> 24)) >> 1) & (int32_t)0x000000FF;
+    s = (((((int32_t)x << 16) >> 24) - (((int32_t)y << 16) >> 24)) >> 1) & (int32_t)0x000000FF;
+    t = (((((int32_t)x <<  8) >> 24) - (((int32_t)y <<  8) >> 24)) >> 1) & (int32_t)0x000000FF;
+    u = (((((int32_t)x) >> 24) - (((int32_t)y) >> 24)) >> 1) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned subtraction with halved results.
+  \details This function enables you to perform four unsigned 8-bit integer subtractions, halving the results.
+  \param [in]    x   first four 8-bit operands of each subtraction.
+  \param [in]    y   second four 8-bit operands of each subtraction.
+  \return        the halved subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the halved subtraction of the second bytes from each operand, in the second byte of the return value.\n
+                 the halved subtraction of the third bytes from each operand, in the third byte of the return value.\n
+                 the halved subtraction of the fourth bytes from each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = (val1[7:0]   - val2[7:0]  ) >> 1    \n
+                 res[15:8]  = (val1[15:8]  - val2[15:8] ) >> 1    \n
+                 res[23:16] = (val1[23:16] - val2[23:16]) >> 1    \n
+                 res[31:24] = (val1[31:24] - val2[31:24]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = ((((x << 24) >> 24) - ((y << 24) >> 24)) >> 1) & 0x000000FF;
+    s = ((((x << 16) >> 24) - ((y << 16) >> 24)) >> 1) & 0x000000FF;
+    t = ((((x <<  8) >> 24) - ((y <<  8) >> 24)) >> 1) & 0x000000FF;
+    u = ((((x) >> 24) - ((y) >> 24)) >> 1) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Dual 16-bit add and subtract with exchange.
+  \details This function enables you to exchange the halfwords of the second operand,
+           then add the high halfwords and subtract the low halfwords,
+           saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the saturated subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the saturated addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[31:16]        \n
+                 res[31:16] = val1[31:16] + val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __SSAT(((((int32_t)x << 16) >> 16) - (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
+    s = __SSAT(((((int32_t)x) >> 16) + (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
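+
+/*
+ * Illustrative usage (example added for clarity, not part of the original header):
+ * __QASX saturates each 16-bit lane independently. With x = 0x00018000
+ * (high halfword 1, low halfword -32768) and y = 0x00010000 (high 1, low 0):
+ *
+ *   uint32_t r = __QASX(0x00018000U, 0x00010000U);
+ *   // low  halfword: -32768 - 1 saturates to -32768 (0x8000)
+ *   // high halfword:  1 + 0 = 1
+ *   // r == 0x00018000
+ */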
+
+/**
+  \brief   Dual 16-bit unsigned saturating addition and subtraction with exchange.
+  \details This function enables you to exchange the halfwords of the second operand and
+           perform one unsigned 16-bit integer addition and one unsigned 16-bit subtraction,
+           saturating the results to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the saturated subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the saturated addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[31:16]        \n
+                 res[31:16] = val1[31:16] + val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __IUSAT((((x << 16) >> 16) - ((y) >> 16)), 16) & 0x0000FFFF;
+    s = __IUSAT((((x) >> 16) + ((y << 16) >> 16)), 16) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit addition and subtraction with exchange.
+  \details This function enables you to exchange the halfwords of the second operand, add the high halfwords
+           and subtract the low halfwords.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[31:16]        \n
+                 res[31:16] = val1[31:16] + val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((int32_t)x << 16) >> 16) - (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
+    s = ((((int32_t)x) >> 16) + (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned addition and subtraction with exchange.
+  \details This function enables you to exchange the two halfwords of the second operand,
+           add the high halfwords and subtract the low halfwords.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[31:16]        \n
+                 res[31:16] = val1[31:16] + val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((x << 16) >> 16) - ((y) >> 16)) & 0x0000FFFF;
+    s = (((x) >> 16) + ((y << 16) >> 16)) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed addition and subtraction with halved results.
+  \details This function enables you to exchange the two halfwords of the second operand, perform one
+           signed 16-bit integer addition and one signed 16-bit subtraction, and halve the results.
+  \param [in]    x   first 16-bit operands.
+  \param [in]    y   second 16-bit operands.
+  \return        the halved subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the halved addition of the high halfword in the first operand and the low
+                 halfword in the second operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  - val2[31:16]) >> 1        \n
+                 res[31:16] = (val1[31:16] + val2[15:0]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((((int32_t)x << 16) >> 16) - (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+    s = (((((int32_t)x) >> 16) + (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned addition and subtraction with halved results and exchange.
+  \details This function enables you to exchange the halfwords of the second operand,
+           add the high halfwords and subtract the low halfwords, halving the results.
+  \param [in]    x   first operand for the subtraction in the low halfword, and
+                     the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword, and
+                     the second operand for the addition in the low halfword.
+  \return        the halved subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the halved addition of the high halfword in the first operand and the low
+                 halfword in the second operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  - val2[31:16]) >> 1        \n
+                 res[31:16] = (val1[31:16] + val2[15:0]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((x << 16) >> 16) - ((y) >> 16)) >> 1) & 0x0000FFFF;
+    s = ((((x) >> 16) + ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit subtract and add with exchange.
+  \details This function enables you to exchange the halfwords of the second operand,
+           then subtract the high halfwords and add the low halfwords,
+           saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \param [in]    x   first operand for the addition in the low halfword,
+                     and the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword,
+                     and the second operand for the subtraction in the low halfword.
+  \return        the saturated addition of the low halfword of the first operand and the high
+                 halfword of the second operand, in the low halfword of the return value.\n
+                 the saturated subtraction of the low halfword of the second operand from the
+                 high halfword of the first operand, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[31:16]        \n
+                 res[31:16] = val1[31:16] - val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __SSAT(((((int32_t)x << 16) >> 16) + (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
+    s = __SSAT(((((int32_t)x) >> 16) - (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned saturating subtraction and addition with exchange.
+  \details This function enables you to exchange the halfwords of the second operand and perform
+           one unsigned 16-bit integer subtraction and one unsigned 16-bit addition, saturating
+           the results to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \param [in]    x   first operand for the addition in the low halfword,
+                     and the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword,
+                     and the second operand for the subtraction in the low halfword.
+  \return        the saturated addition of the low halfword of the first operand and the high
+                 halfword of the second operand, in the low halfword of the return value.\n
+                 the saturated subtraction of the low halfword of the second operand from the
+                 high halfword of the first operand, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[31:16]        \n
+                 res[31:16] = val1[31:16] - val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __IUSAT((((x << 16) >> 16) + ((y) >> 16)), 16) & 0x0000FFFF;
+    s = __IUSAT((((x) >> 16) - ((y << 16) >> 16)), 16) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit unsigned subtract and add with exchange.
+  \details This function enables you to exchange the halfwords of the second operand,
+           subtract the high halfwords and add the low halfwords.
+  \param [in]    x   first operand for the addition in the low halfword,
+                     and the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword,
+                     and the second operand for the subtraction in the low halfword.
+  \return        the addition of the low halfword of the first operand and the high
+                 halfword of the second operand, in the low halfword of the return value.\n
+                 the subtraction of the low halfword of the second operand from the
+                 high halfword of the first operand, in the high halfword of the return value.\n
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[31:16]        \n
+                 res[31:16] = val1[31:16] - val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((x << 16) >> 16) + ((y) >> 16)) & 0x0000FFFF;
+    s = (((x) >> 16) - ((y << 16) >> 16)) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed subtraction and addition with exchange.
+  \details This function enables you to exchange the two halfwords of the second operand and perform one
+           16-bit integer subtraction and one 16-bit addition.
+  \param [in]    x   first operand for the addition in the low halfword, and the first operand
+                     for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword, and the second
+                     operand for the subtraction in the low halfword.
+  \return        the addition of the low halfword of the first operand and the high
+                 halfword of the second operand, in the low halfword of the return value.\n
+                 the subtraction of the low halfword of the second operand from the
+                 high halfword of the first operand, in the high halfword of the return value.\n
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[31:16]        \n
+                 res[31:16] = val1[31:16] - val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((int32_t)x << 16) >> 16) + (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
+    s = ((((int32_t)x) >> 16) - (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+
+/**
+  \brief   Dual 16-bit signed subtraction and addition with halved results.
+  \details This function enables you to exchange the two halfwords of the second operand, perform one signed
+           16-bit integer subtraction and one signed 16-bit addition, and halve the results.
+  \param [in]    x   first 16-bit operands.
+  \param [in]    y   second 16-bit operands.
+  \return        the halved addition of the low halfword in the first operand and the
+                 high halfword in the second operand, in the low halfword of the return value.\n
+                 the halved subtraction of the low halfword in the second operand from the
+                 high halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  + val2[31:16]) >> 1        \n
+                 res[31:16] = (val1[31:16] - val2[15:0]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((((int32_t)x << 16) >> 16) + (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+    s = (((((int32_t)x) >> 16) - (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned subtraction and addition with halved results and exchange.
+  \details This function enables you to exchange the halfwords of the second operand,
+           subtract the high halfwords and add the low halfwords, halving the results.
+  \param [in]    x   first operand for the addition in the low halfword, and
+                     the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword, and
+                     the second operand for the subtraction in the low halfword.
+  \return        the halved addition of the low halfword in the first operand and the
+                 high halfword in the second operand, in the low halfword of the return value.\n
+                 the halved subtraction of the low halfword in the second operand from the
+                 high halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  + val2[31:16]) >> 1        \n
+                 res[31:16] = (val1[31:16] - val2[15:0]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((x << 16) >> 16) + ((y) >> 16)) >> 1) & 0x0000FFFF;
+    s = ((((x) >> 16) - ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with exchange returning difference.
+  \details This function enables you to perform two 16-bit signed multiplications, subtracting
+           one of the products from the other. The halfwords of the second operand are exchanged
+           before performing the arithmetic. This produces top * bottom and bottom * top multiplication.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \return        the difference of the products of the two 16-bit signed multiplications.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]       \n
+                 p2 = val1[31:16] * val2[15:0]        \n
+                 res[31:0] = p1 - p2
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMUSDX(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16))));
+}
+
+/**
+  \brief   Sum of dual 16-bit signed multiply with exchange.
+  \details This function enables you to perform two 16-bit signed multiplications with exchanged
+           halfwords of the second operand, adding the products together.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \return        the sum of the products of the two 16-bit signed multiplications with exchanged halfwords of the second operand.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]       \n
+                 p2 = val1[31:16] * val2[15:0]        \n
+                 res[31:0] = p1 + p2
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMUADX(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16))));
+}
+
+
+/**
+  \brief   Saturating add.
+  \details This function enables you to obtain the saturating add of two integers.
+  \param [in]    x   first summand of the saturating add operation.
+  \param [in]    y   second summand of the saturating add operation.
+  \return        the saturating addition of val1 and val2.
+  \remark
+                 res[31:0] = SAT(val1 + SAT(val2))
+ */
+__ALWAYS_STATIC_INLINE int32_t __QADD(int32_t x, int32_t y)
+{
+    int32_t result;
+
+    if (y >= 0) {
+        if (x + y >= x) {
+            result = x + y;
+        } else {
+            result = 0x7FFFFFFF;
+        }
+    } else {
+        if (x + y < x) {
+            result = x + y;
+        } else {
+            result = 0x80000000;
+        }
+    }
+
+    return result;
+}
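+
+/*
+ * Illustrative usage (example added for clarity, not part of the original header):
+ *
+ *   int32_t r = __QADD(0x7FFFFFF0, 0x100);
+ *   // plain addition would wrap; __QADD clamps to INT32_MAX
+ *   // r == 0x7FFFFFFF
+ */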
+
+/**
+  \brief   Saturating subtract.
+  \details This function enables you to obtain the saturating subtraction of two integers.
+  \param [in]    x   minuend of the saturating subtraction.
+  \param [in]    y   subtrahend of the saturating subtraction.
+  \return        the saturating subtraction of val2 from val1.
+  \remark
+                 res[31:0] = SAT(val1 - SAT(val2))
+ */
+__ALWAYS_STATIC_INLINE int32_t __QSUB(int32_t x, int32_t y)
+{
+    int64_t tmp;
+    int32_t result;
+
+    tmp = (int64_t)x - (int64_t)y;
+
+    if (tmp > 0x7fffffff) {
+        tmp = 0x7fffffff;
+    } else if (tmp < (-2147483647 - 1)) {
+        tmp = -2147483647 - 1;
+    }
+
+    result = tmp;
+    return result;
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with single 32-bit accumulator.
+  \details This function enables you to perform two signed 16-bit multiplications,
+           adding both results to a 32-bit accumulate operand.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the product of each multiplication added to the accumulate value, as a 32-bit integer.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 res[31:0] = p1 + p2 + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMLAD(uint32_t x, uint32_t y, uint32_t sum)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
+                       (((int32_t)sum))));
+}
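+
+/*
+ * Illustrative usage (example added for clarity, not part of the original header):
+ * __SMLAD is a packed dot-product step, e.g. for fixed-point FIR filters.
+ * With x = 0x00030002 (halfwords 3, 2) and y = 0x00050004 (halfwords 5, 4):
+ *
+ *   uint32_t acc = __SMLAD(0x00030002U, 0x00050004U, 10U);
+ *   // acc == 2*4 + 3*5 + 10 == 33
+ */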
+
+/**
+  \brief   Pre-exchanged dual 16-bit signed multiply with single 32-bit accumulator.
+  \details This function enables you to perform two signed 16-bit multiplications with exchanged
+           halfwords of the second operand, adding both results to a 32-bit accumulate operand.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the product of each multiplication with exchanged halfwords of the second
+                 operand added to the accumulate value, as a 32-bit integer.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]     \n
+                 p2 = val1[31:16] * val2[15:0]      \n
+                 res[31:0] = p1 + p2 + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMLADX(uint32_t x, uint32_t y, uint32_t sum)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       (((int32_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply subtract with 32-bit accumulate.
+  \details This function enables you to perform two 16-bit signed multiplications, take the
+           difference of the products, subtracting the high halfword product from the low
+           halfword product, and add the difference to a 32-bit accumulate operand.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the difference of the product of each multiplication, added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]       \n
+                 p2 = val1[31:16] * val2[31:16]      \n
+                 res[31:0] = p1 - p2 + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMLSD(uint32_t x, uint32_t y, uint32_t sum)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
+                       (((int32_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with exchange subtract with 32-bit accumulate.
+  \details This function enables you to exchange the halfwords in the second operand, then perform two 16-bit
+           signed multiplications. The difference of the products is added to a 32-bit accumulate operand.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the difference of the product of each multiplication, added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]     \n
+                 p2 = val1[31:16] * val2[15:0]      \n
+                 res[31:0] = p1 - p2 + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMLSDX(uint32_t x, uint32_t y, uint32_t sum)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       (((int32_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with single 64-bit accumulator.
+  \details This function enables you to perform two signed 16-bit multiplications, adding both results
+           to a 64-bit accumulate operand. Overflow is only possible as a result of the 64-bit addition.
+           This overflow is not detected if it occurs. Instead, the result wraps around modulo 2^64.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the product of each multiplication added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 sum = p1 + p2 + val3[63:32][31:0]  \n
+                 res[63:32] = sum[63:32]            \n
+                 res[31:0]  = sum[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint64_t __SMLALD(uint32_t x, uint32_t y, uint64_t sum)
+{
+    return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
+                       (((uint64_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with exchange with single 64-bit accumulator.
+  \details This function enables you to exchange the halfwords of the second operand, and perform two
+           signed 16-bit multiplications, adding both results to a 64-bit accumulate operand. Overflow
+           is only possible as a result of the 64-bit addition. This overflow is not detected if it occurs.
+           Instead, the result wraps around modulo 2^64.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the product of each multiplication added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]     \n
+                 p2 = val1[31:16] * val2[15:0]      \n
+                 sum = p1 + p2 + val3[63:32][31:0]  \n
+                 res[63:32] = sum[63:32]            \n
+                 res[31:0]  = sum[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint64_t __SMLALDX(uint32_t x, uint32_t y, uint64_t sum)
+{
+    return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       (((uint64_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply subtract with 64-bit accumulate.
+  \details This function enables you to perform two 16-bit signed multiplications, take the difference
+           of the products, subtracting the high halfword product from the low halfword product, and add the
+           difference to a 64-bit accumulate operand. Overflow cannot occur during the multiplications or the
+           subtraction. Overflow can occur as a result of the 64-bit addition, and this overflow is not
+           detected. Instead, the result wraps around modulo 2^64.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the difference of the product of each multiplication, added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 res[63:32][31:0] = p1 - p2 + val3[63:32][31:0]
+ */
+__ALWAYS_STATIC_INLINE uint64_t __SMLSLD(uint32_t x, uint32_t y, uint64_t sum)
+{
+    return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
+                       (((uint64_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with exchange subtract with 64-bit accumulate.
+  \details This function enables you to exchange the halfwords of the second operand, perform two 16-bit multiplications,
+           adding the difference of the products to a 64-bit accumulate operand. Overflow cannot occur during the
+           multiplications or the subtraction. Overflow can occur as a result of the 64-bit addition, and this overflow
+           is not detected. Instead, the result wraps around modulo 2^64.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the difference of the product of each multiplication, added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]      \n
+                 p2 = val1[31:16] * val2[15:0]       \n
+                 res[63:32][31:0] = p1 - p2 + val3[63:32][31:0]
+ */
+__ALWAYS_STATIC_INLINE uint64_t __SMLSLDX(uint32_t x, uint32_t y, uint64_t sum)
+{
+    return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       (((uint64_t)sum))));
+}
+
+/**
+  \brief   32-bit signed multiply with 32-bit truncated accumulator.
+  \details This function enables you to perform a signed 32-bit multiplication, adding the most
+           significant 32 bits of the 64-bit result to a 32-bit accumulate operand.
+  \param [in]    x   first operand for multiplication.
+  \param [in]    y   second operand for multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the most significant 32 bits of the product added to the accumulate value, as a 32-bit integer.
+  \remark
+                 p = val1 * val2      \n
+                 res[31:0] = p[63:32] + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMMLA(int32_t x, int32_t y, int32_t sum)
+{
+    return (uint32_t)((int32_t)((int64_t)((int64_t)x * (int64_t)y) >> 32) + sum);
+}
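+
+/*
+ * Illustrative usage (example added for clarity, not part of the original header):
+ *
+ *   uint32_t r = __SMMLA(0x40000000, 0x40000000, 1);
+ *   // 2^30 * 2^30 = 2^60; its upper 32 bits are 0x10000000, plus 1
+ *   // r == 0x10000001
+ */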
+
+/**
+  \brief   Sum of dual 16-bit signed multiply.
+  \details This function enables you to perform two 16-bit signed multiplications, adding the products together.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \return        the sum of the products of the two 16-bit signed multiplications.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 res[31:0] = p1 + p2
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMUAD(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply returning difference.
+  \details This function enables you to perform two 16-bit signed multiplications, taking the difference
+           of the products by subtracting the high halfword product from the low halfword product.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \return        the difference of the products of the two 16-bit signed multiplications.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 res[31:0] = p1 - p2
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMUSD(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16))));
+}
+
+/**
+  \brief   Dual extracted 8-bit to 16-bit signed addition.
+  \details This function enables you to extract two 8-bit values from the second operand (at bit positions
+           [7:0] and [23:16]), sign-extend them to 16-bits each, and add the results to the first operand.
+  \param [in]    x   values to which the sign-extended 16-bit values are added.
+  \param [in]    y   two 8-bit values to be extracted and sign-extended.
+  \return        the addition of val1 and val2, where the 8-bit values in val2[7:0] and
+                 val2[23:16] have been extracted and sign-extended prior to the addition.
+  \remark
+                 res[15:0]  = val1[15:0] + SignExtended(val2[7:0])      \n
+                 res[31:16] = val1[31:16] + SignExtended(val2[23:16])
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SXTAB16(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)((((((int32_t)y << 24) >> 24) + (((int32_t)x << 16) >> 16)) & (int32_t)0x0000FFFF) |
+                       (((((int32_t)y <<  8) >>  8)  + (((int32_t)x >> 16) << 16)) & (int32_t)0xFFFF0000)));
+}
+
+/**
+  \brief   Dual extracted 8-bit to 16-bit unsigned addition.
+  \details This function enables you to extract two 8-bit values from one operand, zero-extend
+           them to 16 bits each, and add the results to two 16-bit values from another operand.
+  \param [in]    x   values to which the zero-extended 16-bit values are added.
+  \param [in]    y   two 8-bit values to be extracted and zero-extended.
+  \return        the addition of val1 and val2, where the 8-bit values in val2[7:0] and
+                 val2[23:16] have been extracted and zero-extended prior to the addition.
+  \remark
+                 res[15:0]  = ZeroExt(val2[7:0]   to 16 bits) + val1[15:0]      \n
+                 res[31:16] = ZeroExt(val2[23:16] to 16 bits) + val1[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UXTAB16(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((y << 24) >> 24) + ((x << 16) >> 16)) & 0x0000FFFF) |
+                       ((((y <<  8) >>  8) + ((x >> 16) << 16)) & 0xFFFF0000)));
+}
+
+/**
+  \brief   Dual extract 8-bits and sign extend each to 16-bits.
+  \details This function enables you to extract two 8-bit values from an operand and sign-extend them to 16 bits each.
+  \param [in]    x   two 8-bit values in val[7:0] and val[23:16] to be sign-extended.
+  \return        the 8-bit values sign-extended to 16-bit values.\n
+                 sign-extended value of val[7:0] in the low halfword of the return value.\n
+                 sign-extended value of val[23:16] in the high halfword of the return value.
+  \remark
+                 res[15:0]  = SignExtended(val[7:0])       \n
+                 res[31:16] = SignExtended(val[23:16])
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SXTB16(uint32_t x)
+{
+    return ((uint32_t)(((((int32_t)x << 24) >> 24) & (int32_t)0x0000FFFF) |
+                       ((((int32_t)x <<  8) >>  8) & (int32_t)0xFFFF0000)));
+}
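+
+/*
+ * Illustrative usage (example added for clarity, not part of the original header):
+ *
+ *   uint32_t r = __SXTB16(0x00FF0080U);
+ *   // val[7:0]   = 0x80 -> sign-extends to 0xFF80 in the low halfword
+ *   // val[23:16] = 0xFF -> sign-extends to 0xFFFF in the high halfword
+ *   // r == 0xFFFFFF80
+ */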
+
+/**
+  \brief   Dual extract 8-bits and zero-extend to 16-bits.
+  \details This function enables you to extract two 8-bit values from an operand and zero-extend them to 16 bits each.
+  \param [in]    x   two 8-bit values in val[7:0] and val[23:16] to be zero-extended.
+  \return        the 8-bit values zero-extended to 16-bit values.\n
+                 zero-extended value of val[7:0] in the low halfword of the return value.\n
+                 zero-extended value of val[23:16] in the high halfword of the return value.
+  \remark
+                 res[15:0]  = ZeroExtended(val[7:0])       \n
+                 res[31:16] = ZeroExtended(val[23:16])
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UXTB16(uint32_t x)
+{
+    return ((uint32_t)((((x << 24) >> 24) & 0x0000FFFF) |
+                       (((x <<  8) >>  8) & 0xFFFF0000)));
+}
+
+#endif /* _CSI_GCC_H_ */

+ 2830 - 0
lib/sec_library/include/core/csi_rv32_gcc.h

@@ -0,0 +1,2830 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+/******************************************************************************
+ * @file     csi_rv32_gcc.h
+ * @brief    CSI Header File for GCC.
+ * @version  V1.0
+ * @date     01. Sep 2020
+ ******************************************************************************/
+
+#ifndef _CSI_RV32_GCC_H_
+#define _CSI_RV32_GCC_H_
+
+#include <stdlib.h>
+
+#ifndef __ASM
+#define __ASM                   __asm     /*!< asm keyword for GNU Compiler */
+#endif
+
+#ifndef __INLINE
+#define __INLINE                inline    /*!< inline keyword for GNU Compiler */
+#endif
+
+#ifndef __ALWAYS_STATIC_INLINE
+#define __ALWAYS_STATIC_INLINE  __attribute__((always_inline)) static inline
+#endif
+
+#ifndef __STATIC_INLINE
+#define __STATIC_INLINE         static inline
+#endif
+
+#ifndef __NO_RETURN
+#define __NO_RETURN             __attribute__((__noreturn__))
+#endif
+
+#ifndef __USED
+#define __USED                  __attribute__((used))
+#endif
+
+#ifndef __WEAK
+#define __WEAK                  __attribute__((weak))
+#endif
+
+#ifndef __PACKED
+#define __PACKED                __attribute__((packed, aligned(1)))
+#endif
+
+#ifndef __PACKED_STRUCT
+#define __PACKED_STRUCT         struct __attribute__((packed, aligned(1)))
+#endif
+
+#ifndef __PACKED_UNION
+#define __PACKED_UNION          union __attribute__((packed, aligned(1)))
+#endif
+
+
+/* ###########################  Core Function Access  ########################### */
+/** \ingroup  CSI_Core_FunctionInterface
+    \defgroup CSI_Core_RegAccFunctions CSI Core Register Access Functions
+  @{
+ */
+/**
+  \brief   Enable IRQ Interrupts
+  \details Enables IRQ interrupts by setting the MIE bit in the MSTATUS register.
+           Can only be executed in Privileged modes.
+ */
+__ALWAYS_STATIC_INLINE void __enable_irq(void)
+{
+    __ASM volatile("csrs mstatus, 8");
+}
+
+/**
+  \brief   Disable IRQ Interrupts
+  \details Disables IRQ interrupts by clearing the MIE bit in the MSTATUS register.
+           Can only be executed in Privileged modes.
+ */
+__ALWAYS_STATIC_INLINE void __disable_irq(void)
+{
+    __ASM volatile("csrc mstatus, 8");
+}
+
+/**
+  \brief   Get MXSTATUS
+  \details Returns the content of the MXSTATUS Register.
+  \return               MXSTATUS Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MXSTATUS(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mxstatus" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MXSTATUS
+  \details Writes the given value to the MXSTATUS Register.
+  \param [in]    mxstatus  MXSTATUS Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MXSTATUS(uint32_t mxstatus)
+{
+    __ASM volatile("csrw mxstatus, %0" : : "r"(mxstatus));
+}
+
+/**
+  \brief   Get MSTATUS
+  \details Returns the content of the MSTATUS Register.
+  \return               MSTATUS Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MSTATUS(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mstatus" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MSTATUS
+  \details Writes the given value to the MSTATUS Register.
+  \param [in]    mstatus  MSTATUS Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MSTATUS(uint32_t mstatus)
+{
+    __ASM volatile("csrw mstatus, %0" : : "r"(mstatus));
+}
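+
+/*
+ * Illustrative usage (sketch added for clarity, not part of the original header):
+ * save and restore the global interrupt state around a short critical section.
+ *
+ *   uint32_t mstatus = __get_MSTATUS();   // remember the current MIE state
+ *   __disable_irq();                      // clear MIE
+ *   ...critical section code...
+ *   __set_MSTATUS(mstatus);               // restore the previous state
+ */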
+
+/**
+  \brief   Get MHCR
+  \details Returns the content of the MHCR Register.
+  \return               MHCR Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MHCR(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mhcr" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MHCR
+  \details Writes the given value to the MHCR Register.
+  \param [in]    mhcr  MHCR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MHCR(uint32_t mhcr)
+{
+    __ASM volatile("csrw mhcr, %0" : : "r"(mhcr));
+}
+
+/**
+  \brief   Get MISA Register
+  \details Returns the content of the MISA Register.
+  \return               MISA Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MISA(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, misa" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MISA
+  \details Writes the given value to the MISA Register.
+  \param [in]    misa  MISA Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MISA(uint32_t misa)
+{
+    __ASM volatile("csrw misa, %0" : : "r"(misa));
+}
+
+/**
+  \brief   Get MIE Register
+  \details Returns the content of the MIE Register.
+  \return               MIE Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MIE(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mie" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MIE
+  \details Writes the given value to the MIE Register.
+  \param [in]    mie  MIE Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MIE(uint32_t mie)
+{
+    __ASM volatile("csrw mie, %0" : : "r"(mie));
+}
+
+/**
+  \brief   Get MTVEC Register
+  \details Returns the content of the MTVEC Register.
+  \return               MTVEC Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MTVEC(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mtvec" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MTVEC
+  \details Writes the given value to the MTVEC Register.
+  \param [in]    mtvec  MTVEC Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MTVEC(uint32_t mtvec)
+{
+    __ASM volatile("csrw mtvec, %0" : : "r"(mtvec));
+}
+
+/**
+  \brief   Set MTVT
+  \details Writes the given value to the MTVT Register.
+  \param [in]    mtvt  MTVT Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MTVT(uint32_t mtvt)
+{
+    __ASM volatile("csrw mtvt, %0" : : "r"(mtvt));
+}
+
+/**
+  \brief   Get MTVT Register
+  \details Returns the content of the MTVT Register.
+  \return               MTVT Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MTVT(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mtvt" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get SP
+  \details Returns the content of the SP Register.
+  \return               SP Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_SP(void)
+{
+    uint32_t result;
+
+    __ASM volatile("mv %0, sp" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set SP
+  \details Writes the given value to the SP Register.
+  \param [in]    sp  SP Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_SP(uint32_t sp)
+{
+    __ASM volatile("mv sp, %0" : : "r"(sp): "sp");
+}
+
+/**
+  \brief   Get MSCRATCH Register
+  \details Returns the content of the MSCRATCH Register.
+  \return               MSCRATCH Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MSCRATCH(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mscratch" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MSCRATCH
+  \details Writes the given value to the MSCRATCH Register.
+  \param [in]    mscratch  MSCRATCH Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MSCRATCH(uint32_t mscratch)
+{
+    __ASM volatile("csrw mscratch, %0" : : "r"(mscratch));
+}
+
+/**
+  \brief   Get MEPC Register
+  \details Returns the content of the MEPC Register.
+  \return               MEPC Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MEPC(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mepc" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MEPC
+  \details Writes the given value to the MEPC Register.
+  \param [in]    mepc  MEPC Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MEPC(uint32_t mepc)
+{
+    __ASM volatile("csrw mepc, %0" : : "r"(mepc));
+}
+
+/**
+  \brief   Get MCAUSE Register
+  \details Returns the content of the MCAUSE Register.
+  \return               MCAUSE Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MCAUSE(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mcause" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MNXTI Register
+  \details Returns the content of the MNXTI Register.
+  \return               MNXTI Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MNXTI(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mnxti" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MNXTI
+  \details Writes the given value to the MNXTI Register.
+  \param [in]    mnxti  MNXTI Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MNXTI(uint32_t mnxti)
+{
+    __ASM volatile("csrw mnxti, %0" : : "r"(mnxti));
+}
+
+/**
+  \brief   Get MINTSTATUS Register
+  \details Returns the content of the MINTSTATUS Register.
+  \return               MINTSTATUS Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MINTSTATUS(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mintstatus" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MTVAL Register
+  \details Returns the content of the MTVAL Register.
+  \return               MTVAL Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MTVAL(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mtval" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MIP Register
+  \details Returns the content of the MIP Register.
+  \return               MIP Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MIP(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mip" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MIP
+  \details Writes the given value to the MIP Register.
+  \param [in]    mip  MIP Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MIP(uint32_t mip)
+{
+    __ASM volatile("csrw mip, %0" : : "r"(mip));
+}
+
+/**
+  \brief   Get MCYCLE Register
+  \details Returns the content of the MCYCLE Register.
+  \return               MCYCLE Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MCYCLE(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mcycle" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MCYCLEH Register
+  \details Returns the content of the MCYCLEH Register.
+  \return               MCYCLEH Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MCYCLEH(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mcycleh" : "=r"(result));
+    return (result);
+}
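+
+/*
+ * Illustrative usage (sketch added for clarity, not part of the original header):
+ * on RV32 the 64-bit cycle counter must be read in two halves; re-read the high
+ * word to guard against a carry between the two reads.
+ *
+ *   uint32_t hi, lo;
+ *   do {
+ *       hi = __get_MCYCLEH();
+ *       lo = __get_MCYCLE();
+ *   } while (hi != __get_MCYCLEH());
+ *   uint64_t cycles = ((uint64_t)hi << 32) | lo;
+ */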
+
+/**
+  \brief   Get MINSTRET Register
+  \details Returns the content of the MINSTRET Register.
+  \return               MINSTRET Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MINSTRET(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, minstret" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MINSTRETH Register
+  \details Returns the content of the MINSTRETH Register.
+  \return               MINSTRETH Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MINSTRETH(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, minstreth" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MVENDORID Register
+  \details Returns the content of the MVENDORID Register.
+  \return               MVENDORID Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MVENDORID(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mvendorid" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MARCHID Register
+  \details Returns the content of the MARCHID Register.
+  \return               MARCHID Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MARCHID(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, marchid" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MIMPID Register
+  \details Returns the content of the MIMPID Register.
+  \return               MIMPID Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MIMPID(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mimpid" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MHARTID Register
+  \details Returns the content of the MHARTID Register.
+  \return               MHARTID Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_MHARTID(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, mhartid" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get PMPCFGx Register
+  \details Returns the content of the PMPCFGx Register.
+  \return               PMPCFGx Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPCFG0(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpcfg0" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPCFG1(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpcfg1" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPCFG2(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpcfg2" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPCFG3(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpcfg3" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get PMPxCFG Register by index
+  \details Returns the content of the PMPxCFG Register.
+  \param [in]    idx    PMP region index
+  \return               PMPxCFG Register value
+ */
+__STATIC_INLINE uint8_t __get_PMPxCFG(uint32_t idx)
+{
+    uint32_t pmpcfgx = 0;
+
+    if (idx < 4) {
+        pmpcfgx = __get_PMPCFG0();
+    } else if (idx >=4 && idx < 8) {
+        idx -= 4;
+        pmpcfgx = __get_PMPCFG1();
+    } else if (idx >=8 && idx < 12) {
+        idx -= 8;
+        pmpcfgx = __get_PMPCFG2();
+    } else if (idx >=12 && idx < 16) {
+        idx -= 12;
+        pmpcfgx = __get_PMPCFG3();
+    } else {
+        return 0;
+    }
+
+    return (uint8_t)((pmpcfgx & (0xFF << (idx << 3))) >> (idx << 3));
+}
+
+/**
+  \brief   Set PMPCFGx
+  \details Writes the given value to the PMPCFGx Register.
+  \param [in]    pmpcfg  PMPCFGx Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_PMPCFG0(uint32_t pmpcfg)
+{
+    __ASM volatile("csrw pmpcfg0, %0" : : "r"(pmpcfg));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPCFG1(uint32_t pmpcfg)
+{
+    __ASM volatile("csrw pmpcfg1, %0" : : "r"(pmpcfg));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPCFG2(uint32_t pmpcfg)
+{
+    __ASM volatile("csrw pmpcfg2, %0" : : "r"(pmpcfg));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPCFG3(uint32_t pmpcfg)
+{
+    __ASM volatile("csrw pmpcfg3, %0" : : "r"(pmpcfg));
+}
+
+/**
+  \brief   Set PMPxCFG by index
+  \details Writes the given value to the PMPxCFG Register.
+  \param [in]    idx      PMPx region index
+  \param [in]    pmpxcfg  PMPxCFG Register value to set
+ */
+__STATIC_INLINE void __set_PMPxCFG(uint32_t idx, uint8_t pmpxcfg)
+{
+    uint32_t pmpcfgx = 0;
+
+    if (idx < 4) {
+        pmpcfgx = __get_PMPCFG0();
+        pmpcfgx = (pmpcfgx & ~(0xFF << (idx << 3))) | (pmpxcfg << (idx << 3));
+        __set_PMPCFG0(pmpcfgx);
+    } else if (idx >=4 && idx < 8) {
+        idx -= 4;
+        pmpcfgx = __get_PMPCFG1();
+        pmpcfgx = (pmpcfgx & ~(0xFF << (idx << 3))) | (pmpxcfg << (idx << 3));
+        __set_PMPCFG1(pmpcfgx);
+    } else if (idx >=8 && idx < 12) {
+        idx -= 8;
+        pmpcfgx = __get_PMPCFG2();
+        pmpcfgx = (pmpcfgx & ~(0xFF << (idx << 3))) | (pmpxcfg << (idx << 3));
+        __set_PMPCFG2(pmpcfgx);
+    } else if (idx >=12 && idx < 16) {
+        idx -= 12;
+        pmpcfgx = __get_PMPCFG3();
+        pmpcfgx = (pmpcfgx & ~(0xFF << (idx << 3))) | (pmpxcfg << (idx << 3));
+        __set_PMPCFG3(pmpcfgx);
+    } else {
+        return;
+    }
+}
+
+/**
+  \brief   Get PMPADDRx Register
+  \details Returns the content of the PMPADDRx Register.
+  \return               PMPADDRx Register value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR0(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr0" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR1(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr1" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR2(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr2" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR3(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr3" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR4(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr4" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR5(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr5" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR6(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr6" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR7(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr7" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR8(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr8" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR9(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr9" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR10(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr10" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR11(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr11" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR12(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr12" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR13(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr13" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR14(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr14" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint32_t __get_PMPADDR15(void)
+{
+    uint32_t result;
+
+    __ASM volatile("csrr %0, pmpaddr15" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get PMPADDRx Register by index
+  \details Returns the content of the PMPADDRx Register.
+  \param [in]    idx    PMP region index
+  \return               PMPADDRx Register value
+ */
+__STATIC_INLINE uint32_t __get_PMPADDRx(uint32_t idx)
+{
+    switch (idx) {
+    case 0: return __get_PMPADDR0();
+    case 1: return __get_PMPADDR1();
+    case 2: return __get_PMPADDR2();
+    case 3: return __get_PMPADDR3();
+    case 4: return __get_PMPADDR4();
+    case 5: return __get_PMPADDR5();
+    case 6: return __get_PMPADDR6();
+    case 7: return __get_PMPADDR7();
+    case 8: return __get_PMPADDR8();
+    case 9: return __get_PMPADDR9();
+    case 10: return __get_PMPADDR10();
+    case 11: return __get_PMPADDR11();
+    case 12: return __get_PMPADDR12();
+    case 13: return __get_PMPADDR13();
+    case 14: return __get_PMPADDR14();
+    case 15: return __get_PMPADDR15();
+    default: return 0;
+    }
+}
+
+/**
+  \brief   Set PMPADDRx
+  \details Writes the given value to the PMPADDRx Register.
+  \param [in]    pmpaddr  PMPADDRx Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_PMPADDR0(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr0, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR1(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr1, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR2(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr2, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR3(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr3, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR4(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr4, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR5(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr5, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR6(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr6, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR7(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr7, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR8(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr8, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR9(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr9, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR10(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr10, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR11(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr11, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR12(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr12, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR13(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr13, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR14(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr14, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR15(uint32_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr15, %0" : : "r"(pmpaddr));
+}
+
+/**
+  \brief   Set PMPADDRx by index
+  \details Writes the given value to the PMPADDRx Register.
+  \param [in]    idx      PMP region index
+  \param [in]    pmpaddr  PMPADDRx Register value to set
+ */
+__STATIC_INLINE void __set_PMPADDRx(uint32_t idx, uint32_t pmpaddr)
+{
+    switch (idx) {
+    case 0: __set_PMPADDR0(pmpaddr); break;
+    case 1: __set_PMPADDR1(pmpaddr); break;
+    case 2: __set_PMPADDR2(pmpaddr); break;
+    case 3: __set_PMPADDR3(pmpaddr); break;
+    case 4: __set_PMPADDR4(pmpaddr); break;
+    case 5: __set_PMPADDR5(pmpaddr); break;
+    case 6: __set_PMPADDR6(pmpaddr); break;
+    case 7: __set_PMPADDR7(pmpaddr); break;
+    case 8: __set_PMPADDR8(pmpaddr); break;
+    case 9: __set_PMPADDR9(pmpaddr); break;
+    case 10: __set_PMPADDR10(pmpaddr); break;
+    case 11: __set_PMPADDR11(pmpaddr); break;
+    case 12: __set_PMPADDR12(pmpaddr); break;
+    case 13: __set_PMPADDR13(pmpaddr); break;
+    case 14: __set_PMPADDR14(pmpaddr); break;
+    case 15: __set_PMPADDR15(pmpaddr); break;
+    default: return;
+    }
+}
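+
+/*
+  Usage sketch (illustrative only): program PMP region 0 to cover a 4 KiB,
+  naturally aligned region using the standard RISC-V NAPOT encoding. The base
+  address and size are example values, and the matching pmpcfg field must
+  also be programmed (not shown here).
+
+      uint32_t base = 0x20000000U;                      // 4 KiB aligned example base
+      uint32_t size = 0x1000U;                          // 4 KiB region
+      // pmpaddr holds address bits [33:2]; for a NAPOT region the low bits
+      // are set to (size >> 3) - 1.
+      __set_PMPADDRx(0U, (base >> 2) | ((size >> 3) - 1U));
+      uint32_t readback = __get_PMPADDRx(0U);           // verify the write
+*/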
+
+/**
+  \brief   Enable interrupts and exceptions
+  \details Enables interrupts and exceptions by setting the global interrupt enable
+           (via __enable_irq()). Can only be executed in privileged mode.
+ */
+__ALWAYS_STATIC_INLINE void __enable_excp_irq(void)
+{
+    __enable_irq();
+}
+
+
+/**
+  \brief   Disable interrupts and exceptions
+  \details Disables interrupts and exceptions by clearing the global interrupt enable
+           (via __disable_irq()). Can only be executed in privileged mode.
+ */
+__ALWAYS_STATIC_INLINE void __disable_excp_irq(void)
+{
+    __disable_irq();
+}
+
+#define __CSI_GCC_OUT_REG(r) "=r" (r)
+#define __CSI_GCC_USE_REG(r) "r" (r)
+
+/**
+  \brief   No Operation
+  \details No Operation does nothing. This instruction can be used for code alignment purposes.
+ */
+__ALWAYS_STATIC_INLINE void __NOP(void)
+{
+    __ASM volatile("nop");
+}
+
+
+/**
+  \brief   Wait For Interrupt
+  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
+ */
+__ALWAYS_STATIC_INLINE void __WFI(void)
+{
+    __ASM volatile("wfi");
+}
+
+/**
+  \brief   Wait For Interrupt
+  \details Wait For Interrupt is a hint instruction that suspends execution until one interrupt occurs.
+ */
+__ALWAYS_STATIC_INLINE void __WAIT(void)
+{
+    __ASM volatile("wfi");
+}
+
+/**
+  \brief   Doze For Interrupt
+  \details Doze For Interrupt is a hint instruction that suspends execution until one interrupt occurs.
+ */
+__ALWAYS_STATIC_INLINE void __DOZE(void)
+{
+    __ASM volatile("wfi");
+}
+
+/**
+  \brief   Stop For Interrupt
+  \details Stop For Interrupt is a hint instruction that suspends execution until one interrupt occurs.
+ */
+__ALWAYS_STATIC_INLINE void __STOP(void)
+{
+    __ASM volatile("wfi");
+}
+
+/**
+  \brief   Instruction Synchronization Barrier
+  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
+           so that all instructions following the ISB are fetched from cache or memory,
+           after the instruction has been completed.
+ */
+__ALWAYS_STATIC_INLINE void __ISB(void)
+{
+    __ASM volatile("fence");
+}
+
+
+/**
+  \brief   Data Synchronization Barrier
+  \details Acts as a special kind of Data Memory Barrier.
+           It completes when all explicit memory accesses before this instruction complete.
+ */
+__ALWAYS_STATIC_INLINE void __DSB(void)
+{
+    __ASM volatile("fence");
+}
+
+/**
+  \brief   Invalidate all I-cache
+  \details Invalidates the entire instruction cache.
+ */
+__ALWAYS_STATIC_INLINE void __ICACHE_IALL(void)
+{
+#if defined(__riscv_xtheade) || defined(__riscv_xtheadc)
+    __ASM volatile("icache.iall");
+#endif
+}
+
+/**
+  \brief   Invalidate I-cache by address
+  \details Invalidates the instruction cache line containing the given physical address.
+  \param [in] addr  physical address to operate on
+ */
+__ALWAYS_STATIC_INLINE void __ICACHE_IPA(uint32_t addr)
+{
+#if defined(__riscv_xtheade) || defined(__riscv_xtheadc)
+    __ASM volatile("icache.ipa %0" : : "r"(addr));
+#endif
+}
+
+/**
+  \brief   Invalidate all D-cache
+  \details Invalidates the entire data cache.
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_IALL(void)
+{
+#if defined(__riscv_xtheade) || defined(__riscv_xtheadc)
+    __ASM volatile("dcache.iall");
+#endif
+}
+
+/**
+  \brief   Clean all D-cache
+  \details Cleans (writes back) the entire data cache.
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_CALL(void)
+{
+#if defined(__riscv_xtheade) || defined(__riscv_xtheadc)
+    __ASM volatile("dcache.call");
+#endif
+}
+
+/**
+  \brief   Clean and invalidate all D-cache
+  \details Cleans (writes back) and invalidates the entire data cache.
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_CIALL(void)
+{
+#if defined(__riscv_xtheade) || defined(__riscv_xtheadc)
+    __ASM volatile("dcache.ciall");
+#endif
+}
+
+/**
+  \brief   Invalidate D-cache by address
+  \details Invalidates the data cache line containing the given physical address.
+  \param [in] addr  physical address to operate on
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_IPA(uint32_t addr)
+{
+#if defined(__riscv_xtheade) || defined(__riscv_xtheadc)
+    __ASM volatile("dcache.ipa %0" : : "r"(addr));
+#endif
+}
+
+/**
+  \brief   Clean D-cache by address
+  \details Cleans (writes back) the data cache line containing the given physical address.
+  \param [in] addr  physical address to operate on
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_CPA(uint32_t addr)
+{
+#if defined(__riscv_xtheade) || defined(__riscv_xtheadc)
+    __ASM volatile("dcache.cpa %0" : : "r"(addr));
+#endif
+}
+
+/**
+  \brief   Clean and invalidate D-cache by address
+  \details Cleans (writes back) and invalidates the data cache line containing the given physical address.
+  \param [in] addr  physical address to operate on
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_CIPA(uint32_t addr)
+{
+#if defined(__riscv_xtheade) || defined(__riscv_xtheadc)
+    __ASM volatile("dcache.cipa %0" : : "r"(addr));
+#endif
+}
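+
+/*
+  Usage sketch (illustrative only): clean a buffer out to memory before a
+  device reads it via DMA, using the by-address helpers above. The 64-byte
+  cache-line size and the helper name dcache_clean_range are assumptions for
+  this example; use the actual line size of the target core.
+
+      void dcache_clean_range(uint32_t addr, uint32_t size)
+      {
+          uint32_t line = 64U;                    // assumed cache-line size
+          uint32_t end  = addr + size;
+
+          for (addr &= ~(line - 1U); addr < end; addr += line) {
+              __DCACHE_CPA(addr);                 // write back one line
+          }
+          __DSB();                                // make the writes visible
+      }
+*/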
+
+
+/**
+  \brief   Data Memory Barrier
+  \details Ensures the apparent order of the explicit memory operations before
+           and after the instruction, without ensuring their completion.
+ */
+__ALWAYS_STATIC_INLINE void __DMB(void)
+{
+    __ASM volatile("fence");
+}
+
+/**
+  \brief   Reverse byte order (32 bit)
+  \details Reverses the byte order in integer value.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __REV(uint32_t value)
+{
+    return __builtin_bswap32(value);
+}
+
+
+/**
+  \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order in two unsigned short values.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __REV16(uint32_t value)
+{
+    uint32_t result;
+
+    result = ((value & 0xFF000000) >> 8) | ((value & 0x00FF0000) << 8) |
+             ((value & 0x0000FF00) >> 8) | ((value & 0x000000FF) << 8);
+
+    return (result);
+}
+
+
+/**
+  \brief   Reverse byte order in signed short value
+  \details Reverses the byte order in a signed short value with sign extension to integer.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__ALWAYS_STATIC_INLINE int32_t __REVSH(int32_t value)
+{
+    return (short)(((value & 0xFF00) >> 8) | ((value & 0x00FF) << 8));
+}
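+
+/*
+  Worked examples (illustrative only) for the byte-reverse helpers:
+
+      uint32_t a = __REV(0x12345678U);    // 0x78563412 - whole word reversed
+      uint32_t b = __REV16(0x12345678U);  // 0x34127856 - bytes swapped within each halfword
+      int32_t  c = __REVSH(0x0080);       // 0xFFFF8000 - bytes swapped, then sign-extended
+*/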
+
+
+/**
+  \brief   Rotate Right in unsigned value (32 bit)
+  \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
+  \param [in]    op1  Value to rotate
+  \param [in]    op2  Number of Bits to rotate
+  \return               Rotated value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
+{
+    op2 &= 31U;                      /* guard against undefined shifts of 32 or more */
+    return op2 ? ((op1 >> op2) | (op1 << (32U - op2))) : op1;
+}
+
+
+/**
+  \brief   Breakpoint
+  \details Causes the processor to enter Debug state
+           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+ */
+__ALWAYS_STATIC_INLINE void __BKPT(void)
+{
+    __ASM volatile("ebreak");
+}
+
+/**
+  \brief   Reverse bit order of value
+  \details Reverses the bit order of the given value.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __RBIT(uint32_t value)
+{
+    uint32_t result;
+
+    int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */
+
+    result = value;                      /* r will be reversed bits of v; first get LSB of v */
+
+    for (value >>= 1U; value; value >>= 1U) {
+        result <<= 1U;
+        result |= value & 1U;
+        s--;
+    }
+
+    result <<= s;                        /* shift when v's highest bits are zero */
+
+    return (result);
+}
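+
+/*
+  Worked examples (illustrative only) for __RBIT:
+
+      __RBIT(0x00000001U);   // 0x80000000 - the LSB moves to the MSB
+      __RBIT(0x12345678U);   // 0x1E6A2C48 - full 32-bit mirror
+*/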
+
+
+/**
+  \brief   Count leading zeros
+  \details Counts the number of leading zeros of a data value.
+  \param [in]  value  Value to count the leading zeros
+  \return             number of leading zeros in value
+ */
+#define __CLZ             __builtin_clz
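+/* Note: as with the underlying GCC builtin, the result of __CLZ(0) is undefined. */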
+/**
+  \brief   Signed Saturate
+  \details This function saturates a signed value.
+  \param [in]    x   Value to be saturated
+  \param [in]    y   Bit position to saturate to [1..32]
+  \return            Saturated value.
+ */
+__ALWAYS_STATIC_INLINE int32_t __SSAT(int32_t x, uint32_t y)
+{
+    int32_t posMax, negMin;
+    uint32_t i;
+
+    posMax = 1;
+
+    for (i = 0; i < (y - 1); i++) {
+        posMax = posMax * 2;
+    }
+
+    if (x > 0) {
+        posMax = (posMax - 1);
+
+        if (x > posMax) {
+            x = posMax;
+        }
+
+//    x &= (posMax * 2 + 1);
+    } else {
+        negMin = -posMax;
+
+        if (x < negMin) {
+            x = negMin;
+        }
+
+//    x &= (posMax * 2 - 1);
+    }
+
+    return (x);
+}
+
+/**
+  \brief   Unsigned Saturate
+  \details Saturates an unsigned value.
+  \param [in]  value  Value to be saturated
+  \param [in]    sat  Bit position to saturate to (0..31)
+  \return             Saturated value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USAT(uint32_t value, uint32_t sat)
+{
+    uint32_t result;
+
+    if ((((0xFFFFFFFF >> sat) << sat) & value) != 0) {
+        result = 0xFFFFFFFF >> (32 - sat);
+    } else {
+        result = value;
+    }
+
+    return (result);
+}
+
+/**
+  \brief   Unsigned Saturate for internal use
+  \details Saturates an unsigned value, should not call directly.
+  \param [in]  value  Value to be saturated
+  \param [in]    sat  Bit position to saturate to (0..31)
+  \return             Saturated value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __IUSAT(uint32_t value, uint32_t sat)
+{
+    uint32_t result;
+
+    if (value & 0x80000000) { /* a negative (overflowed) intermediate sets bit 31; saturate to 0 */
+        result = 0;
+    } else if ((((0xFFFFFFFF >> sat) << sat) & value) != 0) {
+        result = 0xFFFFFFFF >> (32 - sat);
+    } else {
+        result = value;
+    }
+
+    return (result);
+}
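+
+/*
+  Worked examples (illustrative only) for the saturation helpers:
+
+      __SSAT(300, 8);     // 127  - clamped to the signed 8-bit maximum
+      __SSAT(-300, 8);    // -128 - clamped to the signed 8-bit minimum
+      __USAT(300U, 8);    // 255  - clamped to the unsigned 8-bit maximum
+*/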
+
+/**
+  \brief   Rotate Right with Extend
+  \details This function moves each bit of a bitstring right by one bit.
+           The carry input is shifted in at the left end of the bitstring.
+  \note    The carry input is always 0.
+  \param [in]    op1  Value to rotate
+  \return               Rotated value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __RRX(uint32_t op1)
+{
+    return (op1 >> 1);               /* carry-in is always 0, so the vacated MSB becomes 0 */
+}
+
+/**
+  \brief   LDRT Unprivileged (8 bit)
+  \details Executes an unprivileged LDRT instruction for an 8-bit value.
+  \param [in]    addr  Pointer to location
+  \return             value of type uint8_t at (*ptr)
+ */
+__ALWAYS_STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
+{
+    uint32_t result;
+
+    __ASM volatile("lb %0, 0(%1)" : "=r"(result) : "r"(addr));
+
+    return ((uint8_t) result);    /* Add explicit type cast here */
+}
+
+
+/**
+  \brief   LDRT Unprivileged (16 bit)
+  \details Executes an unprivileged LDRT instruction for 16-bit values.
+  \param [in]    addr  Pointer to location
+  \return        value of type uint16_t at (*ptr)
+ */
+__ALWAYS_STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
+{
+    uint32_t result;
+
+    __ASM volatile("lh %0, 0(%1)" : "=r"(result) : "r"(addr));
+
+    return ((uint16_t) result);    /* Add explicit type cast here */
+}
+
+
+/**
+  \brief   LDRT Unprivileged (32 bit)
+  \details Executes an unprivileged LDRT instruction for 32-bit values.
+  \param [in]    addr  Pointer to location
+  \return        value of type uint32_t at (*ptr)
+ */
+__ALWAYS_STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
+{
+    uint32_t result;
+
+    __ASM volatile("lw %0, 0(%1)" : "=r"(result) : "r"(addr));
+
+    return (result);
+}
+
+
+/**
+  \brief   STRT Unprivileged (8 bit)
+  \details Executes an unprivileged STRT instruction for 8-bit values.
+  \param [in]  value  Value to store
+  \param [in]    addr  Pointer to location
+ */
+__ALWAYS_STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
+{
+    __ASM volatile("sb %1, 0(%0)" :: "r"(addr), "r"((uint32_t)value) : "memory");
+}
+
+
+/**
+  \brief   STRT Unprivileged (16 bit)
+  \details Executes an unprivileged STRT instruction for 16-bit values.
+  \param [in]  value  Value to store
+  \param [in]    addr  Pointer to location
+ */
+__ALWAYS_STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
+{
+    __ASM volatile("sh %1, 0(%0)" :: "r"(addr), "r"((uint32_t)value) : "memory");
+}
+
+
+/**
+  \brief   STRT Unprivileged (32 bit)
+  \details Executes an unprivileged STRT instruction for 32-bit values.
+  \param [in]  value  Value to store
+  \param [in]    addr  Pointer to location
+ */
+__ALWAYS_STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
+{
+    __ASM volatile("sw %1, 0(%0)" :: "r"(addr), "r"(value) : "memory");
+}
+
+/*@}*/ /* end of group CSI_Core_InstructionInterface */
+
+/* ###################  Compiler specific Intrinsics  ########################### */
+/** \defgroup CSI_SIMD_intrinsics CSI SIMD Intrinsics
+  Access to dedicated SIMD instructions \n
+  Single Instruction Multiple Data (SIMD) extensions are provided to simplify development of application software. SIMD extensions increase the processing capability without materially increasing the power consumption. The SIMD extensions are completely transparent to the operating system (OS), allowing existing OS ports to be used.
+
+  @{
+*/
+
+/**
+  \brief   Halfword packing instruction. Combines bits[15:0] of val1 with bits[31:16]
+           of val2 left-shifted by val3.
+  \details Combine a halfword from one register with a halfword from another register.
+           The second argument can be left-shifted before extraction of the halfword.
+  \param [in]    val1   first 16-bit operands
+  \param [in]    val2   second 16-bit operands
+  \param [in]    val3   value for left-shifting val2. Value range [0..31].
+  \return               the combination of halfwords.
+  \remark
+                 res[15:0]  = val1[15:0]              \n
+                 res[31:16] = val2[31:16] << val3
+ */
+__ALWAYS_STATIC_INLINE uint32_t __PKHBT(uint32_t val1, uint32_t val2, uint32_t val3)
+{
+    return ((((int32_t)(val1) << 0) & (int32_t)0x0000FFFF) | (((int32_t)(val2) << val3) & (int32_t)0xFFFF0000));
+}
+
+/**
+  \brief   Halfword packing instruction. Combines bits[31:16] of val1 with bits[15:0]
+           of val2 right-shifted with the val3.
+  \details Combine a halfword from one register with a halfword from another register.
+           The second argument can be right-shifted before extraction of the halfword.
+  \param [in]    val1   first 16-bit operands
+  \param [in]    val2   second 16-bit operands
+  \param [in]    val3   value for right-shifting val2. Value range [1..32].
+  \return               the combination of halfwords.
+  \remark
+                 res[15:0]  = val2[15:0] >> val3        \n
+                 res[31:16] = val1[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __PKHTB(uint32_t val1, uint32_t val2, uint32_t val3)
+{
+    return ((((int32_t)(val1) << 0) & (int32_t)0xFFFF0000) | (((int32_t)(val2) >> val3) & (int32_t)0x0000FFFF));
+}
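+
+/*
+  Worked examples (illustrative only) for the halfword packing helpers:
+
+      __PKHBT(0x0000AAAAU, 0x0000BBBBU, 16);  // 0xBBBBAAAA - val2 shifted up into the high half
+      __PKHTB(0xAAAA0000U, 0xBBBB0000U, 16);  // 0xAAAABBBB - val2 shifted down into the low half
+*/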
+
+/**
+  \brief   Dual 16-bit signed saturate.
+  \details This function saturates a signed value.
+  \param [in]    x   two signed 16-bit values to be saturated.
+  \param [in]    y   bit position for saturation, an integral constant expression in the range 1 to 16.
+  \return        the two signed saturations packed into one word:\n
+                 the signed saturation of the low halfword in val1, saturated to the bit position specified in
+                 val2 and returned in the low halfword of the return value.\n
+                 the signed saturation of the high halfword in val1, saturated to the bit position specified in
+                 val2 and returned in the high halfword of the return value.
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SSAT16(int32_t x, const uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = __SSAT((((int32_t)x << 16) >> 16), y) & (int32_t)0x0000FFFF;
+    s = __SSAT((((int32_t)x) >> 16), y) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned saturate.
+  \details This function enables you to saturate two signed 16-bit values to a selected unsigned range.
+  \param [in]    x   two signed 16-bit values to be saturated.
+  \param [in]    y   bit position for saturation, an integral constant expression in the range 1 to 16.
+  \return        the saturation of the two signed 16-bit values, as non-negative values:
+                 the saturation of the low halfword in val1, saturated to the bit position specified in
+                 val2 and returned in the low halfword of the return value.\n
+                 the saturation of the high halfword in val1, saturated to the bit position specified in
+                 val2 and returned in the high halfword of the return value.
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USAT16(uint32_t x, const uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = __IUSAT(((x << 16) >> 16), y) & 0x0000FFFF;
+    s = __IUSAT(((x) >> 16), y) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Quad 8-bit saturating addition.
+  \details This function enables you to perform four 8-bit integer additions,
+           saturating the results to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the saturated addition of the first byte of each operand in the first byte of the return value.\n
+                 the saturated addition of the second byte of each operand in the second byte of the return value.\n
+                 the saturated addition of the third byte of each operand in the third byte of the return value.\n
+                 the saturated addition of the fourth byte of each operand in the fourth byte of the return value.\n
+                 The returned results are saturated to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
+  \remark
+                 res[7:0]   = val1[7:0]   + val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  + val2[15:8]       \n
+                 res[23:16] = val1[23:16] + val2[23:16]      \n
+                 res[31:24] = val1[31:24] + val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = __SSAT(((((int32_t)x << 24) >> 24) + (((int32_t)y << 24) >> 24)), 8) & (int32_t)0x000000FF;
+    s = __SSAT(((((int32_t)x << 16) >> 24) + (((int32_t)y << 16) >> 24)), 8) & (int32_t)0x000000FF;
+    t = __SSAT(((((int32_t)x <<  8) >> 24) + (((int32_t)y <<  8) >> 24)), 8) & (int32_t)0x000000FF;
+    u = __SSAT(((((int32_t)x) >> 24) + (((int32_t)y) >> 24)), 8) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
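+
+/*
+  Worked example (illustrative only): byte lanes saturate independently.
+
+      __QADD8(0x7F7F0102U, 0x01010304U);   // 0x7F7F0406 - the two high lanes clamp
+                                           // at +127, the low lanes add normally
+*/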
+
+/**
+  \brief   Quad 8-bit unsigned saturating addition.
+  \details This function enables you to perform four unsigned 8-bit integer additions,
+           saturating the results to the 8-bit unsigned integer range 0 < x < 2^8 - 1.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the saturated addition of the first byte of each operand in the first byte of the return value.\n
+                 the saturated addition of the second byte of each operand in the second byte of the return value.\n
+                 the saturated addition of the third byte of each operand in the third byte of the return value.\n
+                 the saturated addition of the fourth byte of each operand in the fourth byte of the return value.\n
+                 The returned results are saturated to the 8-bit unsigned integer range 0 <= x <= 2^8 - 1.
+  \remark
+                 res[7:0]   = val1[7:0]   + val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  + val2[15:8]       \n
+                 res[23:16] = val1[23:16] + val2[23:16]      \n
+                 res[31:24] = val1[31:24] + val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = __IUSAT((((x << 24) >> 24) + ((y << 24) >> 24)), 8) & 0x000000FF;
+    s = __IUSAT((((x << 16) >> 24) + ((y << 16) >> 24)), 8) & 0x000000FF;
+    t = __IUSAT((((x <<  8) >> 24) + ((y <<  8) >> 24)), 8) & 0x000000FF;
+    u = __IUSAT((((x) >> 24) + ((y) >> 24)), 8) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Quad 8-bit signed addition.
+  \details This function performs four 8-bit signed integer additions.
+  \param [in]    x  first four 8-bit summands.
+  \param [in]    y  second four 8-bit summands.
+  \return        the addition of the first bytes from each operand, in the first byte of the return value.\n
+                 the addition of the second bytes of each operand, in the second byte of the return value.\n
+                 the addition of the third bytes of each operand, in the third byte of the return value.\n
+                 the addition of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = val1[7:0]   + val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  + val2[15:8]       \n
+                 res[23:16] = val1[23:16] + val2[23:16]      \n
+                 res[31:24] = val1[31:24] + val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = ((((int32_t)x << 24) >> 24) + (((int32_t)y << 24) >> 24)) & (int32_t)0x000000FF;
+    s = ((((int32_t)x << 16) >> 24) + (((int32_t)y << 16) >> 24)) & (int32_t)0x000000FF;
+    t = ((((int32_t)x <<  8) >> 24) + (((int32_t)y <<  8) >> 24)) & (int32_t)0x000000FF;
+    u = ((((int32_t)x) >> 24) + (((int32_t)y) >> 24)) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned addition.
+  \details This function performs four unsigned 8-bit integer additions.
+  \param [in]    x  first four 8-bit summands.
+  \param [in]    y  second four 8-bit summands.
+  \return        the addition of the first bytes from each operand, in the first byte of the return value.\n
+                 the addition of the second bytes of each operand, in the second byte of the return value.\n
+                 the addition of the third bytes of each operand, in the third byte of the return value.\n
+                 the addition of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = val1[7:0]   + val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  + val2[15:8]       \n
+                 res[23:16] = val1[23:16] + val2[23:16]      \n
+                 res[31:24] = val1[31:24] + val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((x << 24) >> 24) + ((y << 24) >> 24)) & 0x000000FF;
+    s = (((x << 16) >> 24) + ((y << 16) >> 24)) & 0x000000FF;
+    t = (((x <<  8) >> 24) + ((y <<  8) >> 24)) & 0x000000FF;
+    u = (((x) >> 24) + ((y) >> 24)) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Quad 8-bit saturating subtract.
+  \details This function enables you to perform four 8-bit integer subtractions,
+           saturating the results to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the subtraction of the first byte of each operand in the first byte of the return value.\n
+                 the subtraction of the second byte of each operand in the second byte of the return value.\n
+                 the subtraction of the third byte of each operand in the third byte of the return value.\n
+                 the subtraction of the fourth byte of each operand in the fourth byte of the return value.\n
+                 The returned results are saturated to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
+  \remark
+                 res[7:0]   = val1[7:0]   - val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  - val2[15:8]       \n
+                 res[23:16] = val1[23:16] - val2[23:16]      \n
+                 res[31:24] = val1[31:24] - val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = __SSAT(((((int32_t)x << 24) >> 24) - (((int32_t)y << 24) >> 24)), 8) & (int32_t)0x000000FF;
+    s = __SSAT(((((int32_t)x << 16) >> 24) - (((int32_t)y << 16) >> 24)), 8) & (int32_t)0x000000FF;
+    t = __SSAT(((((int32_t)x <<  8) >> 24) - (((int32_t)y <<  8) >> 24)), 8) & (int32_t)0x000000FF;
+    u = __SSAT(((((int32_t)x) >> 24) - (((int32_t)y) >> 24)), 8) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned saturating subtraction.
+  \details This function enables you to perform four unsigned 8-bit integer subtractions,
+           saturating the results to the 8-bit unsigned integer range 0 < x < 2^8 - 1.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the subtraction of the first byte of each operand in the first byte of the return value.\n
+                 the subtraction of the second byte of each operand in the second byte of the return value.\n
+                 the subtraction of the third byte of each operand in the third byte of the return value.\n
+                 the subtraction of the fourth byte of each operand in the fourth byte of the return value.\n
+                 The returned results are saturated to the 8-bit unsigned integer range 0 <= x <= 2^8 - 1.
+  \remark
+                 res[7:0]   = val1[7:0]   - val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  - val2[15:8]       \n
+                 res[23:16] = val1[23:16] - val2[23:16]      \n
+                 res[31:24] = val1[31:24] - val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = __IUSAT((((x << 24) >> 24) - ((y << 24) >> 24)), 8) & 0x000000FF;
+    s = __IUSAT((((x << 16) >> 24) - ((y << 16) >> 24)), 8) & 0x000000FF;
+    t = __IUSAT((((x <<  8) >> 24) - ((y <<  8) >> 24)), 8) & 0x000000FF;
+    u = __IUSAT((((x) >> 24) - ((y) >> 24)), 8) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Quad 8-bit signed subtraction.
+  \details This function enables you to perform four 8-bit signed integer subtractions.
+  \param [in]    x  first four 8-bit operands of each subtraction.
+  \param [in]    y  second four 8-bit operands of each subtraction.
+  \return        the subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the subtraction of the second bytes of each operand, in the second byte of the return value.\n
+                 the subtraction of the third bytes of each operand, in the third byte of the return value.\n
+                 the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = val1[7:0]   - val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  - val2[15:8]       \n
+                 res[23:16] = val1[23:16] - val2[23:16]      \n
+                 res[31:24] = val1[31:24] - val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = ((((int32_t)x << 24) >> 24) - (((int32_t)y << 24) >> 24)) & (int32_t)0x000000FF;
+    s = ((((int32_t)x << 16) >> 24) - (((int32_t)y << 16) >> 24)) & (int32_t)0x000000FF;
+    t = ((((int32_t)x <<  8) >> 24) - (((int32_t)y <<  8) >> 24)) & (int32_t)0x000000FF;
+    u = ((((int32_t)x) >> 24) - (((int32_t)y) >> 24)) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned subtract.
+  \details This function enables you to perform four 8-bit unsigned integer subtractions.
+  \param [in]    x  first four 8-bit operands of each subtraction.
+  \param [in]    y  second four 8-bit operands of each subtraction.
+  \return        the subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the subtraction of the second bytes of each operand, in the second byte of the return value.\n
+                 the subtraction of the third bytes of each operand, in the third byte of the return value.\n
+                 the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = val1[7:0]   - val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  - val2[15:8]       \n
+                 res[23:16] = val1[23:16] - val2[23:16]      \n
+                 res[31:24] = val1[31:24] - val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((x << 24) >> 24) - ((y << 24) >> 24)) & 0x000000FF;
+    s = (((x << 16) >> 24) - ((y << 16) >> 24)) & 0x000000FF;
+    t = (((x <<  8) >> 24) - ((y <<  8) >> 24)) & 0x000000FF;
+    u = (((x) >> 24) - ((y) >> 24)) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Unsigned sum of quad 8-bit unsigned absolute difference.
+  \details This function enables you to perform four unsigned 8-bit subtractions, and add the absolute values
+           of the differences together, returning the result as a single unsigned integer.
+  \param [in]    x  first four 8-bit operands of each subtraction.
+  \param [in]    y  second four 8-bit operands of each subtraction.
+  \return        the sum of the absolute differences of the four byte pairs,
+                 returned as a single unsigned integer.
+  \remark
+                 absdiff1   = val1[7:0]   - val2[7:0]        \n
+                 absdiff2   = val1[15:8]  - val2[15:8]       \n
+                 absdiff3   = val1[23:16] - val2[23:16]      \n
+                 absdiff4   = val1[31:24] - val2[31:24]      \n
+                 res[31:0]  = absdiff1 + absdiff2 + absdiff3 + absdiff4
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USAD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    /* take the absolute difference of each byte lane before summing */
+    r = abs((int32_t)((x << 24) >> 24) - (int32_t)((y << 24) >> 24));
+    s = abs((int32_t)((x << 16) >> 24) - (int32_t)((y << 16) >> 24));
+    t = abs((int32_t)((x <<  8) >> 24) - (int32_t)((y <<  8) >> 24));
+    u = abs((int32_t)((x) >> 24) - (int32_t)((y) >> 24));
+
+    return (u + t + s + r);
+}
+
+/**
+  \brief   Unsigned sum of quad 8-bit unsigned absolute difference with 32-bit accumulate.
+  \details This function enables you to perform four unsigned 8-bit subtractions, and add the absolute values
+           of the differences to a 32-bit accumulate operand.
+  \param [in]    x  first four 8-bit operands of each subtraction.
+  \param [in]    y  second four 8-bit operands of each subtraction.
+  \param [in]  sum  accumulation value.
+  \return        the sum of the absolute differences of the following bytes, added to the accumulation value:
+                 the subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the subtraction of the second bytes of each operand, in the second byte of the return value.\n
+                 the subtraction of the third bytes of each operand, in the third byte of the return value.\n
+                 the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 absdiff1 = val1[7:0]   - val2[7:0]        \n
+                 absdiff2 = val1[15:8]  - val2[15:8]       \n
+                 absdiff3 = val1[23:16] - val2[23:16]      \n
+                 absdiff4 = val1[31:24] - val2[31:24]      \n
+                 sum = absdiff1 + absdiff2 + absdiff3 + absdiff4 \n
+                 res[31:0] = sum[31:0] + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USADA8(uint32_t x, uint32_t y, uint32_t sum)
+{
+    int32_t r, s, t, u;
+
+#ifdef __cplusplus
+    r = (abs((long long)((x << 24) >> 24) - ((y << 24) >> 24))) & 0x000000FF;
+    s = (abs((long long)((x << 16) >> 24) - ((y << 16) >> 24))) & 0x000000FF;
+    t = (abs((long long)((x <<  8) >> 24) - ((y <<  8) >> 24))) & 0x000000FF;
+    u = (abs((long long)((x) >> 24) - ((y) >> 24))) & 0x000000FF;
+#else
+    r = (abs(((x << 24) >> 24) - ((y << 24) >> 24))) & 0x000000FF;
+    s = (abs(((x << 16) >> 24) - ((y << 16) >> 24))) & 0x000000FF;
+    t = (abs(((x <<  8) >> 24) - ((y <<  8) >> 24))) & 0x000000FF;
+    u = (abs(((x) >> 24) - ((y) >> 24))) & 0x000000FF;
+#endif
+    return (u + t + s + r + sum);
+}
+
+/**
+  \brief   Dual 16-bit saturating addition.
+  \details This function enables you to perform two 16-bit integer arithmetic additions in parallel,
+           saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the saturated addition of the low halfwords, in the low halfword of the return value.\n
+                 the saturated addition of the high halfwords, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[15:0]        \n
+                 res[31:16] = val1[31:16] + val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QADD16(uint32_t x, uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = __SSAT(((((int32_t)x << 16) >> 16) + (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
+    s = __SSAT(((((int32_t)x) >> 16) + (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
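+
+/*
+  Worked example (illustrative only): halfword lanes saturate independently.
+
+      __QADD16(0x7FFF0001U, 0x00010002U);  // 0x7FFF0003 - high lane clamps at
+                                           // +32767, low lane adds normally
+*/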
+
+/**
+  \brief   Dual 16-bit unsigned saturating addition.
+  \details This function enables you to perform two unsigned 16-bit integer additions, saturating
+           the results to the 16-bit unsigned integer range 0 < x < 2^16 - 1.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the saturated addition of the low halfwords, in the low halfword of the return value.\n
+                 the saturated addition of the high halfwords, in the high halfword of the return value.\n
+                 The results are saturated to the 16-bit unsigned integer range 0 < x < 2^16 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[15:0]        \n
+                 res[31:16] = val1[31:16] + val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQADD16(uint32_t x, uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = __IUSAT((((x << 16) >> 16) + ((y << 16) >> 16)), 16) & 0x0000FFFF;
+    s = __IUSAT((((x) >> 16) + ((y) >> 16)), 16) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed addition.
+  \details This function enables you to perform two 16-bit signed integer additions.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the addition of the low halfwords in the low halfword of the return value.\n
+                 the addition of the high halfwords in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[15:0]        \n
+                 res[31:16] = val1[31:16] + val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SADD16(uint32_t x, uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = ((((int32_t)x << 16) >> 16) + (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
+    s = ((((int32_t)x) >> 16) + (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned addition
+  \details This function enables you to perform two 16-bit unsigned integer additions.
+  \param [in]    x   first two 16-bit summands for each addition.
+  \param [in]    y   second two 16-bit summands for each addition.
+  \return        the addition of the low halfwords in the low halfword of the return value.\n
+                 the addition of the high halfwords in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[15:0]        \n
+                 res[31:16] = val1[31:16] + val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UADD16(uint32_t x, uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = (((x << 16) >> 16) + ((y << 16) >> 16)) & 0x0000FFFF;
+    s = (((x) >> 16) + ((y) >> 16)) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+
+/**
+  \brief   Dual 16-bit signed addition with halved results.
+  \details This function enables you to perform two signed 16-bit integer additions, halving the results.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the halved addition of the low halfwords, in the low halfword of the return value.\n
+                 the halved addition of the high halfwords, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  + val2[15:0]) >> 1        \n
+                 res[31:16] = (val1[31:16] + val2[31:16]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHADD16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((((int32_t)x << 16) >> 16) + (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+    s = (((((int32_t)x) >> 16) + (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned addition with halved results.
+  \details This function enables you to perform two unsigned 16-bit integer additions, halving the results.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the halved addition of the low halfwords, in the low halfword of the return value.\n
+                 the halved addition of the high halfwords, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  + val2[15:0]) >> 1        \n
+                 res[31:16] = (val1[31:16] + val2[31:16]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHADD16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((x << 16) >> 16) + ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
+    s = ((((x) >> 16) + ((y) >> 16)) >> 1) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Quad 8-bit signed addition with halved results.
+  \details This function enables you to perform four signed 8-bit integer additions, halving the results.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the halved addition of the first bytes from each operand, in the first byte of the return value.\n
+                 the halved addition of the second bytes from each operand, in the second byte of the return value.\n
+                 the halved addition of the third bytes from each operand, in the third byte of the return value.\n
+                 the halved addition of the fourth bytes from each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = (val1[7:0]   + val2[7:0]  ) >> 1    \n
+                 res[15:8]  = (val1[15:8]  + val2[15:8] ) >> 1    \n
+                 res[23:16] = (val1[23:16] + val2[23:16]) >> 1    \n
+                 res[31:24] = (val1[31:24] + val2[31:24]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((((int32_t)x << 24) >> 24) + (((int32_t)y << 24) >> 24)) >> 1) & (int32_t)0x000000FF;
+    s = (((((int32_t)x << 16) >> 24) + (((int32_t)y << 16) >> 24)) >> 1) & (int32_t)0x000000FF;
+    t = (((((int32_t)x <<  8) >> 24) + (((int32_t)y <<  8) >> 24)) >> 1) & (int32_t)0x000000FF;
+    u = (((((int32_t)x) >> 24) + (((int32_t)y) >> 24)) >> 1) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned addition with halved results.
+  \details This function enables you to perform four unsigned 8-bit integer additions, halving the results.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the halved addition of the first bytes from each operand, in the first byte of the return value.\n
+                 the halved addition of the second bytes from each operand, in the second byte of the return value.\n
+                 the halved addition of the third bytes from each operand, in the third byte of the return value.\n
+                 the halved addition of the fourth bytes from each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = (val1[7:0]   + val2[7:0]  ) >> 1    \n
+                 res[15:8]  = (val1[15:8]  + val2[15:8] ) >> 1    \n
+                 res[23:16] = (val1[23:16] + val2[23:16]) >> 1    \n
+                 res[31:24] = (val1[31:24] + val2[31:24]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = ((((x << 24) >> 24) + ((y << 24) >> 24)) >> 1) & 0x000000FF;
+    s = ((((x << 16) >> 24) + ((y << 16) >> 24)) >> 1) & 0x000000FF;
+    t = ((((x <<  8) >> 24) + ((y <<  8) >> 24)) >> 1) & 0x000000FF;
+    u = ((((x) >> 24) + ((y) >> 24)) >> 1) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Dual 16-bit saturating subtract.
+  \details This function enables you to perform two 16-bit integer subtractions in parallel,
+           saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the saturated subtraction of the low halfwords, in the low halfword of the return value.\n
+                 the saturated subtraction of the high halfwords, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[15:0]        \n
+                 res[31:16] = val1[31:16] - val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __SSAT(((((int32_t)x << 16) >> 16) - (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
+    s = __SSAT(((((int32_t)x) >> 16) - (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned saturating subtraction.
+  \details This function enables you to perform two unsigned 16-bit integer subtractions,
+           saturating the results to the 16-bit unsigned integer range 0 < x < 2^16 - 1.
+  \param [in]    x   first two 16-bit operands for each subtraction.
+  \param [in]    y   second two 16-bit operands for each subtraction.
+  \return        the saturated subtraction of the low halfwords, in the low halfword of the return value.\n
+                 the saturated subtraction of the high halfwords, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[15:0]        \n
+                 res[31:16] = val1[31:16] - val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __IUSAT((((x << 16) >> 16) - ((y << 16) >> 16)), 16) & 0x0000FFFF;
+    s = __IUSAT((((x) >> 16) - ((y) >> 16)), 16) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed subtraction.
+  \details This function enables you to perform two 16-bit signed integer subtractions.
+  \param [in]    x   first two 16-bit operands of each subtraction.
+  \param [in]    y   second two 16-bit operands of each subtraction.
+  \return        the subtraction of the low halfword in the second operand from the low
+                 halfword in the first operand, in the low halfword of the return value. \n
+                 the subtraction of the high halfword in the second operand from the high
+                 halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[15:0]        \n
+                 res[31:16] = val1[31:16] - val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((int32_t)x << 16) >> 16) - (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
+    s = ((((int32_t)x) >> 16) - (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned subtract.
+  \details This function enables you to perform two 16-bit unsigned integer subtractions.
+  \param [in]    x   first two 16-bit operands of each subtraction.
+  \param [in]    y   second two 16-bit operands of each subtraction.
+  \return        the subtraction of the low halfword in the second operand from the low
+                 halfword in the first operand, in the low halfword of the return value. \n
+                 the subtraction of the high halfword in the second operand from the high
+                 halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[15:0]        \n
+                 res[31:16] = val1[31:16] - val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((x << 16) >> 16) - ((y << 16) >> 16)) & 0x0000FFFF;
+    s = (((x) >> 16) - ((y) >> 16)) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed subtraction with halved results.
+  \details This function enables you to perform two signed 16-bit integer subtractions, halving the results.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the halved subtraction of the low halfwords, in the low halfword of the return value.\n
+                 the halved subtraction of the high halfwords, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  - val2[15:0]) >> 1        \n
+                 res[31:16] = (val1[31:16] - val2[31:16]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((((int32_t)x << 16) >> 16) - (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+    s = (((((int32_t)x) >> 16) - (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned subtraction with halved results.
+  \details This function enables you to perform two unsigned 16-bit integer subtractions, halving the results.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the halved subtraction of the low halfwords, in the low halfword of the return value.\n
+                 the halved subtraction of the high halfwords, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  - val2[15:0]) >> 1        \n
+                 res[31:16] = (val1[31:16] - val2[31:16]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((x << 16) >> 16) - ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
+    s = ((((x) >> 16) - ((y) >> 16)) >> 1) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Quad 8-bit signed subtraction with halved results.
+  \details This function enables you to perform four signed 8-bit integer subtractions, halving the results.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the halved subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the halved subtraction of the second bytes from each operand, in the second byte of the return value.\n
+                 the halved subtraction of the third bytes from each operand, in the third byte of the return value.\n
+                 the halved subtraction of the fourth bytes from each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = (val1[7:0]   - val2[7:0]  ) >> 1    \n
+                 res[15:8]  = (val1[15:8]  - val2[15:8] ) >> 1    \n
+                 res[23:16] = (val1[23:16] - val2[23:16]) >> 1    \n
+                 res[31:24] = (val1[31:24] - val2[31:24]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((((int32_t)x << 24) >> 24) - (((int32_t)y << 24) >> 24)) >> 1) & (int32_t)0x000000FF;
+    s = (((((int32_t)x << 16) >> 24) - (((int32_t)y << 16) >> 24)) >> 1) & (int32_t)0x000000FF;
+    t = (((((int32_t)x <<  8) >> 24) - (((int32_t)y <<  8) >> 24)) >> 1) & (int32_t)0x000000FF;
+    u = (((((int32_t)x) >> 24) - (((int32_t)y) >> 24)) >> 1) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned subtraction with halved results.
+  \details This function enables you to perform four unsigned 8-bit integer subtractions, halving the results.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the halved subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the halved subtraction of the second bytes from each operand, in the second byte of the return value.\n
+                 the halved subtraction of the third bytes from each operand, in the third byte of the return value.\n
+                 the halved subtraction of the fourth bytes from each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = (val1[7:0]   - val2[7:0]  ) >> 1    \n
+                 res[15:8]  = (val1[15:8]  - val2[15:8] ) >> 1    \n
+                 res[23:16] = (val1[23:16] - val2[23:16]) >> 1    \n
+                 res[31:24] = (val1[31:24] - val2[31:24]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = ((((x << 24) >> 24) - ((y << 24) >> 24)) >> 1) & 0x000000FF;
+    s = ((((x << 16) >> 24) - ((y << 16) >> 24)) >> 1) & 0x000000FF;
+    t = ((((x <<  8) >> 24) - ((y <<  8) >> 24)) >> 1) & 0x000000FF;
+    u = ((((x) >> 24) - ((y) >> 24)) >> 1) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Dual 16-bit add and subtract with exchange.
+  \details This function enables you to exchange the halfwords of the one operand,
+           then add the high halfwords and subtract the low halfwords,
+           saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the saturated subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the saturated addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[31:16]        \n
+                 res[31:16] = val1[31:16] + val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __SSAT(((((int32_t)x << 16) >> 16) - (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
+    s = __SSAT(((((int32_t)x) >> 16) + (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
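+
+/*
+  Worked example (illustrative only): the second operand's halfwords are
+  exchanged, then the lanes are added/subtracted with saturation.
+
+      __QASX(0x00030005U, 0x00020001U);    // 0x00040003
+                                           // low  = 0x0005 - 0x0002 = 0x0003
+                                           // high = 0x0003 + 0x0001 = 0x0004
+*/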
+
+/**
+  \brief   Dual 16-bit unsigned saturating addition and subtraction with exchange.
+  \details This function enables you to exchange the halfwords of the second operand and
+           perform one unsigned 16-bit integer addition and one unsigned 16-bit subtraction,
+           saturating the results to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the saturated subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the saturated addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[31:16]        \n
+                 res[31:16] = val1[31:16] + val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __IUSAT((((x << 16) >> 16) - ((y) >> 16)), 16) & 0x0000FFFF;
+    s = __IUSAT((((x) >> 16) + ((y << 16) >> 16)), 16) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit addition and subtraction with exchange.
+  \details It enables you to exchange the halfwords of the second operand, add the high halfwords
+           and subtract the low halfwords.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[31:16]        \n
+                 res[31:16] = val1[31:16] + val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((int32_t)x << 16) >> 16) - (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
+    s = ((((int32_t)x) >> 16) + (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned addition and subtraction with exchange.
+  \details This function enables you to exchange the two halfwords of the second operand,
+           add the high halfwords and subtract the low halfwords.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[31:16]        \n
+                 res[31:16] = val1[31:16] + val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((x << 16) >> 16) - ((y) >> 16)) & 0x0000FFFF;
+    s = (((x) >> 16) + ((y << 16) >> 16)) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed addition and subtraction with halved results.
+  \details This function enables you to exchange the two halfwords of one operand, perform one
+           signed 16-bit integer addition and one signed 16-bit subtraction, and halve the results.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the halved subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the halved addition of the low halfword in the second operand and the high
+                 halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  - val2[31:16]) >> 1        \n
+                 res[31:16] = (val1[31:16] + val2[15:0]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((((int32_t)x << 16) >> 16) - (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+    s = (((((int32_t)x) >> 16) + (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned addition and subtraction with halved results and exchange.
+  \details This function enables you to exchange the halfwords of the second operand,
+           add the high halfwords and subtract the low halfwords, halving the results.
+  \param [in]    x   first operand for the subtraction in the low halfword, and
+                     the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword, and
+                     the second operand for the addition in the low halfword.
+  \return        the halved subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the halved addition of the low halfword in the second operand and the high
+                 halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  - val2[31:16]) >> 1        \n
+                 res[31:16] = (val1[31:16] + val2[15:0]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((x << 16) >> 16) - ((y) >> 16)) >> 1) & 0x0000FFFF;
+    s = ((((x) >> 16) + ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed saturating subtraction and addition with exchange.
+  \details This function enables you to exchange the halfwords of one operand,
+           then subtract the high halfwords and add the low halfwords,
+           saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \param [in]    x   first operand for the addition in the low halfword,
+                     and the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword,
+                     and the second operand for the subtraction in the low halfword.
+  \return        the saturated addition of the low halfword of the first operand and the high
+                 halfword of the second operand, in the low halfword of the return value.\n
+                 the saturated subtraction of the low halfword of the second operand from the
+                 high halfword of the first operand, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[31:16]        \n
+                 res[31:16] = val1[31:16] - val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __SSAT(((((int32_t)x << 16) >> 16) + (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
+    s = __SSAT(((((int32_t)x) >> 16) - (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned saturating subtraction and addition with exchange.
+  \details This function enables you to exchange the halfwords of the second operand and perform
+           one unsigned 16-bit integer subtraction and one unsigned 16-bit addition, saturating
+           the results to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \param [in]    x   first operand for the addition in the low halfword,
+                     and the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword,
+                     and the second operand for the subtraction in the low halfword.
+  \return        the saturated addition of the low halfword of the first operand and the high
+                 halfword of the second operand, in the low halfword of the return value.\n
+                 the saturated subtraction of the low halfword of the second operand from the
+                 high halfword of the first operand, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[31:16]        \n
+                 res[31:16] = val1[31:16] - val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __IUSAT((((x << 16) >> 16) + ((y) >> 16)), 16) & 0x0000FFFF;
+    s = __IUSAT((((x) >> 16) - ((y << 16) >> 16)), 16) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit unsigned subtract and add with exchange.
+  \details This function enables you to exchange the halfwords of the second operand,
+           subtract the high halfwords and add the low halfwords.
+  \param [in]    x   first operand for the addition in the low halfword,
+                     and the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword,
+                     and the second operand for the subtraction in the low halfword.
+  \return        the addition of the low halfword of the first operand and the high
+                 halfword of the second operand, in the low halfword of the return value.\n
+                 the subtraction of the low halfword of the second operand from the
+                 high halfword of the first operand, in the high halfword of the return value.\n
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[31:16]        \n
+                 res[31:16] = val1[31:16] - val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((x << 16) >> 16) + ((y) >> 16)) & 0x0000FFFF;
+    s = (((x) >> 16) - ((y << 16) >> 16)) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed subtraction and addition with exchange.
+  \details This function enables you to exchange the two halfwords of one operand and perform one
+           16-bit integer subtraction and one 16-bit addition.
+  \param [in]    x   first operand for the addition in the low halfword, and the first operand
+                     for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword, and the second
+                     operand for the subtraction in the low halfword.
+  \return        the addition of the low halfword of the first operand and the high
+                 halfword of the second operand, in the low halfword of the return value.\n
+                 the subtraction of the low halfword of the second operand from the
+                 high halfword of the first operand, in the high halfword of the return value.\n
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[31:16]        \n
+                 res[31:16] = val1[31:16] - val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((int32_t)x << 16) >> 16) + (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
+    s = ((((int32_t)x) >> 16) - (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+
+/**
+  \brief   Dual 16-bit signed subtraction and addition with halved results.
+  \details This function enables you to exchange the two halfwords of one operand, perform one signed
+           16-bit integer subtraction and one signed 16-bit addition, and halve the results.
+  \param [in]    x   first operand for the addition in the low halfword,
+                     and the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword,
+                     and the second operand for the subtraction in the low halfword.
+  \return        the halved addition of the low halfword in the first operand and the
+                 high halfword in the second operand, in the low halfword of the return value.\n
+                 the halved subtraction of the low halfword in the second operand from the
+                 high halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  + val2[31:16]) >> 1        \n
+                 res[31:16] = (val1[31:16] - val2[15:0]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((((int32_t)x << 16) >> 16) + (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+    s = (((((int32_t)x) >> 16) - (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned subtraction and addition with halved results and exchange.
+  \details This function enables you to exchange the halfwords of the second operand,
+           subtract the high halfwords and add the low halfwords, halving the results.
+  \param [in]    x   first operand for the addition in the low halfword, and
+                     the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword, and
+                     the second operand for the subtraction in the low halfword.
+  \return        the halved addition of the low halfword in the first operand and the
+                 high halfword in the second operand, in the low halfword of the return value.\n
+                 the halved subtraction of the low halfword in the second operand from the
+                 high halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  + val2[31:16]) >> 1        \n
+                 res[31:16] = (val1[31:16] - val2[15:0]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((x << 16) >> 16) + ((y) >> 16)) >> 1) & 0x0000FFFF;
+    s = ((((x) >> 16) - ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
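+
+/*
+ * Usage sketch (illustrative only): the ASX/SAX helpers above treat each
+ * uint32_t as two packed 16-bit lanes, low halfword in bits [15:0] and high
+ * halfword in bits [31:16]. For example, with x = (100 << 16) | 7 and
+ * y = (3 << 16) | 20:
+ *
+ *   uint32_t r = __SSAX(x, y);   // low  = 7   + 3  = 10
+ *                                // high = 100 - 20 = 80
+ *                                // r == (80 << 16) | 10
+ */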
+
+/**
+  \brief   Dual 16-bit signed multiply with exchange returning difference.
+  \details This function enables you to perform two 16-bit signed multiplications, subtracting
+           one of the products from the other. The halfwords of the second operand are exchanged
+           before performing the arithmetic. This produces top * bottom and bottom * top multiplication.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \return        the difference of the products of the two 16-bit signed multiplications.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]       \n
+                 p2 = val1[31:16] * val2[15:0]        \n
+                 res[31:0] = p1 - p2
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMUSDX(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16))));
+}
+
+/**
+  \brief   Sum of dual 16-bit signed multiply with exchange.
+  \details This function enables you to perform two 16-bit signed multiplications with exchanged
+           halfwords of the second operand, adding the products together.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \return        the sum of the products of the two 16-bit signed multiplications with exchanged halfwords of the second operand.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]       \n
+                 p2 = val1[31:16] * val2[15:0]        \n
+                 res[31:0] = p1 + p2
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMUADX(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16))));
+}
+
+
+/**
+  \brief   Saturating add.
+  \details This function enables you to obtain the saturating add of two integers.
+  \param [in]    x   first summand of the saturating add operation.
+  \param [in]    y   second summand of the saturating add operation.
+  \return        the saturating addition of val1 and val2.
+  \remark
+                 res[31:0] = SAT(val1 + SAT(val2))
+ */
+__ALWAYS_STATIC_INLINE int32_t __QADD(int32_t x, int32_t y)
+{
+    int32_t result;
+
+    if (y >= 0) {
+        if ((int32_t)((uint32_t)x + (uint32_t)y) >= x) {
+            result = x + y;
+        } else {
+            result = 0x7FFFFFFF;
+        }
+    } else {
+        if ((int32_t)((uint32_t)x + (uint32_t)y) < x) {
+            result = x + y;
+        } else {
+            result = 0x80000000;
+        }
+    }
+
+    return result;
+}
+
+/**
+  \brief   Saturating subtract.
+  \details This function enables you to obtain the saturating subtraction of two integers.
+  \param [in]    x   first operand of the saturating subtraction (minuend).
+  \param [in]    y   second operand of the saturating subtraction (subtrahend).
+  \return        the saturating subtraction of val1 and val2.
+  \remark
+                 res[31:0] = SAT(val1 - SAT(val2))
+ */
+__ALWAYS_STATIC_INLINE int32_t __QSUB(int32_t x, int32_t y)
+{
+    int64_t tmp;
+    int32_t result;
+
+    tmp = (int64_t)x - (int64_t)y;
+
+    if (tmp > 0x7fffffff) {
+        tmp = 0x7fffffff;
+    } else if (tmp < (-2147483647 - 1)) {
+        tmp = -2147483647 - 1;
+    }
+
+    result = tmp;
+    return result;
+}
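+
+/*
+ * Usage sketch (illustrative only): __QADD/__QSUB clamp to the int32_t range
+ * instead of wrapping on overflow.
+ *
+ *   int32_t a = __QADD(0x7FFFFFF0, 0x100);   // saturates to 0x7FFFFFFF
+ *   int32_t b = __QSUB(INT32_MIN, 1);        // saturates to INT32_MIN
+ *   int32_t c = __QADD(100, 23);             // no saturation, returns 123
+ */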
+
+/**
+  \brief   Dual 16-bit signed multiply with single 32-bit accumulator.
+  \details This function enables you to perform two signed 16-bit multiplications,
+           adding both results to a 32-bit accumulate operand.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the product of each multiplication added to the accumulate value, as a 32-bit integer.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 res[31:0] = p1 + p2 + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMLAD(uint32_t x, uint32_t y, uint32_t sum)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
+                       (((int32_t)sum))));
+}
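+
+/*
+ * Usage sketch (illustrative only): __SMLAD is the usual building block for
+ * fixed-point dot products over packed int16_t data, two lanes per 32-bit
+ * word. The names src1, src2 and n below are assumptions for the example;
+ * n is assumed even and the arrays suitably aligned for 32-bit loads.
+ *
+ *   uint32_t acc = 0;
+ *   const uint32_t *p1 = (const uint32_t *)src1;
+ *   const uint32_t *p2 = (const uint32_t *)src2;
+ *   for (size_t i = 0; i < n / 2; i++) {
+ *       acc = __SMLAD(p1[i], p2[i], acc);   // acc += lo1*lo2 + hi1*hi2
+ *   }
+ */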
+
+/**
+  \brief   Pre-exchanged dual 16-bit signed multiply with single 32-bit accumulator.
+  \details This function enables you to perform two signed 16-bit multiplications with exchanged
+           halfwords of the second operand, adding both results to a 32-bit accumulate operand.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the product of each multiplication with exchanged halfwords of the second
+                 operand added to the accumulate value, as a 32-bit integer.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]     \n
+                 p2 = val1[31:16] * val2[15:0]      \n
+                 res[31:0] = p1 + p2 + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMLADX(uint32_t x, uint32_t y, uint32_t sum)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       (((int32_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply subtract with 32-bit accumulate.
+  \details This function enables you to perform two 16-bit signed multiplications, take the
+           difference of the products, subtracting the high halfword product from the low
+           halfword product, and add the difference to a 32-bit accumulate operand.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the difference of the product of each multiplication, added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]       \n
+                 p2 = val1[31:16] * val2[31:16]      \n
+                 res[31:0] = p1 - p2 + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMLSD(uint32_t x, uint32_t y, uint32_t sum)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
+                       (((int32_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with exchange subtract with 32-bit accumulate.
+  \details This function enables you to exchange the halfwords in the second operand, then perform two 16-bit
+           signed multiplications. The difference of the products is added to a 32-bit accumulate operand.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the difference of the product of each multiplication, added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]     \n
+                 p2 = val1[31:16] * val2[15:0]      \n
+                 res[31:0] = p1 - p2 + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMLSDX(uint32_t x, uint32_t y, uint32_t sum)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       (((int32_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with single 64-bit accumulator.
+  \details This function enables you to perform two signed 16-bit multiplications, adding both results
+           to a 64-bit accumulate operand. Overflow is only possible as a result of the 64-bit addition.
+           This overflow is not detected if it occurs. Instead, the result wraps around modulo 2^64.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the product of each multiplication added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 sum = p1 + p2 + val3[63:32][31:0]  \n
+                 res[63:32] = sum[63:32]            \n
+                 res[31:0]  = sum[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint64_t __SMLALD(uint32_t x, uint32_t y, uint64_t sum)
+{
+    return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
+                       (((uint64_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with exchange with single 64-bit accumulator.
+  \details This function enables you to exchange the halfwords of the second operand, and perform two
+           signed 16-bit multiplications, adding both results to a 64-bit accumulate operand. Overflow
+           is only possible as a result of the 64-bit addition. This overflow is not detected if it occurs.
+           Instead, the result wraps around modulo 2^64.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the product of each multiplication added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]     \n
+                 p2 = val1[31:16] * val2[15:0]      \n
+                 sum = p1 + p2 + val3[63:32][31:0]  \n
+                 res[63:32] = sum[63:32]            \n
+                 res[31:0]  = sum[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint64_t __SMLALDX(uint32_t x, uint32_t y, uint64_t sum)
+{
+    return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       (((uint64_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply subtract with 64-bit accumulate.
+  \details This function enables you to perform two 16-bit signed multiplications, take the difference
+           of the products, subtracting the high halfword product from the low halfword product, and add the
+           difference to a 64-bit accumulate operand. Overflow cannot occur during the multiplications or the
+           subtraction. Overflow can occur as a result of the 64-bit addition, and this overflow is not
+           detected. Instead, the result wraps around modulo 2^64.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the difference of the product of each multiplication, added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 res[63:32][31:0] = p1 - p2 + val3[63:32][31:0]
+ */
+__ALWAYS_STATIC_INLINE uint64_t __SMLSLD(uint32_t x, uint32_t y, uint64_t sum)
+{
+    return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
+                       (((uint64_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with exchange subtract with 64-bit accumulate.
+  \details This function enables you to exchange the halfwords of the second operand, perform two 16-bit multiplications,
+           adding the difference of the products to a 64-bit accumulate operand. Overflow cannot occur during the
+           multiplications or the subtraction. Overflow can occur as a result of the 64-bit addition, and this overflow
+           is not detected. Instead, the result wraps around modulo 2^64.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the difference of the product of each multiplication, added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]      \n
+                 p2 = val1[31:16] * val2[15:0]       \n
+                 res[63:32][31:0] = p1 - p2 + val3[63:32][31:0]
+ */
+__ALWAYS_STATIC_INLINE uint64_t __SMLSLDX(uint32_t x, uint32_t y, uint64_t sum)
+{
+    return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       (((uint64_t)sum))));
+}
+
+/**
+  \brief   32-bit signed multiply with 32-bit truncated accumulator.
+  \details This function enables you to perform a signed 32-bit multiplication, adding the most
+           significant 32 bits of the 64-bit result to a 32-bit accumulate operand.
+  \param [in]    x   first operand for multiplication.
+  \param [in]    y   second operand for multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the most significant 32 bits of the product, added to the accumulate value, as a 32-bit integer.
+  \remark
+                 p = val1 * val2      \n
+                 res[31:0] = p[63:32] + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMMLA(int32_t x, int32_t y, int32_t sum)
+{
+    return (uint32_t)((int32_t)((int64_t)((int64_t)x * (int64_t)y) >> 32) + sum);
+}
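+
+/*
+ * Worked example (illustrative only): __SMMLA keeps only the upper 32 bits of
+ * the 64-bit product before accumulating, the usual pattern for Q31
+ * fixed-point multiply-accumulate.
+ *
+ *   __SMMLA(0x40000000, 0x40000000, 5);
+ *   // product = 2^30 * 2^30 = 2^60; high word = 2^60 >> 32 = 0x10000000;
+ *   // result  = 0x10000000 + 5
+ */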
+
+/**
+  \brief   Sum of dual 16-bit signed multiply.
+  \details This function enables you to perform two 16-bit signed multiplications, adding the products together.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \return        the sum of the products of the two 16-bit signed multiplications.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 res[31:0] = p1 + p2
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMUAD(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply returning difference.
+  \details This function enables you to perform two 16-bit signed multiplications, taking the difference
+           of the products by subtracting the high halfword product from the low halfword product.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \return        the difference of the products of the two 16-bit signed multiplications.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 res[31:0] = p1 - p2
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMUSD(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16))));
+}
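+
+/*
+ * Usage sketch (illustrative only): a common use of the SMUSD/SMUADX pair is
+ * one step of a packed complex multiply, real part in the low halfword and
+ * imaginary part in the high halfword of each operand (a and b below are
+ * assumed packed Q15 values).
+ *
+ *   // a = (ai << 16) | (uint16_t)ar,  b = (bi << 16) | (uint16_t)br
+ *   int32_t re = (int32_t)__SMUSD(a, b);    // ar*br - ai*bi
+ *   int32_t im = (int32_t)__SMUADX(a, b);   // ar*bi + ai*br
+ */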
+
+/**
+  \brief   Dual extracted 8-bit to 16-bit signed addition.
+  \details This function enables you to extract two 8-bit values from the second operand (at bit positions
+           [7:0] and [23:16]), sign-extend them to 16 bits each, and add the results to the first operand.
+  \param [in]    x   values added to the sign-extended to 16-bit values.
+  \param [in]    y   two 8-bit values to be extracted and sign-extended.
+  \return        the addition of val1 and val2, where the 8-bit values in val2[7:0] and
+                 val2[23:16] have been extracted and sign-extended prior to the addition.
+  \remark
+                 res[15:0]  = val1[15:0] + SignExtended(val2[7:0])      \n
+                 res[31:16] = val1[31:16] + SignExtended(val2[23:16])
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SXTAB16(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)((((((int32_t)y << 24) >> 24) + (((int32_t)x << 16) >> 16)) & (int32_t)0x0000FFFF) |
+                       (((((int32_t)y <<  8) >>  8)  + (((int32_t)x >> 16) << 16)) & (int32_t)0xFFFF0000)));
+}
+
+/**
+  \brief   Dual extracted 8-bit to 16-bit unsigned addition.
+  \details This function enables you to extract two 8-bit values from one operand, zero-extend
+           them to 16 bits each, and add the results to two 16-bit values from another operand.
+  \param [in]    x   values added to the zero-extended to 16-bit values.
+  \param [in]    y   two 8-bit values to be extracted and zero-extended.
+  \return        the addition of val1 and val2, where the 8-bit values in val2[7:0] and
+                 val2[23:16] have been extracted and zero-extended prior to the addition.
+  \remark
+                 res[15:0]  = ZeroExt(val2[7:0]   to 16 bits) + val1[15:0]      \n
+                 res[31:16] = ZeroExt(val2[23:16] to 16 bits) + val1[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UXTAB16(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((y << 24) >> 24) + ((x << 16) >> 16)) & 0x0000FFFF) |
+                       ((((y <<  8) >>  8) + ((x >> 16) << 16)) & 0xFFFF0000)));
+}
+
+/**
+  \brief   Dual extract 8-bits and sign extend each to 16-bits.
+  \details This function enables you to extract two 8-bit values from an operand and sign-extend them to 16 bits each.
+  \param [in]    x   two 8-bit values in val[7:0] and val[23:16] to be sign-extended.
+  \return        the 8-bit values sign-extended to 16-bit values.\n
+                 sign-extended value of val[7:0] in the low halfword of the return value.\n
+                 sign-extended value of val[23:16] in the high halfword of the return value.
+  \remark
+                 res[15:0]  = SignExtended(val[7:0])       \n
+                 res[31:16] = SignExtended(val[23:16])
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SXTB16(uint32_t x)
+{
+    return ((uint32_t)(((((int32_t)x << 24) >> 24) & (int32_t)0x0000FFFF) |
+                       ((((int32_t)x <<  8) >>  8) & (int32_t)0xFFFF0000)));
+}
+
+/**
+  \brief   Dual extract 8-bits and zero-extend to 16-bits.
+  \details This function enables you to extract two 8-bit values from an operand and zero-extend them to 16 bits each.
+  \param [in]    x   two 8-bit values in val[7:0] and val[23:16] to be zero-extended.
+  \return        the 8-bit values zero-extended to 16-bit values.\n
+                 zero-extended value of val[7:0] in the low halfword of the return value.\n
+                 zero-extended value of val[23:16] in the high halfword of the return value.
+  \remark
+                 res[15:0]  = ZeroExtended(val[7:0])       \n
+                 res[31:16] = ZeroExtended(val[23:16])
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UXTB16(uint32_t x)
+{
+    return ((uint32_t)((((x << 24) >> 24) & 0x0000FFFF) |
+                       (((x <<  8) >>  8) & 0xFFFF0000)));
+}
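+
+/*
+ * Usage sketch (illustrative only): __UXTB16/__SXTB16 widen the bytes at
+ * positions [7:0] and [23:16] into two 16-bit lanes, which is handy when
+ * unpacking packed 8-bit data.
+ *
+ *   uint32_t px   = 0x80402010;          // four packed bytes
+ *   uint32_t even = __UXTB16(px);        // 0x00400010 (bytes 0 and 2)
+ *   uint32_t odd  = __UXTB16(px >> 8);   // 0x00800020 (bytes 1 and 3)
+ */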
+
+#endif /* _CSI_RV32_GCC_H_ */

+ 3271 - 0
lib/sec_library/include/core/csi_rv64_gcc.h

@@ -0,0 +1,3271 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     csi_rv64_gcc.h
+ * @brief    CSI Header File for GCC.
+ * @version  V1.0
+ * @date     01. Sep 2018
+ ******************************************************************************/
+
+#ifndef _CSI_RV64_GCC_H_
+#define _CSI_RV64_GCC_H_
+
+#include <stdlib.h>
+
+#ifndef __ASM
+#define __ASM                   __asm     /*!< asm keyword for GNU Compiler */
+#endif
+
+#ifndef __INLINE
+#define __INLINE                inline    /*!< inline keyword for GNU Compiler */
+#endif
+
+#ifndef __ALWAYS_STATIC_INLINE
+#define __ALWAYS_STATIC_INLINE  __attribute__((always_inline)) static inline
+#endif
+
+#ifndef __STATIC_INLINE
+#define __STATIC_INLINE         static inline
+#endif
+
+#ifndef __NO_RETURN
+#define __NO_RETURN             __attribute__((__noreturn__))
+#endif
+
+#ifndef __USED
+#define __USED                  __attribute__((used))
+#endif
+
+#ifndef __WEAK
+#define __WEAK                  __attribute__((weak))
+#endif
+
+#ifndef __PACKED
+#define __PACKED                __attribute__((packed, aligned(1)))
+#endif
+
+#ifndef __PACKED_STRUCT
+#define __PACKED_STRUCT         struct __attribute__((packed, aligned(1)))
+#endif
+
+#ifndef __PACKED_UNION
+#define __PACKED_UNION          union __attribute__((packed, aligned(1)))
+#endif
+
+
+/* ###########################  Core Function Access  ########################### */
+/** \ingroup  CSI_Core_FunctionInterface
+    \defgroup CSI_Core_RegAccFunctions CSI Core Register Access Functions
+  @{
+ */
+/**
+  \brief   Enable IRQ Interrupts
+  \details Enables machine-mode IRQ interrupts by setting the MIE bit in MSTATUS and the enable bits in MIE.
+           Can only be executed in Privileged modes.
+ */
+__ALWAYS_STATIC_INLINE void __enable_irq(void)
+{
+    __ASM volatile("csrs mstatus, 8");
+    /* enable M-mode software/timer/external interrupts; a0 is declared as clobbered */
+    __ASM volatile("li a0, 0x888\n\t"
+                   "csrs mie, a0" ::: "a0");
+
+}
+
+/**
+  \brief   Enable supervisor IRQ Interrupts
+  \details Enables supervisor IRQ interrupts by setting the SIE bit in SSTATUS and the enable bits in SIE.
+           Can only be executed in Privileged modes.
+ */
+__ALWAYS_STATIC_INLINE void __enable_supervisor_irq(void)
+{
+    __ASM volatile("csrs sstatus, 2");
+    /* enable S-mode software/timer/external interrupts; a0 is declared as clobbered */
+    __ASM volatile("li a0, 0x222\n\t"
+                   "csrs sie, a0" ::: "a0");
+}
+
+/**
+  \brief   Disable IRQ Interrupts
+  \details Disables machine-mode IRQ interrupts by clearing the MIE bit in MSTATUS.
+  Can only be executed in Privileged modes.
+ */
+__ALWAYS_STATIC_INLINE void __disable_irq(void)
+{
+    __ASM volatile("csrc mstatus, 8");
+}
+
+/**
+  \brief   Disable supervisor IRQ Interrupts
+  \details Disables supervisor IRQ interrupts by clearing the SIE bit in SSTATUS.
+  Can only be executed in Privileged modes.
+ */
+__ALWAYS_STATIC_INLINE void __disable_supervisor_irq(void)
+{
+    __ASM volatile("csrc sstatus, 2");
+}
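+
+/*
+ * Usage sketch (illustrative only): guarding a short non-reentrant sequence
+ * in M-mode. Note these helpers do not save or restore the previous MIE
+ * state, so the pattern unconditionally re-enables interrupts afterwards;
+ * shared_counter is a hypothetical variable.
+ *
+ *   __disable_irq();
+ *   shared_counter++;
+ *   __enable_irq();
+ */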
+
+/**
+  \brief   Get MXSTATUS
+  \details Returns the content of the MXSTATUS Register.
+  \return               MXSTATUS Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MXSTATUS(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mxstatus" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get SXSTATUS
+  \details Returns the content of the SXSTATUS Register.
+  \return               SXSTATUS Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_SXSTATUS(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, sxstatus" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get CPU WORK MODE
+  \details Returns CPU WORK MODE.
+  \return  CPU WORK MODE
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_CPU_WORK_MODE(void)
+{
+    uint64_t result;
+    __ASM volatile("csrr %0, sxstatus" : "=r"(result));
+    return ((result >> 30U) & 0x3U);
+}
+
+/**
+  \brief   Get SATP
+  \details Returns the content of the SATP Register.
+  \return               SATP Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_SATP(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, satp" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set SATP
+  \details Writes the given value to the SATP Register.
+  \param [in]    satp  SATP Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_SATP(uint64_t satp)
+{
+    __ASM volatile("csrw satp, %0" : : "r"(satp));
+}
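+
+/*
+ * Usage sketch (illustrative only): on RV64, satp packs MODE[63:60],
+ * ASID[59:44] and the root page-table PPN[43:0]; a page-table switch is
+ * normally followed by an sfence.vma. root_pt_phys and asid are hypothetical
+ * values for the example.
+ *
+ *   uint64_t ppn = root_pt_phys >> 12;
+ *   __set_SATP((8ULL << 60) | ((uint64_t)asid << 44) | ppn);   // 8 = Sv39
+ *   __ASM volatile("sfence.vma");   // flush stale translations
+ */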
+
+/**
+  \brief   Set MEPC
+  \details Writes the given value to the MEPC Register.
+  \param [in]    mepc  MEPC Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MEPC(uint64_t mepc)
+{
+    __ASM volatile("csrw mepc, %0" : : "r"(mepc));
+}
+
+
+/**
+  \brief   Set MXSTATUS
+  \details Writes the given value to the MXSTATUS Register.
+  \param [in]    mxstatus  MXSTATUS Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MXSTATUS(uint64_t mxstatus)
+{
+    __ASM volatile("csrw mxstatus, %0" : : "r"(mxstatus));
+}
+
+/**
+  \brief   Get MSTATUS
+  \details Returns the content of the MSTATUS Register.
+  \return               MSTATUS Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MSTATUS(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mstatus" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MSTATUS
+  \details Writes the given value to the MSTATUS Register.
+  \param [in]    mstatus  MSTATUS Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MSTATUS(uint64_t mstatus)
+{
+    __ASM volatile("csrw mstatus, %0" : : "r"(mstatus));
+}
+
+/**
+  \brief   Get MCOR
+  \details Returns the content of the MCOR Register.
+  \return               MCOR Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MCOR(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mcor" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MCOR
+  \details Writes the given value to the MCOR Register.
+  \param [in]    mcor  MCOR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MCOR(uint64_t mcor)
+{
+    __ASM volatile("csrw mcor, %0" : : "r"(mcor));
+}
+
+/**
+  \brief   Get MHCR
+  \details Returns the content of the MHCR Register.
+  \return               MHCR Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MHCR(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mhcr" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MHCR
+  \details Writes the given value to the MHCR Register.
+  \param [in]    mhcr  MHCR Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MHCR(uint64_t mhcr)
+{
+    __ASM volatile("csrw mhcr, %0" : : "r"(mhcr));
+}
+
+/**
+  \brief   Get MHINT
+  \details Returns the content of the MHINT Register.
+  \return               MHINT Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MHINT(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mhint" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MHINT
+  \details Writes the given value to the MHINT Register.
+  \param [in]    mhint  MHINT Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MHINT(uint64_t mhint)
+{
+    __ASM volatile("csrw mhint, %0" : : "r"(mhint));
+}
+
+/**
+  \brief   Get MCCR2
+  \details Returns the content of the MCCR2 Register.
+  \return               MCCR2 Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MCCR2(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mccr2" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MCCR2
+  \details Writes the given value to the MCCR2 Register.
+  \param [in]    mccr2  MCCR2 Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MCCR2(uint64_t mccr2)
+{
+    __ASM volatile("csrw mccr2, %0" : : "r"(mccr2));
+}
+
+/**
+  \brief   Get MISA Register
+  \details Returns the content of the MISA Register.
+  \return               MISA Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MISA(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, misa" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MISA
+  \details Writes the given value to the MISA Register.
+  \param [in]    misa  MISA Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MISA(uint64_t misa)
+{
+    __ASM volatile("csrw misa, %0" : : "r"(misa));
+}
+
+/**
+  \brief   Get MIE Register
+  \details Returns the content of the MIE Register.
+  \return               MIE Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MIE(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mie" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MIE
+  \details Writes the given value to the MIE Register.
+  \param [in]    mie  MIE Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MIE(uint64_t mie)
+{
+    __ASM volatile("csrw mie, %0" : : "r"(mie));
+}
+
+/**
+  \brief   Get MTVEC Register
+  \details Returns the content of the MTVEC Register.
+  \return               MTVEC Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MTVEC(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mtvec" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MTVEC
+  \details Writes the given value to the MTVEC Register.
+  \param [in]    mtvec  MTVEC Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MTVEC(uint64_t mtvec)
+{
+    __ASM volatile("csrw mtvec, %0" : : "r"(mtvec));
+}
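+
+/*
+ * Usage sketch (illustrative only): the low two bits of mtvec select the trap
+ * mode (0 = direct, 1 = vectored) and the base address must be suitably
+ * aligned. trap_entry is a hypothetical handler symbol.
+ *
+ *   extern void trap_entry(void);
+ *   __set_MTVEC((uint64_t)(uintptr_t)trap_entry);   // direct mode
+ */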
+
+/**
+  \brief   Set MTVT
+  \details Writes the given value to the MTVT Register.
+  \param [in]    mtvt  MTVT Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MTVT(uint64_t mtvt)
+{
+    __ASM volatile("csrw mtvt, %0" : : "r"(mtvt));
+}
+
+/**
+  \brief   Get MTVT Register
+  \details Returns the content of the MTVT Register.
+  \return               MTVT Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MTVT(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mtvt" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get SP
+  \details Returns the content of the SP Register.
+  \return               SP Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_SP(void)
+{
+    uint64_t result;
+
+    __ASM volatile("mv %0, sp" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set SP
+  \details Writes the given value to the SP Register.
+  \param [in]    sp  SP Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_SP(uint64_t sp)
+{
+    __ASM volatile("mv sp, %0" : : "r"(sp): "sp");
+}
+
+/**
+  \brief   Get MSCRATCH Register
+  \details Returns the content of the MSCRATCH Register.
+  \return               MSCRATCH Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MSCRATCH(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mscratch" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MSCRATCH
+  \details Writes the given value to the MSCRATCH Register.
+  \param [in]    mscratch  MSCRATCH Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MSCRATCH(uint64_t mscratch)
+{
+    __ASM volatile("csrw mscratch, %0" : : "r"(mscratch));
+}
+
+/**
+  \brief   Get MCAUSE Register
+  \details Returns the content of the MCAUSE Register.
+  \return               MCAUSE Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MCAUSE(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mcause" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get SCAUSE Register
+  \details Returns the content of the SCAUSE Register.
+  \return               SCAUSE Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_SCAUSE(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, scause" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MNXTI Register
+  \details Returns the content of the MNXTI Register.
+  \return               MNXTI Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MNXTI(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mnxti" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MNXTI
+  \details Writes the given value to the MNXTI Register.
+  \param [in]    mnxti  MNXTI Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MNXTI(uint64_t mnxti)
+{
+    __ASM volatile("csrw mnxti, %0" : : "r"(mnxti));
+}
+
+/**
+  \brief   Get MINTSTATUS Register
+  \details Returns the content of the MINTSTATUS Register.
+  \return               MINTSTATUS Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MINTSTATUS(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mintstatus" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MTVAL Register
+  \details Returns the content of the MTVAL Register.
+  \return               MTVAL Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MTVAL(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mtval" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MIP Register
+  \details Returns the content of the MIP Register.
+  \return               MIP Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MIP(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mip" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Set MIP
+  \details Writes the given value to the MIP Register.
+  \param [in]    mip  MIP Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_MIP(uint64_t mip)
+{
+    __ASM volatile("csrw mip, %0" : : "r"(mip));
+}
+
+/**
+  \brief   Get MCYCLE Register
+  \details Returns the content of the MCYCLE Register.
+  \return               MCYCLE Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MCYCLE(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mcycle" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MCYCLEH Register
+  \details Returns the content of the MCYCLEH Register.
+  \return               MCYCLEH Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MCYCLEH(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mcycleh" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MINSTRET Register
+  \details Returns the content of the MINSTRET Register.
+  \return               MINSTRET Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MINSTRET(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, minstret" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MINSTRETH Register
+  \details Returns the content of the MINSTRETH Register.
+  \return               MINSTRETH Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MINSTRETH(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, minstreth" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MVENDORID Register
+  \details Returns the content of the MVENDORID Register.
+  \return               MVENDORID Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MVENDORID(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mvendorid" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MARCHID Register
+  \details Returns the content of the MARCHID Register.
+  \return               MARCHID Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MARCHID(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, marchid" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MIMPID Register
+  \details Returns the content of the MIMPID Register.
+  \return               MIMPID Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MIMPID(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mimpid" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get MHARTID Register
+  \details Returns the content of the MHARTID Register.
+  \return               MHARTID Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MHARTID(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, mhartid" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get PMPCFGx Register
+  \details Returns the content of the PMPCFGx Register.
+  \return               PMPCFGx Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPCFG0(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpcfg0" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPCFG1(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpcfg1" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPCFG2(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpcfg2" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPCFG3(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpcfg3" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get PMPxCFG Register by index
+  \details Returns the content of the PMPxCFG Register.
+  \param [in]    idx    PMP region index
+  \return               PMPxCFG Register value
+ */
+__STATIC_INLINE uint8_t __get_PMPxCFG(uint64_t idx)
+{
+    uint64_t pmpcfgx = 0;
+
+    if (idx < 4) {
+        pmpcfgx = __get_PMPCFG0();
+    } else if (idx >= 4 && idx < 8) {
+        idx -= 4;
+        pmpcfgx = __get_PMPCFG1();
+    } else if (idx >= 8 && idx < 12) {
+        idx -= 8;
+        pmpcfgx = __get_PMPCFG2();
+    } else if (idx >= 12 && idx < 16) {
+        idx -= 12;
+        pmpcfgx = __get_PMPCFG3();
+    } else {
+        return 0;
+    }
+
+    return (uint8_t)((pmpcfgx & ((uint64_t)0xFF << (idx << 3))) >> (idx << 3));
+}
+
+/**
+  \brief   Set PMPCFGx
+  \details Writes the given value to the PMPCFGx Register.
+  \param [in]    pmpcfg  PMPCFGx Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_PMPCFG0(uint64_t pmpcfg)
+{
+    __ASM volatile("csrw pmpcfg0, %0" : : "r"(pmpcfg));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPCFG1(uint64_t pmpcfg)
+{
+    __ASM volatile("csrw pmpcfg1, %0" : : "r"(pmpcfg));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPCFG2(uint64_t pmpcfg)
+{
+    __ASM volatile("csrw pmpcfg2, %0" : : "r"(pmpcfg));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPCFG3(uint64_t pmpcfg)
+{
+    __ASM volatile("csrw pmpcfg3, %0" : : "r"(pmpcfg));
+}
+
+/**
+  \brief   Set PMPxCFG by index
+  \details Writes the given value to the PMPxCFG Register.
+  \param [in]    idx      PMPx region index
+  \param [in]    pmpxcfg  PMPxCFG Register value to set
+ */
+__STATIC_INLINE void __set_PMPxCFG(uint64_t idx, uint8_t pmpxcfg)
+{
+    uint64_t pmpcfgx = 0;
+
+    if (idx < 4) {
+        pmpcfgx = __get_PMPCFG0();
+        pmpcfgx = (pmpcfgx & ~((uint64_t)0xFF << (idx << 3))) | ((uint64_t)(pmpxcfg) << (idx << 3));
+        __set_PMPCFG0(pmpcfgx);
+    } else if (idx >= 4 && idx < 8) {
+        idx -= 4;
+        pmpcfgx = __get_PMPCFG1();
+        pmpcfgx = (pmpcfgx & ~((uint64_t)0xFF << (idx << 3))) | ((uint64_t)(pmpxcfg) << (idx << 3));
+        __set_PMPCFG1(pmpcfgx);
+    } else if (idx >= 8 && idx < 12) {
+        idx -= 8;
+        pmpcfgx = __get_PMPCFG2();
+        pmpcfgx = (pmpcfgx & ~((uint64_t)0xFF << (idx << 3))) | ((uint64_t)(pmpxcfg) << (idx << 3));
+        __set_PMPCFG2(pmpcfgx);
+    } else if (idx >= 12 && idx < 16) {
+        idx -= 12;
+        pmpcfgx = __get_PMPCFG3();
+        pmpcfgx = (pmpcfgx & ~((uint64_t)0xFF << (idx << 3))) | ((uint64_t)(pmpxcfg) << (idx << 3));
+        __set_PMPCFG3(pmpcfgx);
+    } else {
+        return;
+    }
+}
+
+/**
+  \brief   Get PMPADDRx Register
+  \details Returns the content of the PMPADDRx Register.
+  \return               PMPADDRx Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR0(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr0" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR1(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr1" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR2(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr2" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR3(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr3" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR4(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr4" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR5(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr5" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR6(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr6" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR7(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr7" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR8(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr8" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR9(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr9" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR10(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr10" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR11(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr11" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR12(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr12" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR13(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr13" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR14(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr14" : "=r"(result));
+    return (result);
+}
+
+__ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR15(void)
+{
+    uint64_t result;
+
+    __ASM volatile("csrr %0, pmpaddr15" : "=r"(result));
+    return (result);
+}
+
+/**
+  \brief   Get PMPADDRx Register by index
+  \details Returns the content of the PMPADDRx Register.
+  \param [in]    idx    PMP region index
+  \return               PMPADDRx Register value
+ */
+__STATIC_INLINE uint64_t __get_PMPADDRx(uint64_t idx)
+{
+    switch (idx) {
+        case 0:
+            return __get_PMPADDR0();
+
+        case 1:
+            return __get_PMPADDR1();
+
+        case 2:
+            return __get_PMPADDR2();
+
+        case 3:
+            return __get_PMPADDR3();
+
+        case 4:
+            return __get_PMPADDR4();
+
+        case 5:
+            return __get_PMPADDR5();
+
+        case 6:
+            return __get_PMPADDR6();
+
+        case 7:
+            return __get_PMPADDR7();
+
+        case 8:
+            return __get_PMPADDR8();
+
+        case 9:
+            return __get_PMPADDR9();
+
+        case 10:
+            return __get_PMPADDR10();
+
+        case 11:
+            return __get_PMPADDR11();
+
+        case 12:
+            return __get_PMPADDR12();
+
+        case 13:
+            return __get_PMPADDR13();
+
+        case 14:
+            return __get_PMPADDR14();
+
+        case 15:
+            return __get_PMPADDR15();
+
+        default:
+            return 0;
+    }
+}
+
+/**
+  \brief   Set PMPADDRx
+  \details Writes the given value to the PMPADDRx Register.
+  \param [in]    pmpaddr  PMPADDRx Register value to set
+ */
+__ALWAYS_STATIC_INLINE void __set_PMPADDR0(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr0, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR1(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr1, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR2(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr2, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR3(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr3, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR4(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr4, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR5(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr5, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR6(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr6, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR7(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr7, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR8(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr8, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR9(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr9, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR10(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr10, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR11(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr11, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR12(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr12, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR13(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr13, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR14(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr14, %0" : : "r"(pmpaddr));
+}
+
+__ALWAYS_STATIC_INLINE void __set_PMPADDR15(uint64_t pmpaddr)
+{
+    __ASM volatile("csrw pmpaddr15, %0" : : "r"(pmpaddr));
+}
+
+/**
+  \brief   Set PMPADDRx by index
+  \details Writes the given value to the PMPADDRx Register.
+  \param [in]    idx      PMP region index
+  \param [in]    pmpaddr  PMPADDRx Register value to set
+ */
+__STATIC_INLINE void __set_PMPADDRx(uint64_t idx, uint64_t pmpaddr)
+{
+    switch (idx) {
+        case 0:
+            __set_PMPADDR0(pmpaddr);
+            break;
+
+        case 1:
+            __set_PMPADDR1(pmpaddr);
+            break;
+
+        case 2:
+            __set_PMPADDR2(pmpaddr);
+            break;
+
+        case 3:
+            __set_PMPADDR3(pmpaddr);
+            break;
+
+        case 4:
+            __set_PMPADDR4(pmpaddr);
+            break;
+
+        case 5:
+            __set_PMPADDR5(pmpaddr);
+            break;
+
+        case 6:
+            __set_PMPADDR6(pmpaddr);
+            break;
+
+        case 7:
+            __set_PMPADDR7(pmpaddr);
+            break;
+
+        case 8:
+            __set_PMPADDR8(pmpaddr);
+            break;
+
+        case 9:
+            __set_PMPADDR9(pmpaddr);
+            break;
+
+        case 10:
+            __set_PMPADDR10(pmpaddr);
+            break;
+
+        case 11:
+            __set_PMPADDR11(pmpaddr);
+            break;
+
+        case 12:
+            __set_PMPADDR12(pmpaddr);
+            break;
+
+        case 13:
+            __set_PMPADDR13(pmpaddr);
+            break;
+
+        case 14:
+            __set_PMPADDR14(pmpaddr);
+            break;
+
+        case 15:
+            __set_PMPADDR15(pmpaddr);
+            break;
+
+        default:
+            return;
+    }
+}
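+
+/*
+ * Illustrative sketch (not part of the original API): program one PMP address
+ * register with a NAPOT-encoded region using __set_PMPADDRx(). The base/size
+ * values are caller-supplied assumptions (base must be size-aligned, size a
+ * power of two >= 8 bytes); the matching pmpcfg entry is assumed to be
+ * configured elsewhere.
+ */
+__STATIC_INLINE void __example_pmp_set_napot(uint64_t idx, uint64_t base, uint64_t size)
+{
+    /* NAPOT encoding: pmpaddr = (base | (size/2 - 1)) >> 2 */
+    __set_PMPADDRx(idx, (base | ((size >> 1) - 1U)) >> 2);
+}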
+
+/**
+  \brief   Set MEDELEG Register
+  \details Writes the given value to the MEDELEG Register.
+ */
+__ALWAYS_STATIC_INLINE void __set_MEDELEG(uint64_t x)
+{
+    asm volatile("csrw medeleg, %0"::"r"(x));
+}
+
+/**
+  \brief   Get MEDELEG Register
+  \details Returns the content of the MEDELEG Register.
+  \return               MEDELEG Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MEDELEG(void)
+{
+    uint64_t x;
+    asm volatile("csrr %0, medeleg":"=r"(x));
+    return x;
+}
+
+/**
+  \brief   Set MIDELEG Register
+  \details Writes the given value to the MIDELEG Register.
+ */
+__ALWAYS_STATIC_INLINE void __set_MIDELEG(uint64_t x)
+{
+    asm volatile("csrw mideleg, %0"::"r"(x));
+}
+
+/**
+  \brief   Get MIDELEG Register
+  \details Returns the content of the MIDELEG Register.
+  \return               MIDELEG Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_MIDELEG(void)
+{
+    uint64_t x;
+    asm volatile("csrr %0, mideleg":"=r"(x));
+    return x;
+}
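+
+/*
+ * Illustrative sketch (not part of the original API): delegate the supervisor
+ * software/timer/external interrupts and the common S/U-mode exceptions to
+ * S-mode. Bit positions follow the RISC-V privileged specification; the exact
+ * delegation set is platform policy, not something this header mandates.
+ */
+__STATIC_INLINE void __example_delegate_to_smode(void)
+{
+    __set_MIDELEG((1UL << 1) | (1UL << 5) | (1UL << 9));    /* SSIP, STIP, SEIP */
+    __set_MEDELEG((1UL << 8) |                               /* ecall from U-mode */
+                  (1UL << 12) | (1UL << 13) | (1UL << 15));  /* page faults */
+}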
+
+/**
+  \brief   Set SSTATUS Register
+  \details Writes the given value to the SSTATUS Register.
+ */
+__ALWAYS_STATIC_INLINE void __set_SSTATUS(uint64_t x)
+{
+    asm volatile("csrw sstatus, %0"::"r"(x));
+}
+
+/**
+  \brief   Get SSTATUS Register
+  \details Returns the content of the SSTATUS Register.
+  \return               SSTATUS Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_SSTATUS(void)
+{
+    uint64_t x;
+    asm volatile("csrr %0, sstatus":"=r"(x));
+    return x;
+}
+
+/**
+  \brief   Set SXSTATUS Register
+  \details Writes the given value to the SXSTATUS Register.
+ */
+__ALWAYS_STATIC_INLINE void __set_SXSTATUS(uint64_t x)
+{
+    asm volatile("csrw sxstatus, %0"::"r"(x));
+}
+
+/**
+  \brief   Get SXSTATUS Register
+  \details Returns the content of the SXSTATUS Register.
+  \return               SXSTATUS Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_SXSTATUS(void)
+{
+    uint64_t x;
+    asm volatile("csrr %0, sxstatus":"=r"(x));
+    return x;
+}
+
+/**
+  \brief   Set SIE Register
+  \details Writes the given value to the SIE Register.
+ */
+__ALWAYS_STATIC_INLINE void __set_SIE(uint64_t x)
+{
+    asm volatile("csrw sie, %0"::"r"(x));
+}
+
+/**
+  \brief   Get SIE Register
+  \details Returns the content of the SIE Register.
+  \return               SIE Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_SIE(void)
+{
+    uint64_t x;
+    asm volatile("csrr %0, sie":"=r"(x));
+    return x;
+}
+
+/**
+  \brief   Set STVEC Register
+  \details Writes the given value to the STVEC Register.
+ */
+__ALWAYS_STATIC_INLINE void __set_STVEC(uint64_t x)
+{
+    asm volatile("csrw stvec, %0"::"r"(x));
+}
+
+/**
+  \brief   Get STVEC Register
+  \details Returns the content of the STVEC Register.
+  \return               STVEC Register value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __get_STVEC(void)
+{
+    uint64_t x;
+    asm volatile("csrr %0, stvec":"=r"(x));
+    return x;
+}
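+
+/*
+ * Illustrative sketch (not part of the original API): install a direct-mode
+ * S-mode trap vector. 'handler' is a hypothetical, 4-byte-aligned trap entry;
+ * the low two bits of stvec select the trap mode (0 = direct) per the RISC-V
+ * privileged specification.
+ */
+__STATIC_INLINE void __example_install_stvec(void (*handler)(void))
+{
+    __set_STVEC((uint64_t)(uintptr_t)handler & ~0x3UL);
+}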
+
+/**
+  \brief   Enable interrupts and exceptions
+  \details Enables interrupts globally by setting the interrupt-enable bit in the status CSR
+           (supervisor or machine level, depending on CONFIG_MMU).
+           Can only be executed in Privileged modes.
+ */
+__ALWAYS_STATIC_INLINE void __enable_excp_irq(void)
+{
+#ifdef CONFIG_MMU
+    __enable_supervisor_irq();
+#else
+    __enable_irq();
+#endif
+}
+
+
+/**
+  \brief   Disable interrupts and exceptions
+  \details Disables interrupts globally by clearing the interrupt-enable bit in the status CSR
+           (supervisor or machine level, depending on CONFIG_MMU).
+           Can only be executed in Privileged modes.
+ */
+__ALWAYS_STATIC_INLINE void __disable_excp_irq(void)
+{
+#ifdef CONFIG_MMU
+    __disable_supervisor_irq();
+#else
+    __disable_irq();
+#endif
+}
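+
+/*
+ * Illustrative sketch (not part of the original API): a minimal critical
+ * section built from the two helpers above. Note that this simple form does
+ * not save and restore the previous interrupt-enable state.
+ */
+__STATIC_INLINE void __example_critical_increment(volatile uint32_t *counter)
+{
+    __disable_excp_irq();
+    (*counter)++;               /* work that must not be interrupted */
+    __enable_excp_irq();
+}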
+
+#define __CSI_GCC_OUT_REG(r) "=r" (r)
+#define __CSI_GCC_USE_REG(r) "r" (r)
+
+/**
+  \brief   No Operation
+  \details No Operation does nothing. This instruction can be used for code alignment purposes.
+ */
+__ALWAYS_STATIC_INLINE void __NOP(void)
+{
+    __ASM volatile("nop");
+}
+
+
+/**
+  \brief   return from M-MODE
+  \details return from M-MODE.
+ */
+__ALWAYS_STATIC_INLINE void __MRET(void)
+{
+    __ASM volatile("mret");
+}
+
+/**
+  \brief   Wait For Interrupt
+  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
+ */
+__ALWAYS_STATIC_INLINE void __WFI(void)
+{
+    __ASM volatile("wfi");
+}
+
+/**
+  \brief   Wait For Interrupt
+  \details Wait For Interrupt is a hint instruction that suspends execution until one interrupt occurs.
+ */
+__ALWAYS_STATIC_INLINE void __WAIT(void)
+{
+    __ASM volatile("wfi");
+}
+
+/**
+  \brief   Doze For Interrupt
+  \details Doze For Interrupt is a hint instruction that suspends execution until one interrupt occurs.
+ */
+__ALWAYS_STATIC_INLINE void __DOZE(void)
+{
+    __ASM volatile("wfi");
+}
+
+/**
+  \brief   Stop For Interrupt
+  \details Stop For Interrupt is a hint instruction that suspends execution until one interrupt occurs.
+ */
+__ALWAYS_STATIC_INLINE void __STOP(void)
+{
+    __ASM volatile("wfi");
+}
+
+/**
+  \brief   Instruction Synchronization Barrier
+  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
+           so that all instructions following the ISB are fetched from cache or memory,
+           after the instruction has been completed.
+ */
+__ALWAYS_STATIC_INLINE void __ISB(void)
+{
+    __ASM volatile("fence");
+}
+
+
+/**
+  \brief   Data Synchronization Barrier
+  \details Acts as a special kind of Data Memory Barrier.
+           It completes when all explicit memory accesses before this instruction complete.
+ */
+__ALWAYS_STATIC_INLINE void __DSB(void)
+{
+    __ASM volatile("fence");
+}
+
+/**
+  \brief   Data Synchronization Barrier
+  \details Acts as a special kind of Data Memory Barrier.
+           It completes when all explicit memory accesses before this instruction complete.
+ */
+__ALWAYS_STATIC_INLINE void __SYNC_IS(void)
+{
+    __ASM volatile("sync.is");
+}
+
+/**
+  \brief   Invalidate all icache
+  \details Invalidates the entire icache.
+ */
+__ALWAYS_STATIC_INLINE void __ICACHE_IALL(void)
+{
+    __ASM volatile("icache.iall");
+}
+
+/**
+  \brief   Invalidate all icache on all CPUs
+  \details Invalidates the icache on all CPUs (broadcast form).
+ */
+__ALWAYS_STATIC_INLINE void __ICACHE_IALLS(void)
+{
+    __ASM volatile("icache.ialls");
+}
+
+/**
+  \brief   Invalidate Icache by phy addr
+  \details Invalidates the Icache line containing the given physical address.
+  \param [in] addr  operate addr
+ */
+__ALWAYS_STATIC_INLINE void __ICACHE_IPA(uint64_t addr)
+{
+    __ASM volatile("icache.ipa %0" : : "r"(addr));
+}
+
+/**
+  \brief   Invalidate Icache by virt address
+  \details Invalidates the Icache line containing the given virtual address.
+  \param [in] addr  operate addr
+ */
+__ALWAYS_STATIC_INLINE void __ICACHE_IVA(uint64_t addr)
+{
+    __ASM volatile("icache.iva %0" : : "r"(addr));
+}
+
+/**
+  \brief   Invalidate all dcache
+  \details Invalidates the entire dcache.
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_IALL(void)
+{
+    __ASM volatile("dcache.iall");
+}
+
+/**
+  \brief   Clear all dcache
+  \details clear all dcache.
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_CALL(void)
+{
+    __ASM volatile("dcache.call");
+}
+
+/**
+  \brief   Clear & invalidate all dcache
+  \details Clears (writes back) and invalidates the entire dcache.
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_CIALL(void)
+{
+    __ASM volatile("dcache.ciall");
+}
+
+#if (__L2CACHE_PRESENT == 1U)
+/**
+  \brief   Invalidate L2 cache
+  \details Invalidates the entire L2 cache.
+ */
+__ALWAYS_STATIC_INLINE void __L2CACHE_IALL(void)
+{
+    __ASM volatile("l2cache.iall");
+}
+
+/**
+  \brief   Clear L2cache
+  \details clear L2cache.
+ */
+__ALWAYS_STATIC_INLINE void __L2CACHE_CALL(void)
+{
+    __ASM volatile("l2cache.call");
+}
+
+/**
+  \brief   Clear & invalidate L2cache
+  \details Clears (writes back) and invalidates the entire L2 cache.
+ */
+__ALWAYS_STATIC_INLINE void __L2CACHE_CIALL(void)
+{
+    __ASM volatile("l2cache.ciall");
+}
+#endif
+
+
+/**
+  \brief   Invalidate Dcache by phy addr
+  \details Invalidates the Dcache line containing the given physical address.
+  \param [in] addr  operate addr
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_IPA(uint64_t addr)
+{
+    __ASM volatile("dcache.ipa %0" : : "r"(addr));
+}
+
+/**
+  \brief   Invalidate Dcache by virt addr
+  \details Invalidates the Dcache line containing the given virtual address.
+  \param [in] addr  operate addr
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_IVA(uint64_t addr)
+{
+    __ASM volatile("dcache.iva %0" : : "r"(addr));
+}
+
+/**
+  \brief   Clear Dcache by addr
+  \details Clear Dcache by addr.
+  \param [in] addr  operate addr
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_CPA(uint64_t addr)
+{
+    __ASM volatile("dcache.cpa %0" : : "r"(addr));
+}
+
+/**
+  \brief   Clear Dcache by virt addr
+  \details Clear Dcache by virt addr.
+  \param [in] addr  operate addr
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_CVA(uint64_t addr)
+{
+    __ASM volatile("dcache.cva %0" : : "r"(addr));
+}
+
+/**
+  \brief   Clear & invalidate Dcache by phy addr
+  \details Clears (writes back) and invalidates the Dcache line containing the given physical address.
+  \param [in] addr  operate addr
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_CIPA(uint64_t addr)
+{
+    __ASM volatile("dcache.cipa %0" : : "r"(addr));
+}
+
+/**
+  \brief   Clear & invalidate Dcache by virt addr
+  \details Clears (writes back) and invalidates the Dcache line containing the given virtual address.
+  \param [in] addr  operate addr
+ */
+__ALWAYS_STATIC_INLINE void __DCACHE_CIVA(uint64_t addr)
+{
+    __ASM volatile("dcache.civa %0" : : "r"(addr));
+}
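+
+/*
+ * Illustrative sketch (not part of the original API): clean & invalidate a
+ * buffer by physical address, one line at a time. The 64-byte line size is an
+ * assumption; substitute the platform's real cache-line size.
+ */
+__STATIC_INLINE void __example_dcache_clean_inv_range(uint64_t pa, uint64_t size)
+{
+    uint64_t line = 64U;                       /* assumed cache-line size */
+    uint64_t addr = pa & ~(line - 1U);
+
+    for (; addr < pa + size; addr += line) {
+        __DCACHE_CIPA(addr);
+    }
+
+    __SYNC_IS();                               /* make the maintenance visible */
+}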
+
+
+/**
+  \brief   Data Memory Barrier
+  \details Ensures the apparent order of the explicit memory operations before
+           and after the instruction, without ensuring their completion.
+ */
+__ALWAYS_STATIC_INLINE void __DMB(void)
+{
+    __ASM volatile("fence");
+}
+
+/**
+  \brief   Reverse byte order (32 bit)
+  \details Reverses the byte order of the low 32 bits of the value; the upper
+           32 bits of the 64-bit argument are discarded.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__ALWAYS_STATIC_INLINE uint64_t __REV(uint64_t value)
+{
+    return __builtin_bswap32(value);
+}
+
+
+/**
+  \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order in two unsigned short values.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __REV16(uint32_t value)
+{
+    uint32_t result;
+
+    result = ((value & 0xFF000000) >> 8) | ((value & 0x00FF0000) << 8) |
+             ((value & 0x0000FF00) >> 8) | ((value & 0x000000FF) << 8);
+
+    return (result);
+}
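+
+/*
+ * Worked example (sketch, not part of the original API):
+ *   __REV16(0x11223344) -> 0x22114433   (bytes swapped within each halfword)
+ */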
+
+
+/**
+  \brief   Reverse byte order in signed short value
+  \details Reverses the byte order in a signed short value with sign extension to integer.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__ALWAYS_STATIC_INLINE int32_t __REVSH(int32_t value)
+{
+    return (short)(((value & 0xFF00) >> 8) | ((value & 0x00FF) << 8));
+}
+
+
+/**
+  \brief   Rotate Right in unsigned value (32 bit)
+  \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
+  \param [in]    op1  Value to rotate
+  \param [in]    op2  Number of Bits to rotate
+  \return               Rotated value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
+{
+    return (op1 >> op2) | (op1 << (32U - op2));
+}
+
+
+/**
+  \brief   Breakpoint
+  \details Causes the processor to enter Debug state
+           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+ */
+__ALWAYS_STATIC_INLINE void __BKPT(void)
+{
+    __ASM volatile("ebreak");
+}
+
+/**
+  \brief   Reverse bit order of value
+  \details Reverses the bit order of the given value.
+  \param [in]    value  Value to reverse
+  \return               Reversed value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __RBIT(uint32_t value)
+{
+    uint32_t result;
+
+    int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */
+
+    result = value;                      /* r will be reversed bits of v; first get LSB of v */
+
+    for (value >>= 1U; value; value >>= 1U) {
+        result <<= 1U;
+        result |= value & 1U;
+        s--;
+    }
+
+    result <<= s;                        /* shift when v's highest bits are zero */
+
+    return (result);
+}
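+
+/*
+ * Worked example (sketch, not part of the original API):
+ *   __RBIT(0x00000001) -> 0x80000000
+ *   __RBIT(0x80000000) -> 0x00000001
+ */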
+
+
+/**
+  \brief   Count leading zeros
+  \details Counts the number of leading zeros of a data value.
+  \param [in]  value  Value to count the leading zeros
+  \return             number of leading zeros in value
+ */
+#define __CLZ             __builtin_clz
+/**
+  \details This function saturates a signed value.
+  \param [in]    x   Value to be saturated
+  \param [in]    y   Bit position to saturate to [1..32]
+  \return            Saturated value.
+ */
+__ALWAYS_STATIC_INLINE int32_t __SSAT(int32_t x, uint32_t y)
+{
+    int32_t posMax, negMin;
+    uint32_t i;
+
+    posMax = 1;
+
+    for (i = 0; i < (y - 1); i++) {
+        posMax = posMax * 2;
+    }
+
+    if (x > 0) {
+        posMax = (posMax - 1);
+
+        if (x > posMax) {
+            x = posMax;
+        }
+
+//    x &= (posMax * 2 + 1);
+    } else {
+        negMin = -posMax;
+
+        if (x < negMin) {
+            x = negMin;
+        }
+
+//    x &= (posMax * 2 - 1);
+    }
+
+    return (x);
+}
+
+/**
+  \brief   Unsigned Saturate
+  \details Saturates an unsigned value.
+  \param [in]  value  Value to be saturated
+  \param [in]    sat  Bit position to saturate to (0..31)
+  \return             Saturated value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USAT(uint32_t value, uint32_t sat)
+{
+    uint32_t result;
+
+    if ((((0xFFFFFFFF >> sat) << sat) & value) != 0) {
+        result = 0xFFFFFFFF >> (32 - sat);
+    } else {
+        result = value;
+    }
+
+    return (result);
+}
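+
+/*
+ * Worked examples (sketch, not part of the original API):
+ *   __SSAT(200, 8)  -> 127    (largest  8-bit signed value)
+ *   __SSAT(-200, 8) -> -128   (smallest 8-bit signed value)
+ *   __USAT(300, 8)  -> 255    (largest  8-bit unsigned value)
+ */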
+
+/**
+  \brief   Unsigned Saturate for internal use
+  \details Saturates an unsigned value, should not call directly.
+  \param [in]  value  Value to be saturated
+  \param [in]    sat  Bit position to saturate to (0..31)
+  \return             Saturated value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __IUSAT(uint32_t value, uint32_t sat)
+{
+    uint32_t result;
+
+    if (value & 0x80000000) { /* only overflow set bit-31 */
+        result = 0;
+    } else if ((((0xFFFFFFFF >> sat) << sat) & value) != 0) {
+        result = 0xFFFFFFFF >> (32 - sat);
+    } else {
+        result = value;
+    }
+
+    return (result);
+}
+
+/**
+  \brief   Rotate Right with Extend
+  \details This function moves each bit of a bitstring right by one bit.
+           The carry input is shifted in at the left end of the bitstring.
+  \note    The carry input is always 0, so this implementation simply returns 0.
+  \param [in]    op1  Value to rotate
+  \return               Rotated value
+ */
+__ALWAYS_STATIC_INLINE uint32_t __RRX(uint32_t op1)
+{
+    return 0;
+}
+
+/**
+  \brief   LDRT Unprivileged (8 bit)
+  \details Executes an unprivileged LDRT instruction for an 8-bit value.
+  \param [in]    addr  Pointer to location
+  \return             value of type uint8_t at (*ptr)
+ */
+__ALWAYS_STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
+{
+    uint32_t result;
+
+    __ASM volatile("lb %0, 0(%1)" : "=r"(result) : "r"(addr));
+
+    return ((uint8_t) result);    /* Add explicit type cast here */
+}
+
+
+/**
+  \brief   LDRT Unprivileged (16 bit)
+  \details Executes an unprivileged LDRT instruction for 16-bit values.
+  \param [in]    addr  Pointer to location
+  \return        value of type uint16_t at (*ptr)
+ */
+__ALWAYS_STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
+{
+    uint32_t result;
+
+    __ASM volatile("lh %0, 0(%1)" : "=r"(result) : "r"(addr));
+
+    return ((uint16_t) result);    /* Add explicit type cast here */
+}
+
+
+/**
+  \brief   LDRT Unprivileged (32 bit)
+  \details Executes an unprivileged LDRT instruction for 32-bit values.
+  \param [in]    addr  Pointer to location
+  \return        value of type uint32_t at (*ptr)
+ */
+__ALWAYS_STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
+{
+    uint32_t result;
+
+    __ASM volatile("lw %0, 0(%1)" : "=r"(result) : "r"(addr));
+
+    return (result);
+}
+
+
+/**
+  \brief   STRT Unprivileged (8 bit)
+  \details Executes an unprivileged STRT instruction for 8-bit values.
+  \param [in]  value  Value to store
+  \param [in]    addr  Pointer to location
+ */
+__ALWAYS_STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
+{
+    __ASM volatile("sb %1, 0(%0)" :: "r"(addr), "r"((uint32_t)value) : "memory");
+}
+
+
+/**
+  \brief   STRT Unprivileged (16 bit)
+  \details Executes an unprivileged STRT instruction for 16-bit values.
+  \param [in]  value  Value to store
+  \param [in]    addr  Pointer to location
+ */
+__ALWAYS_STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
+{
+    __ASM volatile("sh %1, 0(%0)" :: "r"(addr), "r"((uint32_t)value) : "memory");
+}
+
+
+/**
+  \brief   STRT Unprivileged (32 bit)
+  \details Executes an unprivileged STRT instruction for 32-bit values.
+  \param [in]  value  Value to store
+  \param [in]    addr  Pointer to location
+ */
+__ALWAYS_STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
+{
+    __ASM volatile("sw %1, 0(%0)" :: "r"(addr), "r"(value) : "memory");
+}
+
+/*@}*/ /* end of group CSI_Core_InstructionInterface */
+
+/* ###################  Compiler specific Intrinsics  ########################### */
+/** \defgroup CSI_SIMD_intrinsics CSI SIMD Intrinsics
+  Access to dedicated SIMD instructions \n
+  Single Instruction Multiple Data (SIMD) extensions are provided to simplify development of application software. SIMD extensions increase the processing capability without materially increasing the power consumption. The SIMD extensions are completely transparent to the operating system (OS), allowing existing OS ports to be used.
+
+  @{
+*/
+
+/**
+  \brief   Halfword packing instruction. Combines bits[15:0] of val1 with bits[31:16]
+           of val2 left-shifted by val3.
+  \details Combine a halfword from one register with a halfword from another register.
+           The second argument can be left-shifted before extraction of the halfword.
+  \param [in]    val1   first 16-bit operands
+  \param [in]    val2   second 16-bit operands
+  \param [in]    val3   value for left-shifting val2. Value range [0..31].
+  \return               the combination of halfwords.
+  \remark
+                 res[15:0]  = val1[15:0]              \n
+                 res[31:16] = val2[31:16] << val3
+ */
+__ALWAYS_STATIC_INLINE uint32_t __PKHBT(uint32_t val1, uint32_t val2, uint32_t val3)
+{
+    return ((((int32_t)(val1) << 0) & (int32_t)0x0000FFFF) | (((int32_t)(val2) << val3) & (int32_t)0xFFFF0000));
+}
+
+/**
+  \brief   Halfword packing instruction. Combines bits[31:16] of val1 with bits[15:0]
+           of val2 right-shifted with the val3.
+  \details Combine a halfword from one register with a halfword from another register.
+           The second argument can be right-shifted before extraction of the halfword.
+  \param [in]    val1   first 16-bit operands
+  \param [in]    val2   second 16-bit operands
+  \param [in]    val3   value for right-shifting val2. Value range [1..32].
+  \return               the combination of halfwords.
+  \remark
+                 res[15:0]  = val2[15:0] >> val3        \n
+                 res[31:16] = val1[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __PKHTB(uint32_t val1, uint32_t val2, uint32_t val3)
+{
+    return ((((int32_t)(val1) << 0) & (int32_t)0xFFFF0000) | (((int32_t)(val2) >> val3) & (int32_t)0x0000FFFF));
+}
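+
+/*
+ * Worked examples (sketch, not part of the original API):
+ *   __PKHBT(0x00001111, 0x00002222, 16) -> 0x22221111
+ *   __PKHTB(0x33330000, 0x44440000, 16) -> 0x33334444
+ */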
+
+/**
+  \brief   Dual 16-bit signed saturate.
+  \details This function saturates a signed value.
+  \param [in]    x   two signed 16-bit values to be saturated.
+  \param [in]    y   bit position for saturation, an integral constant expression in the range 1 to 16.
+  \return        the two signed saturated halfwords:\n
+                 the signed saturation of the low halfword in val1, saturated to the bit position specified in
+                 val2 and returned in the low halfword of the return value.\n
+                 the signed saturation of the high halfword in val1, saturated to the bit position specified in
+                 val2 and returned in the high halfword of the return value.
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SSAT16(int32_t x, const uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = __SSAT((((int32_t)x << 16) >> 16), y) & (int32_t)0x0000FFFF;
+    s = __SSAT((((int32_t)x) >> 16), y) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned saturate.
+  \details This function enables you to saturate two signed 16-bit values to a selected unsigned range.
+  \param [in]    x   two signed 16-bit values to be saturated.
+  \param [in]    y   bit position for saturation, an integral constant expression in the range 1 to 16.
+  \return        the saturation of the two signed 16-bit values, as non-negative values:
+                 the saturation of the low halfword in val1, saturated to the bit position specified in
+                 val2 and returned in the low halfword of the return value.\n
+                 the saturation of the high halfword in val1, saturated to the bit position specified in
+                 val2 and returned in the high halfword of the return value.
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USAT16(uint32_t x, const uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = __IUSAT(((x << 16) >> 16), y) & 0x0000FFFF;
+    s = __IUSAT(((x) >> 16), y) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Quad 8-bit saturating addition.
+  \details This function enables you to perform four 8-bit integer additions,
+           saturating the results to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the saturated addition of the first byte of each operand in the first byte of the return value.\n
+                 the saturated addition of the second byte of each operand in the second byte of the return value.\n
+                 the saturated addition of the third byte of each operand in the third byte of the return value.\n
+                 the saturated addition of the fourth byte of each operand in the fourth byte of the return value.\n
+                 The returned results are saturated to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
+  \remark
+                 res[7:0]   = val1[7:0]   + val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  + val2[15:8]       \n
+                 res[23:16] = val1[23:16] + val2[23:16]      \n
+                 res[31:24] = val1[31:24] + val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = __SSAT(((((int32_t)x << 24) >> 24) + (((int32_t)y << 24) >> 24)), 8) & (int32_t)0x000000FF;
+    s = __SSAT(((((int32_t)x << 16) >> 24) + (((int32_t)y << 16) >> 24)), 8) & (int32_t)0x000000FF;
+    t = __SSAT(((((int32_t)x <<  8) >> 24) + (((int32_t)y <<  8) >> 24)), 8) & (int32_t)0x000000FF;
+    u = __SSAT(((((int32_t)x) >> 24) + (((int32_t)y) >> 24)), 8) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
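+
+/*
+ * Worked example (sketch, not part of the original API):
+ *   __QADD8(0x7F017F01, 0x01010101) -> 0x7F027F02
+ *   (0x7F + 0x01 saturates to 0x7F in lanes 1 and 3; 0x01 + 0x01 = 0x02 elsewhere)
+ */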
+
+/**
+  \brief   Quad 8-bit unsigned saturating addition.
+  \details This function enables you to perform four unsigned 8-bit integer additions,
+           saturating the results to the 8-bit unsigned integer range 0 < x < 2^8 - 1.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the saturated addition of the first byte of each operand in the first byte of the return value.\n
+                 the saturated addition of the second byte of each operand in the second byte of the return value.\n
+                 the saturated addition of the third byte of each operand in the third byte of the return value.\n
+                 the saturated addition of the fourth byte of each operand in the fourth byte of the return value.\n
+                 The returned results are saturated to the 8-bit unsigned integer range 0 <= x <= 2^8 - 1.
+  \remark
+                 res[7:0]   = val1[7:0]   + val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  + val2[15:8]       \n
+                 res[23:16] = val1[23:16] + val2[23:16]      \n
+                 res[31:24] = val1[31:24] + val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = __IUSAT((((x << 24) >> 24) + ((y << 24) >> 24)), 8) & 0x000000FF;
+    s = __IUSAT((((x << 16) >> 24) + ((y << 16) >> 24)), 8) & 0x000000FF;
+    t = __IUSAT((((x <<  8) >> 24) + ((y <<  8) >> 24)), 8) & 0x000000FF;
+    u = __IUSAT((((x) >> 24) + ((y) >> 24)), 8) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Quad 8-bit signed addition.
+  \details This function performs four 8-bit signed integer additions.
+  \param [in]    x  first four 8-bit summands.
+  \param [in]    y  second four 8-bit summands.
+  \return        the addition of the first bytes from each operand, in the first byte of the return value.\n
+                 the addition of the second bytes of each operand, in the second byte of the return value.\n
+                 the addition of the third bytes of each operand, in the third byte of the return value.\n
+                 the addition of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = val1[7:0]   + val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  + val2[15:8]       \n
+                 res[23:16] = val1[23:16] + val2[23:16]      \n
+                 res[31:24] = val1[31:24] + val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = ((((int32_t)x << 24) >> 24) + (((int32_t)y << 24) >> 24)) & (int32_t)0x000000FF;
+    s = ((((int32_t)x << 16) >> 24) + (((int32_t)y << 16) >> 24)) & (int32_t)0x000000FF;
+    t = ((((int32_t)x <<  8) >> 24) + (((int32_t)y <<  8) >> 24)) & (int32_t)0x000000FF;
+    u = ((((int32_t)x) >> 24) + (((int32_t)y) >> 24)) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned addition.
+  \details This function performs four unsigned 8-bit integer additions.
+  \param [in]    x  first four 8-bit summands.
+  \param [in]    y  second four 8-bit summands.
+  \return        the addition of the first bytes from each operand, in the first byte of the return value.\n
+                 the addition of the second bytes of each operand, in the second byte of the return value.\n
+                 the addition of the third bytes of each operand, in the third byte of the return value.\n
+                 the addition of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = val1[7:0]   + val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  + val2[15:8]       \n
+                 res[23:16] = val1[23:16] + val2[23:16]      \n
+                 res[31:24] = val1[31:24] + val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((x << 24) >> 24) + ((y << 24) >> 24)) & 0x000000FF;
+    s = (((x << 16) >> 24) + ((y << 16) >> 24)) & 0x000000FF;
+    t = (((x <<  8) >> 24) + ((y <<  8) >> 24)) & 0x000000FF;
+    u = (((x) >> 24) + ((y) >> 24)) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Quad 8-bit saturating subtract.
+  \details This function enables you to perform four 8-bit integer subtractions,
+           saturating the results to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
+  \param [in]    x   first four 8-bit operands of each subtraction.
+  \param [in]    y   second four 8-bit operands of each subtraction.
+  \return        the subtraction of the first byte of each operand in the first byte of the return value.\n
+                 the subtraction of the second byte of each operand in the second byte of the return value.\n
+                 the subtraction of the third byte of each operand in the third byte of the return value.\n
+                 the subtraction of the fourth byte of each operand in the fourth byte of the return value.\n
+                 The returned results are saturated to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
+  \remark
+                 res[7:0]   = val1[7:0]   - val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  - val2[15:8]       \n
+                 res[23:16] = val1[23:16] - val2[23:16]      \n
+                 res[31:24] = val1[31:24] - val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = __SSAT(((((int32_t)x << 24) >> 24) - (((int32_t)y << 24) >> 24)), 8) & (int32_t)0x000000FF;
+    s = __SSAT(((((int32_t)x << 16) >> 24) - (((int32_t)y << 16) >> 24)), 8) & (int32_t)0x000000FF;
+    t = __SSAT(((((int32_t)x <<  8) >> 24) - (((int32_t)y <<  8) >> 24)), 8) & (int32_t)0x000000FF;
+    u = __SSAT(((((int32_t)x) >> 24) - (((int32_t)y) >> 24)), 8) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned saturating subtraction.
+  \details This function enables you to perform four unsigned 8-bit integer subtractions,
+           saturating the results to the 8-bit unsigned integer range 0 < x < 2^8 - 1.
+  \param [in]    x   first four 8-bit operands of each subtraction.
+  \param [in]    y   second four 8-bit operands of each subtraction.
+  \return        the subtraction of the first byte of each operand in the first byte of the return value.\n
+                 the subtraction of the second byte of each operand in the second byte of the return value.\n
+                 the subtraction of the third byte of each operand in the third byte of the return value.\n
+                 the subtraction of the fourth byte of each operand in the fourth byte of the return value.\n
+                 The returned results are saturated to the 8-bit unsigned integer range 0 <= x <= 2^8 - 1.
+  \remark
+                 res[7:0]   = val1[7:0]   - val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  - val2[15:8]       \n
+                 res[23:16] = val1[23:16] - val2[23:16]      \n
+                 res[31:24] = val1[31:24] - val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = __IUSAT((((x << 24) >> 24) - ((y << 24) >> 24)), 8) & 0x000000FF;
+    s = __IUSAT((((x << 16) >> 24) - ((y << 16) >> 24)), 8) & 0x000000FF;
+    t = __IUSAT((((x <<  8) >> 24) - ((y <<  8) >> 24)), 8) & 0x000000FF;
+    u = __IUSAT((((x) >> 24) - ((y) >> 24)), 8) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Quad 8-bit signed subtraction.
+  \details This function enables you to perform four 8-bit signed integer subtractions.
+  \param [in]    x  first four 8-bit operands of each subtraction.
+  \param [in]    y  second four 8-bit operands of each subtraction.
+  \return        the subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the subtraction of the second bytes of each operand, in the second byte of the return value.\n
+                 the subtraction of the third bytes of each operand, in the third byte of the return value.\n
+                 the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = val1[7:0]   - val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  - val2[15:8]       \n
+                 res[23:16] = val1[23:16] - val2[23:16]      \n
+                 res[31:24] = val1[31:24] - val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = ((((int32_t)x << 24) >> 24) - (((int32_t)y << 24) >> 24)) & (int32_t)0x000000FF;
+    s = ((((int32_t)x << 16) >> 24) - (((int32_t)y << 16) >> 24)) & (int32_t)0x000000FF;
+    t = ((((int32_t)x <<  8) >> 24) - (((int32_t)y <<  8) >> 24)) & (int32_t)0x000000FF;
+    u = ((((int32_t)x) >> 24) - (((int32_t)y) >> 24)) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned subtract.
+  \details This function enables you to perform four 8-bit unsigned integer subtractions.
+  \param [in]    x  first four 8-bit operands of each subtraction.
+  \param [in]    y  second four 8-bit operands of each subtraction.
+  \return        the subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the subtraction of the second bytes of each operand, in the second byte of the return value.\n
+                 the subtraction of the third bytes of each operand, in the third byte of the return value.\n
+                 the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = val1[7:0]   - val2[7:0]        \n
+                 res[15:8]  = val1[15:8]  - val2[15:8]       \n
+                 res[23:16] = val1[23:16] - val2[23:16]      \n
+                 res[31:24] = val1[31:24] - val2[31:24]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((x << 24) >> 24) - ((y << 24) >> 24)) & 0x000000FF;
+    s = (((x << 16) >> 24) - ((y << 16) >> 24)) & 0x000000FF;
+    t = (((x <<  8) >> 24) - ((y <<  8) >> 24)) & 0x000000FF;
+    u = (((x) >> 24) - ((y) >> 24)) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Unsigned sum of quad 8-bit unsigned absolute difference.
+  \details This function enables you to perform four unsigned 8-bit subtractions, and add the absolute values
+           of the differences together, returning the result as a single unsigned integer.
+  \param [in]    x  first four 8-bit operands of each subtraction.
+  \param [in]    y  second four 8-bit operands of each subtraction.
+  \return        the subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the subtraction of the second bytes of each operand, in the second byte of the return value.\n
+                 the subtraction of the third bytes of each operand, in the third byte of the return value.\n
+                 the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.\n
+                 The sum is returned as a single unsigned integer.
+  \remark
+                 absdiff1   = val1[7:0]   - val2[7:0]        \n
+                 absdiff2   = val1[15:8]  - val2[15:8]       \n
+                 absdiff3   = val1[23:16] - val2[23:16]      \n
+                 absdiff4   = val1[31:24] - val2[31:24]      \n
+                 res[31:0]  = absdiff1 + absdiff2 + absdiff3 + absdiff4
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USAD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((x << 24) >> 24) - ((y << 24) >> 24)) & 0x000000FF;
+    s = (((x << 16) >> 24) - ((y << 16) >> 24)) & 0x000000FF;
+    t = (((x <<  8) >> 24) - ((y <<  8) >> 24)) & 0x000000FF;
+    u = (((x) >> 24) - ((y) >> 24)) & 0x000000FF;
+
+    return (u + t + s + r);
+}
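+
+/*
+ * Worked example (sketch, not part of the original API):
+ *   __USAD8(0x05030901, 0x01010101) -> 14
+ *   (|0x05-0x01| + |0x03-0x01| + |0x09-0x01| + |0x01-0x01| = 4 + 2 + 8 + 0)
+ */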
+
+/**
+  \brief   Unsigned sum of quad 8-bit unsigned absolute difference with 32-bit accumulate.
+  \details This function enables you to perform four unsigned 8-bit subtractions, and add the absolute values
+           of the differences to a 32-bit accumulate operand.
+  \param [in]    x  first four 8-bit operands of each subtraction.
+  \param [in]    y  second four 8-bit operands of each subtraction.
+  \param [in]  sum  accumulation value.
+  \return        the sum of the absolute differences of the following bytes, added to the accumulation value:
+                 the subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the subtraction of the second bytes of each operand, in the second byte of the return value.\n
+                 the subtraction of the third bytes of each operand, in the third byte of the return value.\n
+                 the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.
+  \remark
+                 absdiff1 = val1[7:0]   - val2[7:0]        \n
+                 absdiff2 = val1[15:8]  - val2[15:8]       \n
+                 absdiff3 = val1[23:16] - val2[23:16]      \n
+                 absdiff4 = val1[31:24] - val2[31:24]      \n
+                 sum = absdiff1 + absdiff2 + absdiff3 + absdiff4 \n
+                 res[31:0] = sum[31:0] + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USADA8(uint32_t x, uint32_t y, uint32_t sum)
+{
+    int32_t r, s, t, u;
+
+#ifdef __cplusplus
+    r = (abs((long long)((x << 24) >> 24) - ((y << 24) >> 24))) & 0x000000FF;
+    s = (abs((long long)((x << 16) >> 24) - ((y << 16) >> 24))) & 0x000000FF;
+    t = (abs((long long)((x <<  8) >> 24) - ((y <<  8) >> 24))) & 0x000000FF;
+    u = (abs((long long)((x) >> 24) - ((y) >> 24))) & 0x000000FF;
+#else
+    r = (abs(((x << 24) >> 24) - ((y << 24) >> 24))) & 0x000000FF;
+    s = (abs(((x << 16) >> 24) - ((y << 16) >> 24))) & 0x000000FF;
+    t = (abs(((x <<  8) >> 24) - ((y <<  8) >> 24))) & 0x000000FF;
+    u = (abs(((x) >> 24) - ((y) >> 24))) & 0x000000FF;
+#endif
+    return (u + t + s + r + sum);
+}
+
+/**
+  \brief   Dual 16-bit saturating addition.
+  \details This function enables you to perform two 16-bit integer arithmetic additions in parallel,
+           saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the saturated addition of the low halfwords, in the low halfword of the return value.\n
+                 the saturated addition of the high halfwords, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[15:0]        \n
+                 res[31:16] = val1[31:16] + val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QADD16(uint32_t x, uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = __SSAT(((((int32_t)x << 16) >> 16) + (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
+    s = __SSAT(((((int32_t)x) >> 16) + (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
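+
+/*
+ * Worked example (sketch, not part of the original API):
+ *   __QADD16(0x7FFF0001, 0x00010001) -> 0x7FFF0002
+ *   (0x7FFF + 0x0001 saturates to 0x7FFF; 0x0001 + 0x0001 = 0x0002)
+ */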
+
+/**
+  \brief   Dual 16-bit unsigned saturating addition.
+  \details This function enables you to perform two unsigned 16-bit integer additions, saturating
+           the results to the 16-bit unsigned integer range 0 < x < 2^16 - 1.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the saturated addition of the low halfwords, in the low halfword of the return value.\n
+                 the saturated addition of the high halfwords, in the high halfword of the return value.\n
+                 The results are saturated to the 16-bit unsigned integer range 0 < x < 2^16 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[15:0]        \n
+                 res[31:16] = val1[31:16] + val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQADD16(uint32_t x, uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = __IUSAT((((x << 16) >> 16) + ((y << 16) >> 16)), 16) & 0x0000FFFF;
+    s = __IUSAT((((x) >> 16) + ((y) >> 16)), 16) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed addition.
+  \details This function enables you to perform two 16-bit signed integer additions.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the addition of the low halfwords in the low halfword of the return value.\n
+                 the addition of the high halfwords in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[15:0]        \n
+                 res[31:16] = val1[31:16] + val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SADD16(uint32_t x, uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = ((((int32_t)x << 16) >> 16) + (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
+    s = ((((int32_t)x) >> 16) + (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned addition
+  \details This function enables you to perform two 16-bit unsigned integer additions.
+  \param [in]    x   first two 16-bit summands for each addition.
+  \param [in]    y   second two 16-bit summands for each addition.
+  \return        the addition of the low halfwords in the low halfword of the return value.\n
+                 the addition of the high halfwords in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[15:0]        \n
+                 res[31:16] = val1[31:16] + val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UADD16(uint32_t x, uint32_t y)
+{
+    int32_t r = 0, s = 0;
+
+    r = (((x << 16) >> 16) + ((y << 16) >> 16)) & 0x0000FFFF;
+    s = (((x) >> 16) + ((y) >> 16)) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+
+/**
+  \brief   Dual 16-bit signed addition with halved results.
+  \details This function enables you to perform two signed 16-bit integer additions, halving the results.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the halved addition of the low halfwords, in the low halfword of the return value.\n
+                 the halved addition of the high halfwords, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  + val2[15:0]) >> 1        \n
+                 res[31:16] = (val1[31:16] + val2[31:16]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHADD16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((((int32_t)x << 16) >> 16) + (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+    s = (((((int32_t)x) >> 16) + (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned addition with halved results.
+  \details This function enables you to perform two unsigned 16-bit integer additions, halving the results.
+  \param [in]    x   first two 16-bit summands.
+  \param [in]    y   second two 16-bit summands.
+  \return        the halved addition of the low halfwords, in the low halfword of the return value.\n
+                 the halved addition of the high halfwords, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  + val2[15:0]) >> 1        \n
+                 res[31:16] = (val1[31:16] + val2[31:16]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHADD16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((x << 16) >> 16) + ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
+    s = ((((x) >> 16) + ((y) >> 16)) >> 1) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Quad 8-bit signed addition with halved results.
+  \details This function enables you to perform four signed 8-bit integer additions, halving the results.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the halved addition of the first bytes from each operand, in the first byte of the return value.\n
+                 the halved addition of the second bytes from each operand, in the second byte of the return value.\n
+                 the halved addition of the third bytes from each operand, in the third byte of the return value.\n
+                 the halved addition of the fourth bytes from each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = (val1[7:0]   + val2[7:0]  ) >> 1    \n
+                 res[15:8]  = (val1[15:8]  + val2[15:8] ) >> 1    \n
+                 res[23:16] = (val1[23:16] + val2[23:16]) >> 1    \n
+                 res[31:24] = (val1[31:24] + val2[31:24]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((((int32_t)x << 24) >> 24) + (((int32_t)y << 24) >> 24)) >> 1) & (int32_t)0x000000FF;
+    s = (((((int32_t)x << 16) >> 24) + (((int32_t)y << 16) >> 24)) >> 1) & (int32_t)0x000000FF;
+    t = (((((int32_t)x <<  8) >> 24) + (((int32_t)y <<  8) >> 24)) >> 1) & (int32_t)0x000000FF;
+    u = (((((int32_t)x) >> 24) + (((int32_t)y) >> 24)) >> 1) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned addition with halved results.
+  \details This function enables you to perform four unsigned 8-bit integer additions, halving the results.
+  \param [in]    x   first four 8-bit summands.
+  \param [in]    y   second four 8-bit summands.
+  \return        the halved addition of the first bytes from each operand, in the first byte of the return value.\n
+                 the halved addition of the second bytes from each operand, in the second byte of the return value.\n
+                 the halved addition of the third bytes from each operand, in the third byte of the return value.\n
+                 the halved addition of the fourth bytes from each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = (val1[7:0]   + val2[7:0]  ) >> 1    \n
+                 res[15:8]  = (val1[15:8]  + val2[15:8] ) >> 1    \n
+                 res[23:16] = (val1[23:16] + val2[23:16]) >> 1    \n
+                 res[31:24] = (val1[31:24] + val2[31:24]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHADD8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = ((((x << 24) >> 24) + ((y << 24) >> 24)) >> 1) & 0x000000FF;
+    s = ((((x << 16) >> 24) + ((y << 16) >> 24)) >> 1) & 0x000000FF;
+    t = ((((x <<  8) >> 24) + ((y <<  8) >> 24)) >> 1) & 0x000000FF;
+    u = ((((x) >> 24) + ((y) >> 24)) >> 1) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Dual 16-bit saturating subtract.
+  \details This function enables you to perform two 16-bit integer subtractions in parallel,
+           saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \param [in]    x   first two 16-bit operands of each subtraction.
+  \param [in]    y   second two 16-bit operands of each subtraction.
+  \return        the saturated subtraction of the low halfwords, in the low halfword of the return value.\n
+                 the saturated subtraction of the high halfwords, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[15:0]        \n
+                 res[31:16] = val1[31:16] - val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __SSAT(((((int32_t)x << 16) >> 16) - (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
+    s = __SSAT(((((int32_t)x) >> 16) - (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned saturating subtraction.
+  \details This function enables you to perform two unsigned 16-bit integer subtractions,
+           saturating the results to the 16-bit unsigned integer range 0 < x < 2^16 - 1.
+  \param [in]    x   first two 16-bit operands for each subtraction.
+  \param [in]    y   second two 16-bit operands for each subtraction.
+  \return        the saturated subtraction of the low halfwords, in the low halfword of the return value.\n
+                 the saturated subtraction of the high halfwords, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[15:0]        \n
+                 res[31:16] = val1[31:16] - val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __IUSAT((((x << 16) >> 16) - ((y << 16) >> 16)), 16) & 0x0000FFFF;
+    s = __IUSAT((((x) >> 16) - ((y) >> 16)), 16) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed subtraction.
+  \details This function enables you to perform two 16-bit signed integer subtractions.
+  \param [in]    x   first two 16-bit operands of each subtraction.
+  \param [in]    y   second two 16-bit operands of each subtraction.
+  \return        the subtraction of the low halfword in the second operand from the low
+                 halfword in the first operand, in the low halfword of the return value. \n
+                 the subtraction of the high halfword in the second operand from the high
+                 halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[15:0]        \n
+                 res[31:16] = val1[31:16] - val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((int32_t)x << 16) >> 16) - (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
+    s = ((((int32_t)x) >> 16) - (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned subtract.
+  \details This function enables you to perform two 16-bit unsigned integer subtractions.
+  \param [in]    x   first two 16-bit operands of each subtraction.
+  \param [in]    y   second two 16-bit operands of each subtraction.
+  \return        the subtraction of the low halfword in the second operand from the low
+                 halfword in the first operand, in the low halfword of the return value. \n
+                 the subtraction of the high halfword in the second operand from the high
+                 halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[15:0]        \n
+                 res[31:16] = val1[31:16] - val2[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((x << 16) >> 16) - ((y << 16) >> 16)) & 0x0000FFFF;
+    s = (((x) >> 16) - ((y) >> 16)) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed subtraction with halved results.
+  \details This function enables you to perform two signed 16-bit integer subtractions, halving the results.
+  \param [in]    x   first two 16-bit operands of each subtraction.
+  \param [in]    y   second two 16-bit operands of each subtraction.
+  \return        the halved subtraction of the low halfwords, in the low halfword of the return value.\n
+                 the halved subtraction of the high halfwords, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  - val2[15:0]) >> 1        \n
+                 res[31:16] = (val1[31:16] - val2[31:16]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((((int32_t)x << 16) >> 16) - (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+    s = (((((int32_t)x) >> 16) - (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned subtraction with halved results.
+  \details This function enables you to perform two unsigned 16-bit integer subtractions, halving the results.
+  \param [in]    x   first two 16-bit operands of each subtraction.
+  \param [in]    y   second two 16-bit operands of each subtraction.
+  \return        the halved subtraction of the low halfwords, in the low halfword of the return value.\n
+                 the halved subtraction of the high halfwords, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  - val2[15:0]) >> 1        \n
+                 res[31:16] = (val1[31:16] - val2[31:16]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHSUB16(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((x << 16) >> 16) - ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
+    s = ((((x) >> 16) - ((y) >> 16)) >> 1) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Quad 8-bit signed subtraction with halved results.
+  \details This function enables you to perform four signed 8-bit integer subtractions, halving the results.
+  \param [in]    x   first four 8-bit operands of each subtraction.
+  \param [in]    y   second four 8-bit operands of each subtraction.
+  \return        the halved subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the halved subtraction of the second bytes from each operand, in the second byte of the return value.\n
+                 the halved subtraction of the third bytes from each operand, in the third byte of the return value.\n
+                 the halved subtraction of the fourth bytes from each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = (val1[7:0]   - val2[7:0]  ) >> 1    \n
+                 res[15:8]  = (val1[15:8]  - val2[15:8] ) >> 1    \n
+                 res[23:16] = (val1[23:16] - val2[23:16]) >> 1    \n
+                 res[31:24] = (val1[31:24] - val2[31:24]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = (((((int32_t)x << 24) >> 24) - (((int32_t)y << 24) >> 24)) >> 1) & (int32_t)0x000000FF;
+    s = (((((int32_t)x << 16) >> 24) - (((int32_t)y << 16) >> 24)) >> 1) & (int32_t)0x000000FF;
+    t = (((((int32_t)x <<  8) >> 24) - (((int32_t)y <<  8) >> 24)) >> 1) & (int32_t)0x000000FF;
+    u = (((((int32_t)x) >> 24) - (((int32_t)y) >> 24)) >> 1) & (int32_t)0x000000FF;
+
+    return ((uint32_t)((u << 24) | (t << 16) | (s <<  8) | (r)));
+}
+
+/**
+  \brief   Quad 8-bit unsigned subtraction with halved results.
+  \details This function enables you to perform four unsigned 8-bit integer subtractions, halving the results.
+  \param [in]    x   first four 8-bit operands of each subtraction.
+  \param [in]    y   second four 8-bit operands of each subtraction.
+  \return        the halved subtraction of the first bytes from each operand, in the first byte of the return value.\n
+                 the halved subtraction of the second bytes from each operand, in the second byte of the return value.\n
+                 the halved subtraction of the third bytes from each operand, in the third byte of the return value.\n
+                 the halved subtraction of the fourth bytes from each operand, in the fourth byte of the return value.
+  \remark
+                 res[7:0]   = (val1[7:0]   - val2[7:0]  ) >> 1    \n
+                 res[15:8]  = (val1[15:8]  - val2[15:8] ) >> 1    \n
+                 res[23:16] = (val1[23:16] - val2[23:16]) >> 1    \n
+                 res[31:24] = (val1[31:24] - val2[31:24]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHSUB8(uint32_t x, uint32_t y)
+{
+    int32_t r, s, t, u;
+
+    r = ((((x << 24) >> 24) - ((y << 24) >> 24)) >> 1) & 0x000000FF;
+    s = ((((x << 16) >> 24) - ((y << 16) >> 24)) >> 1) & 0x000000FF;
+    t = ((((x <<  8) >> 24) - ((y <<  8) >> 24)) >> 1) & 0x000000FF;
+    u = ((((x) >> 24) - ((y) >> 24)) >> 1) & 0x000000FF;
+
+    return ((u << 24) | (t << 16) | (s <<  8) | (r));
+}
+
+/**
+  \brief   Dual 16-bit add and subtract with exchange.
+  \details This function enables you to exchange the halfwords of the second operand,
+           then add the high halfwords and subtract the low halfwords,
+           saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the saturated subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the saturated addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[31:16]        \n
+                 res[31:16] = val1[31:16] + val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __SSAT(((((int32_t)x << 16) >> 16) - (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
+    s = __SSAT(((((int32_t)x) >> 16) + (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned saturating addition and subtraction with exchange.
+  \details This function enables you to exchange the halfwords of the second operand and
+           perform one unsigned 16-bit integer addition and one unsigned 16-bit subtraction,
+           saturating the results to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the saturated subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the saturated addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[31:16]        \n
+                 res[31:16] = val1[31:16] + val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __IUSAT((((x << 16) >> 16) - ((y) >> 16)), 16) & 0x0000FFFF;
+    s = __IUSAT((((x) >> 16) + ((y << 16) >> 16)), 16) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit addition and subtraction with exchange.
+  \details This function enables you to exchange the halfwords of the second operand, add the high halfwords
+           and subtract the low halfwords.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[31:16]        \n
+                 res[31:16] = val1[31:16] + val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((int32_t)x << 16) >> 16) - (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
+    s = ((((int32_t)x) >> 16) + (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned addition and subtraction with exchange.
+  \details This function enables you to exchange the two halfwords of the second operand,
+           add the high halfwords and subtract the low halfwords.
+  \param [in]    x   first operand for the subtraction in the low halfword,
+                     and the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword,
+                     and the second operand for the addition in the low halfword.
+  \return        the subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = val1[15:0]  - val2[31:16]        \n
+                 res[31:16] = val1[31:16] + val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((x << 16) >> 16) - ((y) >> 16)) & 0x0000FFFF;
+    s = (((x) >> 16) + ((y << 16) >> 16)) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed addition and subtraction with halved results.
+  \details This function enables you to exchange the two halfwords of one operand, perform one
+           signed 16-bit integer addition and one signed 16-bit subtraction, and halve the results.
+  \param [in]    x   first 16-bit operands.
+  \param [in]    y   second 16-bit operands.
+  \return        the halved subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the halved addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  - val2[31:16]) >> 1        \n
+                 res[31:16] = (val1[31:16] + val2[15:0]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((((int32_t)x << 16) >> 16) - (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+    s = (((((int32_t)x) >> 16) + (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned addition and subtraction with halved results and exchange.
+  \details This function enables you to exchange the halfwords of the second operand,
+           add the high halfwords and subtract the low halfwords, halving the results.
+  \param [in]    x   first operand for the subtraction in the low halfword, and
+                     the first operand for the addition in the high halfword.
+  \param [in]    y   second operand for the subtraction in the high halfword, and
+                     the second operand for the addition in the low halfword.
+  \return        the halved subtraction of the high halfword in the second operand from the
+                 low halfword in the first operand, in the low halfword of the return value.\n
+                 the halved addition of the high halfword in the first operand and the
+                 low halfword in the second operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  - val2[31:16]) >> 1        \n
+                 res[31:16] = (val1[31:16] + val2[15:0]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHASX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((x << 16) >> 16) - ((y) >> 16)) >> 1) & 0x0000FFFF;
+    s = ((((x) >> 16) + ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit subtract and add with exchange.
+  \details This function enables you to exchange the halfwords of one operand,
+           then subtract the high halfwords and add the low halfwords,
+           saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \param [in]    x   first operand for the addition in the low halfword,
+                     and the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword,
+                     and the second operand for the subtraction in the low halfword.
+  \return        the saturated addition of the low halfword of the first operand and the high
+                 halfword of the second operand, in the low halfword of the return value.\n
+                 the saturated subtraction of the low halfword of the second operand from the
+                 high halfword of the first operand, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[31:16]        \n
+                 res[31:16] = val1[31:16] - val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __QSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __SSAT(((((int32_t)x << 16) >> 16) + (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
+    s = __SSAT(((((int32_t)x) >> 16) - (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned saturating subtraction and addition with exchange.
+  \details This function enables you to exchange the halfwords of the second operand and perform
+           one unsigned 16-bit integer subtraction and one unsigned 16-bit addition, saturating
+           the results to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \param [in]    x   first operand for the addition in the low halfword,
+                     and the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword,
+                     and the second operand for the subtraction in the low halfword.
+  \return        the saturated addition of the low halfword of the first operand and the high
+                 halfword of the second operand, in the low halfword of the return value.\n
+                 the saturated subtraction of the low halfword of the second operand from the
+                 high halfword of the first operand, in the high halfword of the return value.\n
+                 The returned results are saturated to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[31:16]        \n
+                 res[31:16] = val1[31:16] - val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UQSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = __IUSAT((((x << 16) >> 16) + ((y) >> 16)), 16) & 0x0000FFFF;
+    s = __IUSAT((((x) >> 16) - ((y << 16) >> 16)), 16) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit unsigned subtract and add with exchange.
+  \details This function enables you to exchange the halfwords of the second operand,
+           subtract the high halfwords and add the low halfwords.
+  \param [in]    x   first operand for the addition in the low halfword,
+                     and the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword,
+                     and the second operand for the subtraction in the low halfword.
+  \return        the addition of the low halfword of the first operand and the high
+                 halfword of the second operand, in the low halfword of the return value.\n
+                 the subtraction of the low halfword of the second operand from the
+                 high halfword of the first operand, in the high halfword of the return value.\n
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[31:16]        \n
+                 res[31:16] = val1[31:16] - val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __USAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((x << 16) >> 16) + ((y) >> 16)) & 0x0000FFFF;
+    s = (((x) >> 16) - ((y << 16) >> 16)) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed subtraction and addition with exchange.
+  \details This function enables you to exchange the two halfwords of one operand and perform one
+           16-bit integer subtraction and one 16-bit addition.
+  \param [in]    x   first operand for the addition in the low halfword, and the first operand
+                     for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword, and the second
+                     operand for the subtraction in the low halfword.
+  \return        the addition of the low halfword of the first operand and the high
+                 halfword of the second operand, in the low halfword of the return value.\n
+                 the subtraction of the low halfword of the second operand from the
+                 high halfword of the first operand, in the high halfword of the return value.\n
+  \remark
+                 res[15:0]  = val1[15:0]  + val2[31:16]        \n
+                 res[31:16] = val1[31:16] - val2[15:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((int32_t)x << 16) >> 16) + (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
+    s = ((((int32_t)x) >> 16) - (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+
+/**
+  \brief   Dual 16-bit signed subtraction and addition with halved results.
+  \details This function enables you to exchange the two halfwords of one operand, perform one signed
+           16-bit integer subtraction and one signed 16-bit addition, and halve the results.
+  \param [in]    x   first 16-bit operands.
+  \param [in]    y   second 16-bit operands.
+  \return        the halved addition of the low halfword in the first operand and the
+                 high halfword in the second operand, in the low halfword of the return value.\n
+                 the halved subtraction of the low halfword in the second operand from the
+                 high halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  + val2[31:16]) >> 1        \n
+                 res[31:16] = (val1[31:16] - val2[15:0]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SHSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = (((((int32_t)x << 16) >> 16) + (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+    s = (((((int32_t)x) >> 16) - (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
+
+    return ((uint32_t)((s << 16) | (r)));
+}
+
+/**
+  \brief   Dual 16-bit unsigned subtraction and addition with halved results and exchange.
+  \details This function enables you to exchange the halfwords of the second operand,
+           subtract the high halfwords and add the low halfwords, halving the results.
+  \param [in]    x   first operand for the addition in the low halfword, and
+                     the first operand for the subtraction in the high halfword.
+  \param [in]    y   second operand for the addition in the high halfword, and
+                     the second operand for the subtraction in the low halfword.
+  \return        the halved addition of the low halfword in the first operand and the
+                 high halfword in the second operand, in the low halfword of the return value.\n
+                 the halved subtraction of the low halfword in the second operand from the
+                 high halfword in the first operand, in the high halfword of the return value.
+  \remark
+                 res[15:0]  = (val1[15:0]  + val2[31:16]) >> 1        \n
+                 res[31:16] = (val1[31:16] - val2[15:0]) >> 1
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UHSAX(uint32_t x, uint32_t y)
+{
+    int32_t r, s;
+
+    r = ((((x << 16) >> 16) + ((y) >> 16)) >> 1) & 0x0000FFFF;
+    s = ((((x) >> 16) - ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
+
+    return ((s << 16) | (r));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with exchange returning difference.
+  \details This function enables you to perform two 16-bit signed multiplications, subtracting
+           one of the products from the other. The halfwords of the second operand are exchanged
+           before performing the arithmetic. This produces top * bottom and bottom * top multiplication.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \return        the difference of the products of the two 16-bit signed multiplications.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]       \n
+                 p2 = val1[31:16] * val2[15:0]        \n
+                 res[31:0] = p1 - p2
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMUSDX(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16))));
+}
+
+/**
+  \brief   Sum of dual 16-bit signed multiply with exchange.
+  \details This function enables you to perform two 16-bit signed multiplications with exchanged
+           halfwords of the second operand, adding the products together.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \return        the sum of the products of the two 16-bit signed multiplications with exchanged halfwords of the second operand.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]       \n
+                 p2 = val1[31:16] * val2[15:0]        \n
+                 res[31:0] = p1 + p2
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMUADX(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16))));
+}
+
+
+/**
+  \brief   Saturating add.
+  \details This function enables you to obtain the saturating add of two integers.
+  \param [in]    x   first summand of the saturating add operation.
+  \param [in]    y   second summand of the saturating add operation.
+  \return        the saturating addition of val1 and val2.
+  \remark
+                 res[31:0] = SAT(val1 + SAT(val2))
+ */
+__ALWAYS_STATIC_INLINE int32_t __QADD(int32_t x, int32_t y)
+{
+    int32_t result;
+
+    if (y >= 0) {
+        if ((int32_t)((uint32_t)x + (uint32_t)y) >= x) {
+            result = x + y;
+        } else {
+            result = 0x7FFFFFFF;
+        }
+    } else {
+        if ((int32_t)((uint32_t)x + (uint32_t)y) < x) {
+            result = x + y;
+        } else {
+            result = 0x80000000;
+        }
+    }
+
+    return result;
+}
+
+/**
+  \brief   Saturating subtract.
+  \details This function enables you to obtain the saturating subtraction of two integers.
+  \param [in]    x   minuend of the saturating subtraction.
+  \param [in]    y   subtrahend of the saturating subtraction.
+  \return        the saturating subtraction of val2 from val1.
+  \remark
+                 res[31:0] = SAT(val1 - SAT(val2))
+ */
+__ALWAYS_STATIC_INLINE int32_t __QSUB(int32_t x, int32_t y)
+{
+    int64_t tmp;
+    int32_t result;
+
+    tmp = (int64_t)x - (int64_t)y;
+
+    if (tmp > 0x7fffffff) {
+        tmp = 0x7fffffff;
+    } else if (tmp < (-2147483647 - 1)) {
+        tmp = -2147483647 - 1;
+    }
+
+    result = tmp;
+    return result;
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with single 32-bit accumulator.
+  \details This function enables you to perform two signed 16-bit multiplications,
+           adding both results to a 32-bit accumulate operand.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the product of each multiplication added to the accumulate value, as a 32-bit integer.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 res[31:0] = p1 + p2 + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMLAD(uint32_t x, uint32_t y, uint32_t sum)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
+                       (((int32_t)sum))));
+}
+
+/**
+  \brief   Pre-exchanged dual 16-bit signed multiply with single 32-bit accumulator.
+  \details This function enables you to perform two signed 16-bit multiplications with exchanged
+           halfwords of the second operand, adding both results to a 32-bit accumulate operand.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the product of each multiplication with exchanged halfwords of the second
+                 operand added to the accumulate value, as a 32-bit integer.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]     \n
+                 p2 = val1[31:16] * val2[15:0]      \n
+                 res[31:0] = p1 + p2 + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMLADX(uint32_t x, uint32_t y, uint32_t sum)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       (((int32_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply subtract with 32-bit accumulate.
+  \details This function enables you to perform two 16-bit signed multiplications, take the
+           difference of the products, subtracting the high halfword product from the low
+           halfword product, and add the difference to a 32-bit accumulate operand.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the difference of the product of each multiplication, added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]       \n
+                 p2 = val1[31:16] * val2[31:16]      \n
+                 res[31:0] = p1 - p2 + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMLSD(uint32_t x, uint32_t y, uint32_t sum)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
+                       (((int32_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with exchange subtract with 32-bit accumulate.
+  \details This function enables you to exchange the halfwords in the second operand, then perform two 16-bit
+           signed multiplications. The difference of the products is added to a 32-bit accumulate operand.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the difference of the product of each multiplication, added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]     \n
+                 p2 = val1[31:16] * val2[15:0]      \n
+                 res[31:0] = p1 - p2 + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMLSDX(uint32_t x, uint32_t y, uint32_t sum)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       (((int32_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with single 64-bit accumulator.
+  \details This function enables you to perform two signed 16-bit multiplications, adding both results
+           to a 64-bit accumulate operand. Overflow is only possible as a result of the 64-bit addition.
+           This overflow is not detected if it occurs. Instead, the result wraps around modulo 2^64.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the product of each multiplication added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 sum = p1 + p2 + val3[63:32][31:0]  \n
+                 res[63:32] = sum[63:32]            \n
+                 res[31:0]  = sum[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint64_t __SMLALD(uint32_t x, uint32_t y, uint64_t sum)
+{
+    return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
+                       (((uint64_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with exchange with single 64-bit accumulator.
+  \details This function enables you to exchange the halfwords of the second operand, and perform two
+           signed 16-bit multiplications, adding both results to a 64-bit accumulate operand. Overflow
+           is only possible as a result of the 64-bit addition. This overflow is not detected if it occurs.
+           Instead, the result wraps around modulo 2^64.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the product of each multiplication added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]     \n
+                 p2 = val1[31:16] * val2[15:0]      \n
+                 sum = p1 + p2 + val3[63:32][31:0]  \n
+                 res[63:32] = sum[63:32]            \n
+                 res[31:0]  = sum[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint64_t __SMLALDX(uint32_t x, uint32_t y, uint64_t sum)
+{
+    return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       (((uint64_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply subtract with 64-bit accumulate.
+  \details This function enables you to perform two 16-bit signed multiplications, take the difference
+           of the products, subtracting the high halfword product from the low halfword product, and add the
+           difference to a 64-bit accumulate operand. Overflow cannot occur during the multiplications or the
+           subtraction. Overflow can occur as a result of the 64-bit addition, and this overflow is not
+           detected. Instead, the result wraps around modulo 2^64.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the difference of the product of each multiplication, added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 res[63:32][31:0] = p1 - p2 + val3[63:32][31:0]
+ */
+__ALWAYS_STATIC_INLINE uint64_t __SMLSLD(uint32_t x, uint32_t y, uint64_t sum)
+{
+    return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
+                       (((uint64_t)sum))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply with exchange subtract with 64-bit accumulate.
+  \details This function enables you to exchange the halfwords of the second operand, perform two 16-bit multiplications,
+           adding the difference of the products to a 64-bit accumulate operand. Overflow cannot occur during the
+           multiplications or the subtraction. Overflow can occur as a result of the 64-bit addition, and this overflow
+           is not detected. Instead, the result wraps around modulo 2^64.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the difference of the product of each multiplication, added to the accumulate value.
+  \remark
+                 p1 = val1[15:0]  * val2[31:16]      \n
+                 p2 = val1[31:16] * val2[15:0]       \n
+                 res[63:32][31:0] = p1 - p2 + val3[63:32][31:0]
+ */
+__ALWAYS_STATIC_INLINE uint64_t __SMLSLDX(uint32_t x, uint32_t y, uint64_t sum)
+{
+    return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       (((uint64_t)sum))));
+}
+
+/**
+  \brief   32-bit signed multiply with 32-bit truncated accumulator.
+  \details This function enables you to perform a signed 32-bit multiplication, adding the most
+           significant 32 bits of the 64-bit result to a 32-bit accumulate operand.
+  \param [in]    x   first operand for multiplication.
+  \param [in]    y   second operand for multiplication.
+  \param [in]  sum   accumulate value.
+  \return        the most significant 32 bits of the product, added to the accumulate value, as a 32-bit integer.
+  \remark
+                 p = val1 * val2      \n
+                 res[31:0] = p[63:32] + val3[31:0]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMMLA(int32_t x, int32_t y, int32_t sum)
+{
+    return (uint32_t)((int32_t)((int64_t)((int64_t)x * (int64_t)y) >> 32) + sum);
+}
+
+/**
+  \brief   Sum of dual 16-bit signed multiply.
+  \details This function enables you to perform two 16-bit signed multiplications, adding the products together.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \return        the sum of the products of the two 16-bit signed multiplications.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 res[31:0] = p1 + p2
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMUAD(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) +
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16))));
+}
+
+/**
+  \brief   Dual 16-bit signed multiply returning difference.
+  \details This function enables you to perform two 16-bit signed multiplications, taking the difference
+           of the products by subtracting the high halfword product from the low halfword product.
+  \param [in]    x   first 16-bit operands for each multiplication.
+  \param [in]    y   second 16-bit operands for each multiplication.
+  \return        the difference of the products of the two 16-bit signed multiplications.
+  \remark
+                 p1 = val1[15:0]  * val2[15:0]      \n
+                 p2 = val1[31:16] * val2[31:16]     \n
+                 res[31:0] = p1 - p2
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SMUSD(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) -
+                       ((((int32_t)x) >> 16) * (((int32_t)y) >> 16))));
+}
+
+/**
+  \brief   Dual extracted 8-bit to 16-bit signed addition.
+  \details This function enables you to extract two 8-bit values from the second operand (at bit positions
+           [7:0] and [23:16]), sign-extend them to 16-bits each, and add the results to the first operand.
+  \param [in]    x   values added to the sign-extended to 16-bit values.
+  \param [in]    y   two 8-bit values to be extracted and sign-extended.
+  \return        the addition of val1 and val2, where the 8-bit values in val2[7:0] and
+                 val2[23:16] have been extracted and sign-extended prior to the addition.
+  \remark
+                 res[15:0]  = val1[15:0] + SignExtended(val2[7:0])      \n
+                 res[31:16] = val1[31:16] + SignExtended(val2[23:16])
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SXTAB16(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)((((((int32_t)y << 24) >> 24) + (((int32_t)x << 16) >> 16)) & (int32_t)0x0000FFFF) |
+                       (((((int32_t)y <<  8) >>  8)  + (((int32_t)x >> 16) << 16)) & (int32_t)0xFFFF0000)));
+}
+
+/**
+  \brief   Dual extracted 8-bit to 16-bit unsigned addition.
+  \details This function enables you to extract two 8-bit values from one operand, zero-extend
+           them to 16 bits each, and add the results to two 16-bit values from another operand.
+  \param [in]    x   values added to the zero-extended to 16-bit values.
+  \param [in]    y   two 8-bit values to be extracted and zero-extended.
+  \return        the addition of val1 and val2, where the 8-bit values in val2[7:0] and
+                 val2[23:16] have been extracted and zero-extended prior to the addition.
+  \remark
+                 res[15:0]  = ZeroExt(val2[7:0]   to 16 bits) + val1[15:0]      \n
+                 res[31:16] = ZeroExt(val2[23:16] to 16 bits) + val1[31:16]
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UXTAB16(uint32_t x, uint32_t y)
+{
+    return ((uint32_t)(((((y << 24) >> 24) + ((x << 16) >> 16)) & 0x0000FFFF) |
+                       ((((y <<  8) >>  8) + ((x >> 16) << 16)) & 0xFFFF0000)));
+}
+
+/**
+  \brief   Dual extract 8-bits and sign extend each to 16-bits.
+  \details This function enables you to extract two 8-bit values from an operand and sign-extend them to 16 bits each.
+  \param [in]    x   two 8-bit values in val[7:0] and val[23:16] to be sign-extended.
+  \return        the 8-bit values sign-extended to 16-bit values.\n
+                 sign-extended value of val[7:0] in the low halfword of the return value.\n
+                 sign-extended value of val[23:16] in the high halfword of the return value.
+  \remark
+                 res[15:0]  = SignExtended(val[7:0])       \n
+                 res[31:16] = SignExtended(val[23:16])
+ */
+__ALWAYS_STATIC_INLINE uint32_t __SXTB16(uint32_t x)
+{
+    return ((uint32_t)(((((int32_t)x << 24) >> 24) & (int32_t)0x0000FFFF) |
+                       ((((int32_t)x <<  8) >>  8) & (int32_t)0xFFFF0000)));
+}
+
+/**
+  \brief   Dual extract 8-bits and zero-extend to 16-bits.
+  \details This function enables you to extract two 8-bit values from an operand and zero-extend them to 16 bits each.
+  \param [in]    x   two 8-bit values in val[7:0] and val[23:16] to be zero-extended.
+  \return        the 8-bit values zero-extended to 16-bit values.\n
+                 zero-extended value of val[7:0] in the low halfword of the return value.\n
+                 zero-extended value of val[23:16] in the high halfword of the return value.
+  \remark
+                 res[15:0]  = ZeroExtended(val[7:0])       \n
+                 res[31:16] = ZeroExtended(val[23:16])
+ */
+__ALWAYS_STATIC_INLINE uint32_t __UXTB16(uint32_t x)
+{
+    return ((uint32_t)((((x << 24) >> 24) & 0x0000FFFF) |
+                       (((x <<  8) >>  8) & 0xFFFF0000)));
+}
+
+#endif /* _CSI_RV32_GCC_H_ */
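
A minimal usage sketch of the software intrinsics above, assuming the application includes csi_core.h (which resolves to core/csi_rv32_gcc.h on RV32 builds) and packs two Q15 samples per 32-bit word itself:

    #include <stdint.h>
    #include "csi_core.h"   /* pulls in core/csi_rv32_gcc.h on RV32 builds */

    /* Dot product of two Q15 streams, two samples per 32-bit word:
     * __SMLAD accumulates lo*lo + hi*hi per word (see its remark above). */
    static int32_t dot_q15(const uint32_t *a, const uint32_t *b, uint32_t pairs)
    {
        uint32_t acc = 0;
        for (uint32_t i = 0; i < pairs; i++) {
            acc = __SMLAD(a[i], b[i], acc);
        }
        return (int32_t)acc;
    }

    /* Saturating accumulate: clamps at INT32_MAX/INT32_MIN instead of wrapping. */
    static int32_t sat_acc(int32_t acc, int32_t sample)
    {
        return __QADD(acc, sample);
    }
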

+ 62 - 0
lib/sec_library/include/csi_core.h

@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017-2019 Alibaba Group Holding Limited
+ */
+
+
+/******************************************************************************
+ * @file     csi_core.h
+ * @brief    CSI Core Layer Header File
+ * @version  V1.0
+ * @date     02. June 2017
+ ******************************************************************************/
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include <stdint.h>
+
+#if defined(__CK801__) || defined(__E801__)
+#include "core/core_801.h"
+#elif defined(__CK802__) || defined(__E802__) || defined(__E802T__) || defined(__S802__) || defined(__S802T__)
+#include "core/core_802.h"
+#elif defined(__CK804__) || defined(__E804D__) || defined(__E804DT__) || defined(__E804F__)  || defined(__E804FT__) || defined (__E804DF__) || defined(__E804DFT__)
+#include "core/core_804.h"
+#elif defined(__CK803__) || defined(__E803__) || defined(__E803T__) || defined(__S803__) || defined(__S803T__)
+#include "core/core_803.h"
+#elif defined(__CK805__) || defined(__I805__) || defined(__I805F__)
+#include "core/core_805.h"
+#elif defined(__CK610__)
+#include "core/core_ck610.h"
+#elif defined(__CK810__) || defined(__C810__) || defined(__C810T__) || defined(__C810V__) || defined(__C810VT__)
+#include "core/core_810.h"
+#elif defined(__CK807__) || defined(__C807__) || defined(__C807F__) || defined(__C807FV__) || defined(__R807__)
+#include "core/core_807.h"
+#elif defined(__riscv) && defined(CONFIG_CSKY_CORETIM)
+#include "core/core_rv32_old.h"
+#elif defined(__riscv) && (__riscv_xlen == 32)
+#include "core/core_rv32.h"
+#elif defined(__riscv) && (__riscv_xlen == 64)
+#include "core/core_rv64.h"
+#endif
+
+#if defined(__riscv) && (__riscv_xlen == 32)
+#include "core/csi_rv32_gcc.h"
+#elif defined(__riscv) && (__riscv_xlen == 64)
+#include "core/csi_rv64_gcc.h"
+#elif defined(__csky__)
+#include "core/csi_gcc.h"
+#endif
+
+#ifdef __arm__
+#include "csi_core_cmsis.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CORE_H_ */
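
How this dispatch header is consumed, as an illustration only (the selection macros come from the toolchain and board configuration, not from user code):

    /* Application and driver code include only the umbrella header; the
     * compiler-provided macros (__riscv, __riscv_xlen, __CK80x__, ...) decide
     * which core/*.h and csi_*_gcc.h variants are actually pulled in. */
    #include "csi_core.h"
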

+ 3 - 4
lib/sec_library/include/csi_efuse_api.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2019-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2019-2020 Alibaba Group Holding Limited
  */
 #ifndef __CSI_EFUSE_API_H__
 #define __CSI_EFUSE_API_H__
@@ -22,8 +22,7 @@ typedef enum {
 } img_encrypt_st_t;
 
 int csi_efuse_api_int(void);
-
-int csi_efuse_api_uninit(void);
+void csi_efuse_api_uninit(void);
 
 int csi_efuse_get_secure_boot_st(sboot_st_t *sboot_st);
 
@@ -53,4 +52,4 @@ int csi_efuse_write_raw(uint32_t addr, const void *data, uint32_t cnt);
 }
 #endif
 
-#endif	/* __CSI_EFUSE_API_H__ */
+#endif	/* __CSI_EFUSE_API_H__ */

+ 2 - 2
lib/sec_library/include/csi_sec_img_verify.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2019-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2019-2020 Alibaba Group Holding Limited
  */
 #ifndef __CSI_SEC_IMG_VERIFY_H__
 #define __CSI_SEC_IMG_VERIFY_H__
@@ -28,4 +28,4 @@ int csi_sec_get_lib_version(char ** p_version);
 }
 #endif
 
-#endif	/* __CSI_SEC_IMG_VERIFY_H__ */
+#endif	/* __CSI_SEC_IMG_VERIFY_H__ */

+ 87 - 0
lib/sec_library/include/dev_tag.h

@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
+ */
+
+/******************************************************************************
+ * @file     drv/dev_tag.h
+ * @brief    Header File for DEV TAG Driver
+ * @version  V1.0
+ * @date     31. March 2020
+ * @model    common
+ ******************************************************************************/
+
+#ifndef _DRV_DEV_TAG_H_
+#define _DRV_DEV_TAG_H_
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+    DEV_BLANK_TAG = 0U,
+    DEV_DW_UART_TAG,
+    DEV_DW_DMA_TAG,
+    DEV_DW_GPIO_TAG,
+    DEV_DW_IIC_TAG,
+    DEV_DW_QSPI_TAG,
+    DEV_DW_SDMMC_TAG,
+    DEV_DW_SDHCI_TAG,
+    DEV_DW_SPI_TAG,
+    DEV_DW_TIMER_TAG,
+    DEV_DW_WDT_TAG,
+    DEV_WJ_ADC_TAG,
+    DEV_WJ_AES_TAG,
+    DEV_WJ_CODEC_TAG,
+    DEV_WJ_CRC_TAG,
+    DEV_WJ_DMA_TAG,
+    DEV_WJ_EFLASH_TAG,
+    DEV_WJ_EFUSE_TAG,
+    DEV_WJ_ETB_TAG,
+    DEV_WJ_FFT_TAG,
+    DEV_WJ_I2S_TAG,
+    DEV_WJ_MBOX_TAG,
+    DEV_WJ_PADREG_TAG,
+    DEV_WJ_PDM_TAG,
+    DEV_WJ_PINMUX_TAG,
+    DEV_WJ_PMU_TAG,
+    DEV_WJ_PWM_TAG,
+    DEV_WJ_PWMR_TAG,
+    DEV_WJ_RNG_TAG,
+    DEV_WJ_ROM_TAG,
+    DEV_WJ_RSA_TAG,
+    DEV_WJ_RTC_TAG,
+    DEV_WJ_SASC_TAG,
+    DEV_WJ_SHA_TAG,
+    DEV_WJ_SPDIF_TAG,
+    DEV_WJ_SPIDF_TAG,
+    DEV_WJ_TDM_TAG,
+    DEV_WJ_TIPC_TAG,
+    DEV_WJ_USB_TAG,
+    DEV_WJ_USI_TAG,
+    DEV_WJ_VAD_TAG,
+    DEV_CD_QSPI_TAG,
+    DEV_DCD_ISO7816_TAG,
+    DEV_OSR_RNG_TAG,
+    DEV_QX_RTC_TAG,
+    DEV_RCHBAND_CODEC_TAG,
+    DEV_CMSDK_UART_TAG,
+    DEV_RAMBUS_150B_PKA_TAG,
+    DEV_RAMBUS_150B_TRNG_TAG,
+    DEV_RAMBUS_120SI_TAG,
+    DEV_RAMBUS_120SII_TAG,
+    DEV_RAMBUS_120SIII_TAG,
+	DEV_WJ_AVFS_TAG,
+    DEV_WJ_BMU_TAG,
+} csi_dev_tag_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DRV_DEV_TAG_H_ */
+

+ 73 - 0
lib/sec_library/include/device_types.h

@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
+ */
+/* device_types.h
+ *
+ * Driver Framework, Device API, Type Definitions
+ *
+ * The document "Driver Framework Porting Guide" contains the detailed
+ * specification of this API. The information contained in this header file
+ * is for reference only.
+ */
+
+
+
+#ifndef INCLUDE_GUARD_DEVICE_TYPES_H
+#define INCLUDE_GUARD_DEVICE_TYPES_H
+
+/*----------------------------------------------------------------------------
+ * Device_Handle_t
+ *
+ * This handle represents a device, typically one hardware block instance.
+ *
+ * The Device API can access the static device resources (registers and RAM
+ * inside the device) using offsets inside the device. This abstracts memory
+ * map knowledge and simplifies device instantiation.
+ *
+ * Each device has its own configuration, including the endianness swapping
+ * need for the words transferred. Endianness swapping can thus be performed
+ * on the fly and transparent to the caller.
+ *
+ * The details of the handle are implementation specific and must not be
+ * relied on, with one exception: NULL is guaranteed to be a non-existing
+ * handle.
+ */
+
+
+
+typedef void * Device_Handle_t;
+
+
+/*----------------------------------------------------------------------------
+ * Device_Reference_t
+ *
+ * This is an implementation-specific reference for the device. It can
+ * be passed from the implementation of the Device API to other modules
+ * for use, for example, with OS services that require such a reference.
+ *
+ * The details of the handle are implementation specific and must not be
+ * relied on, with one exception: NULL is guaranteed to be a non-existing
+ * handle.
+ */
+typedef void * Device_Reference_t;
+
+
+/*----------------------------------------------------------------------------
+ * Device_Data_t
+ *
+ * This is an implementation-specific reference for the device. It can
+ * be passed from the implementation of the Device API to other modules
+ * for use, for example, with OS services that require such a reference.
+ */
+typedef struct
+{
+    // Physical address of the device mapped in memory
+    void * PhysAddr;
+
+} Device_Data_t;
+
+
+#endif /* Include Guard */
+
+
+/* end of file device_types.h */

+ 285 - 0
lib/sec_library/include/ecc.h

@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2017-2022 Alibaba Group Holding Limited
+ */
+
+/******************************************************************************
+ * @file       drv/ecc.h
+ * @brief      Header File for ECC Driver
+ * @version    V3.3
+ * @date       30. May 2022
+ * @model      ECC
+ ******************************************************************************/
+
+#ifndef _DRV_ECC_H_
+#define _DRV_ECC_H_
+
+#include <stdint.h>
+#include "common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ECC_PRIME_CURVE_G_BYTES 64
+#define ECC_PRIME_CURVE_P_BYTES 70
+
+typedef struct {
+    uint32_t ecc_curve : 1; ///< supports 256bits curve
+} ecc_capabilities_t;
+
+/**
+\brief ECC ciphertext order
+*/
+typedef enum {
+    ECC_C1C3C2 = 0,
+    ECC_C1C2C3,
+} ecc_cipher_order_e;
+
+typedef enum {
+    ECC_ENDIAN_LITTLE = 0, ///< Little Endian
+    ECC_ENDIAN_BIG         ///< Big Endian
+} ecc_endian_mode_e;
+
+typedef enum {
+    ECC_PRIME256V1 = 0,
+} ecc_prime_curve_type;
+
+/**
+\brief ECC key exchange role
+*/
+typedef enum { ECC_Role_Sponsor = 0, ECC_Role_Responsor } ecc_exchange_role_e;
+
+/****** ECC Event *****/
+typedef enum {
+    ECC_EVENT_MAKE_KEY_COMPLETE = 0, ///< Make key completed
+    ECC_EVENT_ENCRYPT_COMPLETE,      ///< Encrypt completed
+    ECC_EVENT_DECRYPT_COMPLETE,      ///< Decrypt completed
+    ECC_EVENT_SIGN_COMPLETE,         ///< Sign completed
+    ECC_EVENT_VERIFY_COMPLETE,       ///< Verify completed
+    ECC_EVENT_EXCHANGE_KEY_COMPLETE, ///< Exchange key completed
+} ecc_event_e;
+
+typedef struct {
+    ecc_prime_curve_type type;
+    uint32_t *p;
+} csi_ecc_prime_curve_t;
+
+typedef struct {
+    ecc_prime_curve_type type;
+    uint8_t *G;
+    uint8_t *n;
+} csi_ecc_curve_g_t;
+
+/**
+\brief ECC status
+*/
+typedef struct {
+    uint32_t busy : 1; ///< Calculate busy flag
+} csi_ecc_state_t;
+
+typedef struct {
+    csi_dev_t       dev;
+    void *          cb;
+    void *          arg;
+    csi_ecc_state_t state;
+    ecc_prime_curve_type  type;
+} csi_ecc_t;
+
+///< Pointer to \ref csi_ecc_callback_t : ECC Event call back.
+typedef void (*csi_ecc_callback_t)(ecc_event_e event);
+
+/**
+  \brief       Initialize ECC.
+  \param[in]   ecc  ecc handle to operate.
+  \param[in]   idx  device id
+  \return      \ref uint32_t
+*/
+csi_error_t csi_ecc_init(csi_ecc_t *ecc, uint32_t idx);
+
+/**
+  \brief       De-initialize ECC Interface. stops operation and releases the software resources used by the interface
+  \param[in]   ecc  ecc handle to operate.
+  \return      none
+*/
+void csi_ecc_uninit(csi_ecc_t *ecc);
+
+/**
+  \brief       ecc get capability.
+  \param[in]   ecc  Operate handle.
+  \return      \ref uint32_t
+*/
+csi_error_t csi_ecc_config(csi_ecc_t *ecc, ecc_cipher_order_e co,
+                           ecc_endian_mode_e endian);
+
+/**
+  \brief       Attach the callback handler to ECC
+  \param[in]   ecc  Operate handle.
+  \param[in]   cb    Callback function
+  \param[in]   arg   User can define it by himself as callback's param
+  \return      Error code \ref csi_error_t
+*/
+csi_error_t csi_ecc_attach_callback(csi_ecc_t *ecc, csi_ecc_callback_t cb,
+                                    void *arg);
+
+/**
+  \brief       Detach the callback handler
+  \param[in]   ecc  Operate handle.
+*/
+csi_error_t csi_ecc_detach_callback(csi_ecc_t *ecc);
+
+/**
+  \brief       ecc get capability.
+  \param[in]   ecc  Operate handle.
+  \param[out]   cap  Pointer of ecc_capabilities_t.
+  \return      \ref uint32_t
+*/
+csi_error_t csi_ecc_get_capabilities(csi_ecc_t *ecc, ecc_capabilities_t *cap);
+
+csi_error_t csi_ecc_check_keypair(csi_ecc_t *ecc, uint8_t pubkey[65], uint8_t prikey[32]);
+
+/**
+  \brief       generate ecc key.
+  \param[in]   ecc       ecc handle to operate.
+  \param[out]  prikey   Pointer to the ecc private key, alloc by caller.
+  \param[out]  pubkey   Pointer to the ecc public key, alloc by caller.
+  \return      \ref uint32_t
+*/
+csi_error_t csi_ecc_gen_key(csi_ecc_t *ecc, uint8_t pubkey[65],
+                            uint8_t prikey[32]);
+
+/**
+  \brief       generate ecc pubkey by privkey.
+  \param[in]   ecc       ecc handle to operate.
+  \param[in]   prikey   Pointer to the ecc private key, alloc by caller.
+  \param[out]  pubkey   Pointer to the ecc public key, alloc by caller.
+  \return      \ref uint32_t
+*/
+csi_error_t csi_ecc_gen_pubkey(csi_ecc_t *ecc, uint8_t pubkey[65],
+                            uint8_t prikey[32]);
+
+/**
+  \brief       ecc sign
+  \param[in]   ecc       ecc handle to operate.
+  \param[in]   d       Pointer to the digest.
+  \param[in]   prikey  Pointer to the private key
+  \param[out]  s       Pointer to the signature
+  \return      \ref uint32_t
+*/
+csi_error_t csi_ecc_sign(csi_ecc_t *ecc, uint8_t d[32], uint8_t prikey[32],
+                         uint8_t s[64]);
+
+/**
+  \brief       ecc sign
+  \param[in]   ecc       ecc handle to operate.
+  \param[in]   d       Pointer to the digest.
+  \param[in]   prikey  Pointer to the private key
+  \param[out]  s       Pointer to the signature
+  \return      \ref uint32_t
+*/
+csi_error_t csi_ecc_sign_async(csi_ecc_t *ecc, uint8_t d[32],
+                               uint8_t prikey[32], uint8_t s[64]);
+
+/* TODO */
+/**
+  \brief       ecc verify
+  \param[in]   ecc       ecc handle to operate.
+  \param[in]   d       Pointer to the digest.
+  \param[in]   pubkey  Pointer to the public key
+  \param[in]   s       Pointer to the signature
+  \return      verify result
+*/
+bool csi_ecc_verify(csi_ecc_t *ecc, uint8_t d[32], uint8_t pubkey[65],
+                    uint8_t s[64]);
+
+/**
+  \brief       ecc verify
+  \param[in]   ecc       ecc handle to operate.
+  \param[in]   d       Pointer to the digest.
+  \param[in]   pubkey  Pointer to the public key
+  \param[in]   s       Pointer to the signature
+  \return      verify result
+*/
+bool csi_ecc_verify_async(csi_ecc_t *ecc, uint8_t d[32], uint8_t pubkey[65],
+                          uint8_t s[64]);
+
+/**
+  \brief       ecc encrypt
+  \param[in]   ecc       ecc handle to operate.
+  \param[in]   Plain       Pointer to the plaintext.
+  \param[in]  PlainByteLen plaintext len
+  \param[in]  pubKey public key.
+  \param[out]  Cipher Pointer to the ciphertext
+  \param[out]  CipherByteLen Pointer to the ciphertext len.
+  \return      uint32_t
+*/
+csi_error_t csi_ecc_encrypt(csi_ecc_t *ecc, uint8_t *Plain,
+                            uint32_t PlainByteLen, uint8_t pubKey[65],
+                            uint8_t *Cipher, uint32_t *CipherByteLen);
+
+/**
+  \brief       ecc decrypt
+  \param[in]   ecc       ecc handle to operate.
+  \param[in]  Cipher Pointer to the ciphertext
+  \param[in]  CipherByteLen ciphertext len.
+  \param[in]  prikey private key.
+  \param[out]   Plain       Pointer to the plaintext.
+  \param[out]  PlainByteLen plaintext len
+  \return      uint32_t
+*/
+csi_error_t csi_ecc_decrypt(csi_ecc_t *ecc, uint8_t *Cipher,
+                            uint32_t CipherByteLen, uint8_t prikey[32],
+                            uint8_t *Plain, uint32_t *PlainByteLen);
+
+/**
+  \brief       ecc key exchange
+  \param[in]   ecc       ecc handle to operate.
+  \return      uint32_t
+*/
+csi_error_t csi_ecc_exchangekey(csi_ecc_t *ecc, ecc_exchange_role_e role,
+                                uint8_t *dA, uint8_t *PB, uint8_t *rA,
+                                uint8_t *RA, uint8_t *RB, uint8_t *ZA,
+                                uint8_t *ZB, uint32_t kByteLen, uint8_t *KA,
+                                uint8_t *S1, uint8_t *SA);
+
+/**
+  \brief       ecc key exchange get Z.
+  \param[in]   ecc       ecc handle to operate.
+  \return      uint32_t
+*/
+csi_error_t csi_ecc_getZ(csi_ecc_t *ecc, uint8_t *ID, uint32_t byteLenofID,
+                         uint8_t pubKey[65], uint8_t Z[32]);
+
+/**
+  \brief       ecc key exchange get E
+  \param[in]   ecc       ecc handle to operate.
+  \return      uint32_t
+*/
+csi_error_t csi_ecc_getE(csi_ecc_t *ecc, uint8_t *M, uint32_t byteLen,
+                         uint8_t Z[32], uint8_t E[32]);
+
+/**
+  \brief       Get ECC state.
+  \param[in]   ecc      ECC handle to operate.
+  \param[out]  state    ECC state \ref csi_ecc_state_t.
+  \return      Error code \ref csi_error_t
+*/
+csi_error_t csi_ecc_get_state(csi_ecc_t *ecc, csi_ecc_state_t *state);
+
+/**
+  \brief       Enable ecc power manage
+  \param[in]   ecc  ECC handle to operate.
+  \return      Error code \ref csi_error_t
+*/
+csi_error_t csi_ecc_enable_pm(csi_ecc_t *ecc);
+
+/**
+  \brief       Disable ecc power manage
+  \param[in]   ecc  ECC handle to operate.
+*/
+void csi_ecc_disable_pm(csi_ecc_t *ecc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
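
A hedged usage sketch of the API declared above (error handling trimmed; device index 0 and treating 0 as the CSI_OK return code are assumptions, the buffer sizes follow the prototypes):

    #include <stdbool.h>
    #include "ecc.h"

    static int ecdsa_p256_selftest(void)
    {
        csi_ecc_t ecc;
        uint8_t pubkey[65], prikey[32];
        uint8_t digest[32] = { 0 };   /* caller-provided 32-byte hash */
        uint8_t sig[64];

        if (csi_ecc_init(&ecc, 0) != 0)        /* 0 == CSI_OK assumed */
            return -1;

        csi_ecc_gen_key(&ecc, pubkey, prikey);
        csi_ecc_sign(&ecc, digest, prikey, sig);
        bool ok = csi_ecc_verify(&ecc, digest, pubkey, sig);

        csi_ecc_uninit(&ecc);
        return ok ? 0 : -1;
    }
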

+ 41 - 0
lib/sec_library/include/ecdh.h

@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2017-2022 Alibaba Group Holding Limited
+ */
+
+/******************************************************************************
+ * @file       drv/ecdh.h
+ * @brief      Header File for ECDH Driver
+ * @version    V3.3
+ * @date       10.June 2022
+ * @model      ECC
+ ******************************************************************************/
+
+#ifndef _DRV_ECDH_H_
+#define _DRV_ECDH_H_
+
+#include <stdint.h>
+#include "common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+  \brief       ecdh calc shared secret
+  \param[in]  ecc ecc handle to operate.
+  \param[in]  pubkey Pointer to the A public key.
+  \param[in]  privkey Pointer to the B private key.
+  \param[out] shareKey Pointer to the share secret.
+  \param[out] len length of the share secret.
+  \return     \ref uint32_t
+*/
+csi_error_t csi_ecdh_calc_secret(csi_ecc_t *ecc, uint8_t privkey[32],
+                            uint8_t pubkey[65], uint8_t shareKey[32], 
+                            uint32_t *len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
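
A small sketch of the single call above (the csi_ecc_t handle is assumed to be initialized with csi_ecc_init() as in ecc.h):

    #include <stdint.h>
    #include "ecdh.h"

    /* Derive a 32-byte shared secret from our raw P-256 private key and the
     * peer's uncompressed public key. */
    static csi_error_t derive_shared(csi_ecc_t *ecc,
                                     uint8_t my_prikey[32],
                                     uint8_t peer_pubkey[65],
                                     uint8_t shared[32])
    {
        uint32_t len = 0;
        return csi_ecdh_calc_secret(ecc, my_prikey, peer_pubkey, shared, &len);
    }
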

+ 20 - 3
lib/sec_library/include/kdf.h

@@ -1,12 +1,19 @@
 /*
- * Copyright (C) 2019-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2019-2020 Alibaba Group Holding Limited
  */
 
 #ifndef __KDF_H__
 #define __KDF_H__
+#ifdef SEC_LIB_VERSION
 #include "drv/aes.h"
 #include "drv/sm4.h"
 #include "drv/common.h"
+#else
+#include "aes.h"
+#include "sm4.h"
+#include "common.h"
+#endif
+
 #include <stdint.h>
 
 typedef enum {
@@ -50,6 +57,9 @@ typedef enum {
         KDF_KEY_TYPE_TDES_192,
         KDF_KEY_TYPE_TDES_128,
         KDF_KEY_TYPE_DES,
+	/* for rpmb, str */
+/* 	KDF_KEY_TYPE_HMAC_SHA256,
+ */
         KDF_KEY_TYPE_MAX,
 } csi_kdf_key_type_t;
 
@@ -113,12 +123,12 @@ csi_error_t csi_kdf_destory_key(csi_kdf_t *kdf, csi_kdf_derived_key_t dkey);
 
 /**
   \brief       Set key to algorithim engine.
-  \param[in]   handle    Handle to cipher.
   \param[in]   kdf    Handle to operate.
+  \param[in]   handle    Handle to cipher.
   \param[in]   dkey derived key type.
   \return      error code
 */
-csi_error_t csi_kdf_set_key(csi_kdf_key_handle_t *handle, csi_kdf_t *kdf,
+csi_error_t csi_kdf_set_key(csi_kdf_t *kdf, csi_kdf_key_handle_t *handle,
                             csi_kdf_derived_key_t dkey);
 
 /**
@@ -139,4 +149,11 @@ csi_error_t csi_kdf_clear_key(csi_kdf_t *kdf, csi_kdf_derived_key_t dkey);
 csi_error_t csi_kdf_get_key_attr(csi_kdf_t *kdf, csi_kdf_derived_key_t dkey,
                                  csi_kdf_key_attr_t *attr);
 
+
+/**
+  \brief       kdf generate hmac key.
+  \param[out]  key     Pointer to the generated hmac key.
+  \param[out]  length  Pointer to the key length in bytes.
+*/
+csi_error_t csi_kdf_gen_hmac_key(uint8_t *key, uint32_t *length);
+
 #endif
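
The argument swap in csi_kdf_set_key above changes existing call sites; a minimal sketch of the new order (the kdf handle, the cipher key handle and the derived-key id are assumed to be set up by the rest of this header, which is not shown here):

    /* Assumed context: an initialized csi_kdf_t kdf, a cipher key handle
     * 'key_handle', and a derived-key id 'dkey'. */
    csi_error_t rc;

    /* Old call sites:  csi_kdf_set_key(&key_handle, &kdf, dkey);
     * New order:       kdf handle first, then the cipher key handle. */
    rc = csi_kdf_set_key(&kdf, &key_handle, dkey);

    /* Newly added helper (for RPMB, per the enum comment); treating 'length'
     * as an output is an assumption. */
    uint8_t hmac_key[32];
    uint32_t hmac_len = sizeof(hmac_key);
    rc = csi_kdf_gen_hmac_key(hmac_key, &hmac_len);
    (void)rc;
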

+ 5 - 5
lib/sec_library/include/keyram.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 
 /******************************************************************************
@@ -9,7 +9,7 @@
  * @date     12. MAR 2021
  ******************************************************************************/
 
-#include "drv/kdf.h"
+#include "kdf.h"
 #include <stdio.h>
 #include <string.h>
 
@@ -45,7 +45,7 @@
  *
  * @return uint32_t
  */
-uint32_t keyram_init();
+uint32_t keyram_init(void);
 
 /**
  * @brief keyram set key.
@@ -70,9 +70,9 @@ uint32_t keyram_get_key_addr(csi_kdf_derived_key_t key, uint64_t *addr);
  *
  * @return uint32_t
  */
-uint32_t keyram_clear();
+uint32_t keyram_clear(void);
 
 /**
  * @brief Uninit. This function will lock KDF.
  */
-void keyram_uninit();
+void keyram_uninit(void);
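
A short sketch using only the prototypes shown in this hunk (treating a zero return as success is an assumption; 'dkey' is a csi_kdf_derived_key_t id from kdf.h):

    uint64_t key_addr = 0;

    keyram_init();
    if (keyram_get_key_addr(dkey, &key_addr) == 0) {
        /* hardware crypto engines can now fetch the derived key from key_addr */
    }
    keyram_clear();     /* wipe the key RAM when done */
    keyram_uninit();    /* locks KDF, per the comment above */
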

+ 367 - 0
lib/sec_library/include/list.h

@@ -0,0 +1,367 @@
+/*
+ * Copyright (C) 2015-2020 Alibaba Group Holding Limited
+ */
+
+/******************************************************************************
+ * @file       drv/list.h
+ * @brief      Header File for LIST Driver
+ * @version    V1.0
+ * @date       10. Oct 2020
+ * @model      list
+ ******************************************************************************/
+
+#ifndef AOS_LIST_H
+#define AOS_LIST_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ \brief       Get offset of a member variable
+ \param[in]   type      The type of the struct this is embedded in
+ \param[in]   member    The name of the variable within the struct
+ \return      None
+*/
+#define aos_offsetof(type, member)   ((size_t)&(((type *)0)->member))
+
+/**
+ \brief       Get the struct for this entry
+ \param[in]   ptr       The list head to take the element from
+ \param[in]   type      The type of the struct this is embedded in
+ \param[in]   member    The name of the variable within the struct
+ \return      None
+*/
+#define aos_container_of(ptr, type, member) \
+    ((type *) ((char *) (ptr) - aos_offsetof(type, member)))
+
+/* For double link list */
+typedef struct dlist_s {
+    struct dlist_s *prev;
+    struct dlist_s *next;
+} dlist_t;
+
+static inline void __dlist_add(dlist_t *node, dlist_t *prev, dlist_t *next)
+{
+    node->next = next;
+    node->prev = prev;
+
+    prev->next = node;
+    next->prev = node;
+}
+
+/**
+ \brief       Get the struct for this entry
+ \param[in]   addr      The list head to take the element from
+ \param[in]   type      The type of the struct this is embedded in
+ \param[in]   member    The name of the dlist_t within the struct
+ \return      None
+*/
+#define dlist_entry(addr, type, member) \
+    ((type *)((long)addr - aos_offsetof(type, member)))
+
+
+static inline void dlist_add(dlist_t *node, dlist_t *queue)
+{
+    __dlist_add(node, queue, queue->next);
+}
+
+static inline void dlist_add_tail(dlist_t *node, dlist_t *queue)
+{
+    __dlist_add(node, queue->prev, queue);
+}
+
+static inline void dlist_del(dlist_t *node)
+{
+    dlist_t *prev = node->prev;
+    dlist_t *next = node->next;
+
+    prev->next = next;
+    next->prev = prev;
+}
+
+static inline void dlist_init(dlist_t *node)
+{
+    node->next = (node->prev = node);
+}
+
+static inline void INIT_AOS_DLIST_HEAD(dlist_t *list)
+{
+    list->next = list;
+    list->prev = list;
+}
+
+static inline int dlist_empty(const dlist_t *head)
+{
+    return head->next == head;
+}
+
+/**
+ \brief       Initialise the list
+ \param[in]   list    The list to be initialized
+ \return      None
+*/
+#define AOS_DLIST_INIT(list)  {&(list), &(list)}
+
+/**
+ \brief       Get the first element from a list
+ \param[in]   ptr       The list head to take the element from
+ \param[in]   type      The type of the struct this is embedded in
+ \param[in]   member    The name of the dlist_t within the struct
+ \return      None
+*/
+#define dlist_first_entry(ptr, type, member) \
+    dlist_entry((ptr)->next, type, member)
+
+/**
+ \brief       Iterate over a list
+ \param[in]   pos     The &struct dlist_t to use as a loop cursor
+ \param[in]   head    The head for your list
+ \return      None
+*/
+#define dlist_for_each(pos, head) \
+    for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ \brief       Iterate over a list safe against removal of list entry
+ \param[in]   pos     The &struct dlist_t to use as a loop cursor
+ \param[in]   n       Another &struct dlist_t to use as temporary storage
+ \param[in]   head    The head for your list
+ \return      None
+*/
+#define dlist_for_each_safe(pos, n, head) \
+    for (pos = (head)->next, n = pos->next; pos != (head); \
+         pos = n, n = pos->next)
+
+/**
+ \brief       Iterate over list of given type
+ \param[in]   queue     The head for your list
+ \param[in]   node      The &struct dlist_t to use as a loop cursor
+ \param[in]   type      The type of the struct this is embedded in
+ \param[in]   member    The name of the dlist_t within the struct
+ \return      None
+*/
+#define dlist_for_each_entry(queue, node, type, member) \
+    for (node = aos_container_of((queue)->next, type, member); \
+         &node->member != (queue); \
+         node = aos_container_of(node->member.next, type, member))
+
+/**
+ \brief       Iterate over list of given type safe against removal of list entry
+ \param[in]   queue     The head for your list
+ \param[in]   n         The type * to use as a temp
+ \param[in]   node      The type * to use as a loop cursor
+ \param[in]   type      The type of the struct this is embedded in
+ \param[in]   member    The name of the dlist_t within the struct
+ \return      None
+*/
+#define dlist_for_each_entry_safe(queue, n, node, type, member) \
+    for (node = aos_container_of((queue)->next, type, member),  \
+         n = (queue)->next ? (queue)->next->next : NULL;        \
+         &node->member != (queue);                              \
+         node = aos_container_of(n, type, member), n = n ? n->next : NULL)
+
+/**
+ \brief       Get the struct for this entry
+ \param[in]   ptr       The list head to take the element from
+ \param[in]   type      The type of the struct this is embedded in
+ \param[in]   member    The name of the variable within the struct
+ \return      None
+ */
+#define list_entry(ptr, type, member) \
+    aos_container_of(ptr, type, member)
+
+
+/**
+ \brief       Iterate backwards over list of given type
+ \param[in]   pos       The type * to use as a loop cursor
+ \param[in]   head      The head for your list
+ \param[in]   member    The name of the dlist_t within the struct
+ \param[in]   type      The type of the struct this is embedded in
+ \return      None
+*/
+#define dlist_for_each_entry_reverse(pos, head, member, type) \
+    for (pos = list_entry((head)->prev, type, member);        \
+         &pos->member != (head);                              \
+         pos = list_entry(pos->member.prev, type, member))
+
+/**
+ \brief       Get the list length
+ \param[in]   queue     The head for your list
+ \return      None
+*/
+int dlist_entry_number(dlist_t *queue);
+
+/**
+ \brief       Initialise the list
+ \param[in]   name    The list to be initialized
+ \return      None
+*/
+#define AOS_DLIST_HEAD_INIT(name) { &(name), &(name) }
+
+/**
+ \brief       Initialise the list
+ \param[in]   name    The list to be initialized
+ \return      None
+*/
+#define AOS_DLIST_HEAD(name) \
+    dlist_t name = AOS_DLIST_HEAD_INIT(name)
+
+/* For single link list */
+typedef struct slist_s {
+    struct slist_s *next;
+} slist_t;
+
+static inline void slist_add(slist_t *node, slist_t *head)
+{
+    node->next = head->next;
+    head->next = node;
+}
+
+void slist_add_tail(slist_t *node, slist_t *head);
+
+static inline void slist_del(slist_t *node, slist_t *head)
+{
+    while (head->next) {
+        if (head->next == node) {
+            head->next = node->next;
+            break;
+        }
+
+        head = head->next;
+    }
+}
+
+static inline int slist_empty(const slist_t *head)
+{
+    return !head->next;
+}
+
+static inline void slist_init(slist_t *head)
+{
+    head->next = 0;
+}
+
+static inline slist_t *slist_remove(slist_t *l, slist_t *n)
+{
+    /* Remove slist head */
+    struct slist_s *node = l;
+
+    while (node->next && (node->next != n)) {
+        node = node->next;
+    }
+
+    /* Remove node */
+    if (node->next != (slist_t *)0) {
+        node->next = node->next->next;
+    }
+
+    return l;
+}
+
+static inline slist_t *slist_first(slist_t *l)
+{
+    return l->next;
+}
+
+static inline slist_t *slist_tail(slist_t *l)
+{
+    while (l->next) {
+        l = l->next;
+    }
+
+    return l;
+}
+
+static inline slist_t *slist_next(slist_t *n)
+{
+    return n->next;
+}
+
+/**
+  \brief       Iterate over list of given type
+  \param[in]   node      The type * to use as a loop cursor
+  \param[in]   type      The type of the struct this is embedded in
+  \param[in]   member    The name of the slist_t within the struct
+  \param[in]   queue     The head for your list
+  \return      None
+*/
+#define slist_for_each_entry(queue, node, type, member)        \
+    for (node = aos_container_of((queue)->next, type, member); \
+         &node->member;                                        \
+         node = aos_container_of(node->member.next, type, member))
+
+/**
+ \brief       Iterate over list of given type safe against removal of list entry
+ \param[in]   queue     The head for your list
+ \param[in]   tmp       The type * to use as a temp
+ \param[in]   node      The type * to use as a loop cursor
+ \param[in]   type      The type of the struct this is embedded in
+ \param[in]   member    The name of the slist_t within the struct
+ \return      None
+*/
+#define slist_for_each_entry_safe(queue, tmp, node, type, member) \
+    for (node = aos_container_of((queue)->next, type, member),    \
+         tmp = (queue)->next ? (queue)->next->next : NULL;        \
+         &node->member;                                           \
+         node = aos_container_of(tmp, type, member), tmp = tmp ? tmp->next : tmp)
+
+/**
+ \brief       Initialise the list
+ \param[in]   name    The list to be initialized
+ \return      None
+*/
+#define AOS_SLIST_HEAD_INIT(name) {0}
+
+/**
+ \brief       Initialise the list
+ \param[in]   name    The list to be initialized
+ \return      None
+*/
+#define AOS_SLIST_HEAD(name) \
+    slist_t name = AOS_SLIST_HEAD_INIT(name)
+
+/**
+ \brief       Get the struct for this entry
+ \param[in]   addr      The list head to take the element from
+ \param[in]   type      The type of the struct this is embedded in
+ \param[in]   member    The name of the slist_t within the struct
+ \return      None
+*/
+#define slist_entry(addr, type, member) (                                   \
+        addr ? (type *)((long)addr - aos_offsetof(type, member)) : (type *)addr \
+                                        )
+
+/**
+ \brief       Get the first element from a list
+ \param[in]   ptr       The list head to take the element from
+ \param[in]   type      The type of the struct this is embedded in
+ \param[in]   member    The name of the slist_t within the struct
+ \return      None
+*/
+#define slist_first_entry(ptr, type, member) \
+    slist_entry((ptr)->next, type, member)
+
+/**
+ \brief       Slist_tail_entry - get the tail element from a slist
+ \param[in]   ptr       The slist head to take the element from
+ \param[in]   type      The type of the struct this is embedded in
+ \param[in]   member    The name of the slist_struct within the struct
+ \return      None
+ \note        The slist is expected to be non-empty
+*/
+#define slist_tail_entry(ptr, type, member) \
+    slist_entry(slist_tail(ptr), type, member)
+
+/**
+ \brief       Get the list length
+ \param[in]   queue    The head for your list
+ \return      None
+*/
+int slist_entry_number(slist_t *queue);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AOS_LIST_H */
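A short, self-contained sketch of how the dlist/slist helpers above are meant to be used (illustrative only; struct item is a made-up node type):

    #include "list.h"

    struct item {
        int     value;
        dlist_t dnode;
        slist_t snode;
    };

    static int list_demo(void)
    {
        dlist_t dhead;
        slist_t shead;
        struct item a = { .value = 1 }, b = { .value = 2 };
        struct item *it;
        int sum = 0;

        /* Doubly linked list: the head points at itself when empty. */
        dlist_init(&dhead);
        dlist_add_tail(&a.dnode, &dhead);
        dlist_add_tail(&b.dnode, &dhead);
        dlist_for_each_entry(&dhead, it, struct item, dnode) {
            sum += it->value;                /* visits a, then b */
        }

        /* Singly linked list: push-front, empty when head->next is NULL. */
        slist_init(&shead);
        slist_add(&a.snode, &shead);
        it = slist_first_entry(&shead, struct item, snode);
        if (it != NULL)
            sum += it->value;

        return sum;                          /* 1 + 2 + 1 = 4 */
    }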

+ 149 - 0
lib/sec_library/include/rambus.h

@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
+ */
+
+#ifndef INC_RAMBUS_H
+#define INC_RAMBUS_H
+
+#include "device_types.h"
+#ifdef SEC_LIB_VERSION
+#include "drv/common.h"
+#include "device_rw.h"
+#include "rambus_log.h"
+#include "rambus_errcode.h"
+#else
+#include "common.h"
+#endif
+
+extern uint64_t g_freq_timer;
+extern uint64_t g_freq_ip;
+extern uint64_t g_start_ctr;
+extern uint64_t g_end_ctr;
+extern uint64_t g_data_len_in_bits;
+extern uint32_t g_type;
+
+enum rambus_cipher_padding_mode {
+    PADDING_ZERO,
+    PADDING_FF,
+    PADDING_RANDOM,
+};
+
+uint32_t rb_get_random_byte(uint8_t *buf, uint32_t count);
+
+uint32_t rb_get_random_byte_nozero(uint8_t *buf, uint32_t count);
+
+uint32_t kdf_get_mask(uint8_t *mask, uint32_t len);
+
+/* 1 bpc, 2 tps, 3 bps */
+void rb_perf_init(uint32_t data_len_in_bits, uint32_t type);
+void rb_perf_start(void);
+void rb_perf_end(void);
+void rb_perf_get(char *ncase);
+
+#define DEFAULT_TIMEOUT 1000U
+
+#ifdef CONFIG_ALG_PERF_TEST
+#define RB_PERF_INIT(bits, type)                                               \
+        do {                                                                   \
+                if ((bits) != 0) {                                             \
+                        g_data_len_in_bits = (bits);                           \
+                }                                                              \
+                if ((type) != 0) {                                             \
+                        g_type = (type);                                       \
+                }                                                              \
+        } while (0)
+
+#define RB_PERF_START_POINT()                                                  \
+        do {                                                                   \
+                g_start_ctr = ((((uint64_t)csi_coret_get_valueh() << 32U) |    \
+                                csi_coret_get_value()));                       \
+        } while (0)
+
+#define RB_PERF_END_POINT()                                                    \
+        do {                                                                   \
+                g_end_ctr = ((((uint64_t)csi_coret_get_valueh() << 32U) |      \
+                              csi_coret_get_value()));                         \
+        } while (0)
+
+#else
+#define RB_PERF_INIT(...)
+#define RB_PERF_START_POINT(...)
+#define RB_PERF_END_POINT(...)
+#endif
+
+static inline void rb_xor(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t len) {
+        for (int i = 0; i < (int)len; i++) {
+                c[i] = a[i] ^ b[i];
+        }
+}
+
+/**
+ * @brief Get the aes sca enable config
+ *
+ * @param is_en is enable
+ * @return uint32_t
+ */
+uint32_t rb_get_aes_sca(uint32_t *is_en);
+
+/**
+ * @brief Get the sm4 sca enable config
+ *
+ * @param is_en is enable
+ * @return uint32_t
+ */
+uint32_t rb_get_sm4_sca(uint32_t *is_en);
+
+/**
+ * @brief Get the pka sca enable config
+ *
+ * @param is_en is enable
+ * @return uint32_t
+ */
+uint32_t rb_get_pka_sca(uint32_t *is_en);
+
+/**
+ * @brief rb_cache_en
+ * @return uint32_t enable: 1
+ *
+ */
+uint32_t rb_cache_en(void);
+
+/**
+ * @brief trng init
+ *
+ * @return csi_error_t
+ */
+csi_error_t trng_init(void);
+
+/**
+ * @brief rb wait status
+ *
+ * @param dev
+ * @param offset
+ * @param mask
+ * @param status
+ * @return uint32_t
+ */
+csi_error_t rb_wait_status(Device_Handle_t *dev, const uint32_t offset, uint32_t mask,
+                 uint32_t status);
+
+/**
+ * \brief          rambus crypto init.
+ * \return         0 if successful, or error code
+ */
+uint32_t rambus_crypto_init(void);
+
+/**
+ * @brief rambus crypto uninit.
+ *
+ */
+void rambus_crypto_uninit(void);
+
+/**
+ * \brief              rambus set cipher padding type.
+ * @param padding_mode cipher padding mode
+ * \return             0 if successful, or error code
+ */
+uint32_t rambus_enable_cipher_padding_type(enum rambus_cipher_padding_mode padding_mode);
+
+#endif
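A bring-up sketch for the wrapper API above (the non-zero error values are not named in this hunk, so only 0 == success is assumed):

    #include "rambus.h"

    static int rambus_bringup(uint8_t nonce[16])
    {
        int ret = -1;

        if (rambus_crypto_init() != 0)
            return -1;

        /* Pad the final cipher block with random bytes instead of zeros. */
        if (rambus_enable_cipher_padding_type(PADDING_RANDOM) == 0 &&
            rb_get_random_byte(nonce, 16) == 0)
            ret = 0;

        rambus_crypto_uninit();
        return ret;
    }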

+ 2 - 2
lib/sec_library/include/rng.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 
 /******************************************************************************
@@ -12,7 +12,7 @@
 #ifndef _DRV_TNG_H_
 #define _DRV_TNG_H_
 
-#include "drv/common.h"
+#include "common.h"
 #include <stdint.h>
 
 #ifdef __cplusplus

+ 55 - 3
lib/sec_library/include/rsa.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 /******************************************************************************
  * @file     drv/rsa.h
@@ -16,7 +16,29 @@ extern "C" {
 #endif
 
 #include <stdint.h>
-#include <drv/common.h>
+#include "common.h"
+
+#define RSA_PRIME_256_BIT_LEN        128
+#define RSA_PRIME_512_BIT_LEN        256
+#define RSA_PRIME_1024_BIT_LEN       512
+#define RSA_PRIME_2048_BIT_LEN       1024
+#define RSA_PRIME_4096_BIT_LEN       2048
+
+#define RSA_256_BYTE_LEN        32
+#define RSA_512_BYTE_LEN        64
+#define RSA_1024_BYTE_LEN       128
+#define RSA_2048_BYTE_LEN       256
+#define RSA_4096_BYTE_LEN       512
+#define RSA_EM_BYTE_LEN         RSA_4096_BYTE_LEN
+
+#define SHA256_DIGEST_BYTE_LEN  32
+#define RSA_PKCS1_PADDING_SIZE  11
+#define RSA_MD5_OID_LEN         (6 + 8 + 4)
+#define RSA_SHA1_OID_LEN        (6 + 5 + 4)
+#define RSA_SHA224_OID_LEN      (6 + 9 + 4)
+#define RSA_SHA256_OID_LEN      (6 + 9 + 4)
+#define RSA_SHA384_OID_LEN      (6 + 9 + 4)
+#define RSA_SHA512_OID_LEN      (6 + 9 + 4)
 
 /*----- RSA Control Codes: Mode Parameters: Key Bits -----*/
 typedef enum {
@@ -47,11 +69,17 @@ typedef enum {
     RSA_HASH_TYPE_SHA512
 } csi_rsa_hash_type_t;
 
+typedef struct {
+    csi_rsa_hash_type_t hash_type;
+    uint32_t            oid_len;
+    uint8_t             *oid;
+}RSA_OID;
+
 typedef struct {
     void *n;                                ///< Pointer to the public modulus
     void *e;                                ///< Pointer to the public exponent
     void *d;                                ///< Pointer to the private exponent
-    csi_rsa_key_bits_t  key_bits;           ///< RSA KEY BITS
+    csi_rsa_key_bits_t     key_bits;        ///< RSA KEY BITS
     csi_rsa_padding_type_t padding_type;    ///< RSA PADDING TYPE
 } csi_rsa_context_t;
 
@@ -257,6 +285,30 @@ csi_error_t csi_rsa_enable_pm(csi_rsa_t *rsa);
 */
 void csi_rsa_disable_pm(csi_rsa_t *rsa);
 
+/**
+  \brief       Get the public key from the primes p and q
+  \param[in]   rsa          rsa handle to operate.
+  \param[in]   p            Pointer to the prime p
+  \param[in]   p_byte_len   Byte length of the prime p
+  \param[in]   q            Pointer to the prime q
+  \param[in]   q_byte_len   Byte length of the prime q
+  \param[out]  out          Pointer to the public key buffer
+  \param[in]   keybits_len  Public key length in bits \ref csi_rsa_key_bits_t
+  \return      \ref csi_error_t
+*/
+csi_error_t csi_rsa_get_publickey(csi_rsa_t *rsa, void *p, uint32_t p_byte_len, void *q, uint32_t q_byte_len, void *out, csi_rsa_key_bits_t keybits_len);
+
+/**
+  \brief       Generate RSA key pairs
+  \param[in]   rsa          rsa handle to operate.
+  \param[in]   context      Pointer to the rsa context
+  \param[in]   keybits_len  Key length in bits \ref csi_rsa_key_bits_t
+  \return      \ref csi_error_t
+*/
+csi_error_t csi_rsa_gen_keypairs(csi_rsa_t *rsa, csi_rsa_context_t *context, csi_rsa_key_bits_t keybits_len);
+
+void csi_rsa_set_ignore_decrypt_error(bool checked);
+
 #ifdef __cplusplus
 }
 #endif
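A sketch of the new key-pair generation call; it assumes the handle was initialised elsewhere and that the n/e/d buffers in csi_rsa_context_t are caller-allocated (neither point is stated in this hunk):

    #include "rsa.h"
    #include <string.h>

    static csi_error_t rsa_make_keys(csi_rsa_t *rsa, csi_rsa_key_bits_t bits)
    {
        /* RSA_EM_BYTE_LEN (4096-bit) buffers cover every supported key size. */
        static uint8_t n[RSA_EM_BYTE_LEN], e[RSA_EM_BYTE_LEN], d[RSA_EM_BYTE_LEN];
        csi_rsa_context_t ctx;

        memset(&ctx, 0, sizeof(ctx));
        ctx.n = n;
        ctx.e = e;
        ctx.d = d;
        ctx.key_bits = bits;

        return csi_rsa_gen_keypairs(rsa, &ctx, bits);
    }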

+ 68 - 25
lib/sec_library/include/sec_crypto_aes.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 /******************************************************************************
  * @file     seccrypt_aes.h
@@ -11,11 +11,16 @@
 #ifndef _SC_AES_H_
 #define _SC_AES_H_
 
+#include "sec_include_config.h"
 #include <stdint.h>
-#include <sec_crypto_errcode.h>
+#include "sec_crypto_errcode.h"
 
 #ifdef CONFIG_SYSTEM_SECURE
-#include "drv/aes.h"
+#ifdef SEC_LIB_VERSION
+#include <drv/aes.h>
+#else
+#include "aes.h"
+#endif
 #endif
 
 #ifdef CONFIG_SEC_CRYPTO_AES_SW
@@ -187,8 +192,7 @@ uint32_t sc_aes_cfb8_decrypt(sc_aes_t *aes, void *in, void *out, uint32_t size,
   \param[out]  num     the number of the 128-bit block we have used
   \return      error code \ref uint32_t
 */
-uint32_t sc_aes_cfb128_decrypt(sc_aes_t *aes, void *in, void *out, uint32_t size, void *iv,
-                               uint32_t *num);
+uint32_t sc_aes_cfb128_decrypt(sc_aes_t *aes, void *in, void *out, uint32_t size, void *iv);
 
 /**
   \brief       Aes cfb128 encrypt
@@ -200,8 +204,7 @@ uint32_t sc_aes_cfb128_decrypt(sc_aes_t *aes, void *in, void *out, uint32_t size
   \param[out]  num     the number of the 128-bit block we have used
   \return      error code \ref uint32_t
 */
-uint32_t sc_aes_cfb128_encrypt(sc_aes_t *aes, void *in, void *out, uint32_t size, void *iv,
-                               uint32_t *num);
+uint32_t sc_aes_cfb128_encrypt(sc_aes_t *aes, void *in, void *out, uint32_t size, void *iv);
 /**
   \brief       Aes ofb encrypt
   \param[in]   aes     handle to operate
@@ -209,11 +212,11 @@ uint32_t sc_aes_cfb128_encrypt(sc_aes_t *aes, void *in, void *out, uint32_t size
   \param[out]  out     Pointer to the Result data
   \param[in]   size    the Source data size
   \param[in]   iv      init vector
-  \param[out]  num     the number of the 128-bit block we have used
   \return      error code \ref uint32_t
 */
-uint32_t sc_aes_ofb_encrypt(sc_aes_t *aes, void *in, void *out, uint32_t size, void *iv,
-                            uint32_t *num);
+uint32_t sc_aes_ofb_encrypt(sc_aes_t *aes, void *in, void *out, uint32_t size, void *iv);
+
 /**
   \brief       Aes ofb decrypt
   \param[in]   aes     handle to operate
@@ -221,43 +224,83 @@ uint32_t sc_aes_ofb_encrypt(sc_aes_t *aes, void *in, void *out, uint32_t size, v
   \param[out]  out     Pointer to the Result data
   \param[in]   size    the Source data size
   \param[in]   iv      init vector
-  \param[out]  num     the number of the 128-bit block we have used
   \return      error code \ref uint32_t
 */
-uint32_t sc_aes_ofb_decrypt(sc_aes_t *aes, void *in, void *out, uint32_t size, void *iv,
-                            uint32_t *num);
+uint32_t sc_aes_ofb_decrypt(sc_aes_t *aes, void *in, void *out, uint32_t size, void *iv);
+
 /**
   \brief       Aes ctr encrypt
   \param[in]   aes              handle to operate
   \param[in]   in               Pointer to the Source data
   \param[out]  out              Pointer to the Result data
   \param[in]   size             the Source data size
-  \param[in]   nonce_counter    Pointer to the 128-bit nonce and counter
-  \param[in]   stream_block     Pointer to the saved stream-block for resuming
   \param[in]   iv               init vector
-  \param[out]  num              the number of the 128-bit block we have used
   \return      error code \ref uint32_t
 */
-uint32_t sc_aes_ctr_encrypt(sc_aes_t *aes, void *in, void *out, uint32_t size,
-                            uint8_t nonce_counter[16], uint8_t stream_block[16], void *iv,
-                            uint32_t *num);
+uint32_t sc_aes_ctr_encrypt(sc_aes_t *aes, void *in, void *out, uint32_t size,void *iv);
+
 /**
   \brief       Aes ctr decrypt
   \param[in]   aes              handle to operate
   \param[in]   in               Pointer to the Source data
   \param[out]  out              Pointer to the Result data
   \param[in]   size             the Source data size
-  \param[in]   nonce_counter    Pointer to the 128-bit nonce and counter
-  \param[in]   stream_block     Pointer to the saved stream-block for resuming
   \param[in]   iv               init vector
-  \param[out]  num              the number of the 128-bit block we have used
   \return      error code \ref uint32_t
 */
-uint32_t sc_aes_ctr_decrypt(sc_aes_t *aes, void *in, void *out, uint32_t size,
-                            uint8_t nonce_counter[16], uint8_t stream_block[16], void *iv,
-                            uint32_t *num);
+uint32_t sc_aes_ctr_decrypt(sc_aes_t *aes, void *in, void *out, uint32_t size,void *iv);
+
+/**
+  \brief       Aes gcm encrypt
+  \param[in]   aes              aes handle to operate
+  \param[in]   in               Pointer to the Source data
+  \param[out]  out              Pointer to the Result data
+  \param[in]   size             the Source data size
+  \param[in]   add_len          additional authenticated data length
+  \param[in]   iv               init vector
+  \return      error code \ref uint32_t
+*/
+uint32_t sc_aes_gcm_encrypt(sc_aes_t *aes, void *in, void *out,uint32_t size, uint32_t add_len, void *iv);
+
+/**
+  \brief       Aes gcm decrypt
+  \param[in]   aes              aes handle to operate
+  \param[in]   in               Pointer to the Source data
+  \param[out]  out              Pointer to the Result data
+  \param[in]   size             the Source data size
+  \param[in]   add_len          additional authenticated data length
+  \param[in]   iv               init vector
+  \return      error code \ref uint32_t
+*/
+uint32_t sc_aes_gcm_decrypt(sc_aes_t *aes, void *in, void *out,uint32_t size, uint32_t add_len, void *iv);
+
+/**
+  \brief       Aes ccm encrypt
+  \param[in]   aes              aes handle to operate
+  \param[in]   in               Pointer to the Source data
+  \param[out]  out              Pointer to the Result data
+  \param[in]   size             the Source data size
+  \param[in]   add_len          additional authenticated data length
+  \param[in]   iv               init vector
+  \param[out]  tag_out          tag output, pass NULL if not needed
+  \return      error code \ref uint32_t
+*/
+uint32_t sc_aes_ccm_encrypt(sc_aes_t *aes, void *in, void *out,uint32_t size, uint32_t add_len, void *iv, uint8_t* tag_out);
+
+/**
+  \brief       Aes ccm decrypt
+  \param[in]   aes              aes handle to operate
+  \param[in]   in               Pointer to the Source data
+  \param[out]  out              Pointer to the Result data
+  \param[in]   size             the Source data size
+  \param[in]   add_len          additional authenticated data length
+  \param[in]   iv               init vector
+  \param[out]  tag_out          tag output, pass NULL if not needed
+  \return      error code \ref uint32_t
+*/
+uint32_t sc_aes_ccm_decrypt(sc_aes_t *aes, void *in, void *out,uint32_t size, uint32_t add_len, void *iv, uint8_t* tag_out);
+
+void sc_aes_dma_enable(sc_aes_t *aes, uint8_t en);
 
 #ifdef __cplusplus
 }
 #endif
+
 #endif /* _SC_AES_H_ */
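A round-trip sketch for the new GCM entry points; key setup uses the existing sc_aes_* calls (not shown in this hunk), so the handle is taken as already configured, add_len == 0 means no additional authenticated data, and 0 == success is an assumption:

    #include "sec_crypto_aes.h"

    static uint32_t gcm_roundtrip(sc_aes_t *aes, uint8_t iv[12],
                                  uint8_t *msg, uint8_t *tmp, uint32_t len)
    {
        uint32_t ret;

        ret = sc_aes_gcm_encrypt(aes, msg, tmp, len, 0 /* add_len */, iv);
        if (ret != 0)
            return ret;

        /* Decrypt back to check the round trip. */
        return sc_aes_gcm_decrypt(aes, tmp, msg, len, 0 /* add_len */, iv);
    }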

+ 1 - 1
lib/sec_library/include/sec_crypto_common.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2019-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2019-2020 Alibaba Group Holding Limited
  */
 
 

+ 265 - 0
lib/sec_library/include/sec_crypto_ecc.h

@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2017-2022 Alibaba Group Holding Limited
+ */
+/******************************************************************************
+ * @file     sec_crypt_ecc.h
+ * @brief    Header File for ECC
+ * @version  V3.3
+ * @date     30. May 2022
+ * @model    ecc
+ ******************************************************************************/
+#ifndef _SC_ECC_H_
+#define _SC_ECC_H_
+#include "sec_include_config.h"
+
+#define CONFIG_SEC_CRYPTO_ECC
+
+#ifdef CONFIG_SEC_CRYPTO_ECC
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef SEC_LIB_VERSION
+#include "drv/ecc.h"
+#else
+#include "ecc.h"
+#endif
+
+typedef enum {
+    SC_ECC_PRIME256V1 = 0,
+} sc_ecc_curve_type;
+
+/**
+\brief ECC ciphertext order
+*/
+typedef enum {
+        SC_ECC_C1C3C2 = 0,
+        SC_ECC_C1C2C3,
+} sc_ecc_cipher_order_e;
+
+typedef enum {
+        SC_ECC_ENDIAN_LITTLE = 0, ///< Little Endian
+        SC_ECC_ENDIAN_BIG         ///< Big Endian
+} sc_ecc_endian_mode_e;
+
+/**
+\brief ECC key exchange role
+*/
+typedef enum { SC_ECC_Role_Sponsor = 0, SC_ECC_Role_Responsor } sc_ecc_exchange_role_e;
+
+/****** ECC Event *****/
+typedef enum {
+        SC_ECC_EVENT_MAKE_KEY_COMPLETE = 0, ///< Make key completed
+        SC_ECC_EVENT_ENCRYPT_COMPLETE,      ///< Encrypt completed
+        SC_ECC_EVENT_DECRYPT_COMPLETE,      ///< Decrypt completed
+        SC_ECC_EVENT_SIGN_COMPLETE,         ///< Sign completed
+        SC_ECC_EVENT_VERIFY_COMPLETE,       ///< Verify completed
+        SC_ECC_EVENT_EXCHANGE_KEY_COMPLETE, ///< Exchange key completed
+} sc_ecc_event_e;
+
+typedef struct {
+        uint32_t ecc_curve : 1; ///< supports 256bits curve
+} sc_ecc_capabilities_t;
+
+/**
+\brief ECC status
+*/
+typedef struct {
+        uint32_t busy : 1; ///< Calculate busy flag
+} sc_ecc_state_t;
+
+typedef struct {
+#ifdef CONFIG_CSI_V2
+        csi_ecc_t ecc;
+#endif
+} sc_ecc_t;
+
+///< Pointer to \ref sc_ecc_callback_t : ECC Event call back.
+typedef void (*sc_ecc_callback_t)(sc_ecc_event_e event);
+
+/**
+  \brief       Initialize ECC.
+  \param[in]   ecc  ecc handle to operate.
+  \param[in]   idx  device id
+  \return      \ref uint32_t
+*/
+uint32_t sc_ecc_init(sc_ecc_t *ecc, uint32_t idx);
+
+/**
+  \brief       De-initialize ECC Interface. Stops operation and releases the
+               software resources used by the interface.
+  \param[in]   ecc  ecc handle to operate.
+  \return      none
+*/
+void sc_ecc_uninit(sc_ecc_t *ecc);
+
+/**
+  \brief       ecc config: set ciphertext order and endian mode.
+  \param[in]   ecc     Operate handle.
+  \param[in]   co      Ciphertext order \ref sc_ecc_cipher_order_e.
+  \param[in]   endian  Endian mode \ref sc_ecc_endian_mode_e.
+  \return      \ref uint32_t
+*/
+uint32_t sc_ecc_config(sc_ecc_t *ecc, sc_ecc_cipher_order_e co,
+                       sc_ecc_endian_mode_e endian);
+
+/**
+  \brief       Attach the callback handler to ECC
+  \param[in]   ecc  Operate handle.
+  \param[in]   cb    Callback function
+  \param[in]   arg   User can define it by himself as callback's param
+  \return      Error code \ref uint32_t
+*/
+uint32_t sc_ecc_attach_callback(sc_ecc_t *ecc, sc_ecc_callback_t cb, void *arg);
+
+/**
+  \brief       Detach the callback handler
+  \param[in]   ecc  Operate handle.
+*/
+uint32_t sc_ecc_detach_callback(sc_ecc_t *ecc);
+
+/**
+  \brief       ecc get capability.
+  \param[in]   ecc  Operate handle.
+  \param[out]   cap  Pointer of sc_ecc_capabilities_t.
+  \return      \ref uint32_t
+*/
+uint32_t sc_ecc_get_capabilities(sc_ecc_t *ecc, sc_ecc_capabilities_t *cap);
+
+uint32_t sc_ecc_check_keypair(sc_ecc_t *ecc, uint8_t pubkey[65],
+                              uint8_t prikey[32]);
+
+/**
+  \brief       generate ecc key.
+  \param[in]   ecc       ecc handle to operate.
+  \param[out]  pubkey    Pointer to the ecc public key, alloc by caller.
+  \param[out]  prikey    Pointer to the ecc private key, alloc by caller.
+  \return      \ref uint32_t
+*/
+uint32_t sc_ecc_gen_key(sc_ecc_t *ecc, uint8_t pubkey[65], uint8_t prikey[32]);
+
+
+/**
+  \brief       generate ecc pubkey.
+  \param[in]   ecc      ecc handle to operate.
+  \param[in]   prikey   Pointer to the ecc private key, alloc by caller.
+  \param[out]  pubkey   Pointer to the ecc public key, alloc by caller.
+  \return      \ref uint32_t
+*/
+uint32_t sc_ecc_gen_pubkey(sc_ecc_t *ecc, uint8_t pubkey[65], 
+                    uint8_t prikey[32], sc_ecc_curve_type type);
+
+/**
+  \brief       ecc sign
+  \param[in]   ecc       ecc handle to operate.
+  \param[in]   d         Pointer to the digest.
+  \param[in]   prikey    Pointer to the private key.
+  \param[out]  s         Pointer to the signature.
+  \param[in]   type      Curve type \ref sc_ecc_curve_type.
+  \return      \ref uint32_t
+*/
+uint32_t sc_ecc_sign(sc_ecc_t *ecc, uint8_t d[32], uint8_t prikey[32],
+                     uint8_t s[64], sc_ecc_curve_type type);
+
+/**
+  \brief       ecc sign
+  \param[in]   ecc       ecc handle to operate.
+  \param[in]   d       Pointer to the digest.
+  \param[out]  privkey Pointer to the private key
+  \param[out]  s Pointer to the signature
+  \return      \ref uint32_t
+*/
+uint32_t sc_ecc_sign_async(sc_ecc_t *ecc, uint8_t d[32], uint8_t prikey[32],
+                           uint8_t s[64], sc_ecc_curve_type type);
+
+/* TODO */
+/**
+  \brief       ecc verify
+  \param[in]   ecc       ecc handle to operate.
+  \param[in]   d       Pointer to the digest.
+  \param[out]  privkey Pointer to the private key
+  \param[out]  s Pointer to the signature
+  \return      verify result
+*/
+bool sc_ecc_verify(sc_ecc_t *ecc, uint8_t d[32], uint8_t pubkey[65],
+                   uint8_t s[64], sc_ecc_curve_type type);
+
+/**
+  \brief       ecc verify (asynchronous)
+  \param[in]   ecc       ecc handle to operate.
+  \param[in]   d         Pointer to the digest.
+  \param[in]   pubkey    Pointer to the public key.
+  \param[in]   s         Pointer to the signature.
+  \param[in]   type      Curve type \ref sc_ecc_curve_type.
+  \return      verify result
+*/
+bool sc_ecc_verify_async(sc_ecc_t *ecc, uint8_t d[32], uint8_t pubkey[65],
+                         uint8_t s[64], sc_ecc_curve_type type);
+
+/**
+  \brief       ecc encrypt
+  \param[in]   ecc         ecc handle to operate.
+  \param[in]   plain       Pointer to the plaintext.
+  \param[in]   plain_len   Plaintext length.
+  \param[in]   pubKey      Public key.
+  \param[out]  cipher      Pointer to the ciphertext.
+  \param[out]  cipher_len  Pointer to the ciphertext length.
+  \return      uint32_t
+*/
+uint32_t sc_ecc_encrypt(sc_ecc_t *ecc, uint8_t *plain, uint32_t plain_len,
+                        uint8_t pubKey[65], uint8_t *cipher,
+                        uint32_t *cipher_len);
+
+/**
+  \brief       ecc decrypt
+  \param[in]   ecc         ecc handle to operate.
+  \param[in]   cipher      Pointer to the ciphertext.
+  \param[in]   cipher_len  Ciphertext length.
+  \param[in]   prikey      Private key.
+  \param[out]  plain       Pointer to the plaintext.
+  \param[out]  plain_len   Pointer to the plaintext length.
+  \return      uint32_t
+*/
+uint32_t sc_ecc_decrypt(sc_ecc_t *ecc, uint8_t *cipher, uint32_t cipher_len,
+                        uint8_t prikey[32], uint8_t *plain,
+                        uint32_t *plain_len);
+
+/**
+  \brief       ecc key exchange
+  \param[in]   ecc       ecc handle to operate.
+  \return      uint32_t
+*/
+uint32_t sc_ecc_exchangekey(sc_ecc_t *ecc, sc_ecc_exchange_role_e role,
+                            uint8_t *da, uint8_t *pb, uint8_t *ra1, uint8_t *ra,
+                            uint8_t *rb, uint8_t *za, uint8_t *zb,
+                            uint32_t k_len, uint8_t *ka, uint8_t *s1,
+                            uint8_t *sa);
+
+/**
+  \brief       ecc key exchange get Z.
+  \param[in]   ecc       ecc handle to operate.
+  \return      uint32_t
+*/
+uint32_t sc_ecc_getZ(sc_ecc_t *ecc, uint8_t *id, uint32_t id_len,
+                     uint8_t pubkey[65], uint8_t z[32]);
+
+/**
+  \brief       ecc key exchange get E
+  \param[in]   ecc       ecc handle to operate.
+  \return      uint32_t
+*/
+uint32_t sc_ecc_getE(sc_ecc_t *ecc, uint8_t *m, uint32_t len, uint8_t z[32],
+                     uint8_t e[32]);
+
+/**
+  \brief       Get ECC state.
+  \param[in]   ecc      ECC handle to operate.
+  \param[out]  state    ECC state \ref sc_ecc_state_t.
+  \return      Error code \ref uint32_t
+*/
+uint32_t sc_ecc_get_state(sc_ecc_t *ecc, sc_ecc_state_t *state);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+#endif /* _SC_ECC_H_ */
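An end-to-end sketch of the API above: generate a P-256 key pair, sign a 32-byte digest and verify it (device index 0 and the 0 == success convention are assumptions):

    #include "sec_crypto_ecc.h"

    static int ecc_selftest(uint8_t digest[32])
    {
        sc_ecc_t ecc;
        uint8_t  pub[65], pri[32], sig[64];
        int      ok = -1;

        if (sc_ecc_init(&ecc, 0) != 0)
            return -1;

        if (sc_ecc_gen_key(&ecc, pub, pri) == 0 &&
            sc_ecc_sign(&ecc, digest, pri, sig, SC_ECC_PRIME256V1) == 0 &&
            sc_ecc_verify(&ecc, digest, pub, sig, SC_ECC_PRIME256V1))
            ok = 0;

        sc_ecc_uninit(&ecc);
        return ok;
    }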
+

+ 53 - 0
lib/sec_library/include/sec_crypto_ecdh.h

@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2017-2022 Alibaba Group Holding Limited
+ */
+/******************************************************************************
+ * @file     sec_crypto_ecdh.h
+ * @brief    Header File for curve25519( a state-of-the-art Diffie-Hellman function)
+ * @version  V3.3
+ * @date     10. June 2022
+ * @model    ecdh
+ ******************************************************************************/
+#ifndef _SC_ECDH_H_
+#define _SC_ECDH_H_
+#include "sec_include_config.h"
+
+#define CONFIG_SEC_CRYPTO_ECC
+
+#ifdef CONFIG_SEC_CRYPTO_ECC
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef SEC_LIB_VERSION
+#include "drv/ecdh.h"
+#include "drv/ecc.h"
+#include "sec_crypto_ecc.h"
+#else
+#include "ecdh.h"
+#include "ecc.h"
+#include "sec_crypto_ecc.h"
+#endif
+
+/**
+  \brief       ecdh calc secret
+  \param[in]   ecc      ecc handle to operate.
+  \param[in]   privkey  Pointer to the local private key.
+  \param[in]   pubkey   Pointer to the peer public key.
+  \param[out]  out      Pointer to the shared secret.
+  \param[out]  len      Length of the shared secret.
+  \param[in]   type     Curve type \ref sc_ecc_curve_type.
+  \return      \ref uint32_t.
+*/
+
+uint32_t sc_ecdh_calc_secret(sc_ecc_t *ecc, uint8_t privkey[32],
+                            uint8_t pubkey[65], uint8_t out[32], 
+                            uint32_t *len, sc_ecc_curve_type type) ;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+#endif /* _SC_ECDH_H_ */
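A sketch of the shared-secret derivation; the sc_ecc_t handle is assumed to be initialised with sc_ecc_init() from sec_crypto_ecc.h:

    #include "sec_crypto_ecdh.h"

    static uint32_t derive_shared(sc_ecc_t *ecc, uint8_t my_pri[32],
                                  uint8_t peer_pub[65], uint8_t secret[32])
    {
        uint32_t len = 32;

        /* Combine the local private key with the peer public key. */
        return sc_ecdh_calc_secret(ecc, my_pri, peer_pub, secret, &len,
                                   SC_ECC_PRIME256V1);
    }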

+ 2 - 2
lib/sec_library/include/sec_crypto_errcode.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2019-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2019-2020 Alibaba Group Holding Limited
  */
 
 #ifndef _SC_ERRCODE_H
@@ -89,4 +89,4 @@
 #define CHECK_PARAM CHECK_PARAM_RET
 #endif
 
-#endif
+#endif

+ 96 - 0
lib/sec_library/include/sec_crypto_kdf.h

@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2019-2020 Alibaba Group Holding Limited
+ */
+
+#ifndef __SC_KDF_H__
+#define __SC_KDF_H__
+#include "sec_crypto_errcode.h"
+#include "sec_crypto_aes.h"
+#include "sec_crypto_sm4.h"
+#include "sec_crypto_mac.h"
+#include <stdint.h>
+
+typedef enum {
+	SC_KDF_DERIVED_DFT_CHALLENGE_EK,
+	SC_KDF_DERIVED_C910TJTAG_CHALLENGE_EK,
+	SC_KDF_DERIVED_E902JTAG_CHALLENGE_EK,
+	SC_KDF_DERIVED_IMAGE_EK,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK1,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK2,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK3,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK4,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK5,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK6,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK7,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK8,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK9,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK10,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK11,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK12,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK13,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK14,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK15,
+	SC_KDF_DERIVED_SECURE_STORAGE_EK16,
+	SC_KDF_DERIVED_RPMB_ACCESS_EK,
+	SC_KDF_DERIVED_MAX,
+} sc_kdf_derived_key_t;
+
+typedef enum {
+	SC_KDF_KEY_TYPE_AES_256,
+	SC_KDF_KEY_TYPE_AES_192,
+	SC_KDF_KEY_TYPE_AES_128,
+	SC_KDF_KEY_TYPE_SM4,
+	SC_KDF_KEY_TYPE_TDES_192,
+	SC_KDF_KEY_TYPE_TDES_128,
+	SC_KDF_KEY_TYPE_DES,
+	/* for rpmb, str */
+/* 	SC_KDF_KEY_TYPE_HMAC_SHA256,
+ */
+	SC_KDF_KEY_TYPE_MAX,
+} sc_kdf_key_type_t;
+
+/**
+\brief KDF key handle
+*/
+typedef struct {
+	union {
+		sc_aes_t *aes;
+		sc_sm4_t *sm4;
+		sc_mac_t *mac;
+
+	};
+	sc_kdf_key_type_t type;
+} sc_kdf_key_handle_t;
+
+/**
+\brief KDF Ctrl Block
+*/
+typedef struct {
+	void *priv;
+} sc_kdf_t;
+
+/**
+  \brief       kdf initialize.
+  \param[in]   kdf    Handle to operate.
+  \param[in]   idx    Device id.
+  \return      error code
+*/
+uint32_t sc_kdf_init(sc_kdf_t *kdf, uint32_t idx);
+
+/**
+  \brief       kdf uninitialize.
+  \param[in]   kdf    Handle to operate
+*/
+void sc_kdf_uninit(sc_kdf_t *kdf);
+
+/**
+  \brief       Set key to algorithm engine.
+  \param[in]   handle    Handle to cipher.
+  \param[in]   kdf    Handle to operate.
+  \param[in]   dkey derived key type.
+  \return      error code
+*/
+uint32_t sc_kdf_set_key(sc_kdf_t *kdf, sc_kdf_key_handle_t *handle,
+			  sc_kdf_derived_key_t dkey);
+
+#endif
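A sketch of handing a derived key straight to a cipher engine; both handles are assumed to be initialised already, and 0 == success is an assumption:

    #include "sec_crypto_kdf.h"

    static uint32_t load_image_key(sc_kdf_t *kdf, sc_aes_t *aes)
    {
        sc_kdf_key_handle_t h;

        h.aes  = aes;                        /* cipher that receives the key */
        h.type = SC_KDF_KEY_TYPE_AES_256;    /* format of the derived key    */

        return sc_kdf_set_key(kdf, &h, SC_KDF_DERIVED_IMAGE_EK);
    }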

+ 117 - 0
lib/sec_library/include/sec_crypto_mac.h

@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
+ */
+/******************************************************************************
+ * @file     seccrypt_mac.h
+ * @brief    Header File for MAC
+ * @version  V1.0
+ * @date     20. Jul 2020
+ * @model    mac
+ ******************************************************************************/
+#ifndef _SC_MAC_H_
+#define _SC_MAC_H_
+#include "sec_include_config.h"
+#include <stdint.h>
+#include "sec_crypto_errcode.h"
+#include "sec_crypto_sha.h"
+
+#define SC_MAC_KEY_LEN_MAX 64
+#define HMAC_SHA1_BLOCK_SIZE 64
+#define HMAC_SHA224_BLOCK_SIZE 64
+#define HMAC_SM3_BLOCK_SIZE 64
+#define HMAC_SHA256_BLOCK_SIZE 64
+#define HMAC_MD5_BLOCK_SIZE 64
+#define HMAC_SHA384_BLOCK_SIZE 128
+#define HMAC_SHA512_BLOCK_SIZE 128
+#define HMAC_MAX_BLOCK_SIZE 128
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+typedef struct sc_mac {
+	sc_sha_t sha;
+	uint8_t key[HMAC_MAX_BLOCK_SIZE];
+    sc_sha_mode_t mode;
+} sc_mac_t;
+
+#define MAC_CONTEXT_SIZE sizeof(sc_sha_context_t)
+typedef struct {
+	uint8_t ctx[MAC_CONTEXT_SIZE];
+} sc_mac_context_t;
+
+/**
+  \brief       Initialize MAC Interface. Initializes the resources needed for the MAC interface
+  \param[in]   mac  operate handle.
+  \param[in]   idx index of mac
+  \return      error code \ref uint32_t
+*/
+uint32_t sc_mac_init(sc_mac_t *mac, uint32_t idx);
+
+/**
+  \brief       De-initialize MAC Interface. stops operation and releases the software resources used by the interface
+  \param[in]   mac  mac handle to operate.
+  \return      none
+*/
+void sc_mac_uninit(sc_mac_t *mac);
+
+/**
+  \brief       MAC set key function.
+  \param[in]   mac mac handle to operate.
+  \param[in]   key Pointer to the mac key.
+  \param[in]   key_len Length of key.
+  \return      error code
+*/
+uint32_t sc_mac_set_key(sc_mac_t *mac, uint8_t *key, uint32_t key_len);
+
+/**
+  \brief       MAC operation function.
+  \param[in]   mac mac handle to operate.
+  \param[in]   mode sc_sha_mode_t.
+  \param[in]   msg Pointer to the mac input message.
+  \param[in]   msg_len Length of msg.
+  \param[out]  out      mac buffer, allocated by caller.
+  \param[out]  out_len  output mac length in bytes
+               (32 bytes for HMAC_SHA256 mode).
+  \return      error code
+*/
+uint32_t sc_mac_calc(sc_mac_t *mac, sc_sha_mode_t mode, uint8_t *msg,
+		     uint32_t msg_len, uint8_t *out, uint32_t *out_len);
+
+/**
+  \brief       MAC start operation function.
+  \param[in]   mac mac handle to operate.
+  \param[in]   context mac context pointer.
+  \param[in]   mode sc_sha_mode_t.
+  \return      error code
+*/
+uint32_t sc_mac_start(sc_mac_t *mac, sc_mac_context_t *context,
+		      sc_sha_mode_t mode);
+
+/**
+  \brief       MAC update operation function.
+  \param[in]   mac mac handle to operate.
+  \param[in]   context mac context pointer.
+  \param[in]   msg Pointer to the mac input message.
+  \param[in]   msg_len Length of msg.
+  \return      error code
+*/
+uint32_t sc_mac_update(sc_mac_t *mac, sc_mac_context_t *context, uint8_t *msg,
+		       uint32_t msg_len);
+
+/**
+  \brief       MAC finish operation function.
+  \param[in]   mac mac handle to operate.
+  \param[in]   context mac context pointer.
+  \param[out]  out      mac buffer, allocated by caller.
+  \param[out]  out_len  output mac length in bytes.
+  \return      error code
+*/
+uint32_t sc_mac_finish(sc_mac_t *mac, sc_mac_context_t *context, uint8_t *out,
+		       uint32_t *out_len);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SC_MAC_H_ */
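A one-shot HMAC-SHA256 sketch over a message buffer (device index 0 and 0 == success are assumptions):

    #include "sec_crypto_mac.h"

    static uint32_t hmac_sha256(uint8_t *key, uint32_t key_len,
                                uint8_t *msg, uint32_t msg_len, uint8_t out[32])
    {
        sc_mac_t mac;
        uint32_t out_len = 32;
        uint32_t ret;

        ret = sc_mac_init(&mac, 0);
        if (ret != 0)
            return ret;

        ret = sc_mac_set_key(&mac, key, key_len);
        if (ret == 0)
            ret = sc_mac_calc(&mac, SC_SHA_MODE_256, msg, msg_len, out, &out_len);

        sc_mac_uninit(&mac);
        return ret;
    }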

+ 4 - 4
lib/sec_library/include/sec_crypto_rng.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 /******************************************************************************
  * @file     seccrypt_rng.h
@@ -13,7 +13,7 @@
 
 
 #include <stdint.h>
-#include <sec_crypto_errcode.h>
+#include "sec_crypto_errcode.h"
 
 
 #ifdef __cplusplus
@@ -23,10 +23,10 @@ extern "C" {
 /**
   \brief       Get data from the TRNG engine
   \param[out]  data  Pointer to buffer with data get from TRNG
-  \param[in]   num   Number of data items,uinit in uint32
+  \param[in]   num   Number of bytes to get from the TRNG
   \return      error code
 */
-uint32_t sc_rng_get_multi_word(uint32_t *data, uint32_t num);
+uint32_t sc_rng_get_multi_byte(uint8_t *data, uint32_t num);
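With the rename, num is a byte count rather than a word count; a minimal sketch (0 == success assumed):

    #include "sec_crypto_rng.h"

    static uint32_t fill_seed(uint8_t seed[32])
    {
        return sc_rng_get_multi_byte(seed, 32);   /* 32 bytes, not 32 words */
    }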
 
 /**
   \brief       Get data from the TRNG engine

+ 37 - 3
lib/sec_library/include/sec_crypto_rsa.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 /******************************************************************************
  * @file     seccrypt_rsa.h
@@ -10,13 +10,18 @@
  ******************************************************************************/
 #ifndef _SC_RSA_H_
 #define _SC_RSA_H_
+#include "sec_include_config.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 #ifdef CONFIG_SYSTEM_SECURE
+#ifdef SEC_LIB_VERSION
 #include "drv/rsa.h"
+#else
+#include "rsa.h"
+#endif
 #endif
 
 
@@ -27,8 +32,12 @@ extern "C" {
 
 #include <stdint.h>
 #include <stdbool.h>
-#include <drv/common.h>
-#include <sec_crypto_errcode.h>
+#ifdef SEC_LIB_VERSION
+#include "drv/common.h"
+#else
+#include "common.h"
+#endif
+#include "sec_crypto_errcode.h"
 
 
 //TODO Del this file after updating to sc2.0
@@ -286,6 +295,31 @@ uint32_t sc_rsa_enable_pm(sc_rsa_t *rsa);
 */
 void sc_rsa_disable_pm(sc_rsa_t *rsa);
 
+/**
+  \brief       Set whether decrypt errors are checked.
+  \param[in]   checked      whether decrypt errors are checked.
+*/
+void sc_rsa_set_ignore_decrypt_error(bool checked);
+
+/**
+  \brief       Get the public key from the primes p and q
+  \param[in]   rsa          rsa handle to operate.
+  \param[in]   context      Pointer to the rsa context
+  \param[in]   p            Pointer to the prime p
+  \param[in]   p_byte_len   Byte length of the prime p
+  \param[in]   q            Pointer to the prime q
+  \param[in]   q_byte_len   Byte length of the prime q
+  \param[out]  out          Pointer to the public key buffer
+  \return      error code \ref uint32_t
+*/
+uint32_t sc_rsa_get_publickey(sc_rsa_t *rsa, sc_rsa_context_t *context, void *p, uint32_t p_byte_len, 
+                                void *q, uint32_t q_byte_len, void *out);
+
+/**
+  \brief       Generate RSA key pairs
+  \param[in]   rsa          rsa handle to operate.
+  \param[in]   context      Pointer to the rsa context
+*/
+uint32_t sc_rsa_gen_keypairs(sc_rsa_t *rsa, sc_rsa_context_t *context);
 #ifdef __cplusplus
 }
 #endif
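A sketch of the new key-generation wrapper; the sc_rsa_t handle and the buffers behind sc_rsa_context_t are assumed to be prepared as for the existing sc_rsa_* calls:

    #include "sec_crypto_rsa.h"

    static uint32_t rsa_regenerate(sc_rsa_t *rsa, sc_rsa_context_t *ctx)
    {
        /* Assumption: true means decrypt errors are checked and reported. */
        sc_rsa_set_ignore_decrypt_error(true);

        return sc_rsa_gen_keypairs(rsa, ctx);
    }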

+ 39 - 5
lib/sec_library/include/sec_crypto_sha.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 /******************************************************************************
  * @file     seccrypt_sha.h
@@ -10,17 +10,26 @@
  ******************************************************************************/
 #ifndef _SC_SHA_H_
 #define _SC_SHA_H_
+#include "sec_include_config.h"
 
 #include <stdint.h>
 #ifdef CONFIG_SYSTEM_SECURE
+#ifdef SEC_LIB_VERSION
 #include "drv/sha.h"
+#else
+#include "sha.h"
+#endif
 #include "soc.h"
 #endif
 #ifdef CONFIG_SEC_CRYPTO_SM3
+#ifdef SEC_LIB_VERSION
 #include "drv/sm3.h"
+#else
+#include "sm3.h"
+#endif
 #endif
 
-#include <sec_crypto_errcode.h>
+#include "sec_crypto_errcode.h"
 
 
 #ifdef CONFIG_SEC_CRYPTO_SHA_SW
@@ -34,13 +43,14 @@ extern "C" {
 
 /*----- SHA Control Codes: Mode -----*/
 typedef enum {
-    SC_SHA_MODE_1 = 1U,  ///< SHA_1 mode
+    SC_SHA_MODE_SHA1 = 1U,  ///< SHA_1 mode
     SC_SHA_MODE_256,     ///< SHA_256 mode
     SC_SHA_MODE_224,     ///< SHA_224 mode
     SC_SHA_MODE_512,     ///< SHA_512 mode
     SC_SHA_MODE_384,     ///< SHA_384 mode
     SC_SHA_MODE_512_256, ///< SHA_512_256 mode
-    SC_SHA_MODE_512_224,  ///< SHA_512_224 mode
+    SC_SHA_MODE_512_224, ///< SHA_512_224 mode
+    SC_SHA_MODE_MD5,     ///< MD5 mode
     SC_SM3_MODE,
 } sc_sha_mode_t;
 
@@ -60,6 +70,8 @@ uint8_t ctx[SHA_CONTEXT_SIZE];
 #ifdef CONFIG_CSI_V2
   csi_sha_context_t ctx;
   csi_sm3_context_t sm3ctx;
+  csi_sha_state_t   state;
+  csi_sm3_state_t   sm3state;
 #endif
 #endif
 #if defined(CONFIG_TEE_CA)
@@ -69,7 +81,7 @@ uint8_t ctx[SHA_CONTEXT_SIZE];
   sc_mbedtls_sha1_context sha1_ctx;
   sc_mbedtls_sha256_context sha2_ctx;
 #endif
-    sc_sha_mode_t mode;        ///< sha mode
+  sc_sha_mode_t mode;        ///< sha mode
 } sc_sha_context_t;
 
 /****** SHA Event *****/
@@ -166,6 +178,28 @@ uint32_t sc_sha_update_async(sc_sha_t *sha, sc_sha_context_t *context, const voi
 */
 uint32_t sc_sha_finish(sc_sha_t *sha, sc_sha_context_t *context, void *output, uint32_t *out_size);
 
+/**
+  \brief       calculate the digest
+  \param[in]   sha      sha handle to operate.
+  \param[in]   idx      index of sha
+  \param[in]   context  Pointer to the sha context \ref sc_sha_context_t
+  \param[in]   mode     sha mode \ref sc_sha_mode_t
+  \param[in]   input    Pointer to the Source data
+  \param[in]   size     the data size
+  \param[out]  output   Pointer to the result data
+  \param[in]   out_size the result buffer size (bytes)
+  \return      error code \ref uint32_t
+*/
+uint32_t sc_sha_digest(sc_sha_t *sha, uint32_t idx, sc_sha_context_t *context, sc_sha_mode_t mode,
+                        const void *input, uint32_t size, void *output, uint32_t out_size);            
+
+/**
+  \brief       Get the sha engine state
+  \param[in]   sha      sha handle to operate.
+  \param[in]   context  Pointer to the sha context \ref sc_sha_context_t
+  \return      error code \ref uint32_t
+*/
+uint32_t sc_sha_get_state(sc_sha_t *sha, sc_sha_context_t *context);
 #ifdef __cplusplus
 }
 #endif
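A one-shot digest sketch using the new sc_sha_digest() helper; whether a prior sc_sha_init() is still required is not stated in this hunk, so the call sequence is an assumption:

    #include "sec_crypto_sha.h"

    static uint32_t sha256_oneshot(const void *msg, uint32_t len, uint8_t out[32])
    {
        sc_sha_t         sha;
        sc_sha_context_t ctx;

        /* idx 0: first SHA instance; out_size is the size of 'out' in bytes. */
        return sc_sha_digest(&sha, 0, &ctx, SC_SHA_MODE_256, msg, len, out, 32);
    }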

+ 7 - 1
lib/sec_library/include/sec_crypto_sm2.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 /******************************************************************************
  * @file     sec_crypt_sm2.h
@@ -10,6 +10,7 @@
  ******************************************************************************/
 #ifndef _SC_SM2_H_
 #define _SC_SM2_H_
+#include "sec_include_config.h"
 
 #ifdef CONFIG_SEC_CRYPTO_SM2
 
@@ -17,7 +18,12 @@
 extern "C" {
 #endif
 
+#ifdef SEC_LIB_VERSION
 #include "drv/sm2.h"
+#else
+#include "sm2.h"
+#endif
+
 
 typedef struct {
         uint32_t sm2_curve : 1; ///< supports 256bits curve

+ 8 - 4
lib/sec_library/include/sec_crypto_sm4.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 /******************************************************************************
  * @file     sec_crypt_sm4.h
@@ -11,10 +11,14 @@
 
 #ifndef _SC_SM4_H_
 #define _SC_SM4_H_
-
+#include "sec_include_config.h"
 
 #ifdef CONFIG_CSI_V2
+#ifdef SEC_LIB_VERSION
 #include "drv/sm4.h"
+#else
+#include "sm4.h"
+#endif
 #endif
 
 #ifdef __cplusplus
@@ -51,7 +55,7 @@ void sc_sm4_uninit(sc_sm4_t *sm4);
   \param[in]   key        Pointer to the key buf
   \return      error code \ref uint32_t
 */
-uint32_t sc_sm4_set_encrypt_key(sc_sm4_t *sm4, uint8_t *key);
+uint32_t sc_sm4_set_encrypt_key(sc_sm4_t *sm4, uint8_t *key, csi_sm4_key_bits_t key_len);
 
 /**
   \brief       Set decrypt key
@@ -59,7 +63,7 @@ uint32_t sc_sm4_set_encrypt_key(sc_sm4_t *sm4, uint8_t *key);
   \param[in]   key        Pointer to the key buf
   \return      error code \ref uint32_t
 */
-uint32_t sc_sm4_set_decrypt_key(sc_sm4_t *sm4, uint8_t *key);
+uint32_t sc_sm4_set_decrypt_key(sc_sm4_t *sm4, uint8_t *key, csi_sm4_key_bits_t key_len);
 
 /**
   \brief       sm4 ecb encrypt

+ 11 - 0
lib/sec_library/include/sec_include_config.h

@@ -0,0 +1,11 @@
+/*
+ * Copyright (C) 2019-2020 Alibaba Group Holding Limited
+ */
+#ifndef __SEC_INCLUDE_CONFIG__
+#define __SEC_INCLUDE_CONFIG__
+
+#define CONFIG_SYSTEM_SECURE 1
+#define CONFIG_CSI_V2        1
+#define CONFIG_SEC_CRYPTO_SM2 1
+#define CONFIG_SEC_CRYPTO_SM3 1
+#endif	/* __SEC_INCLUDE_CONFIG__ */

+ 15 - 1
lib/sec_library/include/sec_library.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 /******************************************************************************
  * @file     sec_library.h
@@ -12,4 +12,18 @@
 #ifndef _SL_H_
 #define _SL_H_
 
+#include "sec_crypto_errcode.h"
+#include "sec_crypto_common.h"
+#include "sec_crypto_aes.h"
+#include "sec_crypto_rng.h"
+#include "sec_crypto_rsa.h"
+#include "sec_crypto_sha.h"
+#include "sec_crypto_sm2.h"
+#include "sec_crypto_sm4.h"
+#include "sec_crypto_kdf.h"
+#include "csi_efuse_api.h"
+#include "csi_efuse_api.h"
+#include "csi_sec_img_verify.h"
+/* NOTE add more header */
+
 #endif /* _SL_H_ */

+ 19 - 6
lib/sec_library/include/sha.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 
 /******************************************************************************
@@ -13,22 +13,31 @@
 #ifndef _DRV_SHA_H_
 #define _DRV_SHA_H_
 
-#include <drv/common.h>
-#include <drv/dma.h>
+#include "common.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
+#define HASH_DATAIN_BLOCK_SIZE   64
+
+#define SHA1_DIGEST_OUT_SIZE     20
+#define SHA224_DIGEST_OUT_SIZE   28
+#define SHA256_DIGEST_OUT_SIZE   32
+#define SHA384_DIGEST_OUT_SIZE   48
+#define SHA512_DIGEST_OUT_SIZE   64
+#define MD5_DIGEST_OUT_SIZE      16
+
 /****** SHA mode ******/
 typedef enum {
-    SHA_MODE_1                    = 1U,   ///< SHA_1 mode
+    SHA_MODE_SHA1                 = 1U,   ///< SHA_1 mode
     SHA_MODE_256,                         ///< SHA_256 mode
     SHA_MODE_224,                         ///< SHA_224 mode
     SHA_MODE_512,                         ///< SHA_512 mode
     SHA_MODE_384,                         ///< SHA_384 mode
     SHA_MODE_512_256,                     ///< SHA_512_256 mode
-    SHA_MODE_512_224                      ///< SHA_512_224 mode
+    SHA_MODE_512_224,                     ///< SHA_512_224 mode
+    SHA_MODE_MD5                          ///< MD5 mode
 } csi_sha_mode_t;
 
 /****** SHA State ******/
@@ -42,11 +51,16 @@ typedef struct {
     uint32_t        total[2];             ///< Number of bytes processed
     uint32_t        state[16];            ///< Intermediate digest state
     uint8_t         buffer[128];          ///< Data block being processed
+    uint8_t         result[64];           ///< Digest result of the processed data
+    uint32_t        process_len;
+    uint32_t        digest_len;
 } csi_sha_context_t;
 
 /****** SHA Event ******/
 typedef enum {
     SHA_EVENT_COMPLETE    = 0U,           ///< Calculate completed
+    SHA_EVENT_UPDATE,
+    SHA_EVENT_START,
     SHA_EVENT_ERROR                       ///< Calculate error
 } csi_sha_event_t;
 
@@ -56,7 +70,6 @@ struct csi_sha {
     csi_dev_t               dev;                                          ///< SHA hw-device info
     void (*callback)(csi_sha_t *sha, csi_sha_event_t event, void *arg);   ///< SHA event callback for user
     void                    *arg;                                         ///< SHA custom designed param passed to evt_cb
-    csi_dma_ch_t            *dma_in;                                      ///< SHA in dma handle param
     csi_sha_state_t         state;                                        ///< SHA state
     void                    *priv;
 };

+ 3 - 3
lib/sec_library/include/sm2.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 
 /******************************************************************************
@@ -14,7 +14,7 @@
 #define _DRV_SM2_H_
 
 #include <stdint.h>
-#include <drv/common.h>
+#include "common.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -255,4 +255,4 @@ void csi_sm2_disable_pm(csi_sm2_t *sm2);
 extern "C" {
 #endif
 
-#endif
+#endif

+ 20 - 16
lib/sec_library/include/sm3.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 
 /******************************************************************************
@@ -14,17 +14,20 @@
 #define _DRV_SM3_H_
 
 #include <stdint.h>
-#include <drv/common.h>
-#include <drv/dma.h>
+#include "common.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
+#define SM3_DATAIN_BLOCK_SIZE 64
+#define SM3_DIGEST_OUT_SIZE   32
+
 typedef struct {
-        uint32_t total[2];   ///< Number of bytes processed
-        uint32_t state[16];  ///< Intermediate digest state
-        uint8_t  buffer[64]; ///< Data block  beingprocessed
+    uint32_t total[2];   ///< Number of bytes processed
+    uint32_t state[16];  ///< Intermediate digest state
+    uint8_t  buffer[SM3_DATAIN_BLOCK_SIZE]; ///< Data block being processed
+    uint8_t  result[SM3_DIGEST_OUT_SIZE]; ///< Digest result of the processed data
 } csi_sm3_context_t;
 
 /****** SM3 State ******/
@@ -36,20 +39,21 @@ typedef struct {
 
 /****** SM3 Event ******/
 typedef enum {
-        SM3_EVENT_COMPLETE = 0U, ///< Calculate completed
-        SM3_EVENT_ERROR          ///< Calculate error
+    SM3_EVENT_COMPLETE = 0U, ///< Calculate completed
+    SM3_EVENT_UPDATE,
+    SM3_EVENT_START,
+    SM3_EVENT_ERROR          ///< Calculate error
 } csi_sm3_event_t;
 
 typedef struct csi_sm3_t csi_sm3_t;
 
 struct csi_sm3_t {
-        csi_dev_t dev; ///< SM3 hw-device info
-        void (*callback)(csi_sm3_t *sm3, csi_sm3_event_t event,
-                         void *arg); ///< SM3 event callback for user
-        void *          arg;    ///< SM3 custom designed param passed to evt_cb
-        csi_dma_ch_t *  dma_in; ///< SM3 in dma handle param
-        csi_sm3_state_t state;  ///< SM3 state
-        void *          priv;
+    csi_dev_t dev; ///< SM3 hw-device info
+    void (*callback)(csi_sm3_t *sm3, csi_sm3_event_t event,
+                        void *arg); ///< SM3 event callback for user
+    void *          arg;    ///< SM3 custom designed param passed to evt_cb
+    csi_sm3_state_t state;  ///< SM3 state
+    void *          priv;
 };
 
 // Function documentation
@@ -152,4 +156,4 @@ void csi_sm3_disable_pm(csi_sm3_t *sm3);
 extern "C" {
 #endif
 
-#endif //_DRV_SM3_H
+#endif //_DRV_SM3_H

+ 29 - 6
lib/sec_library/include/sm4.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2021 Alibaba Group Holding Limited
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
  */
 
 /******************************************************************************
@@ -14,18 +14,41 @@
 #define _DRV_SM4_H_
 
 #include <stdint.h>
-#include <drv/common.h>
+#include "common.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
+#define SM4_KEY_LEN_BYTES_32 32
+#define SM4_KEY_LEN_BYTES_24 24
+#define SM4_KEY_LEN_BYTES_16 16
+
+typedef enum {
+    SM4_KEY_LEN_BITS_128        = 0,       /* 128-bit key */
+    SM4_KEY_LEN_BITS_256                   /* 256-bit key */
+} csi_sm4_key_bits_t;
+
+typedef struct {
+    uint32_t busy             : 1;        ///< Calculate busy flag
+    uint32_t error            : 1;        ///< Calculate error flag
+} csi_sm4_state_t;
+
+typedef struct {
+    uint32_t            key_len_byte;
+    uint8_t             key[32];          ///< Key buffer
+    uint32_t            sca;
+} csi_sm4_context_t;
+
 /**
 \brief SM4 Ctrl Block
 */
 typedef struct {
-        csi_dev_t dev;
-        void *    priv;
+    csi_sm4_state_t   state;
+    csi_sm4_context_t context;
+    csi_dev_t         dev;
+    void *            priv;
+    uint32_t          is_kdf;
 } csi_sm4_t;
 
 // Function documentation
@@ -50,7 +73,7 @@ void csi_sm4_uninit(csi_sm4_t *sm4);
   \param[in]   key        Pointer to the key buf
   \return      error code \ref uint32_t
 */
-csi_error_t csi_sm4_set_encrypt_key(csi_sm4_t *sm4, uint8_t *key);
+csi_error_t csi_sm4_set_encrypt_key(csi_sm4_t *sm4, uint8_t *key, csi_sm4_key_bits_t key_len);
 
 /**
   \brief       Set decrypt key
@@ -58,7 +81,7 @@ csi_error_t csi_sm4_set_encrypt_key(csi_sm4_t *sm4, uint8_t *key);
   \param[in]   key        Pointer to the key buf
   \return      error code \ref uint32_t
 */
-csi_error_t csi_sm4_set_decrypt_key(csi_sm4_t *sm4, uint8_t *key);
+csi_error_t csi_sm4_set_decrypt_key(csi_sm4_t *sm4, uint8_t *key, csi_sm4_key_bits_t key_len);
 
 /**
   \brief       sm4 ecb encrypt
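Both key-setup calls now take an explicit csi_sm4_key_bits_t. A minimal sketch of the updated call, assuming an already-initialized csi_sm4_t handle (initialization is outside this hunk):

#include "sm4.h"   /* the header changed above */

static csi_error_t sm4_load_encrypt_key_128(csi_sm4_t *sm4,
                                            uint8_t key[SM4_KEY_LEN_BYTES_16])
{
    /* 16 key bytes pair with the SM4_KEY_LEN_BITS_128 selector. */
    return csi_sm4_set_encrypt_key(sm4, key, SM4_KEY_LEN_BITS_128);
}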

+ 484 - 0
lib/sec_library/include/soc.h

@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
+ */
+
+/******************************************************************************
+ * @file     soc.h
+ * @brief    CSI Core Peripheral Access Layer Header File for
+ *           CSKYSOC Device Series
+ * @version  V1.0
+ * @date     7. April 2020
+ ******************************************************************************/
+
+#ifndef _SOC_H_
+#define _SOC_H_
+
+#include <stdint.h>
+#include "csi_core.h"
+#include "sys_clk.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef EHS_VALUE
+#define EHS_VALUE               20000000U
+#endif
+
+#ifndef ELS_VALUE
+#define ELS_VALUE               32768U
+#endif
+
+#ifndef IHS_VALUE
+#define IHS_VALUE               50000000U
+#endif
+
+#ifndef ILS_VALUE
+#define ILS_VALUE               32768U
+#endif
+
+#define RISCV_CORE_TIM_FREQ 3000000
+
+typedef enum {
+    Supervisor_Software_IRQn        =  1U,
+    Machine_Software_IRQn           =  3U,
+    Supervisor_Timer_IRQn           =  5U,
+    CORET_IRQn                      =  7U,
+    Supervisor_External_IRQn        =  9U,
+    Machine_External_IRQn           =  11U,
+    DW_TIMER0_IRQn                  =  16U,
+    DW_TIMER1_IRQn                  =  17U,
+    DW_TIMER2_IRQn                  =  18U,
+    DW_TIMER3_IRQn                  =  19U,
+    DW_TIMER4_IRQn                  =  20U,
+    DW_TIMER5_IRQn                  =  21U,
+    DW_TIMER6_IRQn                  =  22U,
+    DW_TIMER7_IRQn                  =  23U,
+    WJ_MBOX_IRQn                    =  28U,
+    DW_UART0_IRQn                   =  36U,
+    DW_UART1_IRQn                   =  37U,
+    DW_UART2_IRQn                   =  38U,
+    DW_UART3_IRQn                   =  39U,
+    DW_UART4_IRQn                   =  40U,
+    DW_UART5_IRQn                   =  41U,
+    DW_I2C0_IRQn                    =  44U,
+    DW_I2C2_IRQn                    =  46U,
+    DW_QSPI0_IRQn                   =  52U,
+    DW_QSPI1_IRQn                   =  53U,
+    DW_SPI0_IRQn                    =  54U,
+    DW_GPIO0_IRQn                   =  56U,
+    DW_GPIO1_IRQn                   =  57U,
+    DW_GPIO2_IRQn                   =  58U,
+    DW_GPIO3_IRQn                   =  59U,
+    DW_EMMC_IRQn                    =  62U,
+    DW_SD_IRQn                      =  64U,
+    DW_USB_IRQn                     =  68U,
+    DW_DMA0_IRQn                    =  27U,
+    DCD_ISO7816_IRQn                =  69U,
+    DW_DMA1_IRQn                    =  71U,
+    DW_DMA2_IRQn                    =  72U,
+    DW_DMA3_IRQn                    =  73U,
+    WJ_EFUSE_IRQn                   =  80U,
+    DW_WDT0_IRQn                    =  111U,
+    DW_WDT1_IRQn                    =  112U,
+    RB_120SI_AV_IRQn                =  121U,
+    RB_120SII_AV_IRQn               =  124U,
+    RB_120SIII_AV_IRQn              =  127U,
+    RB_150B_AIC_IRQn                =  128U,
+    RB_150B_PKA1_IRQn               =  130U,
+    RB_150B_ERR_IRQn                =  132U,
+    RB_150B_TRNG_IRQn               =  133U,
+} irqn_type_t;
+
+typedef enum {
+    WJ_IOCTL_Wakeupn               =  29U,     /* IOCTOL wakeup */
+} wakeupn_type_t;
+
+typedef enum {
+    WJ_USB_CLK_MANAGERN            = 28U,
+} clk_manager_type_t;
+
+typedef enum {
+	PAD_GRP_BASE1,
+	PAD_UART0_TXD = PAD_GRP_BASE1,
+	PAD_UART0_RXD,
+	PAD_QSPI0_SCLK,
+	PAD_QSPI0_CSN0,
+	PAD_QSPI0_CSN1,
+	PAD_QSPI0_D0_MOSI,
+	PAD_QSPI0_D1_MISO,
+	PAD_QSPI0_D2_WP,
+	PAD_QSPI0_D3_HOLD,
+	PAD_I2C2_SCL,
+	PAD_I2C2_SDA,
+	PAD_I2C3_SCL,
+	PAD_I2C3_SDA,
+	PAD_GPIO2_13,
+	PAD_SPI_SCLK,
+	PAD_SPI_CSN,
+	PAD_SPI_MOSI,
+	PAD_SPI_MISO,
+	PAD_GPIO2_18,
+	PAD_GPIO2_19,
+	PAD_GPIO2_20,
+	PAD_GPIO2_21,
+	PAD_GPIO2_22,
+	PAD_GPIO2_23,
+	PAD_GPIO2_24,
+	PAD_GPIO2_25,
+	PAD_SDIO0_WPRTN,
+	PAD_SDIO0_DETN,
+	PAD_SDIO1_WPRTN,
+	PAD_SDIO1_DETN,
+	PAD_GPIO2_30,
+	PAD_GPIO2_31,
+	PAD_GPIO3_0,
+	PAD_GPIO3_1,
+	PAD_GPIO3_2,
+	PAD_GPIO3_3,
+	PAD_HDMI_SCL,
+	PAD_HDMI_SDA,
+	PAD_HDMI_CEC,
+	PAD_GMAC0_TX_CLK,
+	PAD_GMAC0_RX_CLK,
+	PAD_GMAC0_TXEN,
+	PAD_GMAC0_TXD0,
+	PAD_GMAC0_TXD1,
+	PAD_GMAC0_TXD2,
+	PAD_GMAC0_TXD3,
+	PAD_GMAC0_RXDV,
+	PAD_GMAC0_RXD0,
+	PAD_GMAC0_RXD1,
+	PAD_GMAC0_RXD2,
+	PAD_GMAC0_RXD3,
+	PAD_GMAC0_MDC,
+	PAD_GMAC0_MDIO,
+	PAD_GMAC0_COL,
+	PAD_GMAC0_CRS,
+
+	PAD_GRP_BASE2,
+	PAD_QSPI1_SCLK = PAD_GRP_BASE2,
+	PAD_QSPI1_CSN0,
+	PAD_QSPI1_D0_MOSI,
+	PAD_QSPI1_D1_MISO,
+	PAD_QSPI1_D2_WP,
+	PAD_QSPI1_D3_HOLD,
+	PAD_I2C0_SCL,
+	PAD_I2C0_SDA,
+	PAD_I2C1_SCL,
+	PAD_I2C1_SDA,
+	PAD_UART1_TXD,
+	PAD_UART1_RXD,
+	PAD_UART4_TXD,
+	PAD_UART4_RXD,
+	PAD_UART4_CTSN,
+	PAD_UART4_RTSN,
+	PAD_UART3_TXD,
+	PAD_UART3_RXD,
+	PAD_GPIO0_18,
+	PAD_GPIO0_19,
+	PAD_GPIO0_20,
+	PAD_GPIO0_21,
+	PAD_GPIO0_22,
+	PAD_GPIO0_23,
+	PAD_GPIO0_24,
+	PAD_GPIO0_25,
+	PAD_GPIO0_26,
+	PAD_GPIO0_27,
+	PAD_GPIO0_28,
+	PAD_GPIO0_29,
+	PAD_GPIO0_30,
+	PAD_GPIO0_31,
+	PAD_GPIO1_0,
+	PAD_GPIO1_1,
+	PAD_GPIO1_2,
+	PAD_GPIO1_3,
+	PAD_GPIO1_4,
+	PAD_GPIO1_5,
+	PAD_GPIO1_6,
+	PAD_GPIO1_7,
+	PAD_GPIO1_8,
+	PAD_GPIO1_9,
+	PAD_GPIO1_10,
+	PAD_GPIO1_11,
+	PAD_GPIO1_12,
+	PAD_GPIO1_13,
+	PAD_GPIO1_14,
+	PAD_GPIO1_15,
+	PAD_GPIO1_16,
+	PAD_CLK_OUT_0,
+	PAD_CLK_OUT_1,
+	PAD_CLK_OUT_2,
+	PAD_CLK_OUT_3,
+	PAD_GPIO1_21,
+	PAD_GPIO1_22,
+	PAD_GPIO1_23,
+	PAD_GPIO1_24,
+	PAD_GPIO1_25,
+	PAD_GPIO1_26,
+	PAD_GPIO1_27,
+	PAD_GPIO1_28,
+	PAD_GPIO1_29,
+	PAD_GPIO1_30,
+} pin_name_t;
+
+
+typedef enum {
+	PAD_UART0_TXD_ALT_TXD       =0,
+	PAD_UART0_TXD_ALT_GPIO2_0   =3,
+	PAD_UART0_RXD_ALT_RXD       =0,
+	PAD_UART0_RXD_ALT_GPIO2_1   =3,
+    PAD_QSPI0_SCLK_ALT_QSPI0_SCK= 0,
+    PAD_QSPI0_SCLK_ALT_PWM0     = 1,
+    PAD_QSPI0_SCLK_ALT_I2S_SDA0 = 2,
+    PAD_QSPI0_SCLK_ALT_GPIO2_2  = 3,
+    PAD_QSPI0_CSN0_ALT_QSPI0_CSN0=0,
+    PAD_QSPI0_CSN0_ALT_PWM1      =1,
+    PAD_QSPI0_CSN0_ALT_I2S_SDA1  =2,
+    PAD_QSPI0_CSN0_ALT_GPIO2_3   =3,
+    PAD_QSPI0_CSN1_ALT_QSPI0_CSN1=0,
+    PAD_QSPI0_CSN1_ALT_PWM2      =1,
+    PAD_QSPI0_CSN1_ALT_I2S_SDA2  =2,
+    PAD_QSPI0_CSN1_ALT_GPIO2_4   =3,
+    PAD_QSPI0_D0_MOSI_ALT_QSPI0_MOSI=0,
+    PAD_QSPI0_D0_MOSI_ALT_PWM3      =1,
+    PAD_QSPI0_D0_MOSI_ALT_I2S_SDA3  =2,
+    PAD_QSPI0_D0_MOSI_ALT_GPIO2_5   =3,
+    PAD_QSPI0_D1_MISO_ALT_QSPI0_MISO=0,
+    PAD_QSPI0_D1_MISO_ALT_QSPI0_PWM4=1,
+    PAD_QSPI0_D1_MISO_ALT_I2S_MCLK  =2,
+    PAD_QSPI0_D1_MISO_ALT_GPIO2_6   =3,
+    PAD_QSPI0_D2_WP_ALT_QSPI0_WP    =0,
+    PAD_QSPI0_D2_WP_ALT_PWM5        =1,
+    PAD_QSPI0_D2_WP_ALT_I2S_SCK     =2,
+    PAD_QSPI0_D2_WP_ALT_GIOP2_7     =3,
+    PAD_QSPI0_D3_HOLD_ALT_QSPI0_HOLD=0,
+    PAD_QSPI0_D3_HOLD_ALT_I2S_WS    =2,
+    PAD_QSPI0_D3_HOLD_ALT_GPIO2_8   =3,
+
+	PAD_UART1_TXD_ALT_TXD           =0,
+	PAD_UART1_TXD_ALT_GPIO0_10      =3,
+	PAD_UART1_RXD_ALT_RXD           =0,
+	PAD_UART1_RXD_ALT_GPIO011       =3,
+
+    PIN_FUNC_GPIO                   =  3U,
+} pin_func_t;
+
+#define CONFIG_GPIO_NUM             3
+#define CONFIG_IRQ_NUM              112
+#define CONFIG_DMA_NUM              1
+
+#define WJ_EFUSE_BASE               0xFFFF210000UL
+#define WJ_EFUSE_SIZE               0x10000U
+
+#define DW_USB_BASE                 0xFFE7040000UL
+#define DW_USB_SIZE                 0x10000U
+
+#define DW_TIMER0_BASE              0xFFEFC32000UL
+#define DW_TIMER0_SIZE              0x14U
+
+#define DW_TIMER1_BASE              (DW_TIMER0_BASE+DW_TIMER0_SIZE)
+#define DW_TIMER1_SIZE              DW_TIMER0_SIZE
+
+#define DW_TIMER2_BASE              0xFFFFC33000UL
+#define DW_TIMER2_SIZE              DW_TIMER1_SIZE
+
+#define DW_TIMER3_BASE              (DW_TIMER2_BASE+DW_TIMER2_SIZE)
+#define DW_TIMER3_SIZE              DW_TIMER2_SIZE
+
+#define DW_UART0_BASE               0xFFE7014000UL
+#define DW_UART0_SIZE               0x4000U
+
+#define DW_UART1_BASE               0xFFE7F00000UL
+#define DW_UART1_SIZE               0x4000U
+
+#define DW_UART2_BASE               0xFFEC010000UL
+#define DW_UART2_SIZE               0x4000U
+
+#define DW_UART3_BASE               0xFFE7F04000UL
+#define DW_UART3_SIZE               0x4000U
+
+#define DW_UART4_BASE               0xFFF7F08000UL
+#define DW_UART4_SIZE               0x4000U
+
+#define DW_UART5_BASE               0xFFF7F0C000UL
+#define DW_UART5_SIZE               0x4000U
+
+#define DW_GPIO0_BASE               0xFFEC005000UL
+#define DW_GPIO0_SIZE               0x1000U
+
+#define DW_GPIO1_BASE               0xFFEC006000UL
+#define DW_GPIO1_SIZE               0x1000U
+
+#define DW_GPIO2_BASE               0xFFE7F34000UL
+#define DW_GPIO2_SIZE               0x4000U
+
+#define DW_GPIO3_BASE               0xFFE7F38000UL
+#define DW_GPIO3_SIZE               0x4000U
+
+#define DW_WDT_BASE                 0xFFEFC30000UL
+#define DW_WDT_BASE_SZIE            0x1000U
+
+#define DW_DMA_BASE                 0xFFEFC00000UL
+#define DW_DMA_BASE_SZIE            0x4000U
+
+#define WJ_IOC_BASE1                0xFFEC007000UL
+#define WJ_IOC_SIZE                 0x1000U
+
+#define WJ_IOC_BASE2                0xFFE7F3C000UL
+#define WJ_IOC_SIZE                 0x1000U
+
+#define WJ_CPR_BASE                 0xFFCB000000UL
+#define WJ_CPR_BASE_SIZE            0x1000000U
+
+#define DW_SPI0_BASE                0xFFF700C000UL
+#define DW_SPI0_BASE_SIZE           0x10000U
+
+#define DW_QSPI0_BASE              0xFFEA000000UL
+#define DW_QSPI0_BASE_SIZE         0x10000U
+
+#define DW_QSPI1_BASE              0xFFE8000000UL
+#define DW_QSPI1_BASE_SIZE         0x10000U
+
+#define DW_I2C0_BASE               0xFFE701C000UL
+#define DW_I2C0_BASE_SIZE          0x4000U
+
+#define DW_I2C1_BASE               0xFFE7F24000UL
+#define DW_I2C1_BASE_SIZE          0x4000U
+
+#define DW_I2C2_BASE               0xFFEC00C000UL
+#define DW_I2C2_BASE_SIZE          0x4000U
+
+#define DW_I2C3_BASE               0xFFFC010000UL
+#define DW_I2C3_BASE_SIZE          0x4000U
+
+#define DW_I2C4_BASE               0xFFE7F28000UL
+#define DW_I2C4_BASE_SIZE          0x4000U
+
+#define DW_I2C5_BASE               0xFFE7F2C000UL
+#define DW_I2C5_BASE_SIZE          0x4000U
+
+#define WJ_MBOX_BASE               0xFFFFC38000UL
+#define WJ_MBOX_SIZE               0x1000U
+
+#define WJ_MBOX1_BASE              0xFFFFC48000UL
+#define WJ_MBOX1_SIZE              0x1000U
+
+#define DW_EMMC_BASE               0xFFE7080000UL
+#define DW_EMMC_SIZE               0x1000U
+
+#define DW_SD_BASE                 0xFFE7090000UL
+#define DW_SD_SIZE                 0x1000U
+
+#define DCD_ISO7816_BASE           0xFFF7F30000ULL
+#define DCD_ISO7816_SIZE           0x4000UL
+
+#define RB_RNG_BASE                0xFFFF300000UL
+#define RB_RNG_SIZE                0x10000U
+
+#define RB_EIP150B_BASE            0xFFFF300000UL
+#define RB_EIP150B_SIZE            0x10000U
+
+
+#define RB_EIP28_BASE              (RB_EIP150B_BASE + 0x4000)
+#define RB_EIP28_SIZE              0x3FFCU
+
+#define RB_EIP120SI_BASE           0xFFFF310000UL
+#define RB_EIP120SI_SIZE           0x10000U
+
+#define RB_EIP120SII_BASE          0xFFFF320000UL
+#define RB_EIP120SII_SIZE          0x10000U
+
+#define RB_EIP120SIII_BASE         0xFFFF330000UL
+#define RB_EIP120SIII_SIZE         0x10000U
+
+#define TEE_SYS_BASE               0xFFFF200000UL
+#define TEE_SYS_SIZE               0x10000U
+
+#define PLIC_BASE                  0xFFD8000000ULL
+
+#define WJ_AON_SYSRST_GEN_BASE     0xFFFFF44000UL
+#define WJ_AON_SYSRST_GEN_SIZE     0x2000U
+#define KEYRAM_BASE                0xFFFF260000UL
+#define KEYRAM_SIZE                0x10000U
+
+#define TEE_SYS_BASE               0xFFFF200000UL
+#define TEE_SYS_SIZE               0x10000U
+#define TEE_SYS_EFUSE_LC_PRELD_OFF 0x64
+#define TEE_SYS_EFUSE_LC_READ_OFF  0x68
+#define TEE_SYS_EFUSE_DBG_KEY1_OFF 0x70
+
+#define IOPMP_EIP120I_BASE         0xFFFF220000UL
+#define IOPMP_EIP120I_SIZE         0x10000
+#define IOPMP_EIP120II_BASE        0xFFFF230000UL
+#define IOPMP_EIP120II_SIZE        0x10000
+#define IOPMP_EIP120III_BASE       0xFFFF240000UL
+#define IOPMP_EIP120III_SIZE       0x10000
+#define IOPMP_TEE_DMAC_BASE        0xFFFF250000UL
+#define IOPMP_TEE_DMAC_SIZE        0x10000
+
+#define IOPMP_EMMC_BASE            0xFFFC028000UL
+#define IOPMP_EMMC_SIZE            0x1000
+#define IOPMP_SDIO0_BASE           0xFFFC029000UL
+#define IOPMP_SDIO0_SIZE           0x1000
+#define IOPMP_SDIO1_BASE           0xFFFC02a000UL
+#define IOPMP_SDIO1_SIZE            0x1000
+
+
+#define CONFIG_MAILBOX_CHANNEL_NUM  4U
+
+#define CONFIG_RTC_FAMILY_D
+
+#define CONFIG_DW_AXI_DMA_8CH_NUM_CHANNELS
+#define SOC_OM_ADDRBASE             0xFFEF018010
+#define SOC_OSC_BOOT_ADDRBASE       0xFFEF010314
+#define SOC_INTERNAL_SRAM_BASEADDR  0xFFE0000000
+#define SOC_INTERNAL_SRAM_SIZE      (1536 * 1024)   //1.5MB
+#define SOC_BROM_BASE_ADDRESS       0xFFFFD00000
+
+#define CONFIG_OTP_BASE_ADDR        0   // FIXME:
+#define CONFIG_OTP_BANK_SIZE        (8 * 1024)
+
+#define AO_SYS_REG_BASE             0xFFFFF48000UL
+#define AO_SYS_REG_SIZE             0x2000U
+
+#define SPIFLASH_BASE               (0x18000000UL)
+
+#define bootsel() \
+    ({ unsigned int __v = (*(volatile uint32_t *) (0xFFEF018010)); __v&0x7; })
+
+#define osc_bootsel() \
+    ({ unsigned int __v = (*(volatile uint32_t *) (0xFFEF010314)); __v&0x1; })
+
+#define FULLMASK_APTEECLK_ADDRBASE	0xFFFF011000
+#define FULLMASK_TEE_PLL_CFG0_OFF	0x60
+#define FULLMASK_TEE_PLL_CFG1_OFF	0x64
+#define FULLMASK_TEE_PLL_CFG3_OFF	0x6c
+#define FULLMASK_PLL_STS_OFF		0x80
+#define FULLMASK_TEESYS_CLK_TEECFG_OFF	0x1cc
+#define FULLMASK_TEESYS_HCLK_SWITCH_SEL	(0x2000U)
+#define FULLMASK_PLL_STS_TEE_PLL_LOCK	(0x400U)
+#define FULLMASK_TEE_PLL_LOCK_TIMEOUT	(0x3U) //unit: 10us
+#define FULLMASK_TEE_PLL_CFG3_CALLOCK_CNT_EN	(0x400)
+#define FULLMASK_TEE_PLL_CFG3_DSKEWCAL_PULSE	(0x200)
+#define FULLMASK_TEE_PLL_CFG3_DSKEWCAL_SWEN	(0x100)
+#define FULLMASK_TEE_PLL_CFG3_DSKEWCAL_RDY	(0x80)
+#define FULLMASK_TEE_PLL_DSKEWCAL_RDY_TIMEOUT	(200U) //unit: 10us
+#define FULLMASK_TEE_PLL_CFG1_PWR_DOWN		0x21000000
+#define FULLMASK_TEE_PLL_CFG1_PWR_ON		0x01000000
+#define FULLMASK_TEE_PLL_CFG0_792M		0x01306301
+
+#define FULLMASK_AONSYSREG_ADDRBASE		0xFFFFF48000
+#define FULLMASK_AONSYSREG_PLL_DSKEW_LOCK_OFF	0x22c
+#define FULLMASK_AONSYSREG_PLL_DSKEW_BYPASS	(0x2U)
+#define FULLMASK_AONSYSREG_RC_READY_OFF		0x7c
+#define FULLMASK_AONSYSREG_RC_READY		(0x1U)
+#define FULLMASK_RC_READY_TIMEOUT		(2U)  //unit: 10us
+
+#define FULLMASK_AONSYSREG_RC_OFF	    0x74
+#define FULLMASK_AONSYSREG_RC_VAL_POS   0
+#define FULLMASK_AONSYSREG_RC_VAL_MSK   0xFFF
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* _SOC_H_ */
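The bootsel()/osc_bootsel() helpers defined above read SOC_OM_ADDRBASE and SOC_OSC_BOOT_ADDRBASE directly; a minimal sketch of how a caller might consume them (how the raw values map to concrete boot media is board policy and not defined in this header):

#include <stdio.h>
#include "soc.h"   /* the header added above */

static void report_boot_mode(void)
{
    unsigned int om  = bootsel();      /* low 3 bits of the boot-mode register */
    unsigned int osc = osc_bootsel();  /* low bit of the OSC boot register     */

    printf("boot mode: 0x%x, osc boot: %u\n", om, osc);
}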

+ 211 - 0
lib/sec_library/include/sys_clk.h

@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2017-2020 Alibaba Group Holding Limited
+ */
+
+/******************************************************************************
+ * @file     sys_clk.h
+ * @brief    header file for setting system frequency.
+ * @version  V1.0
+ * @date     9. April 2020
+ ******************************************************************************/
+#ifndef _SYS_CLK_H_
+#define _SYS_CLK_H_
+
+#include <stdint.h>
+#ifdef SEC_LIB_VERSION
+#include "drv/common.h"
+#else
+#include "common.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+    IHS_CLK       = 0U,        /* internal high speed clock */
+    EHS_CLK,                   /* external high speed clock */
+    ILS_CLK,                   /* internal low  speed clock */
+    ELS_CLK,                   /* external low  speed clock */
+    PLL_CLK                    /* PLL clock */
+} clk_src_t;
+
+typedef enum {
+    CPU_300MHZ    = 300000000U,
+    CPU_288MHZ    = 288000000U,
+    CPU_276MHZ    = 276000000U,
+    CPU_270MHZ    = 270000000U,
+    CPU_264MHZ    = 264000000U,
+    CPU_252MHZ    = 252000000U,
+    CPU_245_76MHZ = 245760000U,
+    CPU_240MHZ    = 240000000U,
+    CPU_228MHZ    = 228000000U,
+    CPU_216MHZ    = 216000000U,
+    CPU_204MHZ    = 204000000U,
+    CPU_192MHZ    = 192000000U,
+    CPU_180MHZ    = 180000000U,
+    CPU_168MHZ    = 168000000U,
+    CPU_156MHZ    = 156000000U,
+    CPU_144MHZ    = 144000000U,
+    CPU_135MHZ    = 135000000U,
+    CPU_132MHZ    = 132000000U,
+    CPU_120MHZ    = 120000000U,
+    CPU_108MHZ    = 108000000U,
+    CPU_96MHZ     = 96000000U,
+    CPU_84MHZ     = 84000000U,
+    CPU_72MHZ     = 72000000U,
+    CPU_60MHZ     = 60000000U,
+    CPU_48MHZ     = 48000000U,
+    CPU_36MHZ     = 36000000U,
+    CPU_30MHZ     = 30000000U,
+    CPU_24MHZ     = 24000000U,
+    CPU_20MHZ     = 20000000U,
+    CPU_10MHZ     = 10000000U,
+} sys_freq_t;
+
+
+/* pllclkout : ( pllclkin / 2)*( FN + Frac/4096 ) */
+typedef struct {
+
+    uint32_t pll_is_used;          /* pll is used */
+
+    uint32_t pll_source;           /* select pll input source clock */
+
+    uint32_t pll_src_clk_divider;  /* ratio between pll_srcclk clock and pll_clkin clock */
+
+    uint32_t fn;                   /* integer value of frequency division */
+
+    uint32_t frac;                 /* decimal value of frequency division */
+
+} pll_config_t;
+
+
+typedef struct {
+    uint32_t system_clk;            /* system clock */
+
+    // pll_config_t pll_config;        /* pll config struct */
+
+    uint32_t sys_clk_source;        /* select sysclk source clock */
+
+    uint32_t rtc_clk_source;        /* select rtcclk source clock */
+
+    uint32_t cpu_clk_divider;       /* ratio between fs_mclk clock and mclk clock */
+
+    uint32_t sys_clk_divider;       /* ratio between fs_mclk clock and mclk clock */
+
+    uint32_t ahb_clk_divider;      /* ratio between mclk clock and ahb clock */
+
+    uint32_t apb_clk_divider;      /* ratio between mclk clock and apb clock */
+
+    uint32_t uart_clk_divider;      /* ratio between mclk clock and uart clock */
+
+    uint32_t audio_clk_divider;      /* ratio between mclk clock and audio clock */
+
+    uint32_t vad_clk_divider;      /* ratio between mclk clock and vad clock */
+
+} system_clk_config_t;
+
+typedef enum {
+    CLK_DIV1 = 0U,
+    CLK_DIV2,
+    CLK_DIV3,
+    CLK_DIV4,
+    CLK_DIV5,
+    CLK_DIV6,
+    CLK_DIV7,
+    CLK_DIV8,
+    CLK_DIV9,
+    CLK_DIV10,
+    CLK_DIV11,
+    CLK_DIV12,
+    CLK_DIV13,
+    CLK_DIV14,
+    CLK_DIV15,
+    CLK_DIV16
+} apb_div_t;
+
+typedef enum {
+    PLL_FN_18 = 0U,
+    PLL_FN_19,
+    PLL_FN_20,
+    PLL_FN_21,
+    PLL_FN_22,
+    PLL_FN_23,
+    PLL_FN_24,
+    PLL_FN_25,
+    PLL_FN_26,
+    PLL_FN_27,
+    PLL_FN_28,
+    PLL_FN_29,
+    PLL_FN_30,
+    PLL_FN_31,
+    PLL_FN_32,
+    PLL_FN_33,
+    PLL_FN_34,
+    PLL_FN_35,
+    PLL_FN_36,
+    PLL_FN_37,
+    PLL_FN_38,
+    PLL_FN_39,
+    PLL_FN_40,
+    PLL_FN_41,
+    PLL_FN_42,
+    PLL_FN_43,
+    PLL_FN_44,
+    PLL_FN_45,
+    PLL_FN_46,
+    PLL_FN_47,
+    PLL_FN_48,
+    PLL_FN_49
+} pll_fn_t;
+
+typedef enum {
+    TIM0_CLK = 0U,
+    TIM1_CLK,
+    RTC0_CLK,
+    WDT_CLK,
+    SPI0_CLK,
+    UART0_CLK,
+    IIC0_CLK,
+    PWM_CLK,
+    QSPI0_CLK,
+    PWMR_CLK,
+    EFUSE_CLK,
+    I2S0_CLK,
+    I2S1_CLK,
+    GPIO0_CLK,
+
+    TIM2_CLK = 32U,
+    TIM3_CLK,
+    SPI1_CLK,
+    UART1_CLK,
+    I2S567_CLK,
+    ADC_CLK,
+    ETB_CLK,
+    I2S2_CLK,
+    I2S3_CLK,
+    IOC_CLK,
+    CODEC_CLK
+} clk_module_t;
+
+
+/**
+  \brief       Set the system clock according to the parameter
+  \param[in]   config    system clock config.
+  \return      error code
+*/
+csi_error_t soc_sysclk_config(system_clk_config_t *config);
+
+/**
+  \brief       Set iic reset
+  \param[in]   idx    iic idx.
+  \return      Null
+*/
+void soc_reset_iic(uint32_t idx);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_CLK_H_ */
+
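A minimal sketch of driving soc_sysclk_config() with the types declared above; the source and divider values are placeholders, not a validated clock tree for this SoC:

#include "sys_clk.h"   /* the header added above */

static csi_error_t clk_setup_sketch(void)
{
    system_clk_config_t cfg = {
        .system_clk        = CPU_240MHZ, /* target system clock        */
        .sys_clk_source    = PLL_CLK,    /* clk_src_t selector         */
        .rtc_clk_source    = ELS_CLK,    /* 32.768 kHz external source */
        .cpu_clk_divider   = CLK_DIV1,
        .sys_clk_divider   = CLK_DIV1,
        .ahb_clk_divider   = CLK_DIV2,
        .apb_clk_divider   = CLK_DIV2,
        .uart_clk_divider  = CLK_DIV1,
        .audio_clk_divider = CLK_DIV1,
        .vad_clk_divider   = CLK_DIV1,
    };

    return soc_sysclk_config(&cfg);
}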

BIN
lib/sec_library/libsec_library.a